text stringlengths 8 6.05M |
|---|
# Generated by Django 2.0 on 2019-10-30 16:30
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the created_date, last_seen, name, nick and status fields
    from the ``user`` model of the ``user_profile`` app."""

    # Must be applied after the app's initial migration.
    dependencies = [
        ('user_profile', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='created_date',
        ),
        migrations.RemoveField(
            model_name='user',
            name='last_seen',
        ),
        migrations.RemoveField(
            model_name='user',
            name='name',
        ),
        migrations.RemoveField(
            model_name='user',
            name='nick',
        ),
        migrations.RemoveField(
            model_name='user',
            name='status',
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Updated 16 Dec 2017
10 sheep eat away at their environments
Greedy sheep are sick after 100 units
The model stops running when all sheep are at least half full
@author: amandaf
"""
import matplotlib.pyplot
import matplotlib.animation
import csv
import agentframework
import argparse
import random
# Set up the model parameters from the command line.
parser = argparse.ArgumentParser(description='Define Agent parameters')
parser.add_argument('num_of_agents', type=int,
                    help='Number of agents')
parser.add_argument('num_of_iterations', type=int,
                    help='Number of iterations')
parser.add_argument('neighbourhood', type=int,
                    help='Size of neighbourhood')
# argparse already exits with a usage message on invalid input.  The old
# bare "except:" around parse_args swallowed that SystemExit, printed a
# message and carried on, which then crashed with a NameError because
# num_of_agents etc. were never assigned.
args = parser.parse_args()
num_of_agents = args.num_of_agents
num_of_iterations = args.num_of_iterations
neighbourhood = args.neighbourhood

agents = []

# Set up the figure (7 x 6 avoids a Qt "unable to set geometry" warning).
fig = matplotlib.pyplot.figure(figsize=(7, 6))
ax = fig.add_axes([0, 0, 1, 1])

# Read the environment raster from in.txt (one CSV row of numbers per
# line).  "with" guarantees the file is closed even if parsing fails;
# the old explicit f.close() leaked the handle on error.
environment = []
with open('in.txt', newline='') as f:
    reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
    for row in reader:
        environment.append(list(row))

# Side length of the (assumed square) environment grid.
maxEnv = len(environment)

# Global stopping variable checked by the animation generator.
carry_on = True

# Make the agents.
for i in range(num_of_agents):
    agents.append(agentframework.Agent(environment, agents, maxEnv))
# Move the agents.
def update(frame_number):
    """Advance the model by one animation frame and redraw it.

    Each frame runs ``num_of_iterations`` rounds in which every agent
    moves, eats, shares with its neighbourhood, and is sick if it has
    hoarded more than 100 units.  Clears the global ``carry_on`` flag
    once every sheep has stored more than 50 units, which stops the
    generator driving the animation.
    """
    fig.clear()
    global carry_on
    # Fix the axis limits so the plot does not rescale between frames.
    matplotlib.pyplot.xlim(0, maxEnv - 1)
    matplotlib.pyplot.ylim(0, maxEnv - 1)
    matplotlib.pyplot.imshow(environment)
    matplotlib.pyplot.title("Iteration:" + str(frame_number) + "/" + str(num_of_iterations))
    # Randomise processing order so no agent is systematically first.
    random.shuffle(agents)
    # Make the sheep move, eat and be sick.
    for j in range(num_of_iterations):
        for agent in agents:
            agent.move()
            agent.eat()
            agent.share_with_neighbours(neighbourhood)
            if agent.store > 100:
                # Greedy agents are sick if they eat more than 100 units.
                agent.sick()
    # Stop only when *every* sheep is at least half full.  The original
    # loop overwrote carry_on for each agent in turn, so only the last
    # agent checked actually decided whether the model stopped.
    carry_on = not all(agent.store > 50 for agent in agents)
    if carry_on == False:
        print("All sheep are at least half full")
        matplotlib.pyplot.title("Model finished! All sheep are at least half full.")
    for agent in agents:
        matplotlib.pyplot.scatter(agent.x, agent.y)
def gen_function(b = [0]):
    """Yield frame numbers until the iteration cap is hit or carry_on clears.

    ``b`` is unused; it is kept only so the signature stays
    backward-compatible with any existing callers.
    """
    a = 0
    global carry_on
    global num_of_iterations
    # 'and' short-circuits; the original's bitwise '&' happens to work on
    # bools but always evaluates both operands.
    while a < num_of_iterations and carry_on:
        yield a  # Return control and wait for the next call.
        a = a + 1
# Animate and display the scenario (the reference must be kept alive
# for the animation to run).
animation = matplotlib.animation.FuncAnimation(
    fig, update, frames=gen_function, repeat=False)
matplotlib.pyplot.show()

# Write the (now partly eaten) environment out as CSV.
with open('environment.txt', 'w', newline='') as env_file:
    csv.writer(env_file).writerows(environment)

# Append each agent's final store to the running tally file.
with open('store.txt', 'a') as store_file:
    for agent in agents:
        store_file.write(str(agent.store) + "\n")
|
#
# Validator for a pScheduler test and its result.
#
#
# Development Order #3: Test specification and result validation
#
# The functions in this file determine whether or not specifications
# and results for this test are valid.
#
from pscheduler import json_validate_from_standard_template
#
# Test Specification
#
# NOTE: A large dictionary of existing, commonly-used datatypes used
# throughout pScheduler is defined in
# pscheduler/python-pscheduler/pscheduler/pscheduler/jsonval.py.
# Please use those where possible.
# JSON Schema for the test specification, keyed by spec version.
SPEC_SCHEMA = {
    "local": {
        # Define any local types used in the spec here
        "TestImplementation": {
            "type": "string",
            "enum": [ "system", "api" ]
        },
    },

    "versions": {

        # Initial version of the specification
        "1": {
            "type": "object",
            # schema, host, host-node, and timeout are standard and
            # should be included in most single-participant tests.
            "properties": {
                # The schema should always be constrained to a single
                # value per version.
                "schema": { "type": "integer", "enum": [ 1 ] },
                "host": { "$ref": "#/pScheduler/Host" },
                "host-node": { "$ref": "#/pScheduler/Host" },
                "duration": { "$ref": "#/pScheduler/Duration" },
                "timeout": { "$ref": "#/pScheduler/Duration" },
                "implementation": { "$ref": "#/local/TestImplementation" },
            },
            # If listed here, these parameters MUST be in the test spec.
            "required": [
                "implementation",
            ],
            # Reject any properties not listed above.  This should
            # ALWAYS be False.
            "additionalProperties": False
        },

        # Second and later versions of the specification
        # "2": {
        #     "type": "object",
        #     "properties": {
        #         "schema": { "type": "integer", "enum": [ 2 ] },
        #         ...
        #     },
        #     "required": [
        #         "schema",
        #         ...
        #     ],
        #     "additionalProperties": False
        # },

    }
}
def spec_is_valid(json):
    """Validate a test specification against SPEC_SCHEMA.

    Returns the (valid, errors) pair produced by the standard-template
    validator.
    """
    valid, errors = json_validate_from_standard_template(json, SPEC_SCHEMA)
    if valid:
        # Semantic relationships that JSON Schema cannot express (e.g.
        # parameter X must not be less than 5 when parameter Y is odd)
        # would be evaluated here, returning (False, "...message...")
        # on a violation.
        pass
    return (valid, errors)
#
# Test Result
#
# JSON Schema for the test result, keyed by result version.
RESULT_SCHEMA = {
    "local": {
        # Define any local types here.
    },
    "versions": {
        "1": {
            "type": "object",
            "properties": {
                # The schema is pinned to a single value per version.
                "schema": { "type": "integer", "enum": [ 1 ] },
                "succeeded": { "$ref": "#/pScheduler/Boolean" },
                "time": { "$ref": "#/pScheduler/Duration" },
            },
            # Both result fields are mandatory.
            "required": [
                "succeeded",
                "time",
            ],
            "additionalProperties": False
        }
    }
}
def result_is_valid(json):
    """Validate a test result against RESULT_SCHEMA, returning (valid, errors)."""
    return json_validate_from_standard_template(json, RESULT_SCHEMA)
|
from onegov.activity import Period, PeriodCollection
from onegov.core.security import Secret
from onegov.feriennet import _, FeriennetApp
from onegov.feriennet.forms import PeriodForm
from onegov.feriennet.layout import PeriodCollectionLayout
from onegov.feriennet.layout import PeriodFormLayout
from onegov.core.elements import Link, Confirm, Intercooler, Block
from onegov.feriennet.models import PeriodMessage
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
@FeriennetApp.html(
    model=PeriodCollection,
    template='periods.pt',
    permission=Secret)
def view_periods(self, request):
    """List all periods, newest execution first, with management links."""
    layout = PeriodCollectionLayout(self, request)

    def links(period):
        # Yields the action links rendered next to the given period.
        if period.active:
            # An active period can only be deactivated.
            yield Link(
                text=_("Deactivate"),
                url=layout.csrf_protected_url(
                    request.link(period, name='deactivate')
                ),
                traits=(
                    Confirm(
                        _(
                            'Do you really want to deactivate "${title}"?',
                            mapping={'title': period.title}
                        ),
                        _("This will hide all associated occasions"),
                        _("Deactivate Period"),
                        _("Cancel")
                    ),
                    Intercooler(
                        request_method="POST",
                        redirect_after=request.link(self)
                    ),
                )
            )
        elif not period.archived:
            # An inactive, non-archived period can be activated.
            yield Link(
                text=_("Activate"),
                url=layout.csrf_protected_url(
                    request.link(period, name='activate')
                ),
                traits=(
                    Confirm(
                        _(
                            'Do you really want to activate "${title}"?',
                            mapping={'title': period.title}
                        ),
                        _(
                            "This will deactivate the currently active "
                            "period. All associated occasions will be made "
                            "public"
                        ),
                        _("Activate Period"),
                        _("Cancel")
                    ),
                    Intercooler(
                        request_method="POST",
                        redirect_after=request.link(self)
                    ),
                )
            )

        yield Link(_("Edit"), request.link(period, 'edit'))

        if not period.archived:
            # Parses as (confirmed and finalized) or (not finalizable):
            # archivable once fully wrapped up, or at any time when the
            # period never needs finalizing.
            if period.confirmed and period.finalized or not period.finalizable:
                yield Link(
                    text=_("Archive"),
                    url=layout.csrf_protected_url(
                        request.link(period, name='archive')
                    ),
                    traits=(
                        Confirm(
                            _(
                                'Do you really want to archive "${title}"?',
                                mapping={'title': period.title}
                            ),
                            _(
                                "This will archive all activities which do "
                                "not already have an occasion in a future "
                                "period. To publish archived activities again "
                                "a new publication request needs to be filed."
                            ),
                            _("Archive Period"),
                            _("Cancel")
                        ),
                        Intercooler(
                            request_method="POST",
                            redirect_after=request.link(self)
                        ),
                    )
                )
            else:
                # Not archivable yet: show a blocking explanation instead.
                yield Link(
                    text=_("Archive"),
                    url='#',
                    traits=(
                        Block(
                            _(
                                '"${title}" cannot be archived yet',
                                mapping={'title': period.title}
                            ),
                            _(
                                "A period can only be archived once the "
                                "bookings have been made and the bills have "
                                "been compiled."
                            ),
                            _("Cancel")
                        )
                    )
                )

        yield Link(
            text=_("Delete"),
            url=layout.csrf_protected_url(request.link(period)),
            traits=(
                Confirm(
                    _(
                        'Do you really want to delete "${title}"?',
                        mapping={'title': period.title}
                    ),
                    _("This cannot be undone."),
                    _("Delete Period"),
                    _("Cancel")
                ),
                Intercooler(
                    request_method="DELETE",
                    redirect_after=request.link(self)
                ),
            )
        )

    return {
        'layout': layout,
        'periods': self.query().order_by(desc(Period.execution_start)).all(),
        'title': _("Manage Periods"),
        'links': links
    }
@FeriennetApp.form(
    model=PeriodCollection,
    name='new',
    form=PeriodForm,
    template='period_form.pt',
    permission=Secret)
def new_period(self, request, form):
    """Render and handle the form for creating a new period."""
    if form.submitted(request):
        # New periods start out deactivated; activation is a separate,
        # explicit step in the period list.
        period = self.add(
            title=form.title.data,
            prebooking=form.prebooking,
            booking=form.booking,
            execution=form.execution,
            minutes_between=form.minutes_between.data,
            confirmable=form.confirmable.data,
            finalizable=form.finalizable.data,
            active=False)
        form.populate_obj(period)
        request.success(_("The period was added successfully"))
        return request.redirect(request.link(self))

    layout = PeriodFormLayout(self, request, _("New Period"))
    return {
        'layout': layout,
        'form': form,
        'title': _("New Period")
    }
@FeriennetApp.form(
    model=Period,
    name='edit',
    form=PeriodForm,
    template='period_form.pt',
    permission=Secret)
def edit_period(self, request, form):
    """Render and handle the form for editing an existing period."""
    if form.submitted(request):
        form.populate_obj(self)
        request.success(_("Your changes were saved"))
        return request.redirect(request.class_link(PeriodCollection))

    if not request.POST:
        # Initial GET: pre-fill the form from the stored period.
        form.process(obj=self)

    return {
        'layout': PeriodFormLayout(self, request, self.title),
        'form': form,
        'title': self.title
    }
@FeriennetApp.view(
    model=Period,
    request_method='DELETE',
    permission=Secret)
def delete_period(self, request):
    """Delete the period unless it is still referenced elsewhere."""
    request.assert_valid_csrf_token()

    periods = PeriodCollection(request.session)
    try:
        periods.delete(self)
    except IntegrityError:
        # Foreign keys still point at this period.
        request.alert(
            _("The period could not be deleted as it is still in use"))
    else:
        PeriodMessage.create(self, request, 'deleted')
        request.success(
            _("The period was deleted successfully"))

    @request.after
    def redirect_intercooler(response):
        # Tell Intercooler where to navigate after the DELETE completes.
        target = request.class_link(PeriodCollection)
        response.headers.add('X-IC-Redirect', target)
@FeriennetApp.view(
    model=Period,
    request_method='POST',
    name='activate',
    permission=Secret)
def activate_period(self, request):
    """Activate the period and redirect back to the period list."""
    request.assert_valid_csrf_token()

    self.activate()
    PeriodMessage.create(self, request, 'activated')
    request.success(_("The period was activated successfully"))

    @request.after
    def redirect_intercooler(response):
        # Tell Intercooler where to navigate after the POST completes.
        target = request.class_link(PeriodCollection)
        response.headers.add('X-IC-Redirect', target)
@FeriennetApp.view(
    model=Period,
    request_method='POST',
    name='deactivate',
    permission=Secret)
def deactivate_period(self, request):
    """Deactivate the period and redirect back to the period list."""
    request.assert_valid_csrf_token()

    self.deactivate()
    PeriodMessage.create(self, request, 'deactivated')
    request.success(_("The period was deactivated successfully"))

    @request.after
    def redirect_intercooler(response):
        # Tell Intercooler where to navigate after the POST completes.
        target = request.class_link(PeriodCollection)
        response.headers.add('X-IC-Redirect', target)
@FeriennetApp.view(
    model=Period,
    request_method='POST',
    name='archive',
    permission=Secret)
def archive_period(self, request):
    """Archive the period and redirect back to the period list."""
    request.assert_valid_csrf_token()

    self.archive()
    PeriodMessage.create(self, request, 'archived')
    request.success(_("The period was archived successfully"))

    @request.after
    def redirect_intercooler(response):
        # Tell Intercooler where to navigate after the POST completes.
        target = request.class_link(PeriodCollection)
        response.headers.add('X-IC-Redirect', target)
|
import numpy, curses, logging
from heapq import *
class astar(object):
    '''A* shortest-path search over a rectangular region of a curses screen.

    get_shortest_path() returns the next step on the shortest path from
    start to goal.  Note: coordinates are (row, column) pairs.
    '''

    def __init__(self, screen, start, goal, map_dims, space, enemy_symbol, player_symbol):
        self._screen = screen
        self._start = start
        self._goal = goal
        self._map_dims = map_dims
        self._space = space
        self._enemy_symbol = enemy_symbol
        self._player_symbol = player_symbol

    def get_shortest_path(self):
        """Return the next [y, x] step towards the goal, or None if unreachable."""
        yrange, xrange = self._map_dims
        grid = self._make_array(self._screen, yrange, xrange, self._space,
                                self._enemy_symbol, self._player_symbol)
        # Translate screen coordinates into grid-local coordinates.
        new_start = (self._start[0] - yrange[0], self._start[1] - xrange[0])
        new_goal = (self._goal[0] - yrange[0], self._goal[1] - xrange[0])
        path = self._astar(grid, new_start, new_goal)
        if path:
            # _astar returns the path goal-first, so the last entry is the
            # first move away from the start.
            next_step = path[-1]
            return [next_step[0] + yrange[0], next_step[1] + xrange[0]]
        return None

    def _heuristic(self, a, b):
        '''Squared Euclidean distance between cells a and b.'''
        return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2

    def _astar(self, array, start, goal):
        '''Return the path from goal back towards start as a list of tuples
        (goal first, start excluded), or False if no path exists.'''
        neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]  # 4-connected moves
        close_set = set()              # cells already expanded
        came_from = {}
        gscore = {start: 0}
        fscore = {start: self._heuristic(start, goal)}
        oheap = []
        heappush(oheap, (fscore[start], start))
        # Mirror of the heap's members: the original rebuilt a list of
        # queued nodes for every neighbour test, an O(n) scan per edge.
        queued = {start}
        while oheap:
            current = heappop(oheap)[1]    # node with the smallest fscore
            queued.discard(current)
            if current == goal:
                # Walk the came_from chain to reconstruct the path.
                data = []
                while current in came_from:
                    data.append(current)
                    current = came_from[current]
                return data
            close_set.add(current)
            for di, dj in neighbors:
                neighbor = current[0] + di, current[1] + dj
                tentative_g_score = gscore[current] + self._heuristic(current, neighbor)
                # Skip cells outside the grid or blocked by a wall (1).
                if not (0 <= neighbor[0] < array.shape[0]
                        and 0 <= neighbor[1] < array.shape[1]):
                    continue
                if array[neighbor[0]][neighbor[1]] == 1:
                    continue
                if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):
                    continue
                if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in queued:
                    came_from[neighbor] = current
                    gscore[neighbor] = tentative_g_score
                    fscore[neighbor] = tentative_g_score + self._heuristic(neighbor, goal)
                    heappush(oheap, (fscore[neighbor], neighbor))
                    queued.add(neighbor)
        return False  # no path found

    def _make_array(self, screen, yrange, xrange, space, enemy_symbol, player_symbol):
        """Build a 0/1 occupancy grid from the screen; 1 marks a wall.

        Open space, the enemy and the player all count as walkable (0);
        anything else on screen is treated as an obstacle (1).
        """
        # 'grid' rather than 'map': the original shadowed the builtin.
        grid = []
        for row in range(int(yrange[0]), int(yrange[1]), 1):
            current_row = []
            for column in range(int(xrange[0]), int(xrange[1]), 1):
                cell = screen.inch(row, column)
                if cell == space or cell == enemy_symbol or cell == player_symbol:
                    current_row.append(0)
                else:
                    current_row.append(1)
            grid.append(current_row)
        return numpy.array(grid)
|
"""
This module defines some tools used for reading/writing XYZ files.
"""
from futile.Utils import write as safe_print
class XYZReader():
    """
    Reads atomic positions from an xyz file.

    Instances behave like standard file objects: they can be used in
    ``with`` statements and iterated with ``next`` to step over atoms.

    Attributes:
        closed (bool): check if a file is open or not.
        units (str): the units that the xyz file was in.
        natoms (int): the number of atoms in the xyz file.
        cell (list): a list of floats describing the cell.

    Args:
        filename (str): the file to read from. You can also specify a
            molecule that might be in the database.
    """

    def __init__(self, filename):
        self.filename = filename
        self.natoms = None
        self.cell = None
        self.atoms_positions = []

    def open(self):
        self.__enter__()

    def __enter__(self):
        from os.path import join, abspath, dirname
        try:
            self._handle = open(self.filename, "r")
        except IOError:
            # Not a plain file: fall back to the bundled molecule database.
            db_dir = join(dirname(__file__), "Database", "XYZs")
            db_file = abspath(join(db_dir, self.filename + ".xyz"))
            self._handle = open(db_file, "r")
        # First line: atom count, optionally followed by the units.
        header = next(self._handle).split()
        self.natoms = int(header[0])
        self.units = header[1] if len(header) > 1 else "angstroem"
        # Second line: boundary conditions / cell specification.
        bc = next(self._handle).split()
        if not bc or bc[0] == "#" or bc[0] == "free":
            self.cell = None
        else:
            self.cell = [float(x) for x in bc[1:]]
        return self

    def next(self):
        return self.__next__()

    def __next__(self):
        # Read first, then check the count: the line after the last atom
        # is consumed before StopIteration is raised.
        line = next(self._handle)
        if self.natoms == len(self.atoms_positions):
            raise StopIteration
        fields = line.split()
        record = {'sym': fields[0], 'r': fields[1:4], "units": self.units}
        self.atoms_positions.append(record)
        return record

    def __iter__(self):
        return self

    def close(self):
        if self.natoms != len(self.atoms_positions):
            raise IOError('The number of atoms is not consistent with the'
                          ' number of lines')
        self.__exit__()

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        self._handle.close()

    @property
    def closed(self):
        return self._handle.closed
class XYZWriter():
    """
    Writes xyz files atom by atom.

    Instances behave like standard file objects: they can be used in
    ``with`` statements and written to with :meth:`write`.

    Args:
        filename (str): the file to write to.
        natoms (int): how many atoms we will write.
        units (str): the units of the file. Defaults to angstroem.
        cell (list): the unit cell.
    """

    def __init__(self, filename, natoms, units="angstroem", cell=None):
        self.filename = filename
        self.natoms = natoms
        self.units = units
        self.cell = cell

    def open(self):
        self.__enter__()

    def __enter__(self):
        self._handle = open(self.filename, "w")
        # Header line: atom count and units.
        self._handle.write(str(self.natoms) + " " + self.units + "\n")
        # Boundary-condition line derived from the cell.
        self._handle.write(_xyz_bc_spec(self.cell) + "\n")
        return self

    def write(self, atomdict):
        """
        Write an atom to the file.

        Args:
            atomdict (dict): a dictionary describing the atom.
        """
        from BigDFT.Atom import Atom
        atom = Atom(atomdict)
        position = atom.get_position(self.units)
        coords = " ".join([str(x) for x in position])
        self._handle.write(atom.sym + " " + coords + "\n")

    def close(self):
        self.__exit__()

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        self._handle.close()

    @property
    def closed(self):
        return self._handle.closed
def _xyz_bc_spec(cell):
    """
    Defines the specification for expressing the Boundary Conditions starting
    from a cell vector.

    Args:
        cell (list): array of the (orthorhombic) cell. Should be 0.0 on
            directions with free BC. If None is given, the BC are assumed to
            be Free.

    Return:
        (str): Line of the xyz file specifying the bc
    """
    if cell is None:
        return "free"
    elif cell[1] == 0.0 and cell[2] != 0.0:
        return "surface " + str(cell[0]) + " 0.0 " + str(cell[2]) + " "
    elif cell[1] == 0.0 and cell[2] == 0.0:
        # str() was missing here: concatenating the raw float raised a
        # TypeError whenever a wire cell was written.
        return "wire 0.0 0.0 " + str(cell[2]) + " "
    else:
        return "periodic " + str(cell[0]) + " " + str(cell[1]) + \
            " " + str(cell[2]) + " "
if __name__ == "__main__":
    """Test the XYZ Module"""
    from os.path import join
    from os import system

    for file in ["Si4.xyz", "SiO.xyz"]:
        safe_print("First let's try reading an XYZ file.")
        atom_list = []
        with XYZReader(join("Database", "XYZs", file)) as reader:
            safe_print(reader.closed)
            for at in reader:
                atom_list.append(at)
        safe_print(reader.closed)
        safe_print(atom_list)
        safe_print()

        safe_print("Now let's try writing an XYZ file.")
        safe_print()
        # reader's attributes (cell, units) stay readable after the with
        # block has closed the underlying file.
        with XYZWriter("test.xyz", len(atom_list), cell=reader.cell,
                       units=reader.units) as writer:
            safe_print(writer.closed)
            for at in atom_list:
                writer.write(at)
        safe_print(writer.closed)
        system("cat test.xyz")
        safe_print()

        safe_print("Using the explicit open and close.")
        reader = XYZReader("test.xyz")
        reader.open()
        print(next(reader))
        # NOTE(review): XYZReader.close() raises IOError when fewer than
        # natoms atoms have been read; only one atom is read here, so this
        # raises for multi-atom files -- confirm intended.
        reader.close()
|
import praw
from pprint import pprint
from urllib import urlretrieve
import pickle
# Create the Reddit API client (Python 2 era praw interface).
r = praw.Reddit(user_agent='/r/museum scraper by /u/UnclePolycarp')

# IDs of submissions downloaded on previous runs.
# NOTE(review): the file handle passed to pickle.load is never closed.
already_grabbed = pickle.load(open("save.p", "rb"))

# Pull the most recent hot submissions from /r/museum.
subreddit = r.get_subreddit('museum')
for submission in subreddit.get_hot(limit=10):
    if submission.id in already_grabbed:
        # Stop at the first already-seen submission; everything below it
        # in the hot listing is presumed to have been grabbed before --
        # confirm that ordering assumption holds.
        break
    else:
        # Download the linked image, naming the file after the title.
        # NOTE(review): titles containing characters invalid in filenames
        # will make urlretrieve fail -- no sanitisation is done here.
        url = submission.url
        title = submission.title
        response = urlretrieve(url, title + '.jpg')
        already_grabbed.append(submission.id)

# Persist the updated list of downloaded submission IDs.
pickle.dump(already_grabbed, open("save.p", "wb"))
|
"""
This file defines class BaseReward.
@author: Clemens Rosenbaum :: cgbr@cs.umass.edu
@created: 6/8/18
"""
from collections import deque
import abc
import torch
class PerActionBaseReward(object, metaclass=abc.ABCMeta):
    """
    Base class for per-action rewards.

    Keeps a bounded history of (distribution, action) pairs that
    subclasses can use when computing a reward.
    """

    def __init__(self, history_window=256, *args, **kwargs):
        # Bounded deques drop their oldest entries automatically.
        self._hist_len = history_window
        self._dists = deque(maxlen=history_window)
        self._actions = deque(maxlen=history_window)
        self._precomp = None

    def register(self, dist, action):
        """Record a detached copy of one distribution/action pair."""
        self._dists.append(dist.detach())
        self._actions.append(action.detach())

    def clear(self):
        """Drop the recorded history and any precomputed value."""
        self._dists = deque(maxlen=self._hist_len)
        self._actions = deque(maxlen=self._hist_len)
        self._precomp = None

    def get_reward(self, dist, action):
        """Return the base (zero) reward on the action's device."""
        return torch.FloatTensor([0.]).to(action.device)
|
p = ['butt', 'm', 'x', 'o', 'p']
y = 'x'


def change_x(arr, x):
    """Replace every element of arr equal to x with a marker string, in place.

    Returns the mutated list.  The original compared elements against the
    module-level global ``y`` instead of the ``x`` parameter, so the
    argument was silently ignored.
    """
    for i in range(len(arr)):
        if arr[i] == x:
            arr[i] = "we changed X but we cant pop"
    return arr


# Parenthesised print works under both Python 2 and Python 3.
print(change_x(p, y))
import fileinput
import json
import os
import shutil
import subprocess
import pandas as pd
import numpy as np
from jupyterlab_server import add_handlers
from itertools import groupby
from random import randrange
import networkx as nx
import collections
import colourmap as cm
import functools as ft
from community import community_louvain
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# using local, modified version of d3graph, hope PRs will be accepted soon - can then change to pip version
import sys
sys.path.insert(0, '/api-visualization-tool/d3graph')
from d3graph import d3graph, vec2adjmat
from d3graph.d3graph import adjmat2dict
###
from jsonparsercreator import *
from Project import Project
# get method name
def getMethodName(x):
    """Extract the method part of a 'pkg.Class::method' identifier.

    Returns '' for NaN/None input; identifiers without '::' are treated
    as 'x::x', so the whole name comes back.
    """
    # Null check first: '" " in x' on a NaN float raises TypeError, so
    # the original's ordering crashed on missing values instead of
    # returning ''.
    if pd.isnull(x):
        return ''
    if " " in x:
        x = x.split(" ")[-1]
    if not "::" in x:
        x = x + "::" + x
    return x.split("::")[1]
# get class name
def getClassName(x):
    """Extract the qualified class part of a 'pkg.Class::method' identifier.

    Returns '' for NaN/None input; identifiers without '::' are treated
    as 'x::x', so the whole name comes back.
    """
    # Null check first: '" " in x' on a NaN float raises TypeError, so
    # the original's ordering crashed on missing values instead of
    # returning ''.
    if pd.isnull(x):
        return ''
    if " " in x:
        x = x.split(" ")[-1]
    if not "::" in x:
        x = x + "::" + x
    return x.split("::")[0]
# get pkg name
def getPkgName(x):
    """Extract the package part of a 'pkg.Class::method' identifier.

    Returns '' for NaN/None input or for classes in the default package.
    """
    # Null check first: '" " in x' on a NaN float raises TypeError, so
    # the original's ordering crashed on missing values instead of
    # returning ''.
    if pd.isnull(x):
        return ''
    if " " in x:
        x = x.split(" ")[-1]
    if not "::" in x:
        x = x + "::" + x
    return ".".join(x.split("::")[0].split(".")[:-1])
# get lib name without version
def getVersionlessLibName(x):
    """Strip the version from a 'group:artifact:version' coordinate.

    Returns '' for NaN/None input and the input unchanged when it has
    no ':' separator.
    """
    if pd.isnull(x):
        return ''
    parts = x.split(":")
    if len(parts) > 1:
        return ":".join(parts[:2])
    return x
# get a dict of pks to classes
def getPackagesToClassesMapping(fileName):
    """Build a package -> [class names] map from a libsInfo TSV file.

    Also records, in the module-level sourceColToJars dict, the
    versionless library each package came from.
    """
    tmpDf = pd.read_csv(fileName, sep='\t', on_bad_lines='skip')
    for i, row in tmpDf.iterrows():
        for clsName in row['Classes'].split(':'):
            pkg = '.'.join(clsName.split('.')[:-1])
            sourceColToJars[pkg] = getVersionlessLibName(row['Library Name'])
    packageToClassesMap = collections.defaultdict(list)
    # Side-effecting list comprehension: buckets each class name (without
    # its package prefix) under its package.
    [packageToClassesMap['.'.join(eachClass.split('.')[:-1])].append(eachClass.split('.')[-1]) for classNames in list(tmpDf['Classes']) for eachClass in classNames.split(":") if isinstance(eachClass, str)]
    return packageToClassesMap
def grep(filepath, library):
    """Return True if any line of *filepath* contains *library*.

    Despite the name this is a boolean containment test, not a line
    extractor; the unused ``res`` accumulator has been dropped.
    """
    with open(filepath) as f:
        # Stream line by line instead of materialising the whole file
        # with readlines().
        for line in f:
            if library in line:
                return True
    return False
# get list of clients and library from input JSON
def cloneRepos(datasetMetadata):
    """Clone every repository listed in the dataset metadata.

    Creates the working directories on first use, then clones each entry
    that has a 'url' key and checks out its pinned 'commit'.
    """
    try:
        os.mkdir('/api-visualization-tool/projects')
        os.mkdir('/api-visualization-tool/apis-data')
        os.mkdir('/api-visualization-tool/projects/api-surface-data')
    except OSError as error:
        # Directories already created by a previous run.
        print('Directory exists.')
    for repo in datasetMetadata:
        if 'url' in repo.keys():
            project = Project(repo['url'])
            try:
                res=project.clone('/api-visualization-tool/projects')
                project.checkout_commit(repo['commit'])
            # NOTE(review): CalledProcessError is not imported in this
            # file directly -- presumably it comes via the star import
            # from jsonparsercreator; verify, otherwise this is a
            # NameError when a clone fails.
            except CalledProcessError as error:
                print('Directory exists.')
def filterTsvRow(x):
    """Return True if *x* mentions any of the tracked repo artifacts."""
    # repoArtifacts is a module-level list populated elsewhere.
    return any(repo in x for repo in repoArtifacts)
# create dataframe in the shape we want
def getInteractionsDF(arrayTsvFiles, lib):
    """Aggregate all client/library interactions found in the TSV files.

    Produces one DataFrame row per interaction (invocation, annotation,
    subtyping or field access), with the source and target each split
    into method, class, package and jar columns.  As a side effect it
    populates the module-level pkgToCls, pkgToClsLength and
    sourceColToJars maps.

    NOTE(review): the 'lib' parameter is unused in this body -- confirm
    whether filtering by it was intended.
    """
    global pkgToCls
    global pkgToClsLength
    global sourceColToJars
    df = pd.DataFrame({'SourceMethod': [], 'SourceClass': [], 'SourcePackage': [], 'SourceJar': [], 'TargetMethod': [], 'TargetClass': [], 'TargetPackage': [], 'TargetJar': [], 'Type': [], 'Static': [], 'Count': []})
    for file in arrayTsvFiles:
        print(file)
        if file.endswith("libsInfo.tsv"):
            # Library metadata: record the package->classes map and the
            # per-package class counts.
            pkgToCls = getPackagesToClassesMapping(file)
            for pkg in pkgToCls.keys():
                pkgToClsLength[pkg] = len(pkgToCls[pkg])
        if file.endswith("dynamic-invocations.tsv"):
            tmpDf = pd.read_csv(file, sep='\t', on_bad_lines='skip')
            # Drop rows whose cell types do not match the expected schema.
            checkDf = tmpDf.applymap(lambda x : type(x).__name__).eq({'Declared Callee Method': 'str', 'Declared Callee Library': 'str', 'Actual Callee Method': 'str', 'Actual Callee Library': 'str', 'Caller Method': 'str', 'Caller Library': 'str', 'Count': 'int', 'Callee Visibility': 'str','Reflective': 'bool','DynamicProxy': 'bool','Label': 'str'})
            tmpDf = tmpDf.drop(list(checkDf[checkDf.isin([False]).any(axis=1)].index))
            # Keep only rows that involve one of the tracked artifacts.
            tmpDf = tmpDf.loc[(tmpDf['Actual Callee Library'].apply(filterTsvRow)) | (tmpDf['Declared Callee Library'].apply(filterTsvRow)) | (tmpDf['Caller Library'].apply(filterTsvRow))]
            for index, row in tmpDf.iterrows():
                # 'both' when declared and actual callee coincide,
                # otherwise the call was dynamically dispatched.
                stat = 'both' if row['Declared Callee Method'] == row['Actual Callee Method'] else 'dynamic'
                method1, klass1, pkg1, lib1 = getMethodName(row['Caller Method']), getClassName(row['Caller Method']), getPkgName(row['Caller Method']), getVersionlessLibName(row['Caller Library'])
                method2, klass2, pkg2, lib2 = getMethodName(row['Actual Callee Method']), getClassName(row['Actual Callee Method']), getPkgName(row['Actual Callee Method']), getVersionlessLibName(row['Actual Callee Library'])
                sourceColToJars[method1], sourceColToJars[klass1], sourceColToJars[pkg1] = lib1, lib1, lib1
                sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'invocation', 'Static': stat, 'Count': row['Count']}
                # NOTE(review): DataFrame.append is removed in pandas 2.x;
                # consider collecting rows and using pd.concat once.
                df = df.append(newRow, ignore_index = True)
                if stat == 'dynamic':
                    # Also record the declared (static) target of the call.
                    method2, klass2, pkg2, lib2 = getMethodName(row['Declared Callee Method']), getClassName(row['Declared Callee Method']), getPkgName(row['Declared Callee Method']), getVersionlessLibName(row['Declared Callee Library'])
                    sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                    newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'invocation', 'Static': 'static', 'Count': row['Count']}
                    df = df.append(newRow, ignore_index = True)
        elif file.endswith("static-invocations.tsv"):
            tmpDf = pd.read_csv(file, sep='\t', on_bad_lines='skip')
            checkDf = tmpDf.applymap(lambda x : type(x).__name__).eq({'Declared Callee Method': 'str', 'Declared Callee Library': 'str', 'Caller Method': 'str', 'Caller Library': 'str', 'Count': 'int', 'Callee Visibility': 'str', 'Label': 'str'})
            tmpDf = tmpDf.drop(list(checkDf[checkDf.isin([False]).any(axis=1)].index))
            tmpDf = tmpDf.loc[(tmpDf['Declared Callee Library'].apply(filterTsvRow)) | (tmpDf['Caller Library'].apply(filterTsvRow))]
            for index, row in tmpDf.iterrows():
                method1, klass1, pkg1, lib1 = getMethodName(row['Caller Method']), getClassName(row['Caller Method']), getPkgName(row['Caller Method']), getVersionlessLibName(row['Caller Library'])
                method2, klass2, pkg2, lib2 = getMethodName(row['Declared Callee Method']), getClassName(row['Declared Callee Method']), getPkgName(row['Declared Callee Method']), getVersionlessLibName(row['Declared Callee Library'])
                sourceColToJars[method1], sourceColToJars[klass1], sourceColToJars[pkg1] = lib1, lib1, lib1
                sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'invocation', 'Static': 'static', 'Count': row['Count']}
                df = df.append(newRow, ignore_index = True)
        elif file.endswith("classesUsageInfo.tsv"):
            # Intentionally ignored here.
            pass
        elif file.endswith("annotations.tsv"):
            tmpDf = pd.read_csv(file, sep='\t', on_bad_lines='skip')
            checkDf = tmpDf.applymap(lambda x : type(x).__name__).eq({'Class': 'str', 'Method': 'str', 'Field Name:Field Signature': 'str', 'Annotation': 'str', 'Annotated In Library': 'str', 'Annotation Visibility': 'str', 'Count': 'int', 'Annotation Library': 'str'})
            tmpDf = tmpDf.drop(list(checkDf[checkDf.isin([False]).any(axis=1)].index))
            tmpDf = tmpDf.loc[(tmpDf['Annotated In Library'].apply(filterTsvRow)) | (tmpDf['Annotation Library'].apply(filterTsvRow))]
            for index, row in tmpDf.iterrows():
                # Pick whichever of class/method/field actually carries
                # the annotation ('-' marks an empty column).
                annotated = row['Class'] if not row['Class'] == '-' else (row['Method'] if not row['Method'] == '-' else row['Field Name:Field Signature'])
                method1, klass1, pkg1, lib1 = getMethodName(annotated), getClassName(annotated), getPkgName(annotated), getVersionlessLibName(row['Annotated In Library'])
                method2, klass2, pkg2, lib2 = getMethodName(row['Annotation']), getClassName(row['Annotation']), getPkgName(row['Annotation']), getVersionlessLibName(row['Annotation Library'])
                sourceColToJars[method1], sourceColToJars[klass1], sourceColToJars[pkg1] = lib1, lib1, lib1
                sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'annotation', 'Static': 'static', 'Count': row['Count']}
                df = df.append(newRow, ignore_index = True)
        elif file.endswith("subtyping.tsv"):
            tmpDf = pd.read_csv(file, sep='\t', on_bad_lines='skip')
            checkDf = tmpDf.applymap(lambda x : type(x).__name__).eq({'SubClass': 'str', 'Sub Library': 'str', 'Super Class/Interface': 'str', 'Super Class/Interface Visibility': 'str', 'Super Library': 'str', 'Count': 'int'})
            tmpDf = tmpDf.drop(list(checkDf[checkDf.isin([False]).any(axis=1)].index))
            tmpDf = tmpDf.loc[(tmpDf['Sub Library'].apply(filterTsvRow)) | (tmpDf['Super Library'].apply(filterTsvRow))]
            for index, row in tmpDf.iterrows():
                method1, klass1, pkg1, lib1 = getMethodName(row['SubClass']), getClassName(row['SubClass']), getPkgName(row['SubClass']), getVersionlessLibName(row['Sub Library'])
                method2, klass2, pkg2, lib2 = getMethodName(row['Super Class/Interface']), getClassName(row['Super Class/Interface']), getPkgName(row['Super Class/Interface']), getVersionlessLibName(row['Super Library'])
                sourceColToJars[method1], sourceColToJars[klass1], sourceColToJars[pkg1] = lib1, lib1, lib1
                sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'subtyping', 'Static': 'static', 'Count': row['Count']}
                df = df.append(newRow, ignore_index = True)
        elif file.endswith("fields.tsv"):
            tmpDf = pd.read_csv(file, sep='\t', on_bad_lines='skip')
            if not 'Caller Class' in tmpDf.columns:
                continue
            checkDf = tmpDf.applymap(lambda x : type(x).__name__).eq({'Caller Class': 'str', 'Caller Library': 'str', 'Field Name': 'str', 'Declared Class': 'str', 'Actual Class': 'str', 'Field Signature': 'str', 'Count': 'int', 'Visibility': 'str','Reflective': 'bool','Static': 'bool','Field Library': 'str'})
            tmpDf = tmpDf.drop(list(checkDf[checkDf.isin([False]).any(axis=1)].index))
            tmpDf = tmpDf.loc[(tmpDf['Caller Library'].apply(filterTsvRow)) | (tmpDf['Field Library'].apply(filterTsvRow))]
            for index, row in tmpDf.iterrows():
                method1, klass1, pkg1, lib1 = getMethodName(row['Caller Class']), getClassName(row['Caller Class']), getPkgName(row['Caller Class']), getVersionlessLibName(row['Caller Library'])
                sourceColToJars[method1], sourceColToJars[klass1], sourceColToJars[pkg1] = lib1, lib1, lib1
                if row['Declared Class'] == row['Actual Class']:
                    method2, klass2, pkg2, lib2 = getMethodName(row['Field Name']), getClassName(row['Field Name']), getPkgName(row['Field Name']), getVersionlessLibName(row['Field Library'])
                    sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                    newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'field', 'Static': 'both', 'Count': row['Count']}
                    df = df.append(newRow, ignore_index = True)
                else:
                    # NOTE(review): comparing the 'Declared Class' column
                    # to the literal 'static' looks suspicious -- possibly
                    # the 'Static' column was meant; confirm.
                    stat = 'static' if row['Declared Class'] == 'static' else 'dynamic'
                    method2, klass2, pkg2, lib2 = getMethodName(row['Field Name']), getClassName(row['Field Name']), getPkgName(row['Field Name']), getVersionlessLibName(row['Field Library'])
                    sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                    newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'field', 'Static': stat, 'Count': row['Count']}
                    df = df.append(newRow, ignore_index = True)
                    if stat == 'dynamic':
                        method2, klass2, pkg2, lib2 = getMethodName(row['Declared Class']), getClassName(row['Declared Class']), getPkgName(row['Declared Class']), getVersionlessLibName(row['Field Library'])
                        sourceColToJars[method2], sourceColToJars[klass2], sourceColToJars[pkg2] = lib2, lib2, lib2
                        newRow = {'SourceMethod': method1, 'SourceClass': klass1, 'SourcePackage': pkg1, 'SourceJar': lib1, 'TargetMethod': method2, 'TargetClass': klass2, 'TargetPackage': pkg2, 'TargetJar': lib2, 'Type': 'field', 'Static': stat, 'Count': row['Count']}
                        df = df.append(newRow, ignore_index = True)
    return df
# helper for checking equivalent nodes for coalescence
def checkIfInEdgesEqual(G, key, otherKey):
if G.in_degree(key)==0 and G.in_degree(otherKey)==0:
return True
if not G.in_degree(key)==G.in_degree(otherKey):
return False
keyInEdgeSources = set()
otherKeyInEdgeSources = set()
for e in G.edges():
if e[1]==key:
keyInEdgeSources.add(e[0])
elif e[1]==otherKey:
otherKeyInEdgeSources.add(e[0])
if (keyInEdgeSources==otherKeyInEdgeSources):
return True
return False
# checking equivalent nodes for coalescence
def getEquivalentNodes(G):
    """Group nodes that are interchangeable: either fully isolated together,
    or sharing the same out-neighbours and the same in-edge sources."""
    # Rebuild an adjacency dict from networkx's adjlist text lines
    # ("node neighbour1 neighbour2 ...").
    adjacency = {}
    for entry in nx.generate_adjlist(G):
        tokens = entry.split(' ')
        adjacency[tokens[0]] = tokens[1:]
    groups = []
    alreadyGrouped = list()
    for node in adjacency.keys():
        if node in alreadyGrouped:
            continue
        bucket = set()
        bucket.add(node)
        for other in adjacency.keys():
            if other == node:
                continue
            bothIsolated = (G.in_degree(node) == G.in_degree(other) == 0
                            and G.out_degree(node) == G.out_degree(other) == 0)
            sameNeighbourhood = (set(adjacency[node]) == set(adjacency[other])
                                 and checkIfInEdgesEqual(G, node, other))
            if bothIsolated or sameNeighbourhood:
                bucket.add(other)
                alreadyGrouped.append(other)
        groups.append(list(bucket))
        alreadyGrouped.append(node)
    return groups
# draw graph
def createGraph(sourceColumn, targetColumn, sourceClubColumn, targetClubColumn, df):
    """Build, coalesce and render the API-usage graph as an interactive d3 page.

    Reads module globals populated earlier: pkgToCls, pkgToClsLength,
    sourceColToJars, repos, library.
    NOTE(review): sourceColumn/targetColumn are unused — presumably kept for
    interface symmetry; confirm before removing.
    """
    # check if club is smaller than base, and if none
    G = nx.from_pandas_edgelist(df, source=sourceClubColumn, target=targetClubColumn, edge_attr=list(['Count', 'Static', 'Type']),create_using=nx.DiGraph())
    # add unused pkgs: packages of the library never referenced by any edge
    for pkg in pkgToCls.keys():
        if not pkg in G.nodes:
            if sourceColToJars[pkg].split(':')[-1] == library:
                G.add_node(pkg)
                # attach to a random existing package so the node is not isolated
                G.add_edge(pkg, list(pkgToCls.keys())[randrange(len(pkgToCls)-1)], Count=1, Static='none', Type='none')
    # coalesce nodes: merge equivalent nodes, but only within the same jar
    eqvNodes = getEquivalentNodes(G)
    for initNodesToMerge in eqvNodes:
        try:
            grouped = [list(g) for k, g in groupby(initNodesToMerge, lambda s: sourceColToJars[s])]
            for nodesToMerge in grouped:
                for n in nodesToMerge[1:]:
                    G = nx.contracted_nodes(G, nodesToMerge[0], n)
                if len(nodesToMerge)>1:
                    # merged node carries the summed class count and a combined label
                    nx.set_node_attributes(G, {nodesToMerge[0]:sum([pkgToClsLength[a] for a in nodesToMerge])}, 'number')
                    nx.set_node_attributes(G, {nodesToMerge[0]:sourceColToJars[nodesToMerge[0]]+" : "+" ".join(nodesToMerge)}, 'lbl')
        except KeyError:
            pass
    # add invisible nodes to fix downstream and upstream positions
    G.add_node('imagClient', lbl='', number=1)
    sourceColToJars['imagClient'] = ''
    G.add_node('imagLib', lbl='', number=1)
    sourceColToJars['imagLib'] = ''
    # anchor every client node to imagClient and every library node to imagLib
    for repo in repos:
        try:
            if repo['type']=='client':
                clientRows = df.loc[(df['SourceJar'].apply(lambda x: x.split(":")[-1]) == repo['artifact']) | (df['TargetJar'].apply(lambda x: x.split(":")[-1]) == repo['artifact'])]
                for index, row in clientRows.iterrows():
                    nod = row[sourceClubColumn]
                    G.add_edge('imagClient', nod, Count=10, Static='none', Type='none')
            elif repo['type']=='library':
                libRows = df.loc[(df['SourceJar'].apply(lambda x: x.split(":")[-1]) == repo['artifact']) | (df['TargetJar'].apply(lambda x: x.split(":")[-1]) == repo['artifact'])]
                for index, row in libRows.iterrows():
                    nod = row[sourceClubColumn]
                    G.add_edge('imagLib', nod, Count=10, Static='none', Type='none')
        except KeyError:
            pass
    # set node attribute labels; track min/max 'number' for size scaling
    numMax, numMin = 0, 0
    for node in G.nodes:
        try:
            node_dict = G.nodes[node]
            if 'lbl' not in node_dict or node_dict['lbl'] is None:
                node_dict['lbl'] = sourceColToJars[node]+" : "+node
            if 'number' not in node_dict or node_dict['number'] is None:
                node_dict['number'] = pkgToClsLength[node]
            if node_dict['number']> numMax:
                numMax = node_dict['number']
            elif node_dict['number']< numMin:
                numMin = node_dict['number']
        except KeyError:
            pass
    if '' in G.nodes:
        G.remove_node('')
    # draw graph using d3
    d3 = d3graph(charge=500)
    adjmat = nx.to_pandas_adjacency(G, multigraph_weight=max, weight='Count')
    # clustering (Louvain community detection on the undirected projection)
    cluster_labels = community_louvain.best_partition(G.to_undirected())
    # Extract clusterlabels
    y = list(map(lambda x: cluster_labels.get(x), cluster_labels.keys()))
    hex_colors, _ = cm.fromlist(y, cmap='Paired', scheme='hex')
    labx = {}
    # Use the order of node_names; invisible anchors stay white
    for i, key in enumerate(cluster_labels.keys()):
        labx[key] = {}
        labx[key]['name'] = key
        if key=='imagClient' or key=='imagLib':
            labx[key]['color'] = '#FFFFFF'
        else:
            labx[key]['color'] = hex_colors[i]
        labx[key]['cluster_label'] = cluster_labels.get(key)
    # return
    node_names = adjmat.columns.astype(str)
    color = np.array(list(map(lambda x: labx.get(x)['color'], node_names)))
    d3.graph(adjmat)
    for node in G.nodes:
        node_dict = G.nodes[node]
        if 'number' not in node_dict.keys() or node_dict['number'] is None:
            node_dict['number'] = 1
    # scale node sizes linearly from [numMin, numMax] into roughly [8, 12]
    sizes = [2*int((G.nodes[node]['number'] - numMin)*(6 - 4)/(numMax-numMin) + 4) for node in G.nodes]
    print(cluster_labels)
    # fix downstream clusters to the left, project to the middle, upstream to the right
    fixedPos = list()
    nodeColors, nodeEdgeColors, nodeEdgeSizes = list(), list(), list()
    for i, node in enumerate(G.nodes):
        repoTypeList = [repo for repo in repos if repo['artifact'] == sourceColToJars[node].split(':')[-1]]
        if repoTypeList:
            repoType = repoTypeList[0]['type']
            if repoType == 'client':
                nodeColors.append('#FFFFFF')
                nodeEdgeColors.append(color[i])
                nodeEdgeSizes.append(4)
            elif repoType == 'library':
                nodeColors.append(color[i])
                nodeEdgeColors.append('#000000')
                nodeEdgeSizes.append(3)
        else:
            if not node=='imagClient' and not node=='imagLib':
                nodeColors.append(color[i])
                nodeEdgeColors.append('#000000')
                nodeEdgeSizes.append(0.1)
        if node=='imagClient':
            fixedPos.append(json.dumps({'isFixed':'true', 'x':50, 'y':350}))
            nodeColors.append('#FFFFFF')
            nodeEdgeColors.append('#FFFFFF')
            nodeEdgeSizes.append(0.1)
        elif node=='imagLib':
            fixedPos.append(json.dumps({'isFixed':'true', 'x':800, 'y':200}))
            nodeColors.append('#FFFFFF')
            nodeEdgeColors.append('#FFFFFF')
            nodeEdgeSizes.append(0.1)
        else:
            fixedPos.append(json.dumps({'isFixed':'false'}))
    d3.set_node_properties(hover=[G.nodes[node]['lbl'] for node in G.nodes], label=['' for node in G.nodes], color=nodeColors, size=sizes, fixedPos=fixedPos, edge_color=nodeEdgeColors, edge_size=nodeEdgeSizes)
    d3.set_edge_properties(directed=True)
    # colour edges by static/dynamic and style them by interaction type
    for edge in G.edges:
        if edge in d3.edge_properties.keys():
            d3.edge_properties[edge]['hover'] = nx.get_edge_attributes(G,'Type')[edge]
            staticAttr = G.get_edge_data(*edge)['Static']
            if staticAttr == 'static':
                d3.edge_properties[edge]['color']='#A91F01'
            elif staticAttr == 'dynamic':
                d3.edge_properties[edge]['color']='#625fad'
            elif staticAttr == 'both':
                d3.edge_properties[edge]['color']='#000000'
            elif staticAttr == 'none':
                d3.edge_properties[edge]['style']='none'
            typeAttr = G.get_edge_data(*edge)['Type']
            if typeAttr == 'invocation':
                d3.edge_properties[edge]['style']='link'
            elif typeAttr == 'field':
                d3.edge_properties[edge]['style']='field-link'
            elif typeAttr == 'subtyping':
                d3.edge_properties[edge]['style']='subtyping-link'
            elif typeAttr == 'annotation':
                d3.edge_properties[edge]['style']='annotation-link'
    d3.show(filepath='/api-visualization-tool/api-usage.html', title='VizAPI', graphLbl="VizAPI - API Usage : "+"-".join([repo['artifact'] for repo in repos]))
def getDataTsvs(dir, allClients, library):
    """Collect instrumentation TSV paths under *dir* for the configured repos.

    A file belongs to a repo when its name looks like 'group:artifact:...'
    and the artifact appears in the global `repos`. Directories are scanned
    recursively. When *allClients* is True, any project whose
    dynamic-invocations TSV mentions *library* is additionally registered
    as a client repo and its TSV collected.
    Returns a de-duplicated list of paths.
    """
    global repos
    tsvs = list()
    repoArtifacts = [repo['artifact'] for repo in repos]
    for file in os.listdir(dir):
        d = os.path.join(dir, file)
        if os.path.isfile(d):
            if ':' in file and file.split(':')[1] in repoArtifacts:
                tsvs.append(d)
        else:
            tsvs.extend(getDataTsvs(d, allClients, library))
    if allClients:
        clients = list()
        for root, dirs, fnames in os.walk(dir):
            for fname in fnames:
                if fname.endswith('dynamic-invocations.tsv') and grep(os.path.join(root, fname), library):
                    artifact = root.split(':')[1]
                    if not artifact == library:
                        repos.append({'type': 'client', 'artifact': artifact})
                    # FIX: the original looped over os.listdir(root) and appended
                    # the same path once per directory entry (duplicates were only
                    # hidden by the final set()); append the matching TSV once.
                    clients.append(os.path.join(root, fname))
        tsvs.extend(clients)
    return list(set(tsvs))
def dataExists(dir, repos):
    """Return True when *dir* already holds a data file for any repo artifact.

    A matching file is named 'group:artifact:...' with the artifact listed
    in *repos*.
    """
    artifacts = {repo['artifact'] for repo in repos}
    return any(
        ':' in entry and entry.split(':')[1] in artifacts
        for entry in os.listdir(dir)
    )
if __name__ == "__main__":
    # Pipeline: clone the configured repos, run the Java instrumentation for
    # any repo whose TSVs are missing, then build the interaction graph.
    sourceColToJars = dict()
    pkgToCls = dict()
    pkgToClsLength = dict()
    repos = json.load(open('/api-visualization-tool/input.json','r'))
    # get clients and library and run instrumentation on it - output tsvs
    cloneRepos(repos)
    projList = seeDirectory('/api-visualization-tool/projects', "")
    # check if data already exists in apis-data directory
    repoArtifacts = [repo['artifact'] for repo in repos]
    projList = [proj for proj in projList if proj['execDir'] in repoArtifacts]
    print(projList)
    # drop projects whose instrumentation output is already present
    for file in os.listdir('/api-visualization-tool/apis-data'):
        if ':' in file and (file.split(':')[1] in repoArtifacts):
            projList = [proj for proj in projList if proj['execDir']!=file.split(':')[1]]
    print(projList)
    if len(projList) > 0:
        createJson(projList)
        # run the instrumentation jar; it writes into projects/api-surface-data
        subprocess.call(['java', '-jar', '/api-visualization-tool/dependencies/libs-info-project-runner-1.0-SNAPSHOT-jar-with-dependencies.jar'])
        # move results to where we want
        source_dir = '/api-visualization-tool/projects/api-surface-data'
        target_dir = '/api-visualization-tool/apis-data'
        file_names = os.listdir(source_dir)
        for file_name in file_names:
            shutil.move(os.path.join(source_dir, file_name), os.path.join(target_dir, file_name))
    # exactly one library is supported; a lone library implies "all clients"
    library = [repo['artifact'] for repo in repos if repo['type']=='library']
    if library:
        library = library[0]
        if len(repos)==1:
            allClients = True
        else:
            allClients = False
    else:
        library = ''
        allClients = False
    tsvs = getDataTsvs('/api-visualization-tool/apis-data', allClients, library)
    interactionsDf = getInteractionsDF(tsvs, library)
    createGraph("SourceJar","TargetJar","SourcePackage","TargetPackage", interactionsDf)
|
#!/usr/bin/python3
def print_matrix_integer(matrix=None):
    """Print a matrix of integers, one row per line, values space-separated.

    An empty matrix ([[]], the default) prints a single blank line.
    '{:d}' formatting raises ValueError for non-integer values, matching the
    original contract.
    """
    # FIX: avoid the mutable default argument ([[]]) shared across calls.
    if matrix is None:
        matrix = [[]]
    if not matrix[0]:
        print()
        return
    for row in matrix:
        # join renders "a b c" without a trailing space, like the old
        # index-based loop did.
        print(' '.join('{:d}'.format(value) for value in row))
|
# Development settings: start from the shared base settings and enable debug.
from common import *
DEBUG = True
# AWS_STORAGE_BUCKET_NAME = 'trade-paper-dev'
|
class E160_wall:
    """Axis-aligned wall segment, padded by a small collision radius.

    wall_points is (x1, y1, x2, y2); slope is "vertical" or "horizontal".
    self.points holds the 4 corners of the padded rectangle as a flat list.
    """

    def __init__(self, wall_points, slope):
        self.slope = slope
        self.radius = 0.025
        self.wall_points = wall_points
        self.point1 = (wall_points[0], wall_points[1])
        self.point2 = (wall_points[2], wall_points[3])
        x1, y1, x2, y2 = wall_points
        r = self.radius
        if slope == "vertical":
            # assume top point is first
            self.points = [x1 - r, y1 + r,
                           x1 + r, y1 + r,
                           x2 + r, y2 - r,
                           x2 - r, y2 - r]
        elif slope == "horizontal":
            # assume left point is first
            self.points = [x1 - r, y1 - r,
                           x1 - r, y1 + r,
                           x2 + r, y2 + r,
                           x2 + r, y2 - r]

    def contains_point(self, p):
        """Return True when p=(x, y) lies on the (slightly fudged) segment bbox."""
        fudge = 0.00001
        x, y = p
        p1x, p1y = self.point1
        p2x, p2y = self.point2
        x_ok = (min(p1x, p2x) - fudge) <= x <= (max(p1x, p2x) + fudge)
        y_ok = (min(p1y, p2y) - fudge) <= y <= (max(p1y, p2y) + fudge)
        return x_ok and y_ok

    def slope_intercept(self):
        """Return (slope, intercept) of the wall line; (inf, inf) when vertical."""
        p1x, p1y = self.point1
        p2x, p2y = self.point2
        # A vertical segment has no finite slope; avoid dividing by zero.
        if p1x == p2x:
            return (float('inf'), float('inf'))
        m = (p2y - p1y) / (p2x - p1x)
        return (m, p1y - m * p1x)
|
#first try on my own
from functools import reduce
def triangle_num(n):
    """Return the n-th triangular number (n >= 0).

    e.g. the 7th triangular number is 1+2+...+7 == 28, so
    triangle_num(7) == 28.
    """
    # Closed form n(n+1)/2 instead of summing a whole range.
    return n * (n + 1) // 2
def factors(n):
    """Return the sorted list of factors of n (n >= 0).

    Collects divisor pairs up to sqrt(n), so this is O(sqrt n) instead of
    the original O(n) trial division the author flagged as inefficient.
    """
    found = set()
    for i in range(1, math.isqrt(n) + 1):
        if n % i == 0:
            found.add(i)
            found.add(n // i)  # the paired divisor
    return sorted(found)
def main():
    """First attempt: find the first integer with more than 100 factors and
    return the product of those factors.

    NOTE: this scans consecutive integers (not triangular numbers) — kept
    as-is; the corrected approach is in test_triangles below.
    """
    n = 0
    while True:
        n += 1
        divisor_list = factors(n)
        if len(divisor_list) > 100:
            return reduce(lambda a, b: a * b, divisor_list)
# second try with help of online resources
import math
def get_factors(n):
    """Return the number of divisors of n (n >= 1).

    FIX: the original added 2 for every divisor up to round(sqrt(n)+1),
    double-counting the root of perfect squares (e.g. it reported 6
    divisors for 16 instead of 5) and relying on float sqrt rounding.
    """
    count = 0
    i = 1
    while i * i < n:
        if n % i == 0:
            count += 2  # i and n//i are a distinct divisor pair
        i += 1
    if i * i == n:
        count += 1  # perfect square: the root counts once
    return count
def generate_triangles(limit):
    """Yield the triangular numbers T(1)..T(limit-1), T(l) = l*(l+1)/2.

    FIX: the original recomputed sum(range(1, l+1)) on every iteration,
    making the generator O(limit^2) overall; keep a running total instead.
    """
    total = 0
    for l in range(1, limit):
        total += l
        yield total
def test_triangles():
    """Second attempt: return the first triangular number with >= 500 factors
    (Project Euler 12), scanning up to T(99999)."""
    for candidate in generate_triangles(100000):
        if get_factors(candidate) >= 500:
            return candidate
|
# riaps:keep_import:begin
from riaps.run.comp import Component
import logging
import ctypes
import time
# import capnp
# import memfail_capnp
# riaps:keep_import:end
class MemPublisher(Component):
    """RIAPS component that publishes an incrementing counter and deliberately
    triggers one segmentation fault to exercise actor restart/recovery."""
    # riaps:keep_constr:begin
    def __init__(self):
        super(MemPublisher, self).__init__()
        # counter published on each trigger; publishing stops after 10
        self.val = 0
        self.logger.info("-----init publishier-----")
    def handleActivate(self):
        # Arm the clock port to fire after 20 s and cause the crash.
        self.logger.info("starting seg fault timer")
        self.clock.setDelay(20.0)
        self.clock.launch()
    # riaps:keep_constr:end
    # riaps:keep_trigger:begin
    def on_trigger(self):
        # Publish the next counter value until 10 messages have been sent.
        if self.val < 10:
            now = self.trigger.recv_pyobj()
            self.logger.info("on_trigger(): %s" % now)
            self.logger.info("-----publishing value-----")
            self.val = self.val + 1
            self.pubport.send_pyobj(self.val)
    # riaps:keep_trigger:end
    # riaps:keep_clock:begin
    def on_clock(self):
        # First firing: create the sentinel file, then segfault via a NULL
        # read (ctypes.string_at(0)). After the framework restarts the actor,
        # open(..., "x") raises FileExistsError, proving the restart happened.
        now = self.clock.recv_pyobj()
        self.logger.info("on_clock(): %s" % now)
        try:
            f = open("check_seg.txt", "x")
            f.close()
            self.clock.halt()
            self.logger.info("\n\n-----causing segmentation fault-----\n")
            time.sleep(2)
            ctypes.string_at(0)
        except FileExistsError as e:
            self.logger.info("file exists, actor restarted")
        except:
            self.logger.info("could not create file")
    # riaps:keep_clock:end
    # riaps:keep_impl:begin
    # riaps:keep_impl:end
# -*- coding:utf8 -*-
import sched
import time
from datetime import datetime
# Scheduler driven by wall-clock time; blocks with time.sleep between events.
s = sched.scheduler(time.time, time.sleep)
def print_time():
    # Print the current wall-clock time (Python 2 print statement).
    dt = datetime.now()
    print "From print_time", dt.strftime('%H:%M:%S %f')
def print_some_times():
    # Demo: schedule print_time 5 s and 10 s out, then block in s.run()
    # until both events have fired.
    print time.time()
    s.enter(5, 1, print_time, ())
    s.enter(10, 1, print_time, ())
    s.run()
    print time.time()
if __name__ == "__main__":
    #print_some_times()
    # Report how many seconds remain until the fixed draw time.
    draw_str = "2018-04-13 16:00:00"
    draw_date = datetime.strptime(draw_str, "%Y-%m-%d %H:%M:%S")
    current_date = datetime.now()
    if draw_date > current_date:
        seconds = int((draw_date - current_date).total_seconds())
        print "After ", seconds, "s can draw"
    else:
        print "draw coin"
    #schedule.every().day.at("06:30").do(print_time)
|
import pandas as pd
import collections
import util
import tensorflow as tf
from keras.preprocessing import text, sequence
import numpy as np
# Exploratory script: inspect label distribution and Keras tokenizer output
# on the toxic-comment training data referenced by util.train_data.
print('loading data')
df = pd.read_csv(util.train_data)
print(df.shape)
train_comments = df['comment_text'].tolist()
y_toxic = df['toxic']
print(df.columns)
# Class balance: fraction of positive examples per label column.
for col in ['toxic', 'severe_toxic', 'obscene', 'threat',\
'insult', 'identity_hate']:
    print('{0}, {1:.6f}'.format(col, df[col].mean()))
# NOTE(review): exit(1) makes everything below unreachable — debugging leftover.
exit(1)
tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(train_comments[:1])
sample_comment_in_seq = tokenizer.texts_to_sequences(train_comments[:1])
print(tokenizer.char_level)
print(tokenizer.word_counts)
print(tokenizer.word_index)
print(tokenizer.word_index['nonsense'])
print(train_comments[:1])
print(text.text_to_word_sequence(train_comments[0]))
print(sample_comment_in_seq)
print(sequence.pad_sequences(sample_comment_in_seq, maxlen=util.maxlen))
# comment_seq = map(text.text_to_word_sequence, train_comment[:10])
# print(list(comment_seq))
# Generated by Django 3.0.1 on 2020-01-09 17:01
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the obsolete 'slug' field from Product.
    dependencies = [
        ('shopUser', '0013_auto_20200109_1658'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='slug',
        ),
    ]
|
###############################################################################
#
# webcamperf.py
#
# Assess FPS performance of webcam
#
# January 20, 2018
#
###############################################################################
import cv2
import opencvconst as cv
import time
def main():
    """Assess FPS performance of webcam."""
    #
    # Open webcam device and set some capture properties
    #
    cap = cv2.VideoCapture(0)
    #cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    #cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    # Report the capture properties (Python 2 print statements)
    print 'CV_CAP_PROP_FPS ', cap.get(cv.CV_CAP_PROP_FPS)
    print 'CV_CAP_PROP_FOURCC {:08x}'.format(int(cap.get(cv.CV_CAP_PROP_FOURCC)))
    print 'CV_CAP_PROP_CONVERT_RGB ', cap.get(cv.CV_CAP_PROP_CONVERT_RGB)
    # not supported by webcam
    #print "CV_CAP_PROP_GAIN ", cap.get(cv.CV_CAP_PROP_GAIN)
    #
    # Initialize start time and frame count
    #
    frame_count = 0
    start = time.time()
    while True:
        ret, frame = cap.read()
        cv2.imshow('Image', frame)
        frame_count = frame_count + 1
        #
        # Calculate and display FPS every 10 seconds, then reset the window
        #
        end = time.time()
        measure_interval = end - start
        if measure_interval > 10:
            fps = frame_count / measure_interval
            print 'FPS {:.2f}'.format(fps)
            frame_count = 0
            start = time.time()
        # ESC quits
        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break
    #
    # When done, release the capture
    #
    cap.release()
    cv2.destroyAllWindows()
#
# start
#
if __name__ == '__main__':
    main()
|
import json
import numpy.random as random
from scipy.spatial import distance
import numpy.linalg
import matplotlib.pyplot as plt
# Load the evacuation scenario; scenario "1" holds the evacuee records.
with open('egman.json') as json_data:
    data = json.load(json_data)
dat = data['1']["EVACUEES"]
# Fixed vertical travel distance shared by all evacuees.
v_distance = 12
class Evacue:
    """Single evacuee; total evacuation time is pre-evacuation delay plus
    vertical travel (module-level v_distance) plus a random horizontal leg."""

    def __init__(self, preevacuation, hspeed, vspeed, originx, originy):
        self.preevacuation = preevacuation
        self.hspeed = hspeed
        self.vspeed = vspeed
        # horizontal distance drawn uniformly from [10, 80) (length-1 array)
        self.hdistance = random.uniform(10, 80, 1)
        vertical_time = v_distance / self.vspeed
        horizontal_time = self.hdistance / self.hspeed
        self.time = self.preevacuation + vertical_time + horizontal_time
        self.originx = originx
        self.originy = originy
class Evacuees:
    """Collection of Evacue objects with helpers for the slowest member."""

    def __init__(self, tab):
        self.evacuees = tab

    def add_evacues(self, evacues):
        # NOTE(review): stores into self.evacues (typo?) and is never read
        # elsewhere in this file — preserved as-is.
        self.evacues = evacues

    def getevacuationtime(self):
        """Cache every member's time and return the largest one."""
        self.time = [member.time for member in self.evacuees]
        return max(self.time)

    def getlagger(self, x):
        """Return the index of the member whose time equals x
        (requires a prior getevacuationtime call)."""
        return self.time.index(x)
# Build one Evacue per record in the scenario.
evacues = []
i = 0
for a, b in data['1']["EVACUEES"].items():
    preevacuation = data['1']["EVACUEES"][a]["PRE_EVACUATION"]
    hspeed = data['1']["EVACUEES"][a]["H_SPEED"]
    vspeed = data['1']["EVACUEES"][a]["V_SPEED"]
    originx = data['1']["EVACUEES"][a]["ORIGIN"][0]
    originy = data['1']["EVACUEES"][a]["ORIGIN"][1]
    evacues.append(Evacue(preevacuation, hspeed, vspeed, originx, originy))
    i += 1
# NOTE(review): `all` shadows the builtin of the same name.
all = Evacuees(evacues)
lagger = float(all.getevacuationtime())
name = all.getlagger(lagger)
# Polish: "the last one was E{} with a time of {}s"
print("ostatni był E{} z {}s czasem".format(name, lagger))
# Sanity check of scipy's euclidean distance.
a = (1,2,3)
b = (4,5,6)
dst = distance.euclidean(a, b)
print(dst)
# Plot every evacuee's origin (labelled marker) and roadmap polyline.
points = []
for i in dat.keys():
    punkty = []
    points.insert(0, dat[i]['ORIGIN'])
    points.append(dat[i]['ROADMAP'])
    punkty.append(dat[i]['ORIGIN'])
    for n in dat[i]['ROADMAP']:
        punkty.append(n)
    plt.plot(dat[i]['ORIGIN'][0],dat[i]['ORIGIN'][1], 'o')
    plt.text(dat[i]['ORIGIN'][0],dat[i]['ORIGIN'][1], i)
    x, y = zip(*punkty)
    plt.plot(x, y, linewidth=2)
    #color="r"
plt.show()
# Read the triangle's height and width, then report its area (h * w / 2).
altura = float(input("Digite a altura do triangulo: "))
largura = float(input("Digite a largura do triangulo: "))
area = (altura * largura) / 2
print("A área do seu triangulo é:", area)
#!/usr/bin/python
import time
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node, Switch
from mininet.cli import CLI
def topology():
    """Build a 4-host / 4-switch Mininet topology and drop into the CLI.

    NOTE(review): the switch links form a loop (s1-s2, s3-s4, s1-s4, s3-s2);
    with failMode='standalone' learning switches this can cause broadcast
    storms unless STP is enabled — confirm this is intentional.
    """
    net = Mininet()
    net.addHost('h1')
    net.addHost('h2')
    net.addHost('h3')
    net.addHost('h4')
    # standalone: switches act as independent learning switches (no controller)
    net.addSwitch('s1', failMode='standalone')
    net.addSwitch('s2', failMode='standalone')
    net.addSwitch('s3', failMode='standalone')
    net.addSwitch('s4', failMode='standalone')
    net.addLink('h1', 's1')
    net.addLink('h2', 's1')
    net.addLink('h3', 's3')
    net.addLink('h4', 's3')
    net.addLink('s1', 's2')
    net.addLink('s3', 's4')
    net.addLink('s1', 's4')
    net.addLink('s3', 's2')
    net.start()
    CLI(net)
    net.stop()
if __name__=='__main__':
    topology()
|
from rest_framework.generics import ListAPIView
from django.db.models import Q
from page.models import Mileage
from .serializers import MileageSerializer
class MileageListAPIView(ListAPIView):
    """List Mileage records, optionally narrowed by the query parameters
    `filial` and `loko` (comma-separated slug lists) and `years`
    ("from,to" inclusive range)."""
    queryset = Mileage.objects.all()
    serializer_class = MileageSerializer

    def get_queryset(self, *args, **kwargs):
        records = Mileage.objects.all()

        filial_param = self.request.GET.get('filial')
        if filial_param:
            print('filial=', filial_param)
            records = records.filter(filial__slug__in=filial_param.split(','))

        loko_param = self.request.GET.get('loko')
        if loko_param:
            records = records.filter(loko__slug__in=loko_param.split(','))

        years_param = self.request.GET.get('years')
        if years_param:
            bounds = years_param.split(',')
            records = records.filter(
                Q(year__gte=bounds[0]) & Q(year__lte=bounds[1])
            )
        return records
|
# Launch the zw_81 spider programmatically (same as `scrapy crawl zw_81`).
from scrapy.cmdline import execute
execute('scrapy crawl zw_81'.split())
#execute(['scrapy' 'crawl' 'zw_81' '-s' 'JOBDIR="D:\\pycharm\\storages\\novel"'])
# Resume-from-checkpoint variant (JOBDIR persists crawl state):
#'scrapy crawl zw_81 -s JOBDIR="D:\pycharm\storages\novel"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/main.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims: QString.fromUtf8 and QApplication.UnicodeUTF8 are
# absent in some PyQt4/Qt builds, so fall back to plain pass-throughs.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_smart_window(object):
    """Auto-generated (pyuic4) UI: input/output text panes over a segment
    button — regenerate from ui/main.ui rather than editing by hand."""
    def setupUi(self, smart_window):
        smart_window.setObjectName(_fromUtf8("smart_window"))
        smart_window.resize(694, 481)
        self.centralwidget = QtGui.QWidget(smart_window)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # read-only output pane (right column)
        self.text_output = QtGui.QTextBrowser(self.centralwidget)
        self.text_output.setObjectName(_fromUtf8("text_output"))
        self.gridLayout.addWidget(self.text_output, 0, 1, 1, 1)
        # editable input pane (left column)
        self.text_input = QtGui.QTextEdit(self.centralwidget)
        self.text_input.setObjectName(_fromUtf8("text_input"))
        self.gridLayout.addWidget(self.text_input, 0, 0, 1, 1)
        # "segment" action button under the input pane
        self.seg_button = QtGui.QPushButton(self.centralwidget)
        self.seg_button.setMinimumSize(QtCore.QSize(0, 0))
        self.seg_button.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.seg_button.setObjectName(_fromUtf8("seg_button"))
        self.gridLayout.addWidget(self.seg_button, 1, 0, 1, 1)
        smart_window.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(smart_window)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 694, 26))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        smart_window.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(smart_window)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        smart_window.setStatusBar(self.statusbar)
        self.retranslateUi(smart_window)
        QtCore.QMetaObject.connectSlotsByName(smart_window)
    def retranslateUi(self, smart_window):
        # Apply translated user-visible strings.
        smart_window.setWindowTitle(_translate("smart_window", "Smart Segmentation", None))
        self.seg_button.setText(_translate("smart_window", "segment", None))
|
## Automatically adapted for numpy.oldnumeric Jul 30, 2007 by
import unittest
from test_cube import TestBase
import sys
import numpy.oldnumeric as Numeric
import numpy.oldnumeric.random_array as RandomArray
from opengltk.OpenGL import GL, GLUT
MY_LIST = 1
NUMDOTS = 500
NUMDOTS2 = 600
MAX_AGE = 13
class TestDots(TestBase):
def setUp(self):
from opengltk.OpenGL import GL, GLUT
print "GL imported from: ", GL.__file__
#print "Hit any key to quit."
self.x = RandomArray.random( NUMDOTS) * 2 - 1
self.y = RandomArray.random( NUMDOTS) * 2 - 1
self.age = RandomArray.randint( 0,MAX_AGE, (NUMDOTS,))
move_length = 0.005 # 1.0 = screen width
angle = 0 # in radians
delta_angle = 0.2 # in radians
self.move_x = move_length * Numeric.cos( angle)
self.move_y = move_length * Numeric.sin( angle)
self.halted = 0
def display(self):
GL.glClearColor( 0.0, 0.0, 0.0, 0.0)
GL.glClear( GL.GL_COLOR_BUFFER_BIT)
GL.glColor3f( 1.0,1.0,0.0)
self.x = self.x + self.move_x
self.y = self.y + self.move_y
self.age = self.age + 1
which = Numeric.greater( self.age, MAX_AGE)
self.x = Numeric.choose( which, (self.x, RandomArray.random( NUMDOTS)))
selfy = Numeric.choose( which, (self.y, RandomArray.random( NUMDOTS)))
self.age = Numeric.choose( which, (self.age, 0))
self.x = Numeric.choose( Numeric.greater( self.x, 1.0), (self.x, self.x - 1.0))
self.y = Numeric.choose( Numeric.greater( self.y, 1.0), (self.y, self.y - 1.0))
x2 = RandomArray.random( NUMDOTS2)
y2 = RandomArray.random( NUMDOTS2)
v = Numeric.concatenate(
(Numeric.transpose( Numeric.array( [self.x, self.y])),
Numeric.transpose( Numeric.array( [self.x - 0.005, self.y + 0.005])),
Numeric.transpose( Numeric.array( [self.x + 0.005, self.y - 0.005])),
Numeric.transpose( Numeric.array( [x2, y2]))))
#from opengltk.util import GLdouble
#av = bufarray.readArray( v, GLdouble)
#GL.glVertexPointer( 2, av)
GL.glVertexPointer( 2, v)
GL.glEnableClientState( GL.GL_VERTEX_ARRAY)
#glplus.DrawArrays( GL.POINTS, len( av))
from opengltk import glplus
glplus.DrawArrays( GL.GL_POINTS, len( v))
#GL.glDisableClientState( GL.VERTEX_ARRAY)
GL.glFlush()
GLUT.glutSwapBuffers()
def keyboard( self, key, x, y):
print '--> keyboard( %s <%c>, %i, %i)' % ( key, chr( key), x, y)
import sys
sys.exit()
def setup_viewport(self):
GL.glMatrixMode( GL.GL_PROJECTION)
GL.glLoadIdentity()
GL.glOrtho( 0.0, 1.0, 0.0, 1.0, 0.0, 1.0)
def reshape(self, w, h):
GL.glViewport( 0, 0, w, h)
self.setup_viewport()
def test_dots(self):
GLUT.glutInit( sys.argv)
GLUT.glutInitDisplayMode( GLUT.GLUT_DOUBLE | GLUT.GLUT_RGB)
GLUT.glutInitWindowSize( 300, 300)
GLUT.glutCreateWindow( 'Dots')
self.setup_viewport()
GLUT.glutReshapeFunc( self.reshape)
GLUT.glutDisplayFunc( self.display)
GLUT.glutIdleFunc( None)
GLUT.glutIdleFunc( self.display)
GLUT.glutKeyboardFunc( self.keyboard)
GLUT.glutTimerFunc(1000, self.exitloop, 0)
GLUT.glutMainLoop()
if __name__ == '__main__':
    # Run only the TestDots case, verbosely.
    test_cases = ['TestDots']
    unittest.main(argv=([__name__, '-v'])+test_cases )
|
import turtle
# Single shared turtle used by the drawing routines below.
bob = turtle.Turtle()
print(bob)
def koch(t, length, n):
    """Draw one Koch-style segment of depth n with turtle t.

    Depth 0 draws a straight segment; deeper levels recurse with the classic
    60deg/-120deg/60deg turn pattern (each recursion here is followed by an
    extra forward stroke, preserved from the original design).
    """
    if n == 0:
        # FIX: the base case drew with the global `bob` instead of the turtle
        # passed in, ignoring the `t` parameter.
        t.fd(length)
        return
    angle = 60
    koch(t, length, n - 1)
    t.fd(length)
    t.lt(angle)
    koch(t, length, n - 1)
    t.fd(length)
    t.rt(angle * 2)
    koch(t, length, n - 1)
    t.fd(length)
    t.lt(angle)
    koch(t, length, n - 1)
def snowflake(n):
    """Draw a 3-sided Koch-style snowflake of recursion depth n using bob."""
    turn_angle = 120
    t = bob
    # FIX: `30 / n**2` raised ZeroDivisionError for depth 0 (and the script
    # calls snowflake(0) below); draw undivided sides at full length instead.
    length = 30 if n == 0 else 30 / n**2
    for _ in range(3):
        koch(t, length, n)
        bob.rt(turn_angle)
snowflake(0)
|
class Solution(object):
    def reverseKGroup(self, head, k):
        """Reverse the nodes of a linked list k at a time and return the new
        head (LeetCode 25). A trailing group shorter than k keeps its order.
        Runs in O(n) time with O(1) extra space.
        """
        if not head or k == 1:
            return head
        # Dummy node simplifies re-linking when the head itself moves.
        dummy = ListNode(0)
        dummy.next, ptr, n = head, dummy, 0
        # First pass: count the nodes.
        while ptr.next:
            n += 1
            ptr = ptr.next
        pre = dummy
        # Reverse each full group of k via head-insertion: repeatedly detach
        # the node after `ptr` and splice it right after `pre`.
        while n >= k:
            ptr = pre.next
            for i in range(1, k):
                tmp = ptr.next
                ptr.next = tmp.next
                tmp.next = pre.next
                pre.next = tmp
            # `ptr` is now the tail of the reversed group.
            pre = ptr
            n -= k
        return dummy.next
from django.contrib import admin
from .models import Causa
from .models import Hallazgo
from .models import Agrupador
# Register your models here.
# Expose the tracking models in the Django admin with default ModelAdmins.
admin.site.register(Causa)
admin.site.register(Hallazgo)
admin.site.register(Agrupador)
|
from django import forms
from django.forms import ModelForm
from .models import Lesson
class LessonForm(forms.ModelForm):
    """Create/edit form for Lesson; renders `time` with an HTML5
    datetime-local input."""
    class Meta:
        model = Lesson
        fields = ('name', 'time', 'image')
        widgets = {
            'time':forms.TextInput(attrs={'type':'datetime-local'}),
        }
import unittest
from repository.task import TaskRepository
from dao.database import Database
from model.project import Project
from model.task import Task
from model.user import User
class Test(unittest.TestCase):
    """Ad-hoc integration checks for TaskRepository against a live Database.

    NOTE(review): there are no assertions and several bodies are commented
    out — these only exercise the calls. `print ret` in test6 is Python 2
    syntax, so this file targets Python 2.
    """
    def setUp(self):
        self.repo = TaskRepository(Database())
    def tearDown(self):
        pass
    def test1(self):
        result = self.repo.find_one("project1", 1)
        #print(result)
    def test2(self):
        result = self.repo.fetch_task_list()
        for item in result:
            print(item)
    def test3(self):
        pass
        #task = self.repo.get_latest(1)
        #print(task)
    def test4(self):
        project = Project(1, None, None, None, None, None)
        task = Task(None, "task", None, "2015-01-31 0:00:00", None, "aaaaa", None)
        user = User(1, None)
        #self.repo.create_task(project, task, user)
    def test5(self):
        task = Task(3, "task2", None, "2015-01-31 0:00:00", None, "bbbbb", 1)
        user = User(1, None)
        #self.repo.update_task(task, user)
    def test6(self):
        ret = self.repo.get_latest(1)
        print ret
if __name__ == '__main__':
    unittest.main()
|
import struct
import time
import heapq
import re
import socket
import requests
import json
from enum import Enum
from hashlib import md5
from error import *
from urllib import parse
from collections import deque
import uuid
import math
#小型日志系统
class Log(object):
    """Minimal logging helper.

    method 0 (default): print to stdout.
    any other method: write to the file named by ``filename``.
    Severity is an index into ``infoType`` (defaults: DEBUG, ERROR, FATAL).
    """

    def __init__(self, LogName, method=0, filename='', infoType=('DEBUG', 'ERROR', 'FATAL')):
        # A non-zero method requires a destination file.
        if method != 0 and filename == '':
            exit('Usage : Log() or Log(1, filename) ...')
        elif filename != '':
            try:
                self.__file = open(filename, 'w')
            except:
                exit('Wrong file name!')
        self.__method = method
        self.__infoLevel = infoType
        self.__name = LogName

    def write(self, msg, level=0):
        # Timestamped format intentionally disabled; plain "<name> : LEVEL msg".
        line = ('%s : %s %s') % (self.__name, self.__infoLevel[level], msg)
        if self.__method == 0:
            print(line)
        else:
            self.__file.write(line + '\n')

    def close(self):
        # Only file-backed logs hold a resource to release.
        if self.__method != 0:
            self.__file.close()
#定时器,有待加工
class Timer(object):
    """Timer queue built on a min-heap of (absolute_deadline, danmu_sock)."""

    def __init__(self):
        self.log = Log('Timer')
        self.heap = []

    def addTimer(self, danmu_sock, timeout_intern, current_time, absolute=False):
        """Schedule *danmu_sock*. *timeout_intern* is an absolute deadline when
        absolute=True, otherwise an interval added onto *current_time*."""
        if absolute == True:
            timeout = timeout_intern
        else:
            timeout = current_time + timeout_intern
        heapq.heappush(self.heap, (timeout, danmu_sock))
        self.log.write(danmu_sock.url + ' has joined successfully')

    def delTimer(self, danmu_sock, current_time):
        """Remove the entry for *danmu_sock* (expired or not); True on success."""
        for index, entry in enumerate(self.heap):
            if entry[1] == danmu_sock:
                del self.heap[index]
                # FIX: deleting an arbitrary element can break the heap
                # invariant, corrupting the order of later pushes/pops;
                # restore it explicitly.
                heapq.heapify(self.heap)
                self.log.write('delete %s success' % danmu_sock.url)
                return True
        return False

    def getPopTopTimer(self):
        """Pop and return the earliest (deadline, sock) pair, or None if empty."""
        timer = None
        try:
            timer = heapq.heappop(self.heap)
            self.log.write('still %d timers' % len(self.heap))
        except IndexError:  # was a bare except; heappop on empty raises IndexError
            self.log.write('No timer exists! %d' % len(self.heap))
        return timer

    def isTopTimeOut(self, current_time):
        """Return True when the earliest deadline is <= current_time."""
        if self.isEmptyTimer():
            return False
        return self.heap[0][0] <= current_time

    def isEmptyTimer(self):
        return len(self.heap) == 0
# Connection state of a danmu link: closed (just created, before the connect
# succeeds), connecting (actively relaying danmu), closing (the client asked
# to close, but we close lazily in case the room is wanted again soon).
class DMSStatus(Enum):
    closed = 0
    connecting = 1
    closing = 2
# Pending timeout event for a connection: heartbeat keep-alive, or the
# delayed-close event (which exists precisely because closing is deferred).
class DMTEventType(Enum):
    nothing = 0
    keepAlive = 1
    closing = 2
def getMd5Hex(string):
    """Return the hex MD5 digest of *string* (a bytes-like object)."""
    # md5() accepts the data directly; no need for separate update() calls.
    return md5(string).hexdigest()
#分3种连接,一个是监听的,一个是ws那边的,另外n个是dm server那边的
#下面这个是dm server的
class DanmuSocket(object):
    """One connection to a danmu (bullet-comment) server for a single room URL."""

    def __init__(self, url, keepAliveIntern=40):
        # room URL this connection serves
        self.url = url
        # Connection state; `closing` exists because a just-closed room is
        # fairly likely to be reopened soon, so the real close is delayed.
        # `closed` covers the window between object creation and a successful
        # connect to the danmu server.
        self.status = DMSStatus.closed
        # absolute time at which to really close; must also account for the
        # ws server dropping mid-way
        self.closingTimeout = -1
        # close-delay interval
        self.delayTimeout = -1
        # Key of this connection in the danmu pool. Originally an MD5 of the
        # url (easy to look up when a closing room is requested again);
        # now the socket fileno, set in getDanmuServerSock.
        #self.md5Mark = getMd5Hex(url)
        self.Mark = -1
        # underlying socket
        self.sock = None
        # helper that knows how to connect to the danmu server for this url
        self.operation = Douyu_DanMuGet(url)
        # pending timeout event: heartbeat or delayed close
        self.timeoutEventType = DMTEventType.nothing
        # callback bound to the timeout event
        self.timeoutEvent = None
        # heartbeat interval in seconds
        self.keepAliveIntern = keepAliveIntern

    def getDanmuServerSock(self):
        # Connect to the danmu server and mark this object as connecting.
        self.operation.get_verify_server_list()
        self.operation.get_username_gid()
        self.sock = self.operation.get_danmu_server_sock()
        self.Mark = self.sock.fileno()
        self.status = DMSStatus.connecting

    # Instances live inside the Timer's priority queue, so they must be
    # comparable; equal deadlines are broken arbitrarily by Mark — the
    # exact order of ties does not matter here.
    def __eq__(self, other):
        return self.Mark == other.Mark
    def __ge__(self, other):
        return self.Mark >= other.Mark
    def __gt__(self, other):
        return self.Mark > other.Mark
    def __le__(self, other):
        return self.Mark <= other.Mark
    def __lt__(self, other):
        return self.Mark < other.Mark
#Danmu connection pool: just a dict.
#By default the socket fileno is the key and the DanmuSocket the value.
class DanmuSocket_Pool(dict):
    """dict of live DanmuSocket objects with attribute-style access.

    Keys are socket filenos, values DanmuSocket instances; ``pool.x`` reads
    ``pool['x']`` and yields None when the key is absent or stored as None.
    """
    def __setattr__(self, key, value):
        self[key] = value
    def __getattr__(self, item):
        # Bug fix: dict.has_key() was removed in Python 3 and raised
        # AttributeError here; membership is tested with 'in' instead.
        if item in self and self[item] != None:
            return self[item]
        else:
            return None
#All connections; this goes straight into the select() family, so it is a list.
class SocketPool(list):
    '''All of the connected socket array (usable directly with select()).'''
    def __init__(self, li=()):
        # Bug fix: the default used to be the mutable literal [] -- the
        # classic shared-default pitfall.  An empty tuple initializes the
        # list identically and cannot be mutated between calls.
        super(SocketPool, self).__init__(li)
#Only a single ws server is supported here; multiple ws servers would need a pool and a data fan-out scheme, which is out of scope.
class BaseServer(object):
    """Select-style server: listening socket, one ws client and the pool of
    danmu-server connections.

    Raises WSTypeError when addr/port have the wrong type and WSError when
    their values are invalid.
    """
    def __init__(self, addr, port):
        self.log = Log('BaseServer')
        # Sockets watched for readability.
        self.inSockets = SocketPool()
        # Sockets watched for writability -- only the ws server is watched;
        # dm-server heartbeats are small and sent without select.
        self.outSockets = SocketPool()
        # Pool of DanmuSocket objects keyed by fileno.
        self.Danmus = DanmuSocket_Pool()
        # The single supported ws server connection and its write state.
        self.ws_sock = None
        self.ws_sock_writeEvent = False
        self.ws_sock_isWriteLeft = False
        self.ws_sock_writeLeft = ''
        # Timer driving heartbeats and deferred closes.
        self.mainTimer = Timer()
        if isinstance(addr, str):
            if addr == 'localhost':
                pass
            else:
                a = re.match(r'([0-9]{1,3}\.){3}[0-9]{1,3}', addr)
                # Bug fix: a is None when nothing matches; the old code then
                # crashed with AttributeError on a.group().  Raise the
                # intended WSError in both failure cases.
                if a is None or a.group() != addr:
                    raise WSError('Not valid address')
        else:
            raise WSTypeError('Address type error.')
        if isinstance(port, int):
            if port > 1024 and port < 65536:
                pass
            else:
                raise WSError('Not valid port.')
        else:
            raise WSTypeError('Port type error.')
        self.Host = self.__createHost((str(addr), int(port)))
        self.inSockets.append(self.Host)
    def __createHost(self, ap):
        # Create, bind and listen on the accepting socket.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(ap)
        sock.listen(100)
        return sock
#Every Douyu message shares one uniform wire format, wrapped by the class below.
class message(object):
    """One Douyu protocol frame.

    Wire format (little-endian): int32 length, int32 length repeated,
    int32 magic (0x2b1), UTF-8 payload, one 0x00 terminator byte.  The
    length field counts payload bytes + 9 (repeated length + magic + end).
    """
    def __init__(self, content):
        # Bug fix: the length header must count the UTF-8 *bytes* of the
        # payload, not its characters.  len(content) undercounted for any
        # non-ASCII content, producing corrupt frames.
        self.dateLen = len(content.encode('utf-8')) + 9
        self.dateLenRepeat = self.dateLen
        self.magicCode = 0x2b1
        self.content = content
        self.end = 0x0
    def getMsg(self):
        """Serialize the frame to bytes ready to send."""
        str2send = struct.pack('iii', self.dateLen, self.dateLenRepeat, self.magicCode) + self.content.encode('utf-8') + struct.pack('b', self.end)
        return str2send
#Fetches danmu (bullet comments).
class Douyu_DanMuGet(object):
    """Performs the Douyu danmu handshake for one room url:
    room page -> verify-server list -> username/gid -> danmu-server socket.
    """
    def __init__(self, url, logMethod=0, logFilename=''):
        self.url = url
        self.log = Log('Douyu_DanMuGet', logMethod, logFilename)
    def uuid_str(self):
        # uuid1 produces 'xxx-xxx-xxx'; strip the dashes to get 'xxxxxxx'.
        uuidStrInit = str(uuid.uuid1())
        uuid_list = uuidStrInit.split('-')
        uuidStr = ''.join(uuid_list)
        return uuidStr
    def msg_loginreq(self):
        """Build the loginreq payload sent to the verify server."""
        # Room id.
        roomid = self.roomId
        # Device id: an arbitrary uuid is accepted by the server.
        devid = self.uuid_str()
        # Timestamp.
        rt = str(int(time.time()))
        # Signature recipe borrowed from other reverse-engineered clients.
        vk_str = rt + '7oE9nPEG9xXV69phU31FYCLUagKeYtsF' + devid
        vk_need_md5 = vk_str.encode()
        m = md5()
        m.update(vk_need_md5)
        vk = m.hexdigest()
        content = 'type@=loginreq/username@=/ct@=0/password@=/roomid@='+str(roomid)+'/devid@='+devid+'/rt@='+rt+'/vk@='+vk+'/ver@=20150929/'
        return content
    # Fetch the verify-server list (it appears to change each time the
    # streamer's page is opened).
    def get_verify_server_list(self):
        # Download the whole room page.
        wholePage = requests.get(self.url)
        # Extract the two embedded page variables.
        roomInfo = ''
        roomArgs = ''
        infoNeeded = re.search(r'\$ROOM = (.*?);\r.*?\$ROOM.args = (.*?);\r', wholePage.text, re.S)
        if infoNeeded is None:
            self.log.write('Not found roomInfo or roomArgs', 2)
            exit(-1)
        else:
            roomInfo = infoNeeded.group(1)
            roomArgs = infoNeeded.group(2)
        self.log.write('roomInfo : ' + roomInfo)
        self.log.write('roomArgs : ' + roomArgs)
        # The strings are JSON-shaped, so parse them with json.
        self.roomInfo = json.loads(roomInfo)
        self.roomArgs = json.loads(roomArgs)
        # Extract the room id.  Besides room_id, show_status may be useful:
        # 1 means the streamer is live, 2 means offline.
        self.roomId = self.roomInfo['room_id']
        # Extract the verify-server address list.
        servers_address_list_str = parse.unquote(self.roomArgs['server_config'])
        self.servers_address_list = json.loads(servers_address_list_str)
        self.log.write(str(self.servers_address_list))
    # server_list must look like
    # [{'ip': 'x.x.x.x', 'port': 'nnnn'}, {'ip': ..., 'port': ...}, ...]
    def connect_2_server(self, server_list):
        """Try each server in turn; return a connected socket or None."""
        chosen_server_index = 0
        while True:
            if chosen_server_index == -1 or chosen_server_index == len(server_list):
                break
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            ipaddr_port = (server_list[chosen_server_index]['ip'], int(server_list[chosen_server_index]['port']))
            try:
                # connect takes a (str addr, int port) tuple.
                sock.connect(ipaddr_port)
                chosen_server_index = -1
            except:
                sock.close()
                chosen_server_index += 1
                sock = None
        return sock
    # Because of TCP coalescing ("sticky packets"), one received chunk may
    # hold several messages; this decodes one complete buffered message.
    def deal_with_recved_bytes_main(self, content):
        # content_cp = content
        # try:
        #     msgList = []
        #     total_len = len(content)
        #     len_addressed = 0
        #     while True:
        #         len_prev = struct.unpack('i', content[0:4])[0]+4
        #         str_ret = content[12:len_prev].decode()
        #         len_addressed += len_prev
        #         msgList.append(str_ret)
        #
        #         if len_addressed == total_len:
        #             break
        #
        #         content = content[len_prev:]
        #     return msgList
        # except:
        #     self.log.write('unpack or decode error...')
        #     self.log.write(content_cp)
        #     return []
        # With the BytesBuffer stream buffer in place, the handling above
        # simplifies to decoding a single complete message.
        try:
            len_content = len(content)
            str_ret = content[12:len_content-1].decode()
            return str_ret
        except:
            self.log.write('content decode error------')
            self.log.write(content)
            return None
    # The variant below was said to be obsolete, but it has worked fine on
    # the danmu-server connection, which we do not want to touch -- so it
    # stays.  The function above (renamed) is the one used by server.py.
    def deal_with_recved_bytes(self, content):
        content_cp = content
        try:
            msgList = []
            total_len = len(content)
            len_addressed = 0
            while True:
                len_prev = struct.unpack('i', content[0:4])[0]+4
                str_ret = content[12:len_prev].decode()
                len_addressed += len_prev
                msgList.append(str_ret)
                if len_addressed == total_len:
                    break
                content = content[len_prev:]
            return msgList
        except:
            self.log.write('unpack or decode error...')
            self.log.write(content_cp)
            return []
    # Obtain the danmu username and group id from the verify server.
    def get_username_gid(self):
        content = self.msg_loginreq()
        # Not yet encoded.
        msg2send = message(content)
        # Connect to one of the verify servers.
        self.verify_sock = self.connect_2_server(self.servers_address_list)
        if self.verify_sock == None:
            self.log.write('Failed to connect verify server!', 2)
            exit(-1)
        # Send the login request to the verify server.
        self.verify_sock.send(msg2send.getMsg())
        # Plain blocking reads suffice here (no select / threads): we only
        # need username and gid, which arrive within two chunks.  The first
        # chunk also carries the danmu server address/port, but those are
        # fixed and well known, so they are ignored.
        self.username = ''
        self.gid = ''
        for i in range(2):
            content_recv = self.verify_sock.recv(1024)
            msgList = self.deal_with_recved_bytes(content_recv)
            for respond in msgList:
                if self.username == '':
                    username_re = re.search(r'username@=(.*?)/', respond)
                    if username_re is not None:
                        self.username = username_re.group(1)
                if self.gid == '':
                    gid_re = re.search(r'gid@=(.*?)/', respond)
                    if gid_re is not None:
                        self.gid = gid_re.group(1)
        if self.username == '' or self.gid == '':
            self.log.write('username or gid is null !', 2)
            exit(-1)
        self.verify_sock.close()
        self.log.write('username : ' + self.username + ' and gid : ' + self.gid)
    # Connect to the danmu server and send the auth + joingroup messages.
    def get_danmu_server_sock(self):
        # The danmu-server login is much simpler than the verify one, so
        # the message is built inline rather than in its own method.
        content = "type@=loginreq/username@=" + self.username + "/password@=1234567890123456/roomid@=" + str(self.roomId) + "/"
        msg2send = message(content)
        # The danmu server list is fixed.
        portList = [8061, 8062, 12601, 12602]
        danmu_server_list = []
        for portNum in portList:
            ip_port = {}
            ip_port['ip'] = 'danmu.douyutv.com'
            ip_port['port'] = portNum
            danmu_server_list.append(ip_port)
        self.danmu_server_sock = self.connect_2_server(danmu_server_list)
        self.danmu_server_sock.send(msg2send.getMsg())
        content = "type@=joingroup/rid@=" + str(self.roomId) + "/gid@="+self.gid+"/"
        msg2send = message(content)
        self.danmu_server_sock.send(msg2send.getMsg())
        return self.danmu_server_sock
    # def danmu_get(self):
    #     while True:
    #         content_recv = self.danmu_server_sock.recv(1024)
    #         msgList = self.deal_with_recved_bytes(content_recv)
    #         for msg in msgList:
    #             print(msg)
    # Heartbeat message to keep the danmu connection alive.
    def keep_alive_package(self):
        content = "type@=mrkl/"
        msg2send = message(content)
        self.danmu_server_sock.send(msg2send.getMsg())
    # Full standalone receive loop (superseded by the server event loop):
    # def run(self):
    #     self.get_verify_server_list()
    #     self.get_username_gid()
    #     self.get_danmu_server_sock()
    #
    #     self.log.write('danmu reception started...')
    #     recv_socks = []
    #     recv_socks.append(self.danmu_server_sock)
    #     timeout = 40
    #     timeout_absolute = 40 + int(time.time())
    #     while True:
    #         rs, ws, es = select.select(recv_socks, [], [], timeout)
    #
    #         for s in rs:
    #             if s == self.danmu_server_sock:
    #                 content_recv = self.danmu_server_sock.recv(1024)
    #                 msgList = self.deal_with_recved_bytes(content_recv)
    #                 for msg in msgList:
    #                     u,c = self.extractMainContent(msg)
    #                     print(u + ':' + c)
    #
    #         time.sleep(5)
    #         currentTimeSec = int(time.time())
    #         if currentTimeSec > timeout_absolute:
    #             self.keep_alive_package()
    #             timeout_absolute = 40 + currentTimeSec
    # Incoming chat messages look like:
    # type@=chatmsg/rid@=7911/uid@=3949372/nn@=<user>/txt@=<text>/cid@=.../level@=2/
    # We extract the user name (nn) and the chat text (txt).
    def extractMainContent(self, msg):
        """Return (username, text) for a chatmsg; None for any other type."""
        msg_list = msg.split('/')
        if len(msg_list) < 5:
            return None
        typeStr = msg_list[0]
        typeData = typeStr.split('=')[1]
        # gift, ranklist and other message types are dropped here.
        if typeData != 'chatmsg':
            return None
        usernameStr = msg_list[3]
        contentStr = msg_list[4]
        username = usernameStr.split('=')[1]
        content = contentStr.split('=')[1]
        return username, content
#Buffer of messages waiting to be sent to the ws server.
class MessageListBuffer(object):
    """Buffer of messages waiting to be pushed to the ws server.

    Messages are stored in one deque per room URL so that a single chatty
    room cannot monopolise the whole buffer.
    """
    def __init__(self, totalMax, singleMax):
        self.__totalMax = totalMax
        self.__singleMax = singleMax
        self.__log = Log('MessageListBuffer')
        # Maps room url -> deque of formatted message strings.
        self.__dic = dict()
    def appendItem(self, url, type, username, message):
        """Queue one message for *url*; drop it when a limit is reached."""
        if self.lengthItem() >= self.__totalMax:
            self.__log.write('too many messages in buffer !', 1)
            return
        # 'type' is the message kind (chat, gift, ...).
        content = '@type=' + str(type) + '/@username=' + username + '/@msg=' + message
        queue = self.__dic.get(url)
        if queue is None:
            # First message for this room: create its queue.
            queue = deque()
            self.__dic[url] = queue
            queue.append(content)
        elif len(queue) >= self.__singleMax:
            self.__log.write('too many for this room:'+url, 1)
        else:
            queue.append(content)
    def getPopItem(self):
        """Pop up to 15 messages from every room.

        Returns {} when the buffer is empty, otherwise
        {'urlCount': total, 'urlDict': {url: [msg, ...], ...}}.
        """
        if self.lengthItem() == 0:
            return {}
        msgBox = {'urlDict': {}}
        popped = 0
        # Cap per room so no single room floods one batch.
        perRoomLimit = 15
        for room, queue in self.__dic.items():
            if len(queue) == 0:
                continue
            take = min(len(queue), perRoomLimit)
            batch = [queue.popleft() for _ in range(take)]
            popped += take
            msgBox['urlDict'][room] = batch
        msgBox['urlCount'] = popped
        return msgBox
    def lengthItem(self):
        """Total number of buffered messages across all rooms."""
        return sum(len(queue) for queue in self.__dic.values())
    def hasKey(self, url):
        """True when a queue exists for *url*."""
        return url in self.__dic
    def deleteItem(self, url):
        """Drop a room's whole queue; True when something was removed."""
        if not self.hasKey(url):
            return False
        del self.__dic[url]
        return True
#Each connection needs its own byte-stream buffer, so a dict is used: {sock1: [bytes], sock2: [bytes], ...}
class BytesBuffer(dict):
    """Per-connection receive buffer: sock.Mark (fileno) -> bytearray.

    Every sock argument must be a DanmuSocket-like object exposing .Mark.
    """
    def __setattr__(self, sock, bytesBuffer):
        self[sock.Mark] = bytesBuffer
    def __getattr__(self, sock):
        # Bug fix: dict.has_key() was removed in Python 3 and raised
        # AttributeError here; use 'in' for membership instead.
        if sock.Mark in self and self[sock.Mark] != None:
            return self[sock.Mark]
        else:
            return None
    def appendData(self, sock, bytesContent):
        """Append raw bytes for this connection, creating its buffer lazily."""
        fn = sock.Mark
        if fn not in self:
            self[fn] = bytearray()
        self[fn].extend(bytesContent)
    def removeData(self, sock, len):
        """Drop the first *len* bytes of this connection's buffer.

        (Parameter name shadows the builtin; kept for interface stability.)
        """
        fn = sock.Mark
        if fn not in self:
            return False
        self[fn] = self[fn][len:]
    def getFullMsgList(self, sock):
        """Return every complete message currently buffered for *sock*.

        A message is a 4-byte little-endian length header followed by that
        many payload bytes; a partial trailing message stays in the buffer.
        """
        fn = sock.Mark
        msgList = []
        while True:
            currBufferLen = self.lengthOfBuffer(sock)
            # The length header itself needs 4 bytes.
            if currBufferLen < 4:
                return msgList
            # Body length.
            msgLen = struct.unpack('i', self[fn][0:4])[0]
            # Whole frame length (header + body).
            msgFullLen = msgLen + 4
            # Incomplete frame: stop parsing for now.
            if currBufferLen < msgFullLen:
                return msgList
            else:
                msgList.append(self[fn][0:msgFullLen])
                self.removeData(sock, msgFullLen)
    def lengthOfBuffer(self, sock):
        """Number of buffered bytes for this connection."""
        return len(self[sock.Mark])
if __name__ == '__main__':
    # Smoke test: three messages over two rooms -> lengthItem() prints 3.
    buf = MessageListBuffer(100, 10)
    for room in ('aaaaa', 'aaaaa', 'aaaaaccc'):
        buf.appendItem(room, 1, 'aaa', 'bbb')
    print(buf.lengthItem())
|
# Generated by Django 3.0.5 on 2020-08-19 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: turns csprule.cause into a choice field
    (extension/browser/malware/other) defaulting to 'other'."""
    dependencies = [
        ('csp_observer', '0005_auto_20200819_1908'),
    ]
    operations = [
        migrations.AlterField(
            model_name='csprule',
            name='cause',
            field=models.CharField(choices=[('extension', 'Browser Extension'), ('browser', 'Web Browser'), ('malware', 'Malware'), ('other', 'Other')], default='other', max_length=255),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# LingTime Copyright (C) 2012 suizokukan
# Contact: suizokukan _A.T._ orange dot fr
#
# This file is part of LingTime.
# LingTime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LingTime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LingTime. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
LingTime by suizokukan (suizokukan AT orange DOT fr)
________________________________________________________________________
tests/tests.py
to launch the tests :
$ nosetests
or
$ python -m unittest tests/tests.py
________________________________________________________________________
class
▪ GOYTests
▪ LingTimeTests
"""
import unittest
from lingtime.goy import GOY
from lingtime.chronology import Chronology
from lingtime.lingtime import LingTime
# Global year bounds used by every test below; "-∞/+∞" sources clip to them.
Chronology.minimal_year = -4999
Chronology.maximal_year = 2999
class GOYTests(unittest.TestCase):
    """Tests for lingtime/goy.py::GOY (group-of-years intervals)."""
    def test__goy(self):
        """Exercise has_been_set, is_a_momentum, comparisons and contains."""
        unset_a = GOY(1, -1)
        unset_b = GOY(0, 10)
        unset_c = GOY(10, 0)
        span = GOY(200, 220)
        span_twin = GOY(200, 220)
        overlap = GOY(210, 230)
        disjoint = GOY(300, 400)
        moment = GOY(212, 212)
        near_moment = GOY(212, 213)
        # Table-driven form of the original one-assert-per-line checks.
        for actual, expected in (
                (unset_a.has_been_set(), False),
                (unset_b.has_been_set(), False),
                (unset_c.has_been_set(), False),
                (span.is_a_momentum(), False),
                (span.has_been_set(), True),
                (span.is_a_momentum(), False),
                (moment.is_a_momentum(), True),
                (near_moment.is_a_momentum(), False),
                (span == span, True),
                (span == span_twin, True),
                (span != overlap, True),
                (span != span_twin, False),
                (span < span_twin, False),
                (span <= span_twin, True),
                (span < overlap, False),
                (span < disjoint, True),
                (span > span_twin, False),
                (span >= span_twin, True),
                (span > overlap, True),
                (span > disjoint, False),
                (span.contains(span), True),
                (span.contains(span_twin), True),
                (span.contains(overlap), False),
                (span.contains(disjoint), False),
                (span.contains(moment), True),
                (span.contains(near_moment), True),
                (span_twin.contains(span), True),
                (span_twin.contains(span_twin), True),
                (span_twin.contains(overlap), False),
                (span_twin.contains(disjoint), False),
                (span_twin.contains(moment), True),
                (span_twin.contains(near_moment), True),
        ):
            self.assertEqual(actual, expected)
class LingTimeTests(unittest.TestCase):
    """Tests for lingtime/lingtime.py::LingTime constraint resolution."""
    def test001(self):
        """law1 constrained inside P1 (1-100) and P2 (20-30) resolves to 20-30."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law1", Chronology(constraints=["in P1", "in P2"]))
        lt.add("P1", Chronology(src="1 100"))
        lt.add("P2", Chronology(src="20 30"))
        success = lt.set_chronology()
        self.assertEqual(success, True)
        self.assertEqual(lt["P1"].goy, GOY(1, 100))
        self.assertEqual(lt["P2"].goy, GOY(20, 30))
        self.assertEqual(lt["law1"].goy, GOY(20, 30))
    def test002(self):
        """Same as test001 but law1 is a momentum: it collapses to 25-25."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law1", Chronology(constraints=["in P1", "in P2", "momentum"]))
        lt.add("P1", Chronology(src="1 100"))
        lt.add("P2", Chronology(src="20 30"))
        success = lt.set_chronology()
        self.assertEqual(success, True)
        self.assertEqual(lt["P1"].goy, GOY(1, 100))
        self.assertEqual(lt["P2"].goy, GOY(20, 30))
        self.assertEqual(lt["law1"].goy, GOY(25, 25))
    def test003(self):
        """An unbounded P1 (-∞..+∞) clips to the configured year range."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law1", Chronology(constraints=["in P1", "in P2", "momentum"]))
        lt.add("P1", Chronology(src="-∞ +∞"))
        lt.add("P2", Chronology(src="20 30"))
        success = lt.set_chronology()
        self.assertEqual(success, True)
        self.assertEqual(lt["P1"].goy,
                         GOY(Chronology.minimal_year, Chronology.maximal_year))
        self.assertEqual(lt["P2"].goy, GOY(20, 30))
        self.assertEqual(lt["law1"].goy, GOY(25, 25))
    def test004(self):
        """Chained ante/post constraints across three laws and four periods."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law1", Chronology(constraints=["in P1", "in P2", "in P4"]))
        lt.add("law2", Chronology(constraints=["ante law1", "in P1", "momentum"]))
        lt.add("law3", Chronology(constraints=["in P1", "post law2", "momentum"]))
        lt.add("P1", Chronology(src="1 100"))
        lt.add("P2", Chronology(src="20 30"))
        lt.add("P3", Chronology(src="-∞ 30"))
        lt.add("P4", Chronology(src="-∞ +∞"))
        success = lt.set_chronology()
        self.assertEqual(success, True)
        self.assertEqual(lt["P1"].goy, GOY(1, 100))
        self.assertEqual(lt["P2"].goy, GOY(20, 30))
        self.assertEqual(lt["P3"].goy, GOY(Chronology.minimal_year, 30))
        self.assertEqual(lt["P4"].goy,
                         GOY(Chronology.minimal_year, Chronology.maximal_year))
        self.assertEqual(lt["law1"].goy, GOY(20, 30))
        self.assertEqual(lt["law2"].goy, GOY(10, 10))
        self.assertEqual(lt["law3"].goy, GOY(55, 55))
    def test005(self):
        """Same as test004 with definitions added in a different order."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law3", Chronology(constraints=["in P1", "post law2", "momentum"]))
        lt.add("law2", Chronology(constraints=["ante law1", "in P1", "momentum"]))
        lt.add("law1", Chronology(constraints=["in P1", "in P2", "in P4"]))
        lt.add("P1", Chronology(src="1 100"))
        lt.add("P4", Chronology(src="-∞ +∞"))
        lt.add("P2", Chronology(src="20 30"))
        lt.add("P3", Chronology(src="-∞ 30"))
        success = lt.set_chronology()
        self.assertEqual(success, True)
        self.assertEqual(lt["P1"].goy, GOY(1, 100))
        self.assertEqual(lt["P2"].goy, GOY(20, 30))
        self.assertEqual(lt["P3"].goy, GOY(Chronology.minimal_year, 30))
        self.assertEqual(lt["P4"].goy,
                         GOY(Chronology.minimal_year, Chronology.maximal_year))
        self.assertEqual(lt["law1"].goy, GOY(20, 30))
        self.assertEqual(lt["law2"].goy, GOY(10, 10))
        self.assertEqual(lt["law3"].goy, GOY(55, 55))
    def test006(self):
        """Two mutually self-referential laws cannot be resolved."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law1", Chronology(constraints=["post law1", ]))
        lt.add("law2", Chronology(constraints=["ante law2", ]))
        success = lt.set_chronology()
        self.assertEqual(success, False)
    def test007(self):
        """A single self-referential law cannot be resolved."""
        lt = LingTime()
        LingTime.lingtime = lt
        lt.add("law1", Chronology(constraints=["post law1", ]))
        success = lt.set_chronology()
        self.assertEqual(success, False)
|
# simple example for a neural net
# taken and tested from
# https://dev.to/shamdasani/build-a-flexible-neural-network-with-backpropagation-in-python
from neural_net import NeuralNet
import numpy as np
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works on scalars and numpy arrays."""
    # Bug fix: the original called bare exp(), which is never imported and
    # raised NameError at runtime; np.exp handles scalars and arrays alike.
    return 1 / (1 + np.exp(-x))
def sigmoid_derivate(x):
    """Derivative of the sigmoid, expressed through its own value: s*(1-s)."""
    s = sigmoid(x)
    return s * (1 - s)
# Training data: 6 samples of 2 features; targets are scaled scores.
x = np.array(([2, 9], [1, 5], [3, 6], [4, 4], [5, 3], [4, 4.1]), dtype=float)
t = np.array(([0.92], [0.86], [0.89], [0.90], [0.93], [0.95]))
# One manual forward/backward pass with random 2-3-1 weights.
W1 = np.random.randn(3, 2)
W2 = np.random.randn(1, 3)
# Forward pass: hidden activations z1, network output z2.
a1 = W1.dot(x.T)
z1 = sigmoid(a1)
a2 = W2.dot(z1)
z2 = sigmoid(a2)
y = z2.T
# Backward pass: output error/delta, then propagate to the hidden layer.
# NOTE(review): sigmoid_derivate is applied to the *activations* (already
# sigmoid-ed values), matching the tutorial this was taken from -- verify
# against the linked article if results look off.
y_error = (t - y).T
y_delta = y_error * sigmoid_derivate(y.T)
W2 += z1.dot(y_delta.T).T
z1_error = W2.T.dot(y_error)
z1_delta = z1_error * sigmoid_derivate(z1)
W1 += x.T.dot(z1_delta.T).T
# Same experiment via the NeuralNet class: one pass, then 10 training steps.
neural_net = NeuralNet()
y = neural_net.forward(x)
neural_net.backward(x, t, y)
print("Predicted Output: " + str(y))
print("Acutal Output: " + str(t))
for b in range(10):
    neural_net.train(x, t)
y = neural_net.forward(x)
print("Predicted Output: " + str(y))
print("Acutal Output: " + str(t))
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-23 00:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: Character, Membership, Organization
    and Place models, plus the Organization/Membership foreign keys."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Character',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('name_jp', models.CharField(blank=True, max_length=200)),
                ('age', models.IntegerField(blank=True)),
                ('status', models.CharField(max_length=7)),
                ('first_appearance', models.IntegerField()),
                ('desc', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.IntegerField()),
                ('character_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Character')),
            ],
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('name_jp', models.CharField(blank=True, max_length=200)),
                ('org_type', models.CharField(max_length=200)),
                ('active', models.CharField(max_length=7)),
                ('first_appearance', models.IntegerField()),
                ('desc', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('location', models.CharField(blank=True, max_length=200)),
                ('first_appearance', models.IntegerField()),
                ('desc', models.TextField(blank=True)),
            ],
        ),
        migrations.AddField(
            model_name='organization',
            name='base',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='base.Place'),
        ),
        migrations.AddField(
            model_name='organization',
            name='leader',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='base.Character'),
        ),
        migrations.AddField(
            model_name='membership',
            name='org_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Organization'),
        ),
    ]
|
# Run the pynput keyboard listener until it stops.  Assumes on_press and
# on_release callbacks are defined earlier in this script -- TODO confirm
# (they are not visible here).
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()
from random import randint
# Build a square board of 'O' cells with a random side length in [2, 6].
board = []
minOfSize = 2
maxOfSize = 6
numberOfChoice = 2  # number of turns per game
sizeOfBoard = randint(minOfSize, maxOfSize)
for i in range(sizeOfBoard):
    board.append(["O"] * sizeOfBoard)
def print_board(matrix):
    # Print the board, one blank line before each row (Python 2 print).
    for row in matrix:
        print ""
        print "", " ".join(row)
# Hide the ship at a random cell of the board.
ship_row = randint(0, sizeOfBoard - 1)
ship_col = randint(0, sizeOfBoard - 1)
# ship_pos = ship_row * sizeOfBoard + ship_col
# print ship_row + 1, ship_col + 1, ship_pos
def import_your_guess():
    # Read a 1-based (row, col) guess from stdin and return it 0-based.
    # abs() forgives negative input; values beyond the board are caught
    # later by check_condition.
    your_guess = []
    guess_row = abs(int(raw_input("You choice row: ")))
    guess_col = abs(int(raw_input("You choice col: ")))
    your_guess.append(guess_row - 1)
    your_guess.append(guess_col - 1)
    return your_guess
# this function is to check your choice in allowed area
def check_condition(condition):
    # True when both coordinates fall inside the board.  Return the
    # boolean expression directly instead of 'if cond: return True ...'.
    return condition[0] in range(sizeOfBoard) and condition[1] in range(sizeOfBoard)
print "Let's play Battleship!"
print_board(board)
for turn in range(numberOfChoice):
    # Player A guesses first each turn; hitting the ship ends the game.
    print "\nTurn", turn + 1, "of A player"
    Aguess = import_your_guess()
    if Aguess[0] == ship_row and Aguess[1] == ship_col:
        print "**A player wins**"
        break
    print "\nTurn", turn + 1, "of B player"
    Bguess = import_your_guess()
    if Bguess[0] == ship_row and Bguess[1] == ship_col:
        print "**B player wins**"
        break
    print "No one wins turn", turn + 1, "\n"
    # Report any out-of-board guesses before marking cells.
    if not check_condition(Aguess) and not check_condition(Bguess):
        print "Both A and B player guessed out of range"
    elif not check_condition(Aguess):
        print "A guessed out of range"
    elif not check_condition(Bguess):
        print "B guessed out of range"
    # Re-guessing an already-marked cell loses the game; otherwise mark it.
    if check_condition(Aguess):
        if (board[Aguess[0]][Aguess[1]] == "A") or board[Aguess[0]][Aguess[1]] == "B":
            print "**A player lose**"
            break
        else:
            board[Aguess[0]][Aguess[1]] = "A"
    if check_condition(Bguess):
        if (board[Bguess[0]][Bguess[1]] == "A") or board[Bguess[0]][Bguess[1]] == "B":
            print "**B player lose**"
            break
        else:
            board[Bguess[0]][Bguess[1]] = "B"
    if turn == (numberOfChoice - 1):
        # Last turn: reveal the ship location before game over.
        board[ship_row][ship_col] = "X"
        print_board(board)
        print "\n\n****Game Over****"
    else:
        print_board(board)
|
# Generated by Django 2.2.12 on 2020-04-25 11:33
from django.db import migrations, models
import django.db.models.deletion
import src.incidents.models
import uuid
class Migration(migrations.Migration):
    """Auto-generated: adds the Recipient model and an optional
    incident.recipient foreign key (DO_NOTHING on delete)."""
    dependencies = [
        ('incidents', '0042_auto_20200415_1504'),
    ]
    operations = [
        migrations.CreateModel(
            name='Recipient',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('sn_name', models.CharField(blank=True, max_length=200, null=True)),
                ('tm_name', models.CharField(blank=True, max_length=200, null=True)),
                ('email', models.CharField(blank=True, max_length=200, null=True)),
                ('telephone', models.CharField(blank=True, max_length=200, null=True)),
                ('mobile', models.CharField(blank=True, max_length=200, null=True)),
                ('address', models.CharField(blank=True, max_length=200, null=True)),
                ('district', models.CharField(blank=True, max_length=50, null=True)),
                ('gn_division', models.CharField(blank=True, max_length=50, null=True)),
                ('contact_type', models.CharField(blank=True, choices=[('INDIVIDUAL', 'Individual'), ('ORGANIZATION', 'Organization')], default=src.incidents.models.ContactType('Individual'), max_length=50, null=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('updated_date', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ('id',),
            },
        ),
        migrations.AddField(
            model_name='incident',
            name='recipient',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='incidents.Recipient'),
        ),
    ]
|
# --------------------------------------------------------------------
import os
import pickle
import shelve
# --------------------------------------------------------------------
with open ("input.txt", "r") as inp: # The with construct makes sure the file is closed automatically at the end of the with-block
    for line in inp:
        print (line, end='')
print ()
with open ("input.txt", "r") as inp:
    # Manual readline loop with a running line counter.
    i = 0
    line = inp.readline ()
    while line != '':
        i = i + 1
        print (line.rstrip ('\n'), "", i)
        line = inp.readline ()
print ("One", "Two", "Three", i) # Leaves a space in between
f = open ('binfile.bin', 'wb') # Read-write binary
written = f.write (b'0123456789abcdef')
f.close () # Could be left open...
f = open ('binfile.bin', 'rb') # Read
f.seek (5) # Go to the 6th byte in the file
onebyte = f.read (1) # Read 1 byte
f.seek (-3, 2) # Go to the 3rd byte before the end
print (f.tell ()) # Get current seek position
twobytes = f.read (2)
print (f.tell ()) # Position moves by two bytes (as expected)
f.close () # Needs closing as not in a with construct
# --------------------------------------------------------------------
poem = open ("input.txt").readlines() # Read all in one as a list
print (poem[2]) # To access the 3rd line
poem = open ("input.txt").read() # Read all in one as a string
print(poem[12:20]) # Access various chars in that string
# --------------------------------------------------------------------
cities = ["Paris", "Dijon", "Lyon", "Strasbourg"]
fh = open ("data.pkl", "bw")
pickle.dump (cities, fh) # Dumps an object into binary
fh.close ()
fh = open("data.pkl", "rb") # Read the dump file back
villes = pickle.load (fh)
print (villes)
s = shelve.open ("MyShelve") # Pickle reads everything back, but we only need certain bits
s["street"] = "Fleet Str"
s["city"] = "London"
s.close ()
s = shelve.open ("MyShelve") # This could be opened in another py script
print (s["street"])
# --------------------------------------------------------------------
os.system ("pause") # NOTE(review): Windows-only; a no-op/error elsewhere
|
# Copyright 2018 Nicholas Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask
from flask_cors import CORS
from galini_dashboard.API.blueprints.logs import create_logs_blueprint
def create_app():
    """Application factory: build the Flask app with CORS enabled and the
    logs blueprint mounted under /logs.

    Reads the log directory from the GALINI_LOGS_DIR environment variable
    (raises KeyError when unset).
    """
    logs_dir = os.environ['GALINI_LOGS_DIR']
    application = Flask(__name__)
    CORS(application)
    application.register_blueprint(create_logs_blueprint(logs_dir), url_prefix="/logs")
    return application
|
# Standard imports
import requests as r
from bs4 import BeautifulSoup as soup
import pandas as pd
# Scrape Mars news headline and teaser copy.
def marsNasaNewsScrape():
    """Return (title, teaser) for the latest article on mars.nasa.gov/news."""
    page = soup(r.get('https://mars.nasa.gov/news/').text, 'html.parser')
    container = page.find('div', {'class': 'image_and_description_container'})
    # The second <img> alt text carries the headline.
    news_title = container.find_all('img')[1]['alt']
    news_p = container.find('div', {'class': 'rollover_description_inner'}).text.strip()
    return news_title, news_p
# Scrape Mars weather from Twitter account.
def marsWeatherScrape():
    """Return the weather tweet text from the pinned MarsWxReport status."""
    tweet_url = 'https://twitter.com/MarsWxReport/status/966145463425650694'
    tweet_page = soup(r.get(tweet_url).text, 'html.parser')
    containers = tweet_page.find_all('div', {'class': 'js-tweet-text-container'})
    return containers[0].find('p').text
# Scrape Mars featured image from JPL website.
def marsImageScrape():
    """Return the absolute URL of JPL's current featured Mars image."""
    search_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    jpl_page = soup(r.get(search_url).text, 'html.parser')
    # The image path lives in the inline style attribute of the carousel article.
    style_attr = jpl_page.find('div', {'class': 'carousel_container'}).find('article')['style']
    relative_path = style_attr.split('\'')[1]
    return 'https://www.jpl.nasa.gov/' + relative_path
# Scrape photos of the Mars Hemispheres and store in a list of dictionaries.
def marsHemiScrape():
    """Return [{'title': ..., 'img_url': ...}, ...] for the four Mars hemispheres."""
    search_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    results_page = soup(r.get(search_url).text, 'html.parser')
    titles = [block.text for block in results_page.find_all('div', {'class': 'description'})]
    # Full-resolution downloads are not linked from the results page, so the
    # URLs are hard-coded in the same order the titles are listed.
    full_image_urls = [
        'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg',
        'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg',
        'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg',
        'https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg',
    ]
    return [
        {'title': title, 'img_url': img_url}
        for title, img_url in zip(titles, full_image_urls)
    ]
# Use Pandas to scrape the data from the Mars table.
def marsFactsScrape():
    """Return the Mars facts table from space-facts.com as a one-line HTML string."""
    facts_frame = pd.read_html('https://space-facts.com/mars/')[0]
    facts_frame.columns = ['description', 'value']
    return facts_frame.to_html().replace('\n', '')
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from . forms import TermForm, ReportForm, ActivityForm, StrandForm, SubStrandForm, ObjectiveForm, AssessmentForm, \
TARForm, TSForm, TRForm
from . models import Term, Report, Activity, Strand, SubStrand, Objective, Assessment, TermActivityRemark, TermRemark, \
TermSummary
from django.views import View
@method_decorator(login_required, name='dispatch')
class TermAdd(View):
    """Create a new Term; every action is restricted to the cbc_manager group."""
    template_name = 'CBC/term_add.html'
    def get(self, request):
        """Render a blank term form for managers; bounce everyone else home."""
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            form = TermForm()
            return render(request, self.template_name, {'form': form})
        else:
            # F541 fix: these messages have no placeholders, so no f-prefix.
            messages.error(request, 'Sorry ! Access denied')
            return redirect('home')
    def post(self, request):
        """Validate and save a new term, enforcing the single-active-term and
        date-ordering invariants."""
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            form = TermForm(request.POST)
            if form.is_valid():
                name = form.cleaned_data['name']
                is_active = form.cleaned_data['is_active']
                # Only one term may be active at any time.
                if is_active and Term.objects.filter(is_active=True).exists():
                    messages.error(request, 'Sorry ! There can only be one active term. Please try again')
                    return redirect('term_add')
                form = form.save(commit=False)
                # A term must close after it opens.
                if form.opening_date >= form.closing_date:
                    messages.error(request, 'Sorry ! closing date should be after opening date. Please try again')
                    return redirect('term_add')
                form.save()
                messages.success(request, 'Success ! New term {} added'.format(name))
                return redirect('term_add')
            messages.error(request, 'Sorry ! An error has occurred. Please try again')
            return redirect('term_add')
        else:
            messages.error(request, 'Sorry ! Access denied')
            return redirect('home')
@login_required
def term_change(request, pk):
    """Update an existing Term (POST only, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            term = get_object_or_404(Term, pk=pk)
            form = TermForm(request.POST, instance=term)
            if form.is_valid():
                is_active = form.cleaned_data['is_active']
                # Only one term may be active; this term itself is excluded.
                if is_active and Term.objects.filter(is_active=True).exclude(pk=pk).exists():
                    messages.error(request, 'Sorry ! There can only be one active term. Please try again')
                    return redirect('term_single', pk)
                form.save()
                name = form.cleaned_data['name']
                messages.success(request, 'Success ! The term {} updated'.format(name))
                return redirect('term_single', pk)
            messages.error(request, 'Sorry ! An error has occurred. Please try again')
            return redirect('term_single', pk)
    # Non-POST requests and non-managers fall through to here (F541 fix: no f-prefix).
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def term_remove(request, pk):
    """Delete a Term (POST only, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            term = get_object_or_404(Term, pk=pk)
            term.delete()
            messages.success(request, 'Success ! Term removed')
            return redirect('term_all')
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def term_single(request, pk):
    """Display one Term with a pre-filled edit form (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        term = get_object_or_404(Term, pk=pk)
        template_name = 'CBC/term_single.html'
        form = TermForm(instance=term)
        context = {'term': term, 'form': form}
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def term_all(request):
    """List all terms, newest opening date first (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/term_all.html'
        terms = Term.objects.order_by('-opening_date')
        context = {'terms': terms}
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@method_decorator(login_required, name='dispatch')
class ReportAdd(View):
    """Create a CBC report for a student (cbc_manager only)."""
    template_name = 'CBC/report_add.html'
    def get(self, request):
        """Render a blank report form for managers; bounce everyone else home."""
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            form = ReportForm()
            return render(request, self.template_name, {'form': form})
        else:
            # F541 fix: constant message, no f-prefix needed.
            messages.error(request, 'Sorry ! Access denied')
            return redirect('home')
    def post(self, request):
        """Validate and save the new report."""
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            form = ReportForm(request.POST)
            if form.is_valid():
                student = form.cleaned_data['student']
                form.save()
                messages.success(request, 'Success ! CBC report for {} created'.format(student))
                return redirect('cbc_report_add')
            messages.error(request, 'Sorry ! An error has occurred. Please try again')
            return redirect('cbc_report_add')
        else:
            messages.error(request, 'Sorry ! Access denied')
            return redirect('home')
@login_required
def report_change(request, pk):
    """Update an existing CBC report (POST only, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            report = get_object_or_404(Report, pk=pk)
            form = ReportForm(request.POST, instance=report)
            if form.is_valid():
                student = form.cleaned_data['student']
                form.save()
                messages.success(request, 'Success ! CBC report for {} updated'.format(student))
                return redirect('cbc_report_single', pk)
            messages.error(request, 'Sorry ! An error has occurred. Please try again')
            return redirect('cbc_report_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def report_remove(request, pk):
    """Delete a CBC report (POST only, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            report = get_object_or_404(Report, pk=pk)
            report.delete()
            messages.success(request, 'Success ! CBC report removed')
            return redirect('cbc_report_all')
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def report_single(request, pk):
    """Display one CBC report with a pre-filled edit form (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        report = get_object_or_404(Report, pk=pk)
        template_name = 'CBC/report_single.html'
        form = ReportForm(instance=report)
        context = {'report': report, 'form': form}
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def report_all(request):
    """List all CBC reports, ordered by student descending (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/report_all.html'
        reports = Report.objects.order_by('-student')
        context = {'reports': reports}
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@method_decorator(login_required, name='dispatch')
class ActivityAdd(View):
    """Create a CBC activity (cbc_manager only)."""
    template_name = 'CBC/activity_add.html'
    def get(self, request):
        """Render a blank activity form for managers; bounce everyone else home."""
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            form = ActivityForm()
            return render(request, self.template_name, {'form': form})
        else:
            # F541 fix: constant message, no f-prefix needed.
            messages.error(request, 'Sorry ! Access denied')
            return redirect('home')
    def post(self, request):
        """Validate and save the new activity."""
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            form = ActivityForm(request.POST)
            if form.is_valid():
                title = form.cleaned_data['title']
                form.save()
                messages.success(request, 'Success ! CBC activity {} created'.format(title))
                return redirect('cbc_activity_add')
            messages.error(request, 'Sorry ! An error has occurred. Please try again')
            return redirect('cbc_activity_add')
        else:
            messages.error(request, 'Sorry ! Access denied')
            return redirect('home')
@login_required
def activity_change(request, pk):
    """Update an existing CBC activity (POST only, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            activity = get_object_or_404(Activity, pk=pk)
            form = ActivityForm(request.POST, instance=activity)
            if form.is_valid():
                title = form.cleaned_data['title']
                form.save()
                messages.success(request, 'Success ! CBC activity {} updated'.format(title))
                return redirect('cbc_activity_single', pk)
            messages.error(request, 'Sorry ! An error has occurred. Please try again')
            return redirect('cbc_activity_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def activity_remove(request, pk):
    """Delete a CBC activity (POST only, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            activity = get_object_or_404(Activity, pk=pk)
            activity.delete()
            messages.success(request, 'Success ! CBC activity removed')
            return redirect('cbc_activity_all')
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def activity_single(request, pk):
    """Display one activity with its strands and an add-strand form (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        activity = get_object_or_404(Activity, pk=pk)
        template_name = 'CBC/activity_single.html'
        form = ActivityForm(instance=activity)
        strands = Strand.objects.filter(activity=activity)
        strand_form = StrandForm()
        context = {'activity': activity, 'form': form, 'strands': strands, 'strand_form': strand_form}
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def activity_all(request):
    """List all CBC activities alphabetically by title (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/activity_all.html'
        activities = Activity.objects.all().order_by('title')
        context = {'activities': activities}
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def strand_add(request, pk_ac):
    """Add a Strand to activity `pk_ac`, rejecting duplicates (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        activity = get_object_or_404(Activity, pk=pk_ac)
        form = StrandForm(request.POST)
        if form.is_valid():
            title = form.cleaned_data['title']
            # Strand titles must be unique within an activity.
            if Strand.objects.filter(title=title, activity=activity).exists():
                messages.error(request, 'Sorry ! Could not create a strand. It exists.')
                return redirect('cbc_activity_single', pk_ac)
            form = form.save(commit=False)
            form.activity = activity
            form.save()
            messages.success(request, 'Success ! Strand {} added to {} activity'.format(title, activity))
            return redirect('cbc_activity_single', pk_ac)
        messages.error(request, 'Sorry ! Could not create a strand. Please try again')
        return redirect('cbc_activity_single', pk_ac)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def strand_remove(request, pk):
    """Delete a Strand and return to its parent activity (POST, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            strand = get_object_or_404(Strand, pk=pk)
            # Remember the parent activity before the strand disappears.
            pk = strand.activity_id
            strand.delete()
            messages.success(request, 'Success ! Strand removed')
            return redirect('cbc_activity_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def strand_single(request, pk):
    """Display one Strand with its sub-strands and forms (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/strand_single.html'
        strand = get_object_or_404(Strand, pk=pk)
        sub_strand_form = SubStrandForm()
        sub_strands = SubStrand.objects.filter(strand=strand)
        form = StrandForm(instance=strand)
        return render(request, template_name, {'form': form, 'strand': strand, 'sub_strands': sub_strands,
                                               'sub_strand_form': sub_strand_form})
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def strand_change(request, pk):
    """Update an existing Strand (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        strand = get_object_or_404(Strand, pk=pk)
        form = StrandForm(request.POST, instance=strand)
        if form.is_valid():
            form.save()
            messages.success(request, 'Success ! CBC strand updated.')
            return redirect('cbc_strand_single', pk)
        messages.error(request, 'Sorry ! An error has occurred. Please try again')
        return redirect('cbc_strand_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def sub_strand_add(request, pk_st):
    """Add a SubStrand to strand `pk_st`, rejecting duplicates (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        strand = get_object_or_404(Strand, pk=pk_st)
        form = SubStrandForm(request.POST)
        if form.is_valid():
            title = form.cleaned_data['title']
            # Sub-strand titles must be unique within a strand.
            if SubStrand.objects.filter(title=title, strand=strand).exists():
                messages.error(request, 'Sorry ! Could not create the sub strand. It exists.')
                return redirect('cbc_strand_single', pk_st)
            form = form.save(commit=False)
            form.strand = strand
            form.save()
            messages.success(request, 'Success ! Sub Strand {} added to {} strand'.format(title, strand))
            return redirect('cbc_strand_single', pk_st)
        messages.error(request, 'Sorry ! Could not create the sub strand. Please try again')
        return redirect('cbc_strand_single', pk_st)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def sub_strand_single(request, pk):
    """Display one SubStrand with its objectives and forms (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/sub_strand_single.html'
        sub_strand = get_object_or_404(SubStrand, pk=pk)
        objectives = Objective.objects.filter(sub_strand=sub_strand)
        form = SubStrandForm(instance=sub_strand)
        objective_form = ObjectiveForm()
        return render(request, template_name, {'form': form, 'sub_strand': sub_strand, 'objectives': objectives,
                                               'objective_form': objective_form})
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def sub_strand_change(request, pk):
    """Update an existing SubStrand (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        sub_strand = get_object_or_404(SubStrand, pk=pk)
        form = SubStrandForm(request.POST, instance=sub_strand)
        if form.is_valid():
            form.save()
            messages.success(request, 'Success ! CBC sub strand updated.')
            return redirect('cbc_sub_strand_single', pk)
        messages.error(request, 'Sorry ! An error has occurred. Please try again')
        return redirect('cbc_sub_strand_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def sub_strand_remove(request, pk):
    """Delete a SubStrand and return to its parent strand (POST, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            sub_strand = get_object_or_404(SubStrand, pk=pk)
            # Remember the parent strand before the sub-strand disappears.
            pk = sub_strand.strand_id
            sub_strand.delete()
            messages.success(request, 'Success ! Sub Strand removed')
            return redirect('cbc_strand_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def objective_add(request, pk_sst):
    """Add an Objective to sub-strand `pk_sst`, rejecting duplicates (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        sub_strand = get_object_or_404(SubStrand, pk=pk_sst)
        form = ObjectiveForm(request.POST)
        if form.is_valid():
            title = form.cleaned_data['title']
            # Objective titles must be unique within a sub-strand.
            if Objective.objects.filter(title=title, sub_strand=sub_strand).exists():
                messages.error(request, 'Sorry ! Could not create the objective. It exists.')
                return redirect('cbc_sub_strand_single', pk_sst)
            form = form.save(commit=False)
            form.sub_strand = sub_strand
            form.save()
            messages.success(request, 'Success ! Objective {} added.'.format(title))
            return redirect('cbc_sub_strand_single', pk_sst)
        messages.error(request, 'Sorry ! Could not create the objective. Please try again')
        return redirect('cbc_sub_strand_single', pk_sst)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def objective_single(request, pk):
    """Display one Objective with a pre-filled edit form (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/objective_single.html'
        objective = get_object_or_404(Objective, pk=pk)
        form = ObjectiveForm(instance=objective)
        return render(request, template_name, {'form': form, 'objective': objective})
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def objective_change(request, pk):
    """Update an existing Objective (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        objective = get_object_or_404(Objective, pk=pk)
        form = ObjectiveForm(request.POST, instance=objective)
        if form.is_valid():
            form.save()
            messages.success(request, 'Success ! CBC objective updated.')
            return redirect('cbc_objective_single', pk)
        messages.error(request, 'Sorry ! An error has occurred. Please try again')
        return redirect('cbc_objective_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def objective_remove(request, pk):
    """Delete an Objective and return to its parent sub-strand (POST, cbc_manager only)."""
    if request.method == 'POST':
        if request.user.groups.filter(name__in=['cbc_manager']).exists():
            objective = get_object_or_404(Objective, pk=pk)
            # Remember the parent sub-strand before the objective disappears.
            pk = objective.sub_strand_id
            objective.delete()
            messages.success(request, 'Success ! Objective removed')
            return redirect('cbc_sub_strand_single', pk)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def assess_activity(request, pk_rp):
    """List the activities assessable for report `pk_rp` (its term and class room)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/assess_activity.html'
        report = get_object_or_404(Report, pk=pk_rp)
        term = report.term
        class_room = report.class_room
        activities = Activity.objects.filter(term=term, class_room=class_room)
        return render(request, template_name, {'report': report, 'activities': activities})
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def assessment_view(request, pk_rp, pk_ac):
    """Render the assessment page for one activity of a report (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/assessment.html'
        activity = get_object_or_404(Activity, pk=pk_ac)
        report = get_object_or_404(Report, pk=pk_rp)
        strands = Strand.objects.filter(activity=activity)
        sub_strands = SubStrand.objects.filter(strand__activity=activity)
        objectives = Objective.objects.filter(sub_strand__strand__activity=activity)
        assessments = Assessment.objects.filter(report=report)
        # Pre-fill the remark form when a remark already exists for this pair.
        if TermActivityRemark.objects.filter(report=report, activity=activity).exists():
            tar = get_object_or_404(TermActivityRemark, report=report, activity=activity)
            tar_form = TARForm(instance=tar)
        else:
            tar_form = TARForm()
        context = {
            'report': report, 'activity': activity, 'strands': strands, 'sub_strands': sub_strands,
            'objectives': objectives, 'assessments': assessments, 'tar_form': tar_form
        }
        return render(request, template_name, context)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def assessment_store(request, pk_rp, pk_ob):
    """Create or update the Assessment score for one objective of a report (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        report = get_object_or_404(Report, pk=pk_rp)
        objective = get_object_or_404(Objective, pk=pk_ob)
        # Upsert: bind the form to the existing assessment when one exists.
        if Assessment.objects.filter(report=report, objective=objective).exists():
            assessment = get_object_or_404(Assessment, report=report, objective=objective)
            form = AssessmentForm(request.POST, instance=assessment)
        else:
            form = AssessmentForm(request.POST)
        if form.is_valid():
            form = form.save(commit=False)
            form.report = report
            form.objective = objective
            form.save()
            messages.success(request, 'Success ! Assessment score submitted')
            return redirect('cbc_assessment', pk_rp, objective.sub_strand.strand.activity_id)
        messages.error(request, 'Sorry ! An error has occurred. Please try again')
        return redirect('cbc_assessment', pk_rp, objective.sub_strand.strand.activity_id)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def tar_store(request, pk_rp, pk_ac):
    """Create or update the class teacher's TermActivityRemark (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        report = get_object_or_404(Report, pk=pk_rp)
        activity = get_object_or_404(Activity, pk=pk_ac)
        # Upsert: bind the form to the existing remark when one exists.
        if TermActivityRemark.objects.filter(report=report, activity=activity).exists():
            tar = get_object_or_404(TermActivityRemark, report=report, activity=activity)
            form = TARForm(request.POST, instance=tar)
        else:
            form = TARForm(request.POST)
        if form.is_valid():
            form = form.save(commit=False)
            form.report = report
            form.activity = activity
            form.save()
            messages.success(request, 'Success ! Class teacher overall comments submitted')
            return redirect('cbc_assessment', pk_rp, pk_ac)
        messages.error(request, 'Sorry ! An error has occurred. Please try again')
        return redirect('cbc_assessment', pk_rp, pk_ac)
    messages.error(request, 'Sorry ! Access denied')
    return redirect('home')
@login_required
def term_rs_view(request, pk_rp):
    """Render the termly remarks/summaries page for a report (cbc_manager only)."""
    if request.user.groups.filter(name__in=['cbc_manager']).exists():
        template_name = 'CBC/term_rs_view.html'
        report = get_object_or_404(Report, pk=pk_rp)
        term = report.term
        class_room = report.class_room
        activities = Activity.objects.filter(term=term, class_room=class_room)
        term_sums = TermSummary.objects.filter(report=report)
        # Pre-fill the remark form when a remark already exists for the report.
        if TermRemark.objects.filter(report=report).exists():
            tr = get_object_or_404(TermRemark, report=report)
            tr_form = TRForm(instance=tr)
        else:
            tr_form = TRForm()
        context = {'tr_form': tr_form, 'report': report, 'activities': activities, 'term_sums': term_sums}
        return render(request, template_name, context)
    else:
        messages.error(request, 'Sorry ! Access denied')
        return redirect('home')
@login_required
def term_remark_store(request, pk_rp):
    """Create or update the TermRemark for a report (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        report = get_object_or_404(Report, pk=pk_rp)
        # Upsert: bind the form to the existing remark when one exists.
        if TermRemark.objects.filter(report=report).exists():
            tr = get_object_or_404(TermRemark, report=report)
            form = TRForm(request.POST, instance=tr)
        else:
            form = TRForm(request.POST)
        if form.is_valid():
            form = form.save(commit=False)
            form.report = report
            form.save()
            messages.success(request, 'Success ! Termly remarks submitted')
            return redirect('cbc_trs_view', pk_rp)
        messages.error(request, 'Sorry ! Cannot submit termly remarks. Please try again')
        return redirect('cbc_trs_view', pk_rp)
    else:
        messages.error(request, 'Sorry ! Access denied')
        return redirect('home')
@login_required
def term_summary_store(request, pk_rp, pk_ac):
    """Create or update the TermSummary for an activity: the score is the
    integer mean of all assessed objective scores under that activity
    (POST, cbc_manager only)."""
    if request.method == 'POST' and request.user.groups.filter(name__in=['cbc_manager']).exists():
        report = get_object_or_404(Report, pk=pk_rp)
        activity = get_object_or_404(Activity, pk=pk_ac)
        # Upsert: bind the form to the existing summary when one exists.
        if TermSummary.objects.filter(report=report, activity=activity).exists():
            ts = get_object_or_404(TermSummary, report=report, activity=activity)
            form = TSForm(request.POST, instance=ts)
        else:
            form = TSForm(request.POST)
        if form.is_valid():
            form = form.save(commit=False)
            form.report = report
            form.activity = activity
            # Sum every assessed objective under this activity.
            objectives = Objective.objects.filter(sub_strand__strand__activity=activity)
            length = len(objectives)
            points = 0
            for objective in objectives:
                if Assessment.objects.filter(report=report, objective=objective).exists():
                    assess = Assessment.objects.get(report=report, objective=objective)
                    points += int(assess.score)
            # Guard against division by zero for activities without objectives.
            form.score = int(points / length) if length > 0 else 0
            form.save()
            messages.success(request, 'Success ! Summative summary submitted')
            return redirect('cbc_trs_view', pk_rp)
        # BUG FIX: original message contained the typo 'tSummative'.
        messages.error(request, 'Sorry ! Cannot submit Summative summary. Please try again')
        return redirect('cbc_trs_view', pk_rp)
    else:
        messages.error(request, 'Sorry ! Access denied')
        return redirect('home')
|
import re
import requests
from google_play_scraper import app
def _guess_store(appid):
"""
Return either 'AppStore' or 'PlayStore' based on the string pattern
if string pattern conforms to a known pattern.
"""
if re.fullmatch(r"^id(\d){8,}$", appid):
return "AppStore"
elif re.fullmatch(r"^(\w+\.){2,}\w+$", appid):
return "PlayStore"
else:
raise Exception(
"The app id you've provided cannot be found in that country's app store."
)
def validate_appid(appid: str, country: str):
    """Look up app metadata for `appid` in `country`.

    Returns (appname, store, publisher, category).
    Raises Exception when the app cannot be found or the lookup fails.
    """
    store = _guess_store(appid)
    assert store in ["AppStore", "PlayStore"]
    if store == "AppStore":
        url = f"http://apps.apple.com/{country}/app/{appid}"
        res = requests.get(url)
        if res.status_code == 200:
            # The App Store page embeds JSON metadata; extract fields with
            # lookaround regexes on the raw page text.
            appname = re.search('(?<="name":").*?(?=")', res.text).group(0)
            publisher = re.search(
                '(?<="author":).*("name":")(.*?)(?=")', res.text
            ).group(2)
            category = re.search(
                '(?<="applicationCategory":").*?(?=")', res.text
            ).group(0)
            return appname, store, publisher, category
        else:
            raise Exception(
                "Did not receive a valid response. Response code", res.status_code
            )
    if store == "PlayStore":
        try:
            appinfo = app(appid, country=country)
            appname = appinfo["title"]
            publisher = appinfo["developer"]
            category = appinfo["genre"]
            return appname, store, publisher, category
        except Exception as err:
            # BUG FIX: the original read `except err as err`, which raised
            # NameError (undefined `err`) instead of handling the failure.
            raise Exception("Did not receive a valid response.", err)
|
from app import db, Artist, Artist_Genre
import sys
# Add artist data
# Seed script: build Artist rows with their Artist_Genre children, then commit
# them in one transaction, rolling back and reporting on any failure.
groban = Artist(
    name='Groban',
    city='New York',
    state='NY',
    phone='212-121-3940',
    website='www.groban.com',
    image_link='www.groban.com/img',
    facebook_link='www.facebook.com/groban'
)
groban_genre1 = Artist_Genre(genre='rock')
groban_genre2 = Artist_Genre(genre='contemporary')
groban_genre1.artist = groban
groban_genre2.artist = groban
mature_five = Artist(
    name='Mature 5',
    city='New York',
    state='NY',
    phone='212-304-2399',
    website='www.maturefive.com',
    image_link='www.maturefive.com/img',
    facebook_link='www.facebook.com/mature_five'
)
mature_five_genre1 = Artist_Genre(genre='pop')
mature_five_genre2 = Artist_Genre(genre='contemporary')
mature_five_genre1.artist = mature_five
mature_five_genre2.artist = mature_five
diablo_bull = Artist(
    name='Diablo Bull',
    city='Miami',
    state='FL',
    phone='305-193-4584',
    website='www.eldiablobull.com',
    image_link='www.eldiablobull.com/pic',
    facebook_link='www.facebook.com/diablo_bull'
)
diablo_bull_genre1 = Artist_Genre(genre='hip hop')
diablo_bull_genre2 = Artist_Genre(genre='latin')
diablo_bull_genre1.artist = diablo_bull
diablo_bull_genre2.artist = diablo_bull
nail_in_coffin = Artist(
    name='Nail in the Coffin',
    city='Seattle',
    state='WA',
    phone='564-908-1827',
    website='www.nailincoffin.com',
    image_link='www.nailincoffin.com/profile',
    facebook_link='www.facebook.com/nail_in_coffin'
)
nail_in_coffin_genre1 = Artist_Genre(genre='metal')
nail_in_coffin_genre1.artist = nail_in_coffin
blake = Artist(
    name='Blake',
    city='San Francisco',
    state='CA',
    phone='415-783-0394',
    website='www.thisisblake.com',
    image_link='www.thisisblake.com/img',
    facebook_link='www.facebook.com/blake'
)
blake_genre1 = Artist_Genre(genre='hip hop')
blake_genre1.artist = blake
oos = Artist(
    name='OOS',
    city='Syracuse',
    state='NY',
    phone='315-988-1847',
    website='www.outofschool.com',
    image_link='www.outofschool.com/img',
    facebook_link='www.facebook.com/oos'
)
oos_genre1 = Artist_Genre(genre='k-pop')
oos_genre2 = Artist_Genre(genre='pop')
oos_genre3 = Artist_Genre(genre='electronic')
oos_genre1.artist = oos
oos_genre2.artist = oos
oos_genre3.artist = oos
sara_braile = Artist(
    name='Sara Braile',
    city='San Francisco',
    state='CA',
    phone='415-304-2390',
    website='www.braile.com',
    image_link='www.braile.com/img',
    facebook_link='www.facebook.com/sara_braile'
)
sara_braile_genre1 = Artist_Genre(genre='indie')
sara_braile_genre2 = Artist_Genre(genre='contemporary')
sara_braile_genre3 = Artist_Genre(genre='jazz')
sara_braile_genre1.artist = sara_braile
sara_braile_genre2.artist = sara_braile
# BUG FIX: the original assigned genre2's artist twice and never linked genre3.
sara_braile_genre3.artist = sara_braile
ja_mon = Artist(
    name='Ja Mon',
    city='New York',
    state='NY',
    phone='212-391-2038',
    website='www.jamon.com',
    image_link='www.jamon.com/img',
    facebook_link='www.facebook.com/ja_mon'
)
# NOTE(review): 'regae' looks like a typo for 'reggae' — confirm before fixing,
# since other code may query this exact value.
ja_mon_genre1 = Artist_Genre(genre='regae')
ja_mon_genre1.artist = ja_mon
try:
    db.session.add(groban)
    # The remaining artists are intentionally left out of this run; uncomment to seed them.
    # db.session.add(diablo_bull)
    # db.session.add(mature_five)
    # db.session.add(nail_in_coffin)
    # db.session.add(blake)
    # db.session.add(oos)
    # db.session.add(sara_braile)
    # db.session.add(ja_mon)
    db.session.commit()
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still propagate;
    # best-effort: roll back and report, never crash the seed run.
    db.session.rollback()
    print(sys.exc_info())
finally:
    db.session.close()
|
#%%
import math

import numpy as np
# Module-level debug flag read by printAngleRadians.
verbose = False
def printAngleRadians(name, angleToPrint, isRadians = True):
    # Debug helper: when the module-level `verbose` flag is set, print the
    # angle (converted to degrees when `isRadians`) prefixed with `name`,
    # and return the value that was shown.
    # NOTE(review): with verbose off it simply returns `angleToPrint` unchanged;
    # the return unit therefore depends on the global flag — confirm intended.
    toPrint = angleToPrint
    if (verbose):
        if(isRadians):
            toPrint = math.degrees(angleToPrint)
        print(name + str( toPrint))
    return toPrint
#%%
# sSA & ASs
# law of sines
def calculateServoTriangle (beta, servoToMidleLength, servoArmLength, verbose = False):
    """Solve the servo linkage triangle via the law of sines.

    Returns the remaining angle alpha in radians, or -1 when the sine ratio
    exceeds 1 (the arm geometrically cannot reach the configuration).
    """
    sineRatio = (servoToMidleLength / servoArmLength) * math.sin(beta)
    if sineRatio > 1:
        # asin is undefined above 1: report (when verbose) and signal failure.
        if verbose:
            print ('ERROR D > 1 angle not solvable')
            printAngleRadians('D: ' + str(sineRatio) + ' rad:', sineRatio)
        return -1
    acuteGamma = math.asin(sineRatio)
    # Of the two law-of-sines solutions the model needs the obtuse angle
    # (the acute solution is most likely always < 90 degrees).
    obtuseGamma = math.pi - acuteGamma
    if obtuseGamma < acuteGamma:
        print('didt expect gamma to be larger. reavaluate formula')
    # Triangle angles sum to pi; alpha is what remains.
    return math.pi - beta - obtuseGamma
#%%
#math.degrees(calculateServoTriangle(math.radians(5),servoToMidleLength= 8, servoArmLength= 1))
#%%
def calculateServoAngle(HightPointY, ServoPointX, desiredAngle, servoArmLength):
    """Return the servo angle in degrees needed to tilt the platform by
    `desiredAngle` degrees, or -1 when the geometry is unsolvable."""
    opposite = HightPointY
    adjacent = ServoPointX
    # Distance from the servo pivot to the platform midpoint (hypotenuse).
    pivotToMiddle = math.sqrt(opposite ** 2 + adjacent ** 2)
    alpha2 = math.atan(opposite / adjacent)
    beta2 = math.atan(adjacent / opposite)
    hoek2 = math.radians(90.0 + desiredAngle)
    beta1 = hoek2 - beta2
    alpha1 = calculateServoTriangle(beta1, servoToMidleLength=pivotToMiddle, servoArmLength=servoArmLength)
    # Propagate the unsolvable sentinel from the triangle solver.
    if alpha1 < 0:
        return -1
    return math.degrees(alpha1 + alpha2)
def applyRotation(theta, array):
    """Rotate `array` (a row vector or stack of row vectors) by `theta`
    degrees about the z-axis via right-multiplication.

    BUG FIX: this function uses numpy, which the file never imported;
    `import numpy as np` is added to the module imports.
    """
    theta_rad = np.deg2rad(theta)  # hoisted: convert once instead of four times
    # Standard right-handed z-axis rotation matrix.
    rot_z = np.array([
        [np.cos(theta_rad), -np.sin(theta_rad), 0],
        [np.sin(theta_rad), np.cos(theta_rad), 0],
        [0, 0, 1]
    ])
    return np.matmul(array, rot_z)
#def calculateServoState():
#%%
# Example run: servo 10 units out, platform midpoint 5 units up, flat platform, 9-unit arm.
calculateServoAngle(HightPointY = 5, ServoPointX = 10, desiredAngle = 0, servoArmLength = 9)
|
#encoding:utf-8
import datetime
import csv
import logging
from multiprocessing import Process
import yaml
from croniter import croniter
from supplier import supply
logger = logging.getLogger(__name__)
def read_own_cron(own_cron_filename, config):
    """Scan a tab-separated cron table and start due supply jobs.

    Each row must provide a 'MASK' column (cron expression) and a
    'submodule_name' column. A row is due when its most recent scheduled
    time lies within the last minute; each due row launches `supply` in a
    separate process.
    """
    with open(own_cron_filename) as tsv_file:
        tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
        for row in tsv_reader:
            now = datetime.datetime.now()
            # Anchor croniter explicitly at `now` so get_prev() is computed
            # from the same instant as the difference below (otherwise
            # croniter falls back to its own notion of "now").
            cron = croniter(row['MASK'], now)
            prev_run = cron.get_prev(datetime.datetime)
            diff_seconds = (now - prev_run).total_seconds()
            if 0.0 <= diff_seconds <= 59.9:
                supplying_process = Process(target=supply, args=(row['submodule_name'], config))
                supplying_process.start()
def main(config_filename):
    """Load the YAML configuration and process its cron file once."""
    with open(config_filename) as config_file:
        settings = yaml.safe_load(config_file.read())
        read_own_cron(settings['cron_file'], settings)
if __name__ == '__main__':
    # CLI entry point: a single optional --config path (defaults to prod).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='configs/prod.yml')
    args = parser.parse_args()
    main(args.config)
|
from .models import Task
import logging
logger = logging.getLogger(__name__)
def getTasksCategorized(tasks):
    """
    getTasksCategorized(tasks)
    gets a queryset
    and returns a `dict` mapping each group's display name to the
    queryset of that group's tasks
    """
    return {
        groupName: tasks.filter(group=group)
        for group, groupName in Task.Groups.choices
    }
|
from .base import *
from decouple import config
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
DEBUG = False
# NOTE(review): '*' accepts any Host header; prefer listing the real
# hostnames in production to mitigate Host-header attacks.
ALLOWED_HOSTS = ['*', ]
# PostgreSQL database; credentials come from the environment via
# python-decouple. An empty PORT falls back to the driver default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config('NAME'),
        'USER': config('USER'),
        'PASSWORD': config('PASSWORD'),
        'HOST': config('HOST'),
        'PORT': '',
    }
}
# Outgoing mail settings, environment-driven with local-friendly defaults.
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
# Environment banner name/colour — presumably consumed by an admin
# environment-notice integration; confirm against base settings.
ENVIRONMENT_NAME = "Duka Connect Dashboard"
ENVIRONMENT_COLOR = "#2f4f4f"
|
# Create your views here.
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render,get_object_or_404,redirect
from django.template import RequestContext
from .constants import RUN_URL, secret
from .models import Pen
from .forms import CodeBoxRun
import requests
def home(request):
    """Run submitted code via the remote execution API and redirect to its pen.

    POST: validate the CodeBoxRun form, post the source to RUN_URL, persist a
    new Pen keyed by the returned code_id, stash the run result in the
    session, and redirect to /<hash>. GET: render an empty form.
    """
    if request.method == "POST":
        form = CodeBoxRun(data = request.POST)
        if form.is_valid():
            # NOTE(review): 'text'/'inp' are read from raw form.data while
            # 'langs'/'name' use cleaned_data — consider cleaned_data for all.
            # `name` is currently unused in this view.
            data = form.data['text']
            std_in = form.data['inp']
            lang = form.cleaned_data['langs']
            name = form.cleaned_data['name']
            source = data
            # Payload for a synchronous run on the remote code-execution API.
            json_src = {
                'client_secret': secret,
                'async': 0,
                'source': source,
                'lang': lang,
                'input':std_in,
            }
            r = requests.post(RUN_URL,data=json_src)
            r1 = r.json()
            # `hash` shadows the builtin; it is the remote run's code_id.
            hash = str(r1['code_id'])
            penObject = Pen()
            penObject.code = data
            penObject.lang = lang
            # NOTE(review): increments the counter on a brand-new Pen, so it
            # always stores default+1 — confirm whether an existing Pen
            # should be fetched/updated instead.
            penObject.url_count += 1
            penObject.hash = str(r1['code_id'])
            penObject.publish()
            # Hand the run result to display() via the session
            # (i=input, s=status, sd=status detail, o=output, t=time, m=memory).
            sessionTransferDict = {}
            sessionTransferDict['i'] = std_in
            sessionTransferDict['s'] = str(r1['run_status']['status'])
            sessionTransferDict['sd'] = r1['run_status']['status_detail']
            if sessionTransferDict['s'] == "CE":
                # Compile error: show compiler output with zero time/memory.
                sessionTransferDict['o'] = r1['compile_status']
                sessionTransferDict['t'] = "0.0"
                sessionTransferDict['m'] = "0"
            else:
                sessionTransferDict['o'] = r1['run_status']['output_html']
                sessionTransferDict['t'] = r1['run_status']['time_used']
                sessionTransferDict['m'] = r1['run_status']['memory_used']
            request.session['data'] = sessionTransferDict
            redirect_url = '/'+str(hash)
            return redirect(redirect_url)
    else:
        form = CodeBoxRun()
    return render(request,'codebox/home.html',{'form':form})
def display(request, hash):
    """Show a stored pen plus, when arriving from home(), its run results.

    Looks up the Pen by URL hash; if the session carries run data from a
    just-executed submission it is popped (one-shot) and rendered, otherwise
    empty results are shown with the stored source pre-filled.
    """
    penObject = get_object_or_404(Pen, hash=hash)
    source = penObject.code
    lang = penObject.lang
    count = penObject.get_count()
    name = penObject.name
    resultData = {}
    if 'data' in request.session:
        # Fresh run: unpack the single-letter session keys set by home().
        data_pass = request.session['data']
        resultData['output'] = data_pass['o']
        resultData['time']= data_pass['t']
        resultData['memory']= data_pass['m']
        resultData['status_detail'] = data_pass['sd']
        resultData['status']=data_pass['s']
        priorData = {}
        priorData['inp']=data_pass['i']
        priorData['text']=source
        priorData['langs']=lang
        priorData['name']=name
        form = CodeBoxRun(initial = priorData)
        # method=1 tells the template run results are present.
        method = 1
        # One-shot: drop the payload so a refresh shows no stale results.
        request.session.pop('data')
        data_pass = {}
    else:
        method = 0
        resultData['output'] = ""
        resultData['time']= ""
        resultData['memory']= ""
        resultData['status_detail'] = ""
        # NOTE(review): 'status' is not set on this branch (and priorData
        # omits 'inp'/'name') — confirm the template tolerates missing keys.
        priorData = {}
        priorData['text'] = source
        priorData['langs'] = lang
        form = CodeBoxRun(initial=priorData)
    return render(request,'codebox/display.html',{'form':form,'out':resultData,'method':method,'count':count })
#!/usr/bin/env python
# encoding: utf-8
from setuptools import setup
from numpy.distutils.core import setup, Extension
# NOTE(review): the `setup` imported from numpy.distutils.core above shadows
# the setuptools one — needed here because of the Fortran Extension below.
setup(
    name='CCBlade',
    version='1.1.1',
    description='Blade element momentum aerodynamics for wind turbines',
    author='S. Andrew Ning',
    author_email='andrew.ning@nrel.gov',
    package_dir={'': 'src'},
    py_modules=['ccblade'],
    install_requires=['airfoilprep.py>=0.1'],  # , 'zope.interface'],
    # test_suite='test.test_ccblade.py',
    license='Apache License, Version 2.0',
    # Compile the Fortran BEM core as the _bem extension module.
    ext_modules=[Extension('_bem', ['src/bem.f90'], extra_compile_args=['-O2'])],
    dependency_links=['https://github.com/WISDEM/AirfoilPreppy/tarball/master#egg=airfoilprep.py-0.1'],
    zip_safe=False
)
|
"""
__version__.py
~~~~~~~~~~~~~~
Information about the current version of the mamp-cli package.
"""
# Package metadata constants — presumably read by packaging/docs tooling.
__title__ = 'mamp_cli'
__description__ = 'mamp_cli - command line tools for MAMP and WordPress'
__version__ = '0.1.0'
__author__ = 'Arash Bahrami'
__author_email__ = 'arash.b7@gmail.com'
__license__ = 'MIT'
__url__ = 'https://github.com/Honda-a/mamp-cli'
|
# Read N lines of space-separated integers from stdin, sum every value and
# print half the total (the halving presumably reflects each value being
# counted twice in the input — confirm against the problem statement).
N = int(input())
total = 0
for _ in range(N):
    # sum() consumes the map iterator directly; no intermediate list needed.
    total += sum(map(int, input().split()))
print(total // 2)
#from fuzzywuzzy import fuzz
#from fuzzywuzzy import process
|
#!/usr/bin/python -tt
'''
Created on Nov 21, 2012
@author: niklas
'''
import re
import bbClasses
def convertScore(bdscore):
    """Scale *bdscore* by 10 and return the result as a string."""
    scaled = int(bdscore) * 10
    return str(scaled)
def getOrientationInfo(orientation):
    """Split an orientation string such as '12+12-' into read counts.

    Format is <no_reads>+<no_reads>-; returns the pair of counts
    (positive strand, negative strand) as strings.
    """
    match = re.search(r'(\d+)(\+)(\d+)(\-)', orientation)
    return (match.group(1), match.group(3))
def isNotZero(numReads):
    """Return True unless *numReads* is exactly the string "0".

    Note: the comparison is string-based (callers pass regex captures),
    so "00" or integer 0 would not be treated as zero.
    """
    return numReads != "0"
def main(inputFile, outputFile, minScore, onlyCommon):
    """Convert tab-separated SV calls to BED entries above a score cutoff.

    inputFile: tab-separated SV file; header lines start with '#'.
    outputFile: destination BED file.
    minScore: score threshold (converted via convertScore before use).
    onlyCommon: when > 0, keep only SVs shared by exactly that many samples.

    NOTE(review): this is Python 2 code (`print x,` statements).
    NOTE(review): convertScore() returns a *string*, so every
    `score >= minScore` comparison below is lexicographic (e.g.
    "90" >= "100" is True) — compare as ints to get the intended behaviour.
    """
    minScore = convertScore(minScore)
    FH_INPUT = open(inputFile, "rU")
    FH_OUTPUT = open(outputFile, "w")
    ## Read input
    for line in FH_INPUT:
        ## Skip the header
        if not re.search(r'^#', line):
            splitline = line.split('\t')
            chromosomes = [splitline[0], splitline[3]]
            start_pos = splitline[1]
            end_pos = splitline[4]
            score = convertScore(splitline[8])
            sv_type = splitline[6]
            ## Check whether or not the chromosomes in the SV breakpoints are the same
            if chromosomes[0] == chromosomes[1]:
                ## Orientation info for both breakpoints.
                oInfo = (getOrientationInfo(splitline[2]), getOrientationInfo(splitline[5]))
                ## Check if the strands are the same
                ## Positive strand:
                if isNotZero(oInfo[0][0]) and isNotZero(oInfo[0][1]):
                    ## If yes: move on to printing
                    ## If only common SVs should be included
                    if onlyCommon > 0:
                        commonSamples = splitline[10].split(':')
                        if onlyCommon == len(commonSamples):
                            ## Check score threshold
                            if score >= minScore:
                                entry = bbClasses.BEDentry(chromosomes[0], start_pos, end_pos, sv_type)
                                entry.setScore(score)
                                entry.setStrand("+")
                                print entry.printEntry(),
                                FH_OUTPUT.write(entry.printEntry())
                    else:
                        ## Check score threshold
                        if score >= minScore:
                            entry = bbClasses.BEDentry(chromosomes[0], start_pos, end_pos, sv_type)
                            entry.setScore(score)
                            entry.setStrand("+")
                            print entry.printEntry(),
                            FH_OUTPUT.write(entry.printEntry())
                ## Check if only one breakpoint has reads on the positive strand
                # else:
                #
                #     if isNotZero(oInfo[0][0]):
                #         ## Reads are present only at the first position
                #         ## TODO: Figure out how to handle this
                #
                #     elif isNotZero(oInfo[0][1]):
                #         ## Reads are present only at the first position
                #         ## TODO: Figure out how to handle this
                ## Negative strand:
                if isNotZero(oInfo[1][0]) and isNotZero(oInfo[1][1]):
                    ## If yes: move on to printing
                    ## If only common SVs should be included
                    if onlyCommon > 0:
                        commonSamples = splitline[10].split(':')
                        if onlyCommon == len(commonSamples):
                            ## Check score threshold
                            if score >= minScore:
                                entry = bbClasses.BEDentry(chromosomes[0], start_pos, end_pos, sv_type)
                                entry.setScore(score)
                                entry.setStrand("-")
                                print entry.printEntry(),
                                FH_OUTPUT.write(entry.printEntry())
                    else:
                        ## Check score threshold
                        if score >= minScore:
                            entry = bbClasses.BEDentry(chromosomes[0], start_pos, end_pos, sv_type)
                            entry.setScore(score)
                            entry.setStrand("-")
                            print entry.printEntry(),
                            FH_OUTPUT.write(entry.printEntry())
            else:
                ## Interchromosomal SV: emit one entry per chromosome.
                ## NOTE(review): the onlyCommon branches below use
                ## chromosomes[0] while the else-branches use the loop
                ## variable `chromosome` — likely all four should use
                ## `chromosome`; confirm intended output.
                for chromosome in chromosomes:
                    oInfo = (getOrientationInfo(splitline[2]), getOrientationInfo(splitline[5]))
                    ## Check if the strands are the same
                    ## Positive strand:
                    if isNotZero(oInfo[0][0]) and isNotZero(oInfo[0][1]):
                        ## If yes: move on to printing
                        ## If only common SVs should be included
                        if onlyCommon > 0:
                            commonSamples = splitline[10].split(':')
                            if onlyCommon == len(commonSamples):
                                ## Check score threshold
                                if score >= minScore:
                                    entry = bbClasses.BEDentry(chromosomes[0], start_pos, end_pos, sv_type)
                                    entry.setScore(score)
                                    entry.setStrand("+")
                                    print entry.printEntry(),
                                    FH_OUTPUT.write(entry.printEntry())
                        else:
                            ## Check score threshold
                            if score >= minScore:
                                entry = bbClasses.BEDentry(chromosome, start_pos, end_pos, sv_type)
                                entry.setScore(score)
                                entry.setStrand("+")
                                print entry.printEntry(),
                                FH_OUTPUT.write(entry.printEntry())
                    ## Check if only one breakpoint has reads on the positive strand
                    # else:
                    #
                    #     if isNotZero(oInfo[0][0]):
                    #         ## Reads are present only at the first position
                    #         ## TODO: Figure out how to handle this
                    #
                    #     elif isNotZero(oInfo[0][1]):
                    #         ## Reads are present only at the first position
                    #         ## TODO: Figure out how to handle this
                    ## Negative strand:
                    if isNotZero(oInfo[1][0]) and isNotZero(oInfo[1][1]):
                        ## If yes: move on to printing
                        ## If only common SVs should be included
                        if onlyCommon > 0:
                            commonSamples = splitline[10].split(':')
                            if onlyCommon == len(commonSamples):
                                ## Check score threshold
                                if score >= minScore:
                                    entry = bbClasses.BEDentry(chromosomes[0], start_pos, end_pos, sv_type)
                                    entry.setScore(score)
                                    entry.setStrand("-")
                                    print entry.printEntry(),
                                    FH_OUTPUT.write(entry.printEntry())
                        else:
                            ## Check score threshold
                            if score >= minScore:
                                entry = bbClasses.BEDentry(chromosome, start_pos, end_pos, sv_type)
                                entry.setScore(score)
                                entry.setStrand("-")
                                print entry.printEntry(),
                                FH_OUTPUT.write(entry.printEntry())
            #FH_OUTPUT.write("\n")
    FH_INPUT.close()
    FH_OUTPUT.close()
if __name__ == '__main__':
    # NOTE(review): main() requires (inputFile, outputFile, minScore,
    # onlyCommon); calling it with no arguments raises TypeError.
    main()
class Solution:
    def threeSum(self, nums):
        """Return all unique triplets from *nums* summing to zero.

        :type nums: List[int]
        :rtype: List[List[int]]
        Sorts *nums* in place, then runs a two-pointer scan per anchor.
        """
        triplets = []
        if len(nums) < 3:
            return triplets
        nums.sort()
        for anchor in range(len(nums) - 2):
            # Skip duplicate anchor values to avoid repeated triplets.
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue
            lo = anchor + 1
            hi = len(nums) - 1
            while lo < hi:
                total = nums[anchor] + nums[lo] + nums[hi]
                if total == 0:
                    triplets.append([nums[anchor], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Advance past duplicates on both sides.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
                elif total > 0:
                    hi -= 1
                else:
                    lo += 1
        return triplets
if __name__=='__main__':
    # Demo: passing the class itself as `self` works because threeSum never
    # reads instance state.
    print(Solution.threeSum(Solution,[-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6]))
from time import sleep
from utils.decorators import logger_doc
from pages.app.ios.basePage import basePage
class MainPage(basePage):
    """NetEase Cloud Music home-page object (iOS)."""
    @logger_doc()
    def my(self):
        """Home page - tap the "My" ("我的") tab and return the click result."""
        return self.poco("我的").click()
|
# '\u4f60' is the single CJK character 你.
str1='\u4f60'
# encode() with no argument uses UTF-8.
str2 = str1.encode()
# decode() defaults to UTF-8 as well, restoring the original string.
str3 = str2.decode()
print(str3)
from manuf import manuf
import ipaddress
class Device:
    # Class-level defaults; each discovered device instance overwrites these.
    MAC = ""
    IP = ""
    Manufacturer = ""
class Filter:
    """Build a device inventory from a capture and filter packets for one device.

    NOTE(review): __profile_device and __device_list are class-level
    attributes, so the mutable device list is shared by every Filter
    instance — fine for one instance per run, surprising otherwise.
    """
    __profile_device = Device()
    __device_list = []
    def __init__(self, cap, cap_sum):
        # cap: full pyshark capture; cap_sum: summary-only capture of the same file.
        self.cap = cap
        self.cap_sum = cap_sum
    def create_device_list(self):
        """Collect unique devices (keyed by MAC) seen as source or destination.

        A device's IP is recorded only when a private, non-0.0.0.0 address is
        observed; devices with no resolvable manufacturer are skipped, and
        only devices that obtained a private IP reach the final list.
        """
        device_list_unfiltered = []
        print("Please wait while we generate the device list.")
        mac_parser = manuf.MacParser(update=True)
        for pkt in self.cap:
            # for/else: the else-branch runs only when no existing device
            # matched the packet's source MAC.
            for device in device_list_unfiltered:
                if device.MAC == pkt.eth.src:
                    try:
                        if device.IP == "" and pkt.ip.src != "0.0.0.0" and ipaddress.ip_address(pkt.ip.src).is_private:
                            device.IP = pkt.ip.src
                    except AttributeError:
                        # Non-IP packet (no pkt.ip layer); keep device as-is.
                        pass
                    break
            else:
                manufacturer = str(mac_parser.get_manuf(pkt.eth.src))
                if manufacturer != "None":
                    new_device = Device()
                    new_device.MAC = pkt.eth.src
                    new_device.Manufacturer = manufacturer
                    try:
                        if pkt.ip.src != "0.0.0.0" and ipaddress.ip_address(pkt.ip.src).is_private:
                            new_device.IP = pkt.ip.src
                        else:
                            # Treat public/zero addresses like a missing layer.
                            raise AttributeError
                    except AttributeError:
                        new_device.IP = ""
                    device_list_unfiltered.append(new_device)
            # Same logic for the destination side of the packet.
            for device in device_list_unfiltered:
                if device.MAC == pkt.eth.dst:
                    try:
                        if device.IP == "" and pkt.ip.dst != "0.0.0.0" and ipaddress.ip_address(pkt.ip.dst).is_private:
                            device.IP = pkt.ip.dst
                    except AttributeError:
                        pass
                    break
            else:
                manufacturer = str(mac_parser.get_manuf(pkt.eth.dst))
                if manufacturer != "None":
                    new_device = Device()
                    new_device.MAC = pkt.eth.dst
                    new_device.Manufacturer = manufacturer
                    try:
                        if pkt.ip.dst != "0.0.0.0" and ipaddress.ip_address(pkt.ip.dst).is_private:
                            new_device.IP = pkt.ip.dst
                        else:
                            raise AttributeError
                    except AttributeError:
                        new_device.IP = ""
                    device_list_unfiltered.append(new_device)
        # Keep only devices for which a private IP was observed.
        for device in device_list_unfiltered:
            if device.IP != "":
                self.__device_list.append(device)
    def print_device_list(self):
        """Print the discovered devices as a fixed-width numbered table."""
        print()
        print('{:^62s}'.format("Device List"))
        print('--------------------------------------------------------------')
        print('| {:^3s} | {:^17s} | {:^15s} | {:^14s} |'.format("No.", "MAC", "Private IP", "Manufacturer"))
        print('--------------------------------------------------------------')
        for i in range(0, len(self.__device_list)):
            print('| {:^3s} | {:^17s} | {:^15s} | {:^14s} |'.format(str(i), str(self.__device_list[i].MAC), str(self.__device_list[i].IP),
                                                                    str(self.__device_list[i].Manufacturer)))
        print('--------------------------------------------------------------')
        print("Note: Devices without a private IP address are struck out from the list.")
        print()
    def ask_for_device(self):
        """Prompt until the user picks a valid device index to profile."""
        while True:
            try:
                device_number = int(input("Please select the device you want to profile. (Enter device no.) "))
                if device_number < 0 or device_number > len(self.__device_list) - 1:
                    # Out-of-range index; reuse the ValueError handler below.
                    raise ValueError
                self.__profile_device = self.__device_list[device_number]
                print("You selected: " + self.__profile_device.Manufacturer)
                return
            except ValueError:
                print("Invalid input! Please try again.")
    def filter_packets(self):
        """Return (packets, summaries) restricted to the profiled device's MAC.

        The summary capture is aligned with the filtered packets by matching
        packet numbers; summaries with no corresponding packet are skipped.
        """
        filtered_cap = []
        filtered_cap_sum = []
        packet_numbers = []
        print("Now filtering packets", end="", flush=True)
        for pkt in self.cap:
            if self.__profile_device.MAC == pkt.eth.src or self.__profile_device.MAC == pkt.eth.dst:
                filtered_cap.append(pkt)
                packet_numbers.append(pkt.number)
        for pkt in self.cap_sum:
            if int(pkt.no) < int(packet_numbers[0]):
                continue
            # Drop queued packet numbers the summary stream has passed.
            while int(pkt.no) > int(packet_numbers[0]):
                packet_numbers.remove(packet_numbers[0])
                if not packet_numbers:
                    break
            if not packet_numbers:
                break
            if pkt.no == packet_numbers[0]:
                filtered_cap_sum.append(pkt)
                packet_numbers.remove(packet_numbers[0])
                if not packet_numbers:
                    break
        print("...Done")
        print()
        return filtered_cap, filtered_cap_sum
    def get_profile_device_ip(self):
        """Return the profiled device's private IP (may be "")."""
        return self.__profile_device.IP
    def get_profile_device_mac(self):
        """Return the profiled device's MAC address."""
        return self.__profile_device.MAC
    def get_profile_device_manufacturer(self):
        """Return the profiled device's manufacturer name."""
        return self.__profile_device.Manufacturer
if __name__ == "__main__":
    # Standalone smoke test: build a Filter from a capture file on argv[1].
    import pyshark
    import sys
    unfiltered_cap = pyshark.FileCapture(sys.argv[1]) # should not use only_summaries
    unfiltered_cap_sum = pyshark.FileCapture(sys.argv[1], only_summaries=True)
    pkt_filter = Filter(unfiltered_cap, unfiltered_cap_sum)
|
import sys
import getopt
import csv
import GA
import PGA
import random
from tester import Tester
def main():
    """Parse CLI flags, build a random population and run GA or PGA once.

    Flags: -p run PGA ("True"/"False"), -m mutation rate, -n genes per
    sequence, -l max gene value, -c condition range, -r error rate.
    """
    pga = False
    try:
        opts, args = getopt.getopt(sys.argv[1:],'p:m:n:l:c:r:')
    except getopt.GetoptError as err:
        print(str(err))
        # NOTE(review): this calls Python's builtin interactive help();
        # presumably a local usage() function was intended.
        help()
        sys.exit(1)
    # NOTE(review): pga is re-initialised here as the *string* "False",
    # making the boolean assignment above dead code; the -p flag and the
    # comparison below also use strings.
    pga = "False"
    mutation_rate = 0.05
    arg_num = 5
    max_value = 20
    condition_range = 5
    error_rate = 0.3
    for opt, arg in opts:
        if (opt == '-p'):
            pga = arg
        elif (opt == '-m'):
            mutation_rate = float(arg)
        elif (opt == '-n'):
            arg_num = int(arg)
        elif (opt == '-l'):
            max_value = int(arg)
        elif (opt == '-c'):
            condition_range = int(arg)
        elif (opt == '-r'):
            error_rate = float(arg)
    # Random initial population: 10 sequences of 5 genes, each gene a list
    # of arg_num integers in [0, max_value].
    population = []
    for _ in range(10):
        sequence = []
        for _ in range(5):
            gene = []
            for _ in range(arg_num):
                gene.append(random.randint(0,max_value))
            sequence.append(gene)
        population.append(sequence)
    evaluator = Tester()
    #without correction range
    # evaluator.reset(argnum=arg_num, max_value=max_value, condition_range=condition_range, error_rate=error_rate, correction_range=[])
    #with correction range
    evaluator.reset(argnum=arg_num, max_value=max_value, condition_range=condition_range, error_rate=error_rate, correction_range = [[(0, range(0,3), 0.7), (1, range(0,3), 0.7), (2, range(0,3), 0.7)] ,[(0, range(3,6), 0.7), (1, range(3,6), 0.7), (2, range(3,6), 0.7)] ,[(0, range(6,9), 0.7), (1, range(6,9), 0.7), (2, range(6,9), 0.7)]])
    if pga == "True":
        best_input, best_value, fitness_step, total_population_size, running_time = PGA.main(population = population, mutation_rate = mutation_rate, evaluator = evaluator, n = 3, m = 1, k = 20) # n, m, k hyperparamter
    else:
        best_input, best_value, fitness_step, total_population_size, running_time = GA.main(population = population, mutation_rate = mutation_rate, evaluator = evaluator)
    print(best_value, fitness_step, total_population_size, running_time)
if __name__ == '__main__':
    # Script entry point.
    main()
import sys
import random
from datetime import datetime, timedelta
# Timestamp layout used for both parsing the bounds and formatting output.
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
'''
here you can change the time period of wind's velocities generation and the start time's wind velocity
'''
CURRENT_TIME = datetime.strptime('2021-05-23 00:30:00', TIME_FORMAT) # start time
END_TIME = datetime.strptime('2021-05-23 23:30:30', TIME_FORMAT)
CURRENT_WIND_VELOCITY = 47999 # must be greater than 0
# Seed the RNG from the start velocity so a given configuration always
# produces the same sequence.
random.seed(CURRENT_WIND_VELOCITY)
class WindVelocity(object):
    """Mutable wind-velocity reading that random-walks via next()."""

    def __init__(self, velocity):
        self.velocity = velocity

    def __eq__(self, other):
        # Two readings are equal when their velocity values match.
        return self.velocity == other.velocity

    def __next__(self):
        # Random-walk step in [-1000, 1000); reflect negatives so the
        # velocity never drops below zero.
        self.velocity += random.randrange(-1000, 1000)
        if self.velocity < 0:
            self.velocity = -self.velocity
        return self
class WindTimestamp(object):
    """Iterator yielding 'timestamp,velocity\\n' CSV lines for a wind random walk.

    Consecutive equal velocities are skipped by recursing to the next sample.
    """
    def __init__(self, current_time=CURRENT_TIME, current_vel=CURRENT_WIND_VELOCITY):
        self.current_time = current_time
        self.current_velocity = WindVelocity(current_vel)
        self.__prev_velocity = WindVelocity(None) # private member
    def __iter__(self): # made WindTimestamp an iterable object
        return self
    def __next__(self): # generates next timestamp and velocity
        self.current_time += timedelta(seconds = random.randint(1, 60)) # moves time to the future random number seconds
        self.__prev_velocity.velocity = self.current_velocity.velocity
        next(self.current_velocity) # generates another wind velocity for this current time
        if self.current_velocity == self.__prev_velocity: # if the new generated velocity is the same with the last one, calculates a new time, velocity pair
            return next(self)
        timestamp = str(self.current_time.strftime(TIME_FORMAT))
        return '{0},{1}\n'.format(timestamp, self.current_velocity.velocity) # returns the "timestamp, velocity" as string to be pushed in the output file
def main():
    """Write 'timestamp,velocity' lines to argv[1] until END_TIME is passed."""
    try:
        outfile = sys.argv[1]
    except IndexError:
        print('Give output file')
        sys.exit(1)
    # initialize
    timestamp = WindTimestamp(CURRENT_TIME, CURRENT_WIND_VELOCITY)
    wind = iter(timestamp)
    with open(outfile, 'w') as f:
        # f.write("date, wind velocity\n")
        while (timestamp.current_time <= END_TIME):
            f.write(next(wind))

if __name__ == '__main__':
    main()
|
#! /usr/bin/python
# -*- encoding: utf-8 -*-
from django.contrib import admin
from models import *
class TuitAdmin(admin.ModelAdmin):
    """Admin configuration for the Tuit model."""
    # Changelist columns (tuple parentheses are optional in this position).
    list_display = 'texto', 'usuario'
    # Sidebar filter by author.
    list_filter = ('usuario', )
admin.site.register(Tuit, TuitAdmin)
|
import os
import sys
import numpy as np
# add BADE_DIR to path so the experiments/ and meta_learn/ packages resolve
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)

""" Generate meta-training and meta-testing data """
from experiments.data_sim import SinusoidDataset

random_state = np.random.RandomState(26)
task_environment = SinusoidDataset(random_state=random_state)
meta_train_data = task_environment.generate_meta_train_data(n_tasks=20, n_samples=5)
meta_test_data = task_environment.generate_meta_test_data(n_tasks=20, n_samples_context=5, n_samples_test=50)

""" Meta-Training w/ PACOH-MAP """
from meta_learn import GPRegressionMetaLearned

random_gp = GPRegressionMetaLearned(meta_train_data, weight_decay=0.2, num_iter_fit=12000, random_seed=30)
random_gp.meta_fit(meta_test_data, log_period=1000)

""" Meta-Testing w/ PACOH-MAP"""
print('\n')
ll, rmse, calib_err = random_gp.eval_datasets(meta_test_data)
print('Test log-likelihood:', ll)
print('Test RMSE:', rmse)
print('Test calibration error:', calib_err)

try:
    from matplotlib import pyplot as plt

    x_plot = np.linspace(-5, 5, num=150)
    x_context, y_context, x_test, y_test = meta_test_data[0]
    pred_mean, pred_std = random_gp.predict(x_context, y_context, x_plot)
    ucb, lcb = random_gp.confidence_intervals(x_context, y_context, x_plot, confidence=0.9)

    plt.scatter(x_test, y_test, label='target_testing points' )
    plt.scatter(x_context, y_context, label='target training points')
    plt.plot(x_plot, pred_mean)
    plt.fill_between(x_plot, lcb, ucb, alpha=0.2, label='90 % confidence interval')
    plt.legend()
    plt.title("meta-testing prediction on new target task")
    plt.show()
except ImportError:
    # Only a missing matplotlib should be tolerated here; the previous bare
    # `except:` also swallowed genuine plotting errors while still claiming
    # matplotlib was not installed.
    print('\n Could not plot results since matplotlib package is not installed. ')
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import array
import difflib
import distutils.dir_util
import filecmp
import functools
import operator
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
import uuid
def ZapTimestamp(filename):
    """Rewrite a midl.exe output file in place so it is byte-reproducible.

    Replaces embedded creation timestamps and MIDL compiler version strings
    (in .tlb binaries and in generated C/C++ text files) with fixed
    placeholder values of the same length.
    NOTE(review): treats the 'rb' contents as str — this is Python 2 code
    and will not run unmodified under Python 3.
    """
    contents = open(filename, 'rb').read()
    # midl.exe writes timestamp 2147483647 (2^31 - 1) as creation date into its
    # outputs, but using the local timezone. To make the output timezone-
    # independent, replace that date with a fixed string of the same length.
    # Also blank out the minor version number.
    if filename.endswith('.tlb'):
        # See https://chromium-review.googlesource.com/c/chromium/src/+/693223 for
        # a fairly complete description of the .tlb binary format.
        # TLB files start with a 54 byte header. Offset 0x20 stores how many types
        # are defined in the file, and the header is followed by that many uint32s.
        # After that, 15 section headers appear. Each section header is 16 bytes,
        # starting with offset and length uint32s.
        # Section 12 in the file contains custom() data. custom() data has a type
        # (int, string, etc). Each custom data chunk starts with a uint16_t
        # describing its type. Type 8 is string data, consisting of a uint32_t
        # len, followed by that many data bytes, followed by 'W' bytes to pad to a
        # 4 byte boundary. Type 0x13 is uint32 data, followed by 4 data bytes,
        # followed by two 'W' to pad to a 4 byte boundary.
        # The custom block always starts with one string containing "Created by
        # MIDL version 8...", followed by one uint32 containing 0x7fffffff,
        # followed by another uint32 containing the MIDL compiler version (e.g.
        # 0x0801026e for v8.1.622 -- 0x26e == 622). These 3 fields take 0x54 bytes.
        # There might be more custom data after that, but these 3 blocks are always
        # there for file-level metadata.
        # All data is little-endian in the file.
        assert contents[0:8] == 'MSFT\x02\x00\x01\x00'
        ntypes, = struct.unpack_from('<I', contents, 0x20)
        custom_off, custom_len = struct.unpack_from(
            '<II', contents, 0x54 + 4*ntypes + 11*16)
        assert custom_len >= 0x54
        # First: Type string (0x8), followed by 0x3e characters.
        assert contents[custom_off:custom_off+6] == '\x08\x00\x3e\x00\x00\x00'
        assert re.match(
            'Created by MIDL version 8\.\d\d\.\d{4} at ... Jan 1. ..:..:.. 2038\n',
            contents[custom_off+6:custom_off+6+0x3e])
        # Second: Type uint32 (0x13) storing 0x7fffffff (followed by WW / 0x57 pad)
        assert contents[custom_off+6+0x3e:custom_off+6+0x3e+8] == \
            '\x13\x00\xff\xff\xff\x7f\x57\x57'
        # Third: Type uint32 (0x13) storing MIDL compiler version.
        assert contents[custom_off+6+0x3e+8:custom_off+6+0x3e+8+2] == '\x13\x00'
        # Replace "Created by" string with fixed string, and fixed MIDL version with
        # 8.1.622 always.
        contents = (contents[0:custom_off+6] +
            'Created by MIDL version 8.xx.xxxx at a redacted point in time\n' +
            # uint32 (0x13) val 0x7fffffff, WW, uint32 (0x13), val 0x0801026e, WW
            '\x13\x00\xff\xff\xff\x7f\x57\x57\x13\x00\x6e\x02\x01\x08\x57\x57' +
            contents[custom_off + 0x54:])
    else:
        contents = re.sub(
            'File created by MIDL compiler version 8\.\d\d\.\d{4} \*/\r\n'
            '/\* at ... Jan 1. ..:..:.. 2038',
            'File created by MIDL compiler version 8.xx.xxxx */\r\n'
            '/* at a redacted point in time',
            contents)
        contents = re.sub(
            ' Oicf, W1, Zp8, env=(.....) \(32b run\), '
            'target_arch=(AMD64|X86) 8\.\d\d\.\d{4}',
            ' Oicf, W1, Zp8, env=\\1 (32b run), target_arch=\\2 8.xx.xxxx',
            contents)
        # TODO(thakis): If we need more hacks than these, try to verify checked-in
        # outputs when we're using the hermetic toolchain.
        # midl.exe older than 8.1.622 omit '//' after #endif, fix that:
        contents = contents.replace('#endif !_MIDL_USE_GUIDDEF_',
                                    '#endif // !_MIDL_USE_GUIDDEF_')
        # midl.exe puts the midl version into code in one place. To have
        # predictable output, lie about the midl version if it's not 8.1.622.
        # This is unfortunate, but remember that there's beauty too in imperfection.
        contents = contents.replace('0x801026c, /* MIDL Version 8.1.620 */',
                                    '0x801026e, /* MIDL Version 8.1.622 */')
    open(filename, 'wb').write(contents)
def overwrite_cls_guid_h(h_file, dynamic_guid):
    """Patch the DECLSPEC_UUID() class GUID in a generated .h file in place."""
    contents = open(h_file, 'rb').read()
    contents = re.sub('class DECLSPEC_UUID\("[^"]*"\)',
                      'class DECLSPEC_UUID("%s")' % str(dynamic_guid), contents)
    open(h_file, 'wb').write(contents)
def overwrite_cls_guid_iid(iid_file, dynamic_guid):
    """Patch the CLSID literal in the generated _i.c (IID) file in place.

    The GUID is rewritten as the comma-separated hex fields that
    MIDL_DEFINE_GUID expects.
    """
    contents = open(iid_file, 'rb').read()
    hexuuid = '0x%08x,0x%04x,0x%04x,' % dynamic_guid.fields[0:3]
    hexuuid += ','.join('0x%02x' % ord(b) for b in dynamic_guid.bytes[8:])
    contents = re.sub(r'MIDL_DEFINE_GUID\(CLSID, ([^,]*),[^)]*\)',
                      r'MIDL_DEFINE_GUID(CLSID, \1,%s)' % hexuuid, contents)
    open(iid_file, 'wb').write(contents)
def overwrite_cls_guid_tlb(tlb_file, dynamic_guid):
    """Patch the coclass GUID inside a .tlb binary and rebuild its GUID hashtable.

    NOTE(review): Python 2 code — array.array('c', ...) and the float
    division 0x80 / 4 below are both Python-2-only.
    """
    # See ZapTimestamp() for a short overview of the .tlb format. The 1st
    # section contains type descriptions, and the first type should be our
    # coclass. It points to the type's GUID in section 6, the GUID section.
    contents = open(tlb_file, 'rb').read()
    assert contents[0:8] == 'MSFT\x02\x00\x01\x00'
    ntypes, = struct.unpack_from('<I', contents, 0x20)
    type_off, type_len = struct.unpack_from('<II', contents, 0x54 + 4*ntypes)
    assert ord(contents[type_off]) == 0x25, "expected coclass"
    guidind = struct.unpack_from('<I', contents, type_off + 0x2c)[0]
    guid_off, guid_len = struct.unpack_from(
        '<II', contents, 0x54 + 4*ntypes + 5*16)
    assert guidind + 14 <= guid_len
    # Mutable copy so struct.pack_into can write in place.
    contents = array.array('c', contents)
    struct.pack_into('<IHH8s', contents, guid_off + guidind,
                     *(dynamic_guid.fields[0:3] + (dynamic_guid.bytes[8:],)))
    # The GUID is correct now, but there's also a GUID hashtable in section 5.
    # Need to recreate that too. Since the hash table uses chaining, it's
    # easiest to recompute it from scratch rather than trying to patch it up.
    hashtab = [0xffffffff] * (0x80 / 4)
    for guidind in range(guid_off, guid_off + guid_len, 24):
        guidbytes, typeoff, nextguid = struct.unpack_from(
            '<16sII', contents, guidind)
        words = struct.unpack('<8H', guidbytes)
        # midl seems to use the following simple hash function for GUIDs:
        guidhash = functools.reduce(operator.xor, [w for w in words]) % (0x80 / 4)
        nextguid = hashtab[guidhash]
        struct.pack_into('<I', contents, guidind + 0x14, nextguid)
        hashtab[guidhash] = guidind - guid_off
    hash_off, hash_len = struct.unpack_from(
        '<II', contents, 0x54 + 4*ntypes + 4*16)
    for i, hashval in enumerate(hashtab):
        struct.pack_into('<I', contents, hash_off + 4*i, hashval)
    open(tlb_file, 'wb').write(contents)
def overwrite_cls_guid(h_file, iid_file, tlb_file, dynamic_guid):
    """Replace the coclass GUID with dynamic_guid across .h, _i.c and .tlb."""
    # Fix up GUID in .h, _i.c, and .tlb. This currently assumes that there's
    # only one coclass in the idl file, and that that's the type with the
    # dynamic type.
    overwrite_cls_guid_h(h_file, dynamic_guid)
    overwrite_cls_guid_iid(iid_file, dynamic_guid)
    overwrite_cls_guid_tlb(tlb_file, dynamic_guid)
def main(arch, outdir, dynamic_guid, tlb, h, dlldata, iid, proxy, idl, *flags):
    """Copy checked-in midl outputs into place and, on Windows, verify them.

    arch: path to an environment-block file whose name encodes the target
    architecture ('...x86'/'...x64'). dynamic_guid: 'none' or a GUID string
    to substitute for the coclass GUID. Remaining args name the midl output
    files and the input .idl; extra *flags are passed to midl.exe verbatim.
    Returns 0 on success; exits 1 when freshly generated outputs differ from
    the checked-in ones.
    """
    # Copy checked-in outputs to final location.
    THIS_DIR = os.path.abspath(os.path.dirname(__file__))
    source = os.path.join(THIS_DIR, '..', '..', '..',
        'third_party', 'win_build_output', outdir.replace('gen/', 'midl/'))
    if os.path.isdir(os.path.join(source, os.path.basename(idl))):
        source = os.path.join(source, os.path.basename(idl))
    source = os.path.join(source, arch.split('.')[1])  # Append 'x86' or 'x64'.
    source = os.path.normpath(source)
    distutils.dir_util.copy_tree(source, outdir, preserve_times=False)
    if dynamic_guid != 'none':
        overwrite_cls_guid(os.path.join(outdir, h),
                           os.path.join(outdir, iid),
                           os.path.join(outdir, tlb),
                           uuid.UUID(dynamic_guid))
    # On non-Windows, that's all we can do.
    if sys.platform != 'win32':
        return 0
    # On Windows, run midl.exe on the input and check that its outputs are
    # identical to the checked-in outputs (after possibly replacing their main
    # class guid).
    tmp_dir = tempfile.mkdtemp()
    delete_tmp_dir = True
    # Read the environment block from the file. This is stored in the format used
    # by CreateProcess. Drop last 2 NULs, one for list terminator, one for
    # trailing vs. separator.
    env_pairs = open(arch).read()[:-2].split('\0')
    env_dict = dict([item.split('=', 1) for item in env_pairs])
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', tmp_dir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    try:
        popen = subprocess.Popen(args, shell=True, env=env_dict,
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = popen.communicate()
        # Filter junk out of stdout, and write filtered versions. Output we want
        # to filter is pairs of lines that look like this:
        # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
        # objidl.idl
        lines = out.decode('utf-8').splitlines()
        prefixes = ('Processing ', '64 bit Processing ')
        processing = set(os.path.basename(x)
                         for x in lines if x.startswith(prefixes))
        for line in lines:
            if not line.startswith(prefixes) and line not in processing:
                print(line)
        if popen.returncode != 0:
            return popen.returncode
        for f in os.listdir(tmp_dir):
            ZapTimestamp(os.path.join(tmp_dir, f))
        # Now compare the output in tmp_dir to the copied-over outputs.
        diff = filecmp.dircmp(tmp_dir, outdir)
        if diff.diff_files:
            print('midl.exe output different from files in %s, see %s'
                  % (outdir, tmp_dir))
            for f in diff.diff_files:
                if f.endswith('.tlb'): continue
                fromfile = os.path.join(outdir, f)
                tofile = os.path.join(tmp_dir, f)
                print(''.join(difflib.unified_diff(open(fromfile, 'U').readlines(),
                                                   open(tofile, 'U').readlines(),
                                                   fromfile, tofile)))
            # Keep the temp dir around so the developer can inspect/rebaseline.
            delete_tmp_dir = False
            print('To rebaseline:')
            print('  copy /y %s\* %s' % (tmp_dir, source))
            sys.exit(1)
        return 0
    finally:
        if os.path.exists(tmp_dir) and delete_tmp_dir:
            shutil.rmtree(tmp_dir)
if __name__ == '__main__':
    # Forward all CLI args positionally to main(); surplus args become *flags.
    sys.exit(main(*sys.argv[1:]))
|
from _mandatory_requirement import MandatoryRequirement
class SmokeServiceIffAgreementHasSmokeDetectors(MandatoryRequirement):
    """Require one SMOKE service whenever WSMOKE items are present without it."""
    def check(self):
        # If the agreement has any WSMOKE quantity but no SMOKE quantity,
        # add exactly one mandatory SMOKE product.
        if self.total_quantities['WSMOKE'] > 0 and not self.total_quantities['SMOKE']:
            self.add_mandatory_product('SMOKE', 1)
|
def sum(x, y = 10):
    """Return x + y; y defaults to 10.

    NOTE(review): the name shadows the builtin sum() in this module; kept
    because the demo calls below depend on it.
    """
    return x + y

# 20
print(sum(10))
# 30
print(sum(10,20))

def var_sum(*args):
    """Return the sum of all positional arguments (0 for no arguments)."""
    # Accumulate into `total` — the original used a local named `sum`,
    # shadowing the module-level function above.
    total = 0
    for value in args:
        total += value
    return total

# 60
print(var_sum(10, 20, 30))
|
from unittest.mock import patch
from django.core. management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    """Tests for the custom wait_for_db management command."""
    def test_wait_for_db_ready(self):
        """Test waiting for db to be available"""
        # Patch the connection lookup so the command sees the DB as up
        # immediately.
        with patch("django.db.utils.ConnectionHandler.__getitem__") as gi:
            gi.return_value = True
            call_command("wait_for_db")
            self.assertEqual(gi.call_count, 1)
    # Get rid of sleep time only during testing to speed up execution
    @patch("time.sleep", return_value=True)
    def test_wait_for_db(self, ts):
        """Test waiting for db"""
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            # Raise operational error first 5 times, 6th time return True
            gi.side_effect = [OperationalError] * 5 + [True]
            call_command("wait_for_db")
            self.assertEqual(gi.call_count, 6)
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from apps.client.decorators import sn_required, snlogin_required
# NOTE(review): patterns() with dotted-string views was deprecated in Django
# 1.8 and removed in 1.10; this module presumably targets an older Django.
# Porting would look like: urlpatterns = [url(r'^friendList/$', friendList)].
urlpatterns = patterns('',
    (r'^friendList/$', 'apps.client.v_friend.friendList'),
)
|
"""
文件的信息处理
"""
f = open("D:/test/笔记.txt","r")
#查看文件编码
print(f.encoding)
#查看文件名
print(f.name)
#查看文件是否关闭,如果文件已经关闭,返回True,否则返回False
print(f.closed)
#查看文件的读写权限
print(f.mode)
|
"""
SGD optimizer class
Siddharth Sigtia
Feb,2014
C4DM
"""
import numpy, sys
import theano
import theano.tensor as T
import cPickle
import os
from theano.compat.python2x import OrderedDict
import copy
import pdb
class SGD_Optimizer():
def __init__(self,params,inputs,costs,updates_old=None,consider_constant=[],momentum=True):
"""
params: parameters of the model
inputs: list of symbolic inputs to the graph
costs: list of costs to be evaluated. The first element MUST be the objective.
updates_old: OrderedDict from previous graphs that need to be accounted for by SGD, typically when scan is used.
consider_constant: list of theano variables that are passed on to the grad method. Typically RBM.
"""
self.inputs = inputs
self.params = params
self.momentum = momentum
if self.momentum:
self.params_mom = []
for param in self.params:
param_init = theano.shared(value=numpy.zeros(param.get_value().shape,dtype=theano.config.floatX),name=param.name+'_mom')
self.params_mom.append(param_init)
self.costs = costs
self.num_costs = len(costs)
print"+++++++costs+++++++++++++"
print costs[1]
print"++++++++++++++++++++"
assert (isinstance(costs,list)), "The costs given to the SGD class must be a list, even for one element."
self.updates_old = updates_old
self.consider_constant = consider_constant
self.build_train_fn()
def build_train_fn(self,):
self.lr_theano = T.scalar('lr')
self.grad_inputs = self.inputs + [self.lr_theano]
if self.momentum:
self.mom_theano = T.scalar('mom')
self.grad_inputs = self.grad_inputs + [self.mom_theano]
self.gparams = T.grad(self.costs[0],self.params,consider_constant=self.consider_constant)
if not self.momentum:
print '=======================7.Building SGD optimization graph without momentum'
updates = OrderedDict((i, i - self.lr_theano*j) for i, j in zip(self.params, self.gparams))
else:
print '========================7.Building SGD optimization graph with momentum'
updates = OrderedDict()
for param,param_mom,gparam in zip(self.params,self.params_mom,self.gparams):
param_inc = self.mom_theano * param_mom - self.lr_theano * gparam
updates[param_mom] = param_inc
updates[param] = param + param_inc
self.calc_cost = theano.function(self.inputs,self.costs)
if self.updates_old:
updates_old = copy.copy(self.updates_old) #To avoid updating the model dict if updates dict belongs to model class, very unlikely case.
self.updates_old.update(updates)
else:
self.updates_old = OrderedDict()
self.updates_old.update(updates)
self.f = theano.function(self.grad_inputs, self.costs, updates=self.updates_old,allow_input_downcast=True)
def train(self,train_set,valid_set=None,learning_rate=0.1,num_epochs=500,save=False,output_folder=None,lr_update=None,mom_rate=0.9):
self.best_cost = numpy.inf
self.init_lr = learning_rate
self.lr = numpy.array(learning_rate)
self.mom_rate = mom_rate
self.output_folder = output_folder
self.train_set = train_set
self.valid_set = valid_set
self.save = save
self.lr_update = lr_update
try:
for u in xrange(num_epochs):
cost = []
for i in self.train_set.iterate(True):
inputs = i + [self.lr]
if self.momentum:
inputs = inputs + [self.mom_rate]
#cost.append(self.f(*inputs))
cost.append(self.f(*inputs))
mean_costs = numpy.mean(cost,axis=0)
print '===Epoch %i===' %(u+1)
print '***Train Results***'
for i in xrange(self.num_costs):
print "Cost %i: %f"%(i,mean_costs[i])
if not valid_set:
this_cost = numpy.absolute(numpy.mean(cost, axis=0))
if this_cost < best_cost:
best_cost = this_cost
print 'Best Params!'
if save:
self.save_model()
sys.stdout.flush()
else:
self.perform_validation()
if lr_update:
self.update_lr(u+1,begin_anneal=1)
except KeyboardInterrupt:
print 'Training interrupted.'
def perform_validation(self,):
cost = []
for i in self.valid_set.iterate(True):
cost.append(self.calc_cost(*i))
mean_costs = numpy.mean(cost,axis=0)
print '***Validation Results***'
for i in xrange(self.num_costs):
print "Cost %i: %f"%(i,mean_costs[i])
this_cost = numpy.absolute(numpy.mean(cost, axis=0))[1] #Using accuracy as metric
if this_cost < self.best_cost:
self.best_cost = this_cost
print 'Best Params!'
if self.save:
self.save_model()
def save_model(self,):
best_params = [param.get_value().copy() for param in self.params]
if not self.output_folder:
cPickle.dump(best_params,open('best_params.pickle','w'))
else:
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
save_path = os.path.join(self.output_folder,'best_params.pickle')
cPickle.dump(best_params,open(save_path,'w'))
def update_lr(self,count,update_type='annealed',begin_anneal=500.,min_lr=0.01,decay_factor=1.2):
if update_type=='annealed':
scale_factor = float(begin_anneal)/count
self.lr = self.init_lr*min(1.,scale_factor)
if update_type=='exponential':
new_lr = float(self.init_lr)/(decay_factor**count)
if new_lr < min_lr:
self.lr = min_lr
else:
self.lr = new_lr
|
from __future__ import (division, print_function)
from WMCore.REST.HeartbeatMonitorBase import HeartbeatMonitorBase
from WMCore.WorkQueue.WorkQueue import globalQueue
class HeartbeatMonitor(HeartbeatMonitorBase):
    """Heartbeat monitor that also uploads Global WorkQueue statistics to WMStats."""

    def addAdditionalMonitorReport(self, config):
        """Collect Global Workqueue statistics for WMStats and return them.

        Metrics cover Available, Negotiating and Acquired elements: element
        counts and estimated job totals grouped by status, by priority, by
        agent, and by site -- the latter both respecting data locality and
        ignoring it (AAA).

        TODO (still to be done):
        * Available elements: flag those without a common site list, older
          than ~7 days, or creating more than ~30k jobs.
        * Acquired elements: flag those older than ~7 days.
        """
        self.logger.info("Collecting GlobalWorkqueue statistics...")
        # Whole documents are fetched for these states to build site metrics.
        statuses = ['Available', 'Negotiating', 'Acquired']
        workqueue = globalQueue(**config.queueParams)
        return workqueue.monitorWorkQueue(statuses)
# Future value of an annuity with fixed monthly contributions.
rate = int(input('Enter in your rate:'))
years = int(input('Enter in the amount of years:'))
mi = int(input('Enter in your monthly investment:'))
periods = years * 12
# Monthly rate as a fraction: annual % / 12 months / 100.
mr = rate / (1200)
# Future value of an ordinary annuity: FV = P * ((1 + r)^n - 1) / r
fv = mi * (((1 + mr) ** (periods)) - 1) / (mr)
print('input annual rate without % sign:', rate)
print('Your monthly rate is', mr)
print('input years you plan on saving for:', years)
# BUG FIX: the original hard-coded "3 years" here regardless of the input.
print(years, 'years converted to months =', periods)
print('input monthly investment:', mi)
print('The future value of your investment will be', format(fv, '.2f'))
|
from pathlib import Path
from pie import *
from pie_docker import *
from pie_docker_compose import *
from pie_env_ext import *
from .utils import requires_compose_project_name
ROOT_DIR = Path('.').absolute()
ENV_DIR = ROOT_DIR/'docker'
DOCKER_COMPOSE = DockerCompose(ROOT_DIR/'docker/shared_db.docker-compose.yml')
def INSTANCE_ENVIRONMENT():
    """Layered environment for this compose project: shared defaults, then
    per-project overrides, then local developer overrides."""
    COMPOSE_PROJECT_NAME = requires_compose_project_name()
    return env.from_files(
        ENV_DIR/'shared_db.env',
        ENV_DIR/f'shared_db_{COMPOSE_PROJECT_NAME}.env',
        ENV_DIR/f'shared_db_{COMPOSE_PROJECT_NAME}_local.env')
@task
def start():
    """Start the shared DB containers in the background (docker-compose up -d)."""
    with INSTANCE_ENVIRONMENT():
        DOCKER_COMPOSE.cmd('up', options=['-d'])

@task
def stop():
    """Stop and remove the shared DB containers (docker-compose down)."""
    with INSTANCE_ENVIRONMENT():
        DOCKER_COMPOSE.cmd('down')

@task
def restart():
    """Convenience wrapper: stop, then start."""
    stop()
    start()
@task
def reset():
    """Removes the postgres_data volume"""
    COMPOSE_PROJECT_NAME = requires_compose_project_name()
    Docker().cmd('volume rm', [f'{COMPOSE_PROJECT_NAME}_postgresql_data'])

@task
def destroy():
    """Destroys containers, images, networks and volumes"""
    with INSTANCE_ENVIRONMENT():
        DOCKER_COMPOSE.cmd('down', options=['-v', '--rmi local'])

@task
def logs():
    """Follow the last 40 log lines of the compose services."""
    with INSTANCE_ENVIRONMENT():
        DOCKER_COMPOSE.cmd('logs', options=['--tail=40', '-f'])

@task
def show_env():
    """Print the environment of the running postgres container."""
    COMPOSE_PROJECT_NAME = requires_compose_project_name()
    Docker().cmd('exec', [f'{COMPOSE_PROJECT_NAME}_postgres_1', 'env'])
|
if __name__ == "__main__":
from function import sum
print(sum(7,8)) |
#!/usr/bin/env python3
import argparse
from http import server as httpserver
class TestingRequestHandler(httpserver.SimpleHTTPRequestHandler):
    """Static-file handler that maps non-/static/ requests onto ./out/<page>.html,
    with '/' serving the 'global' page."""

    def translate_path(self, path):
        is_static_asset = path.startswith("/static/")
        if not is_static_asset:
            if path == "/":
                path = "/global"
            path = "/out" + path + ".html"
        return super(TestingRequestHandler, self).translate_path(path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('port', action='store',
default=8000, type=int,
nargs='?',
help='Specify alternate port [default: 8000]')
args = parser.parse_args()
handler_class = TestingRequestHandler
httpserver.test(HandlerClass=handler_class, port=args.port)
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 08 11:38:05 2014
@author: amaccione
Utility functions
"""
import numpy as np
# sampling frequency in samples per second (to be set, or by default 7022);
# used by all of the conversion helpers below
sampFreq = 7022
### function to convert sec to frames
def SToF(sec):
    """Convert a duration in seconds to frames at the module sampling frequency."""
    frames = sec * sampFreq
    return frames
def MsToF(mSec):
    """Convert a duration in milliseconds to frames at the module sampling frequency."""
    frames = mSec * sampFreq / 1000
    return frames
def FToS(frame):
    """Convert a frame count to seconds at the module sampling frequency."""
    seconds = frame / sampFreq
    return seconds
def MsToS(mSec):
    """Convert a duration in milliseconds to seconds."""
    return 0.001 * mSec
def StringArrayComparison(array1, array2):
    """
    Compare two lists of strings and return, for every matching pair, a row
    [string, index_in_array1, index_in_array2].  All matching index pairs are
    reported, in array1-major order.

    array1: the first list of strings to be compared
    array2: the second list of strings to be compared
    """
    resultList = []
    # enumerate() instead of range(len(...)): same O(n*m) pairing, clearer.
    for id1, s1 in enumerate(array1):
        for id2, s2 in enumerate(array2):
            if s1 == s2:
                resultList.append([s1, id1, id2])
    return resultList
from pystae import * |
import requests
from bs4 import BeautifulSoup as bs
import re
url ="https://www.coupang.com/np/search?q=%EC%97%90%EC%96%B4%ED%8C%9F&channel=recent"
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"
}
res = requests.get(url, headers=headers)
soup = bs(res.text, "lxml")
items = soup.find_all("li", attrs={"class":re.compile("^search-product")})
# print(items[0].find("div", attrs={"class":"name"}).get_text())
for item in items:
name = item.find("div", attrs={"class":"name"}).get_text()
price = item.find("strong", attrs={"class":"price-value"}).get_text()
rating = item.find("em", attrs={"class":"rating"}).get.text()
print(name,price,rating)
|
from .db import db
from app.models import User, Chef
from datetime import datetime
class Appointment(db.Model):
    """A booking linking a user to a chef at a given date/time."""
    __tablename__ = 'appointments'

    id = db.Column(db.Integer, primary_key=True)
    # Owning user and chef (both required)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    chef_id = db.Column(db.Integer, db.ForeignKey('chefs.id'), nullable=False)
    notes = db.Column(db.String(255))
    date = db.Column(db.DateTime, nullable=False)
    # Set by the database at insert time
    createdAt = db.Column(db.DateTime(timezone=True), server_default = db.func.now())

    # NOTE(review): back_populates targets are singular ('appointment') --
    # confirm User and Chef declare matching relationship attributes.
    user = db.relationship('User', back_populates='appointment')
    chef = db.relationship('Chef', back_populates='appointment')

    def to_dict(self):
        """Serialize for API responses; requires self.user to be loaded."""
        return {
            "id": self.id,
            "user": self.user.username,
            "user_id": self.user_id,
            "chef_id": self.chef_id,
            "notes": self.notes,
            "date": self.date,
            "createdAt": self.createdAt,
        }
|
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import os
import random
import sys
class MyDataset(Dataset):
    """CSV-backed dataset: column 0 of each row is the label `y`, the
    remaining columns are the feature vector `x`."""

    def __init__(self, filename):
        # Load the whole CSV into memory as a float array.
        self.df = np.genfromtxt(filename, delimiter=',')
        self.epoch_len = len(self.df)
        # Number of feature columns (everything except the label).
        self.feature_len = len(self.df[0]) - 1

    def __len__(self):
        return self.epoch_len

    def __getitem__(self, idx):
        row = self.df[idx]
        return {"x": row[1:], 'y': row[0]}
'''
class MolDataset(Dataset):
def __init__(self, filename):
#Initialize epoch_len
#Identify where the data is, and store as object variable
self.df = np.load(filename).items()
self.chemids = self.df.keys()
self.epoch_len = len(self.chemids)
def __len__(self):
return self.epoch_len
def __getitem__(self, idx):
#Return a dict with the sample, like the code snippet below:
chemid = self.chemids[idx]
sample = {"chemid": chemid, 'label': self.df[chemid][0], 'smiles' : self.df[chemid][1], 'adj' : self.df[chemid][2]}
return sample
''' |
import copy
import os
from box import Box
# Default experiment configuration tree; call get() for a mutable copy.
C = Box()

# misc options
C.auto_lr_find = True

# checkpointing
C.checkpoint = Box()
C.checkpoint.name = '{epoch}-{val_loss:.2f}-{val_dice:.2f}'
C.checkpoint.monitor = 'val_loss'
# NOTE(review): 'max' mode with a *loss* metric looks inverted -- either
# monitor val_dice with 'max' or val_loss with 'min'; confirm intent.
C.checkpoint.monitor_mode = 'max'

# early stopping
C.early_stopping = Box()
C.early_stopping.min_delta = 0.1
C.early_stopping.patience = 10
C.early_stopping.verbose = True

# trainer
C.trainer = Box()
C.trainer.accumulate_grad_batches = 1

# inputs
C.inputs = Box()
C.inputs.size = [256, 256]
# Per-channel normalization statistics
C.inputs.normalize = Box()
C.inputs.normalize.mean = [0.65459856, 0.48386562, 0.69428385]
C.inputs.normalize.std = [0.15167958, 0.23584107, 0.13146145]
C.inputs.normalize.always_apply = True

# augmentation
C.augmentation = Box()
C.augmentation.enable = True

# models
C.model = Box()
C.model.name = 'basicunet'
C.model.device = "cuda"
C.model.parameters = Box()

# datasets
C.datasets = Box()
# list of the dataset names for training, as present in paths_catalog.py
C.datasets.train = Box(data_name="hubmapd0_overfit", split="train")
# list of the dataset names for testing, as present in paths_catalog.py
C.datasets.test = Box(data_name="hubmapd0_overfit", split="validation")
C.datasets.num_classes = 1

# dataloader
C.dataloader = Box()
C.dataloader.shuffle = True
C.dataloader.num_workers = 4

# solver
C.solver = Box()
C.solver.loss = Box()
C.solver.loss.params = Box()
# Relative weights of the individual loss terms
C.solver.loss.params.dice = 0.5
C.solver.loss.params.bce = 1.0
C.solver.loss.params.lovasz = 0.0
C.solver.loss.params.soft_bce = False
C.solver.optimizer = Box()
C.solver.optimizer.name = "Adam"
C.solver.optimizer.params = {}
C.solver.scheduler = Box()
C.solver.scheduler.name = "OneCycleLR"
C.solver.scheduler.params = Box(total_steps=10000)
C.solver.scheduler_meta = Box(interval="step", monitor="val_loss")
C.solver.ims_per_batch = 32
C.solver.default_lr = 0.0001

# test specific
C.test = Box()
C.test.expected_results = []
C.test.expected_results_sigma_tol = 4
# number of images per batch
# this is global, so if we have 8 gpus and ims_per_batch = 16, each gpu will
# see 2 images per batch
C.test.ims_per_batch = 8

def get():
    """Return a deep copy of the default config so callers can mutate freely."""
    return copy.deepcopy(C)
|
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, SubmitField, PasswordField
from wtforms.validators import DataRequired
|
r"""
###############################################################################
:mod:`OpenPNM.Network`: Classes related the creation of network topology
###############################################################################
Contents
--------
**GenericNetwork** -- Contains many methods for working with the topology of the
networks
**Subclasses** -- Inherit from GenericNetwork, and contain additional methods for
actually generating topology.
Classes
-------
.. autoclass:: GenericNetwork
:members:
.. autoclass:: Cubic
:members:
.. autoclass:: Delaunay
:members:
.. autoclass:: DelaunayCubic
:members:
.. autoclass:: MatFile
:members:
"""
from .__GenericNetwork__ import GenericNetwork
from .__Cubic__ import Cubic
from .__Delaunay__ import Delaunay
from .__DelaunayCubic__ import DelaunayCubic
from .__MatFile__ import MatFile
from .__TestNet__ import TestNet
from . import models
|
#!/usr/bin/env python
"""
File: model_trainer.py
Date: 11/17/18
Author: Jon Deaton (jdeaton@stanford.edu)
"""
import os, sys
import logging, argparse
import datetime
import tensorflow as tf
from deep_model.config import Configuration
from deep_model.params import Params
from deep_model.ops import f1
class ModelTrainer(object):
    """Drives training of a TF1 graph-mode model: builds the graph, runs the
    epoch/batch loops, logs metrics to TensorBoard and saves checkpoints."""

    def __init__(self, model, config, params, logger, restore_model_path=None):
        """
        model: callable producing (output, cost) from inputs/labels/is_training
        config: run configuration (paths, logging/checkpoint frequencies)
        params: hyper-parameters (epochs, learning rate, seed, ...)
        logger: logging.Logger used for all progress output
        restore_model_path: optional checkpoint directory to restore from
        """
        assert isinstance(logger, logging.Logger)
        assert isinstance(config, Configuration)
        assert isinstance(params, Params)
        self.model = model
        self.config = config
        self.params = params
        self.logger = logger
        self.restore = restore_model_path is not None
        self.restore_model_path = restore_model_path
        # Each run logs under its own timestamped TensorBoard sub-directory.
        self.tensorboard_dir = os.path.join(config.tensorboard_dir, self._get_job_name())
        self.logging_metrics = dict()
        self.tensorboard_metrics = dict()
        tf.random.set_random_seed(params.seed)
        self.epoch = 0

    def train(self, train_dataset, test_dataset, trainable_scopes=None):
        """Build the computation graph and run the full training loop.

        trainable_scopes: optional list of variable scopes to restrict
        optimization to (e.g. for fine-tuning); None trains all variables.
        """
        assert isinstance(train_dataset, tf.data.Dataset)
        assert isinstance(test_dataset, tf.data.Dataset)
        self._setup_dataset_iterators(train_dataset, test_dataset)
        # Convention: the iterator yields (*model_inputs, labels).
        endpoints = self.iterator.get_next()
        labels = endpoints[-1]
        model_inputs = endpoints[:-1]
        # Create the model's computation graph
        self.logger.info("Instantiating model...")
        self.is_training = tf.placeholder(tf.bool)
        self.output, self.cost = self.model(*model_inputs, labels, self.is_training)
        self.output = tf.identity(self.output, "output")
        self._define_logging_metrics(self.output, labels)
        # Get list of variables to train
        var_list = None
        if trainable_scopes is not None:
            self.logger.info("Trainable scopes: %s" % trainable_scopes)
            var_list = list()
            for scope in trainable_scopes:
                vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                var_list.extend(vars)
        # Define the optimization strategy
        self.optimizer, self.global_step = self._get_optimizer(self.cost, var_list=var_list)
        init = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        self.logger.debug("Creating TensorFlow session")
        with tf.Session() as self.sess:
            self._configure_tensorboard()
            # Initialize graph, data iterators, and model saver
            self.sess.run(init)
            self.sess.run(init_l)
            self.train_handle = self.sess.run(self.train_iterator.string_handle())
            self.saver = self._make_saver()
            self.saver.save(self.sess, self.config.model_file, global_step=self.global_step)
            if self.restore:
                self.logger.info("Restoring model from checkpoint: %s" % self.restore_model_path)
                saver = tf.train.Saver(self.model.restore_tensors)
                saver.restore(self.sess, tf.train.latest_checkpoint(self.restore_model_path))
                self.logger.info("Model restored.")
            # Training epochs
            self.logger.info("Training...")
            for self.epoch in range(self.params.epochs):
                self.sess.run(self.train_iterator.initializer)
                self.batch = 0
                while True:
                    try:
                        self._train_batch(self.train_handle)
                        if self.batch % self.config.tensorboard_freq == 0:
                            # Re-initialize the test iterator so evaluation
                            # restarts from the beginning of the test set.
                            self.sess.run(self.test_iterator.initializer)
                            self.test_handle = self.sess.run(self.test_iterator.string_handle())
                            self._report_batch()
                            self._log_tensorboard()
                        if self.batch % self.config.save_freq == 0:
                            self._save_model()
                    except tf.errors.OutOfRangeError:
                        # Train iterator exhausted: end of this epoch.
                        self.logger.info("End of epoch %d" % self.epoch)
                        break
            self.logger.info("Training complete.")
            self._save_model()

    def _make_saver(self):
        """Saver over trainable vars plus any extras the model flags for saving."""
        vars_to_save = tf.trainable_variables() + self.model.variables_to_save
        return tf.train.Saver(vars_to_save, save_relative_paths=True,
                              max_to_keep=self.config.max_to_keep,
                              keep_checkpoint_every_n_hours=self.config.keep_checkpoint_every_n_hours)

    def _define_logging_metrics(self, output, labels):
        """Build cost/F1/accuracy tensors that are logged during training.

        NOTE(review): rounds both output and labels -- assumes binary
        targets/outputs in [0, 1]; confirm against the model definition.
        """
        predictions = tf.round(output)
        correct = tf.equal(predictions, tf.round(labels))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        # Accuracy restricted to the positive class (label == 1).
        positive_mask = tf.equal(tf.round(labels), 1)
        correct_positive = tf.boolean_mask(correct, positive_mask)
        positive_accuracy = tf.reduce_mean(tf.cast(correct_positive, tf.float32))
        self.logging_metrics["cost"] = self.cost
        self.logging_metrics["F1"] = f1(labels, predictions)
        self.logging_metrics["accuracy"] = accuracy
        self.logging_metrics["positive accuracy"] = positive_accuracy

    def _setup_dataset_iterators(self, train_dataset, test_dataset):
        """Create a shared string-handle iterator plus per-dataset iterators."""
        assert isinstance(train_dataset, tf.data.Dataset)
        assert isinstance(test_dataset, tf.data.Dataset)
        # dataset iterators (for selecting dataset to feed in)
        self.dataset_handle = tf.placeholder(tf.string, shape=[])
        self.iterator = tf.data.Iterator.from_string_handle(self.dataset_handle,
                                                            train_dataset.output_types,
                                                            train_dataset.output_shapes)
        self.train_iterator = train_dataset.make_initializable_iterator()
        self.test_iterator = test_dataset.make_initializable_iterator()

    def _configure_tensorboard(self):
        """Merge train/test scalar summaries and open the TensorBoard writer."""
        # Configure all of the metrics to log to TensorBoard
        metrics = list()
        train_cost = tf.summary.scalar('train_cost', self.cost)
        metrics.append(train_cost)
        # Also add all of the logging metrics
        for metric_name in self.logging_metrics:
            metric_tensor = self.logging_metrics[metric_name]
            metric_summary = tf.summary.scalar("test_%s" % metric_name, metric_tensor)
            metrics.append(metric_summary)
        self.merged_summary = tf.summary.merge(metrics)
        self.writer = tf.summary.FileWriter(logdir=self.tensorboard_dir)
        # Add the pretty graph viz
        self.writer.add_graph(self.sess.graph)

    def _train_batch(self, train_set_handle):
        """Run one optimization step on the training set and log its cost."""
        feed_dict = {self.is_training: True,
                     self.dataset_handle: train_set_handle}
        train_summary, _, cost = self.sess.run([self.merged_summary, self.optimizer, self.cost],
                                               feed_dict=feed_dict)
        self.logger.info("Epoch: %d, Batch %d: cost: %f" % (self.epoch, self.batch, cost))
        self.writer.add_summary(train_summary, global_step=self.sess.run(self.global_step))
        self.batch += 1

    def _report_batch(self):
        """Log each test-set metric's current value via the logger."""
        for metric_name in self.logging_metrics:
            tensor = self.logging_metrics[metric_name]
            value = self.sess.run(tensor, feed_dict={self.is_training: False,
                                                     self.dataset_handle: self.test_handle})
            self.logger.info("Test %s: %s" % (metric_name, value))

    def _log_tensorboard(self):
        """Evaluate the merged summaries on the test set and flush to disk."""
        self.logger.info("Logging test output to TensorBoard")
        test_summary = self.sess.run(self.merged_summary,
                                     feed_dict={self.is_training: False,
                                                self.dataset_handle: self.test_handle})
        self.writer.add_summary(test_summary, global_step=self.sess.run(self.global_step))
        self.writer.flush()

    def _save_model(self):
        """Checkpoint the current session state to config.model_file."""
        self.logger.info("Saving model...")
        self.saver.save(self.sess, self.config.model_file, global_step=self.global_step)
        self.logger.info("Model save complete.")

    def _get_optimizer(self, cost, var_list=None):
        """Return (train_op, global_step): Adam at a fixed learning rate, or
        plain SGD with exponential decay; BatchNorm update ops attached."""
        global_step = tf.Variable(0, name='global_step', trainable=False)
        if self.params.adam:
            # with Adam optimization: no learning rate decay
            learning_rate = tf.constant(self.params.learning_rate, dtype=tf.float32)
            sgd = tf.train.AdamOptimizer(learning_rate=learning_rate, name="Adam")
        else:
            # Stochastic Gradient Descent Optimizer with exponential learning rate decay
            learning_rate = tf.train.exponential_decay(self.params.learning_rate,
                                                       global_step=global_step,
                                                       decay_steps=100000,
                                                       decay_rate=self.params.learning_decay_rate,
                                                       staircase=False,
                                                       name="learning_rate")
            sgd = tf.train.GradientDescentOptimizer(learning_rate=learning_rate, name="SGD")
        # this incantation ensures the BatchNorm moving mean/variance are updated with each step
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = sgd.minimize(cost, var_list=var_list, name='optimizer', global_step=global_step)
        return optimizer, global_step

    def _get_job_name(self):
        # makes an identifying name for this training session
        now = '{:%Y-%m-%d.%H-%M}'.format(datetime.datetime.now())
        return "%s_%s" % (self.params.model_version, now)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-09 15:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the PointTopo and Topographie tables
    and links PointTopo to Topographie.  Avoid editing once applied."""

    dependencies = [
        ('carto', '0039_auto_20171109_1616'),
    ]

    operations = [
        migrations.CreateModel(
            name='PointTopo',
            fields=[
                ('id_pointTopo', models.AutoField(primary_key=True, serialize=False)),
                # NOTE(review): decimal_places=500 / max_digits=999 are far
                # beyond any realistic coordinate precision -- confirm intent.
                ('x', models.DecimalField(blank=True, decimal_places=500, max_digits=999, null=True)),
                ('y', models.DecimalField(blank=True, decimal_places=500, max_digits=999, null=True)),
                ('precision', models.CharField(blank=True, max_length=255, null=True)),
                ('data_type', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'db_table': 'point_topo',
            },
        ),
        migrations.CreateModel(
            name='Topographie',
            fields=[
                ('id_topographie', models.AutoField(primary_key=True, serialize=False)),
                ('datecreation', models.DateField()),
                ('datedestruction', models.DateField(blank=True)),
                ('commentaire', models.CharField(blank=True, max_length=255)),
                ('libelle', models.CharField(max_length=255)),
                ('source', models.CharField(blank=True, max_length=255)),
                ('contactadresse', models.CharField(blank=True, max_length=255, null=True)),
                ('contactnom', models.CharField(blank=True, max_length=255, null=True)),
                ('contactmail', models.CharField(blank=True, max_length=255, null=True)),
                ('contacttel1', models.CharField(blank=True, max_length=255, null=True)),
                ('contacttel2', models.CharField(blank=True, max_length=255, null=True)),
                ('description', models.CharField(blank=True, max_length=255, null=True)),
                ('geometry', models.BinaryField()),
            ],
            options={
                'db_table': 'topographie',
            },
        ),
        migrations.AddField(
            model_name='pointtopo',
            # NOTE(review): field name 'Topogragphie' is misspelled; renaming
            # now would require a follow-up migration, so it is left as-is.
            name='Topogragphie',
            field=models.ForeignKey(db_column='id_topographie', on_delete=django.db.models.deletion.CASCADE, to='carto.Topographie'),
        ),
    ]
|
import numpy as np
import subprocess as sp
import cv2
cap = cv2.VideoCapture("toystory.mp4")
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
print(width, height, fps)
#url = 'rtmp://localhost:1935/dash/live'
#command = 'ffmpeg -i - -vcodec libx264 -f flv {}'.format(url)
#print(command)
#proc = sp.Popen(command, stdin=sp.PIPE, shell=True, bufsize=10**8)
command = ['ffmpeg',
'-y', # (optional) overwrite output file if it exists
'-f', 'rawvideo',
'-vcodec','rawvideo',
'-s', '640x360', # size of one frame
'-pix_fmt', 'rgb24',
'-r', '29.97', # frames per second
'-i', '-', # The imput comes from a pipe
'-an', # Tells FFMPEG not to expect any audio
'-vcodec', 'libx264',
'my_output_videofile.mp4' ]
stream_command = ['ffmpeg',
'-y', # (optional) overwrite output file if it exists
'-f', 'rawvideo',
'-vcodec','rawvideo',
'-s', '640x360', # size of one frame
'-pix_fmt', 'rgb24',
'-r', '29.97', # frames per second
'-i', '-', # The imput comes from a pipe
'-an', # Tells FFMPEG not to expect any audio
'-vcodec', 'libx264',
'-f', 'flv',
'rtmp://localhost:1935/dash/live' ]
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)
stream = sp.Popen(stream_command, stdin=sp.PIPE, stderr=sp.PIPE)
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read() # frame size: 640x360x3(=691200)
if ret:
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print(gray.size) # (= 230400)
# Display the resulting frame
cv2.imshow('frame', gray)
print(frame.size) # 640x360x3(=691200)
pipe.stdin.write(gray.tostring())
stream.stdin.write(frame.tostring())
else:
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
from itertools import combinations_with_replacement
# Input format: "<string> <k>".  Prints every length-k combination (with
# replacement) of the string's characters, in lexicographic order.
a = input().split()
s = ''.join(sorted(a[0]))
res = list(combinations_with_replacement(s, int(a[1])))
for i in res:
    print(''.join(i))
|
def test_primary():
    """Spot-checks for prime_factor on 0..5.

    NOTE(review): these cases never exercise composites such as 6 or 9 where
    a full factorization matters -- consider extending the coverage.
    """
    assert prime_factor(0) == []
    assert prime_factor(1) == []
    assert prime_factor(2) == [2]
    assert prime_factor(3) == [3]
    assert prime_factor(4) == [2,2]
    assert prime_factor(5) == [5]
def prime_factor(input):
    """Return the prime factorization of `input` as an ascending list.

    Values below 2 have no prime factors and yield [].  The original
    implementation only handled a few special cases (e.g. it returned [2, 2]
    for every even number and [9] for 9); this is a full trial-division
    factorization that agrees with all previously passing cases.
    (Parameter name `input` shadows the builtin but is kept for
    interface compatibility.)
    """
    if input < 2:
        return []
    factors = []
    n = input
    divisor = 2
    # Trial division: divide out each factor completely before moving on,
    # so only primes are ever appended.
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        # Remaining cofactor is prime.
        factors.append(n)
    return factors
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from lib.gcn import GraphConv, TemporalConv
from lib.graph import Graph
skeleton = Graph("sbu", "spatial")
input_features = Input([30, skeleton.num_node, 3], dtype="float32")
x = tf.keras.layers.Conv2D(64 * 3, (3, 1), padding="same")(input_features)
input_A = Input(tensor=tf.keras.backend.constant(skeleton.A))
x, A = GraphConv(64, t_kernels=3)([input_features, input_A])
x = TemporalConv(64, dropout=0.5)(x)
x, A = GraphConv(128, t_kernels=3)([x, A])
x = TemporalConv(128, dropout=0.5)(x)
print(x.shape) |
class Solution(object):
    def isAnagram(self, s, t):
        """Return True iff t is an anagram of s.

        Works for arbitrary characters: the original indexed a 256-slot list
        with ord(ch) - ord('a'), which raises IndexError for code points
        >= 353 and relied on negative-index wraparound below 'a'.
        """
        if len(s) != len(t):
            return False
        # Local import keeps the module's import surface unchanged.
        from collections import Counter
        return Counter(s) == Counter(t)
|
from contextlib import contextmanager
import tensorflow as tf
import numpy as np
import os
import shutil
from tensorflow.contrib.layers import xavier_initializer
def affine_layer(inputs, out_dim, name = 'affine_layer'):
    """Linear (no-bias) layer: inputs @ W with W ~ Uniform(-0.08, 0.08).

    inputs: 2-D tensor [batch, in_dim]
    out_dim: output feature size
    name: variable scope for the weight variable
    """
    in_dim = inputs.get_shape().as_list()[1]
    with tf.variable_scope(name):
        init = tf.random_uniform_initializer(-0.08, 0.08)
        weights = tf.get_variable(name = 'weights', shape = [in_dim, out_dim]
                                  , dtype = tf.float32, initializer = init)
        outputs = tf.matmul(inputs, weights)
    return outputs
def conv_layer(inputs, filter_shape, stride, name = 'conv_layer'):
    """2-D convolution (SAME padding, no bias) with Xavier-initialized filters.

    filter_shape: [h, w, in_channels, out_channels]
    stride: full 4-element strides list as expected by tf.nn.conv2d
    """
    with tf.variable_scope(name):
        init = tf.contrib.layers.xavier_initializer()
        filter1 = tf.get_variable(name = 'filt_weights', shape = filter_shape, dtype = tf.float32, initializer = init)
        output = tf.nn.conv2d(inputs, filter1, strides = stride, padding = 'SAME')
    return output
def average_gradients(tower_grads):
    """ Calculate the average gradient for each shared variable across towers.
    Note that this function provides a sync point across all towers.
    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer
        list is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
      List of pairs of (gradient, variable) where the gradient has been
      averaged across all towers.
    """
    with tf.name_scope('average_gradients'):
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            # each grad is ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
            grads = []
            for g, _ in grad_and_vars:
                # Add 0 dim to gradients to represent tower
                if g is None:
                    # Missing gradient on this tower: substitute zeros so the
                    # average is still well-defined.
                    print('Gradient for {} is None'.format(_.name))
                    g = tf.zeros_like(_)
                expanded_g = tf.expand_dims(g, 0)
                # Append on a 'tower' dimension that we will average over below
                grads.append(expanded_g)
            # Build the tensor and average along tower dimension
            grad = tf.concat(grads, 0)
            grad = tf.reduce_mean(grad, 0)
            # The Variables are redundant because they are shared across towers
            # just return first tower's pointer to the Variable
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)
        return average_grads
@contextmanager
def variables_on_first_device(device_name):
    """Context manager that pins all tf.get_variable calls to device_name.

    Temporarily monkey-patches tf.get_variable so every variable created in
    the with-block lives on the given device.  The patch is restored in a
    finally clause; the original version leaked the patch if the body raised.
    """
    old_fn = tf.get_variable
    def new_fn(*args, **kwargs):
        with tf.device(device_name):
            return old_fn(*args, **kwargs)
    tf.get_variable = new_fn
    try:
        yield
    finally:
        # BUG FIX: always restore, even when the body raises.
        tf.get_variable = old_fn
def scalar_summary(name, x):
    """Create a scalar summary op, falling back to the legacy pre-1.x API
    when `tf.summary.scalar` is unavailable."""
    try:
        return tf.summary.scalar(name, x)
    except AttributeError:
        # Older TF exposes the op as a top-level function.
        return tf.scalar_summary(name, x)
def histogram_summary(name, x):
    """Create a histogram summary op, falling back to the legacy pre-1.x API
    when `tf.summary.histogram` is unavailable."""
    try:
        return tf.summary.histogram(name, x)
    except AttributeError:
        # Older TF exposes the op as a top-level function.
        return tf.histogram_summary(name, x)
def leakyrelu(x, alpha=0.3, name='lrelu'):
    """Leaky ReLU activation: elementwise max(x, alpha * x)."""
    with tf.name_scope(name):
        return tf.maximum(x, x * alpha, name=name)
def downconv(x, output_dim, k=[5, 5], pool=[2, 2], name='downconv', is_bias = False):
    """ Downsampled convolution 2d

    Strided conv: the `pool` factors are used as spatial strides, so the
    output is spatially downsampled by that factor (SAME padding).

    Args:
        x: NHWC input tensor.
        output_dim: number of output channels.
        k: filter spatial size [h, w].
        pool: spatial stride [sh, sw].
        name: variable scope name.
        is_bias: if True, add a learned per-channel bias.
    """
    w_init = xavier_initializer()
    with tf.variable_scope(name):
        W = tf.get_variable('W', k + [x.get_shape()[-1], output_dim], initializer=w_init)
        conv = tf.nn.conv2d(x, W, strides=[1] + pool + [1], padding='SAME')
        if is_bias:
            b = tf.get_variable('b', [output_dim], initializer=tf.zeros_initializer())
            conv = tf.nn.bias_add(conv, b)
    return conv
def deconv(x, output_dim, output_shape, k=[5, 5], pool=[2, 2], name='downconv', is_bias = False):
    """ Deconvolution 2d

    Transposed conv upsampling by the `pool` stride factors (SAME padding).

    NOTE(review): default name='downconv' looks like a copy-paste from
    downconv() above -- changing it would rename existing variable scopes
    (and break checkpoints), so confirm before fixing.

    Args:
        x: NHWC input tensor.
        output_dim: number of output channels.
        output_shape: full static output shape for conv2d_transpose.
        k: filter spatial size [h, w].
        pool: spatial stride [sh, sw].
        is_bias: if True, add a learned per-channel bias.
    """
    w_init = xavier_initializer()
    with tf.variable_scope(name):
        W = tf.get_variable('W', k + [output_dim, x.get_shape()[-1]], initializer=w_init)
        conv = tf.nn.conv2d_transpose(x, W, strides=[1] + pool + [1],
                                      output_shape = output_shape,
                                      padding='SAME')
        if is_bias:
            b = tf.get_variable('b', [output_dim],
                                initializer=tf.zeros_initializer())
            conv = tf.nn.bias_add(conv, b)
    return conv
def _int64_feature(value):
    """Wrap a scalar int as a tf.train.Feature (Int64List of length 1)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrap a bytes object as a tf.train.Feature (BytesList of length 1)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def write_tfrecords(out_file, var_list, name_list):
    """Serialize parallel arrays as one tf.train.Example and write it out.

    Args:
        out_file: open TFRecord writer (anything with a .write() method).
        var_list: list of numpy arrays; each is serialized via .tostring().
        name_list: feature names, parallel to var_list.
    """
    # zip() pairs names with values directly instead of indexing via
    # range(len(...)).
    feature = {name: _bytes_feature(var.tostring())
               for name, var in zip(name_list, var_list)}
    example = tf.train.Example(features = tf.train.Features(feature = feature))
    out_file.write(example.SerializeToString())
def write_tfrecords_val(out_file, var_list, name_list):
    """Write a validation Example: entries 0-1 are numpy arrays (serialized
    with .tostring()), entries 2-3 are raw bytes stored as-is.

    Args:
        out_file: open TFRecord writer.
        var_list: [array, array, bytes, bytes], parallel to name_list.
        name_list: four feature names.
    """
    dict1 = {}
    for i in range(2):
        dict1[name_list[i]] = _bytes_feature(var_list[i].tostring())
    # The last two entries are already bytes; no .tostring() needed.
    dict1[name_list[2]] = _bytes_feature(var_list[2])
    dict1[name_list[3]] = _bytes_feature(var_list[3])
    example = tf.train.Example(features = tf.train.Features(feature = dict1))
    out_file.write(example.SerializeToString())
def read_data(filepath, name_list, shape_list, dtype_list):
    """Build a queue-based TFRecord reader pipeline for training data.

    Args:
        filepath: path to a single .tfrecords file.
        name_list: feature names stored in each Example.
        shape_list: target shape per feature (parallel to name_list).
        dtype_list: dtype to decode each raw byte string into.

    Returns:
        List of decoded, reshaped tensors (one per feature name).
    """
    with tf.name_scope('read_data'):
        filename_queue = tf.train.string_input_producer([filepath])
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        dict1={}
        for i in range(len(name_list)):
            dict1[name_list[i]] = tf.FixedLenFeature([], tf.string)
        features = tf.parse_single_example(serialized_example, features = dict1)
        outputs = []
        for i in range(len(name_list)):
            # Every feature was written as raw bytes; decode then reshape.
            temp = tf.decode_raw(features[name_list[i]], dtype_list[i])
            temp = tf.reshape(temp, shape_list[i])
            outputs.append(temp)
        return outputs
def read_val_data(filepath, name_list, shape_list, dtype_list):
    """Build a (non-shuffled) TFRecord reader pipeline for validation data.

    Mirrors read_data(), except the last two features are kept as raw string
    tensors (matching write_tfrecords_val, which stored them without
    .tostring()).

    Returns:
        List of tensors: decoded/reshaped for all but the last two names,
        then the two raw string features.
    """
    with tf.name_scope('read_data'):
        # shuffle=False keeps validation examples in file order.
        filename_queue = tf.train.string_input_producer([filepath], shuffle=False)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        dict1={}
        for i in range(len(name_list)):
            dict1[name_list[i]] = tf.FixedLenFeature([], tf.string)
        features = tf.parse_single_example(serialized_example, features = dict1)
        outputs = []
        #print('name_list => {}'.format(len(name_list)))
        for i in range(len(name_list) - 2):
            temp = tf.decode_raw(features[name_list[i]], dtype_list[i])
            temp = tf.reshape(temp, shape_list[i])
            outputs.append(temp)
        # Last two features are plain strings; pass them through undecoded.
        temp = features[name_list[len(name_list) - 2]]
        outputs.append(temp)
        temp = features[name_list[len(name_list) - 1]]
        outputs.append(temp)
        return outputs
def batch_data(data, batch_size):
    """Shuffle-batch a list of tensors from a reader pipeline.

    Args:
        data: list of tensors for one example (e.g. output of read_data).
        batch_size: examples per batch.

    Returns:
        Batched tensors with a leading batch dimension.
    """
    with tf.name_scope('batch_and_shuffle_data'):
        # capacity/min_after_dequeue sized per the TF queue-runner guideline:
        # min_after_dequeue + num_threads-ish * batch_size.
        output = tf.train.shuffle_batch(data, batch_size = batch_size,
                                        num_threads = 4,
                                        capacity = 1000 + 3 * batch_size,
                                        min_after_dequeue = 1000,
                                        enqueue_many = False,
                                        name='in_and_out')
        return output
|
import datetime
from flask import request, jsonify, make_response
from flask_restful import Resource
from flask_jwt_extended import (create_access_token,
create_refresh_token,
jwt_required,
set_access_cookies,
set_refresh_cookies,
unset_jwt_cookies,
get_jwt_identity)
from database.User.models import User
from database.Subreddit.models import Subreddit
class SubredditRequests(Resource):
    """REST resource exposing stored info about a moderator-registered subreddit."""
    @jwt_required
    def get(self, subreddit):
        """Return the stored record for `subreddit`, enriched via the caller's
        praw (Reddit API) instance.

        NOTE(review): the not-found branch also returns HTTP 200 rather than
        404 -- confirm the frontend relies on this before changing it.
        """
        curr = get_jwt_identity()
        # Build a Reddit client authorized as the currently logged-in user.
        reddit = User.objects.get(username=curr).generate_praw_instance()
        try:
            sr = Subreddit.objects.get(sr=subreddit)
            sub = reddit.subreddit(sr.sr)
            return_obj = sr.make_return_object(sub)
            resp = make_response(return_obj, 200)
            return resp
        except Subreddit.DoesNotExist:
            resp = make_response(
                {"error": "No subreddit info found. No mods have signed up"}, 200)
            return resp
|
import brickpi
import time

# Initialize the BrickPi interface and the motor/sensor configuration.
interface=brickpi.Interface()
interface.initialize()
motor = 2
speed = 0.5
interface.motorEnable(motor)
touch_port = 0
interface.sensorEnable(0, brickpi.SensorType.SENSOR_TOUCH)

# Motor controller tuning. PID gains are all zero here, so control is
# effectively feed-forward only (feedForwardGain + minPWM).
motorParams = interface.MotorAngleControllerParameters()
motorParams.maxRotationAcceleration = 0.7
motorParams.maxRotationSpeed = 1.0
motorParams.feedForwardGain = 255/20.0
motorParams.minPWM = 13.0
motorParams.pidParameters.minOutput = -255
motorParams.pidParameters.maxOutput = 255
motorParams.pidParameters.k_p = 000.0
motorParams.pidParameters.k_i = 0.0
motorParams.pidParameters.k_d = 0.0
def stop():
    # Bring the motor to rest: drop the speed reference, then zero the PWM.
    interface.setMotorRotationSpeedReference(motor,0.1)
    interface.setMotorPwm(motor,0)
    return
# Run backwards for 3 s, stop briefly, then run forwards until a touch
# sensor fires (Python 2 script: note the print statements).
interface.setMotorAngleControllerParameters(motor,motorParams)
interface.setMotorRotationSpeedReferences([motor],[-speed])
time.sleep(3)
print "change"
stop()
time.sleep(0.2)
interface.setMotorRotationSpeedReferences([motor],[speed])
print "Press Ctrl+C to exit"
while True:
    time.sleep(1)
    result = interface.getSensorValue(0)
    if result:
        touched = result[0]
    # NOTE(review): sensor port 1 is polled here but only port 0 was enabled
    # above, and this read overwrites `touched` from port 0 -- verify which
    # sensor is actually meant to end the loop.
    result = interface.getSensorValue(1)
    if result:
        touched = result[0]
    if touched:
        break
interface.terminate()
|
def add(user_entry, email_entry, password_entry, list_user, list_email, list_password):
    """Append one (user, email, password) record to emails.txt and show it in
    the three list widgets.

    Args:
        user_entry, email_entry, password_entry: Qt line-edit-like objects
            exposing .text().
        list_user, list_email, list_password: Qt list-widget-like objects
            exposing .addItem().
    """
    u = user_entry.text()
    e = email_entry.text()
    p = password_entry.text()
    # 'with' guarantees the file is closed (the original leaked the handle),
    # and one write replaces seven tiny ones. Record format is unchanged:
    # "user,email,password,\n".
    with open("emails.txt", "a") as f:
        f.write("{},{},{},\n".format(u, e, p))
    list_user.addItem(u)
    list_email.addItem(e)
    list_password.addItem(p)
    # NOTE(review): passwords are stored in plain text -- consider hashing.
def show(listWidget_user, listWidget_email, listWidget_password):
    """Load every record from emails.txt into the three list widgets.

    If the file does not exist yet, create it empty and return. (The original
    code created the file but then crashed with a NameError, because `f` was
    never bound before the `for line in f:` loop; it also never closed the
    handle on an error path.)
    """
    try:
        f = open('emails.txt', 'r')
    except FileNotFoundError:
        # First run: create an empty store; nothing to display.
        open('emails.txt', 'w').close()
        return
    # 'with' closes the file even if a malformed line raises below.
    with f:
        for line in f:
            fields = line.split(',')
            listWidget_user.addItem(fields[0])
            listWidget_email.addItem(fields[1])
            listWidget_password.addItem(fields[2])
|
word = "plamen"
# word[-1:2:-1] walks indices 5,4,3 -- i.e. the last three letters reversed.
print("".join(reversed(word[3:])))
print(word[0:3])
# Escribir un programa que pregunte el nombre del usuario en la consola y después de que el usuario
# lo introduzca muestre por pantalla <NOMBRE> tiene <n> letras, donde <NOMBRE> es el nombre de usuario
# en mayúsculas y <n> es el número de letras que tienen el nombre.
def run():
    """Ask the user's name and report, in uppercase, how many letters it has."""
    nombre = input("Escribe tu nombre: ").upper()
    print("Tu nombre {} tiene {} letras".format(nombre, len(nombre)))


if __name__ == "__main__":
    run()
from util import *
import data_handling
from funcs import cluster, misc, models
def parse_arguments(args):
    """Takes in command line arguments and outputs them as a dictionary.
    The main argument or "command" is handled by the parser, all subsequent
    arguments are passed to the corresponding subparser.
    Arguments:
        args {list}
            As given by sys.argv[1:]
    Returns:
        dict
            Contains all args in a condensed/formatted way
    """
    full_call = " ".join(args)
    p = argparse.ArgumentParser(
        prog=None,
        description="Cluster dataset and create submodels for each.",
    )
    # ADD SUBPARSERS
    subp = p.add_subparsers(title="commands", dest="command", help=None)
    subp.required = True
    p_cluster = subp.add_parser(
        "cluster",
        help="cluster dataset",
    )
    p_train = subp.add_parser(
        "train",
        help="train improved model",
    )
    p_cluster_error = subp.add_parser(
        "cluster_error",
        help="Cluster dataset, calculate and plot errors of given model",
    )
    p_plot_cluster_error = subp.add_parser(
        "plot_error",
        help="Plot the errors for given info file",
    )
    p_cluster_xyz = subp.add_parser("xyz", help="Write xyz files for every cluster")
    subp_all = [p_cluster, p_train, p_cluster_error, p_cluster_xyz]
    # all except p_plot_cluster_error
    # ADD ARGUMENTS FOR ALL
    for x in subp_all:
        x.add_argument(
            "-d",
            "--dataset",
            metavar="<dataset_file>",
            dest="dataset_file",
            help="path to dataset file",
            required=True,
        )
        x.add_argument(
            "-p",
            "--para",
            metavar="<para_file>",
            dest="para_file",
            help="name of para file",
            required=False,
            default="default",
        )
        x.add_argument(
            "-c",
            "--cluster",
            metavar="<cluster_file>",
            dest="cluster_file",
            help="path to cluster file",
            default=None,
        )
    # ADD ARGUMENTS FOR INFO-DEPENDENTS
    info_boys = [p_plot_cluster_error]
    for x in info_boys:
        x.add_argument(
            "-i",
            "--info",
            metavar="<info_file>",
            dest="info_file",
            help="path to info file",
            required=True,
        )
        x.add_argument(
            "-p",
            "--para",
            metavar="<para_file>",
            dest="para_file",
            help="name of para file",
            required=False,
            default="default",
        )
    # ADD ARGUMENTS FOR RESUME
    # (currently no subcommand supports resuming; kept as a placeholder)
    resume_boys = []
    for x in resume_boys:
        x.add_argument(
            "-r",
            "--resume",
            metavar="<resume_dir>",
            dest="resume_dir",
            help="path to save file from which to resume",
            required=False,
        )
    # ADD SPECIFIC ARGUMENTS FOR SUBS
    for x in [p_train]:
        x.add_argument(
            "-n",
            "--n_steps",
            metavar="<n_steps>",
            dest="n_steps",
            help="Number of steps",
            required=True,
            type=int,
        )
        x.add_argument(
            "-s",
            "--size",
            metavar="<step_size>",
            dest="step_size",
            help="Step size (in number of points)",
            required=True,
            type=int,
        )
        x.add_argument(
            "-i",
            "--init",
            metavar="<init>",
            dest="init",
            help="Initial model (path) or initial number of points (int)",
            required=True,
        )
    for x in [p_cluster_error]:
        x.add_argument(
            "-i",
            "--init",
            metavar="<init>",
            dest="init",
            help="Initial model to calculate errors of",
            required=True,
        )
    # PARSE
    # BUG FIX: the given `args` list used to be ignored (p.parse_args() with
    # no argument silently re-reads sys.argv[1:]); parse the list we were
    # handed so the function matches its documented contract.
    args = vars(p.parse_args(args))
    # HANDLE ARGS
    # find para file
    sys.path.append("paras")
    para_file = args["para_file"]
    para_file = para_file.replace(
        ".py", ""
    )  # in case the user includes '.py' in the name
    para_file = os.path.basename(para_file)
    file = os.path.join("paras", para_file)
    if os.path.exists(file + ".py"):
        args["para_file"] = para_file
    else:
        print_error(f"No valid para file found under name: {args['para_file']}")
    resume_dir = args.get("resume_dir", False)
    if resume_dir:
        args["resume"] = True
        if not os.path.exists(resume_dir):
            print_error(f"Tried to resume from path {resume_dir}, but doesn't exist")
    else:
        args["resume"] = False
    # add full call
    args["full_call"] = full_call
    return args
class MainHandler:
    """Main class of the program
    This class will contain all information needed for every operation. All
    modules (which correspond to commands such as "cluster" or "train") inherit
    this class. Subclasses are expected to provide `run_command`,
    `resume_command` and `save_command` (called from `run`).
    Variables:
        needs_dataset {bool}
            Default True, sometimes set to False when command/arguments
            combination doesn't need a dataset
        vars {list}
            List of descriptors/data in the dataset, as extracted by the
            var funcs. See load_dataset -> var funcs in the parameter file
        info {dict}
            Information that is worth saving (for example cluster indices)
            is put in here throughout the run
        SEARCH_MODULES {list}
            See find_function method
        current_stage {number}
            Counts at which stage the program is currently at, purely for
            ui reasons
        n_main_stages {number}
            Total number of stages, ui reasons
    """
    def __init__(self, args, needs_dataset=True):
        """The main use of this function is to save arguments in the object,
        determine whether this is a resumed job, load the parameters and load
        the dataset.
        Arguments:
            args {dict}
                Argument dictionary as returned by parse_arguments
        Keyword Arguments:
            needs_dataset {bool} -- (default: {True})
                Decides whether the load_dataset function gets called
        """
        self.args = args
        self.resumed = args["resume"]
        self.load_paras("default", args["para_file"])
        # n_cores: 0 means "1"; a negative value means "all cores minus n".
        n_cores = int(self.call_para("n_cores") or 1)
        if n_cores == 0:
            n_cores = 1
        elif n_cores < 0:
            n_cores = os.cpu_count() + n_cores
        self.n_cores = n_cores
        # merge exceptions
        # NOTE(review): this reassignment is a no-op (it assigns the value to
        # itself); presumably a leftover from the merge logic in load_paras.
        if self.para.get("load_dataset", {}).get("var_funcs", None) is not None:
            self.para["load_dataset"]["var_funcs"] = self.para["load_dataset"][
                "var_funcs"
            ]
        if not needs_dataset:
            # Skip the two dataset-related stages in the UI counter.
            self.n_main_stages -= 2
            self.needs_dataset = False
    def load_paras(self, default, para_file):
        """Loads the parameter file(s)
        Saves the combination of the default parameter file and the -p given
        file to `self.para`. The `default.py` file is updated with the given
        parameter file, not replaced!
        Arguments:
            default {str}
                Module name of the default parameter file (e.g. "default")
            para_file {str}
                Module name of the user parameter file (importable from
                the "paras" directory added to sys.path by parse_arguments)
        """
        args = self.args
        para_mod = __import__(para_file)
        para = para_mod.parameters
        funcs = func_dict_from_module(para_mod)
        # merge with defaults
        para_def_mod = __import__(default)
        para_def = para_def_mod.parameters
        # extracts any function from the para.py file
        funcs_def = func_dict_from_module(para_def_mod)
        merge_para_dicts(para_def, para)  # WAI
        # make an exception for certain things
        # var_funcs is replaced wholesale rather than merged key-by-key.
        if ("load_dataset" in para) and ("var_funcs" in para["load_dataset"]):
            para_def["load_dataset"]["var_funcs"] = para["load_dataset"]["var_funcs"]
        self.para = para_def
        # User-file functions take precedence over default-file functions.
        z = {**funcs_def, **funcs}  # WAI
        self.funcs = z
needs_dataset = True
vars = []
info = {}
SEARCH_MODULES = [cluster, data_handling, misc, models]
    def find_function(self, name):
        """Finds a function of a given variable name
        This method looks through all modules given in SEARCH_MODULES (in order)
        and searches a function of the given name. If found, it returns a
        pointer to the function.
        Arguments:
            name {string}
                Name of the function
        Returns:
            [function] or [None]
                Returns the function or None if not found
        """
        # Delegates to the module-level helper; self.funcs (from the para
        # files) is consulted alongside SEARCH_MODULES.
        return find_function(name, self.funcs, self.SEARCH_MODULES)
    def generate_para_args(self, args):
        """Dummy function, see generate_custom_args in the `utils.py` file"""
        # Thin delegation so subclasses/para files can call it via self.
        return generate_custom_args(self, args)
    def generate_para_kwargs(self, kwargs):
        """Dummy function, see generate_custom_kwargs in the `utils.py` file"""
        # Thin delegation so subclasses/para files can call it via self.
        return generate_custom_kwargs(self, kwargs)
def call_para(self, *path, args=[], kwargs={}):
"""Given a path in the parameter file, calls the parameter if callable,
otherwise return it.
If a not-None input is found under the given parameter path, we check
if it contains a `func:` prefix. If yes, the find_function function is
called, which returns a pointer to the function of the same name as
given in the parameter file. The function is then called with the args
and kwargs passed to the function as well as those contained in the
`*_args` parameter (where * is the function name).
If the parameter does not contain a `func:` prefix, the parameter itself
is simply returned.
Arguments:
*path {list}
List of steps to take to reach the given parameter. For
example: ['clusters', 0, 'n_clusters']
Keyword Arguments:
args {list} (default: {[]})
List of args as passed by the program
kwargs {dict} (default: {{}})
List of kwargs as passed by the program
Returns:
[/]
Returns None if no function is found under the given name,
otherwise returns the outputs of the function or the
parameter itself if no `func:` prefix was present
"""
if len(path) == 0:
return None
para = self.para
subdict = para
step = para
for x in path:
subdict = step
step = step.get(x, None)
# handle functions
if type(step) == str and step.startswith("func:"):
f_name = step.replace("func:", "")
f = self.find_function(f_name)
arg_name = str(path[-1]) + "_args"
args_add = self.generate_para_args(subdict.get(arg_name, []))
kwarg_name = str(path[-1]) + "_kwargs"
kwargs_add = self.generate_para_kwargs(subdict.get(kwarg_name, {}))
kwargs.update(kwargs_add)
args_full = args + args_add
return f(*args_full, **kwargs)
elif type(step) == str and step.startswith("para:"):
new_args = step.replace("para:", "").split(",")
return self.call_para(*newargs, args=args)
elif step is None:
return None
else:
# Not needed any more, call_para is more versatile and so is the default now
# print_warning(f"Tried to call para: {path}={step}, but not callable. Value returned instead.")
return step
def return_partial_func(self, *path, kwargs={}):
if len(path) == 0:
return None
para = self.para
subdict = para
step = para
for x in path:
subdict = step
step = step.get(x, None)
# handle functions
if type(step) == str and step.startswith("func:"):
f_name = step.replace("func:", "")
f = self.find_function(f_name)
kwarg_name = str(path[-1]) + "_kwargs"
kwargs_add = self.generate_para_kwargs(subdict.get(kwarg_name, {}))
kwargs.update(kwargs_add)
else:
print_error(f"Para {path} not a function")
func = self.get_para(*path)
return partial(func, **kwargs)
def get_para(self, *path, args=[]):
"""See call_para. Same idea except it returns a pointer to the function
if a function is found (with the `func:` prefix)"""
if len(path) == 0:
return None
para = self.para
step = para
for x in path:
step = step.get(x, None)
# handle functions
if type(step) == str and step.startswith("func:"):
f_name = step.replace("func:", "")
f = self.find_function(f_name)
return f
elif type(step) == str and step.startswith("para:"):
new_args = step.replace("para:", "").split(",")
return self.get_para(*newargs, args=args)
else:
return step
    def print_stage(self, s):
        """Prints the current stage in the UI
        Arguments:
            s {string}
                Title of the stage
        """
        # NOTE(review): reads self.n_stages, but only n_main_stages is defined
        # on this class -- presumably subclasses set n_stages; verify before
        # relying on it.
        print_stage(s, self.current_stage, self.n_stages)
        self.current_stage += 1
current_stage = 1
n_main_stages = 4
    def run(self):
        """
        Runs the program. The dataset is loaded, the vars and storage are
        prepared and then the `.run_command` method is called, which is defined
        in the called module (for example: `cluster` or `train`)
        At the end, information is saved and temporary files are deleted.
        """
        if self.needs_dataset:
            self.print_stage("Load dataset")
            self.load_dataset()
            self.print_stage("Prepare vars")
            self.prepare_vars()
        self.print_stage("Prepare storage")
        # Resumed jobs reuse the existing storage dir and take the
        # resume_command path; fresh jobs create storage and run_command.
        if self.args["resume"]:
            self.resume_storage()
            self.resume_command()
        else:
            self.prepare_storage()
            self.run_command()
        self.print_stage("Save in storage")
        self.save_main()
        self.save_command()
        self.delete_temp()
    def delete_temp(self):
        """
        Deletes all temporary files from storage
        """
        # Guarded by the `remove_temp_files` parameter so users can keep
        # intermediate artifacts for debugging.
        if self.call_para("remove_temp_files"):
            shutil.rmtree(self.temp_dir)
    def load_dataset(self):
        """Loads the dataset and stores it in `.dataset`
        Currently supported:
        xyz format - needs to be extended with energy in the comment and forces
        npz format - as given by sGDML, needs to contain 'R', 'E', 'F'
        db format - as given by schnetpack
        """
        path = self.args["dataset_file"]
        # NOTE(review): the code after these checks assumes print_error aborts
        # the run; confirm, otherwise `data` may be unbound below.
        if path is None:
            print_error(
                f"No dataset given. Please use the -d arg followed by the path to the dataset."
            )
        elif not os.path.exists(path):
            print_error(f"Dataset path {path} is not valid.")
        ext = os.path.splitext(path)[-1]
        # xyz file
        if ext == ".xyz":
            print_ongoing_process(f"Loading xyz file {path}")
            try:
                file = open(path)
                dat = read_concat_ext_xyz(file)
                # Normalize to the same dict layout as the npz format.
                data = {
                    "R": np.array(dat[0]),
                    "z": dat[1],
                    "E": np.reshape(dat[2], (len(dat[2]), 1)),
                    "F": np.array(dat[3]),
                }
            except Exception as e:
                print(e)
                print_error("Couldn't load .xyz file.")
            print_ongoing_process(f"Loaded xyz file {path}", True)
        # npz file
        elif ext == ".npz":
            print_ongoing_process(f"Loading npz file {path}")
            try:
                data = np.load(path, allow_pickle=True)
            except Exception as e:
                print(e)
                print_error("Couldn't load .npz file.")
            print_ongoing_process(f"Loaded npz file {path}", True)
        # schnetpack .db
        elif ext == ".db":
            print_ongoing_process(f"Loading db file {path}")
            # Imported lazily so schnetpack is only required for .db datasets.
            from schnetpack import AtomsData
            data = AtomsData(path)
            print_ongoing_process(f"Loaded db file {path}", True)
        else:
            print_error(
                f"Unsupported data type {ext} for given dataset {path} (xyz, npz, schnetpack .db supported)."
            )
        self.dataset = data
        self.dataset_path = path
        # Optional user hook from the para file, run once after loading.
        if self.get_para("load_dataset", "post_processing") is not None:
            print_ongoing_process("Post-processing dataset")
            self.call_para("load_dataset", "post_processing", args=[self])
            print_ongoing_process("Post-processing dataset", True)
    def prepare_vars(self):
        """Prepares the descriptors/information in the dataset
        Loop through every function given in the load_dataset->var_funcs
        parameter file and call them with the dataset as an argument. The
        output of those functions is then saved in `self.vars[i]` where i is
        the index of the function in the var_funcs parameter list
        """
        dataset = self.dataset
        # get the needed vars ready
        # parses through data set and uses the given functions to generate the needed variables
        # f.e. interatomic distances and energies
        var_funcs = self.call_para("load_dataset", "var_funcs")
        keys = list(var_funcs.keys())
        for i in range(len(keys)):
            print_x_out_of_y("Extracting vars", i, len(keys))
            x = keys[i]
            # NOTE(review): self.vars is a class-level list (see attributes
            # above), so these appends are shared across instances.
            self.vars.append(
                self.call_para(
                    "load_dataset", "var_funcs", x, args=[self, self.dataset]
                )
            )
        print_x_out_of_y("Extracting vars", len(keys), len(keys), True)
        # SUMMARY
        summary_table = {}
        for i in range(len(self.vars)):
            try:
                summary_table[i] = self.vars[i].shape
            except:
                # Non-array vars (lists, scalars) simply have no shape to show.
                summary_table[i] = "No shape"
        print_table("Vars summary:", "index", "shape", summary_table)
    def do_nothing(*args):
        """Useless dummy/debug method; accepts and ignores any arguments."""
        print_debug("Doing nothing. Please be patient.")
    def resume_storage(self):
        """When a task is resumed, re-use the same storage and check what it
        contains
        """
        args = self.args
        path = args["resume_dir"]
        self.storage_dir = path
        self.temp_dir = os.path.join(path, "temp")
        if os.path.exists(path):
            print_ongoing_process(f"Save path {path} found.", True)
        # The checkpoint pickle holds the state needed by resume_command.
        cp_path = os.path.join(path, "checkpoint.p")
        if not os.path.exists(cp_path):
            print_error(f"No checkpoint file found at {cp_path}")
        with open(cp_path, "rb") as file:
            info = pickle.loads(file.read())
        self.resume_info = info
        if self.call_para("storage", "save_original_call"):
            print_ongoing_process("Saving call")
            # Append ("a+") so the original call stays on record too.
            with open(os.path.join(path, "Call.txt"), "a+") as file:
                print(f"Resume call: {args.get('full_call','N/A')}", file=file)
            print_ongoing_process(
                f'Saved resume call in {os.path.join(path,"Call.txt")}', True
            )
        # NOT YET SUPPORTED
        # print_ongoing_process("Searching for parameter files")
        # def_para = "default"
        # add_para = args['para_file']
        # for file in glob.glob(os.path.join(path,'*.py')):
        #   print(file)
        #   if file.startswith("default"):
        #       def_para = file
        #   else:
        #       add_para = file
        # self.load_paras(def_para, add_para)
        # print_ongoing_process("Searching for parameter files", True)
    def prepare_storage(self):
        """
        Prepares the storage directory. By default, the name of the storage is
        "{command_name}_{basename_of_dataset}" and is saved inside the "saves/"
        folder.
        """
        print_ongoing_process("Preparing save directory")
        storage_dir = self.call_para("storage", "storage_dir")
        dir_name = f"{self.args['command']}_{self.call_para('storage','dir_name')}"
        # find_valid_path de-duplicates the name if it already exists.
        path = find_valid_path(os.path.join(storage_dir, dir_name))
        self.storage_dir = path
        if not os.path.exists(path):
            os.makedirs(path)
        else:
            print_warning(
                f"Save path {path} already exists. How? Overwriting of files possible."
            )
        print_ongoing_process(f"Prepared save directory {path}", True)
        # copy user para file
        # Copies make the run reproducible from the save directory alone.
        if self.call_para("storage", "save_para_user"):
            print_ongoing_process("Saving user para file")
            file_name = self.args.get("para_file") + ".py"
            file = os.path.join("paras", file_name)
            if os.path.exists(file):
                shutil.copy(file, os.path.join(path, file_name))
                print_ongoing_process(
                    f"Saved user para file {os.path.join(path,file_name)}", True
                )
            else:
                print_warning(f"Tried copying user parameter file {file}. Not found")
        # copy default para file
        if self.call_para("storage", "save_para_default"):
            print_ongoing_process("Saving default para file")
            file_name = "default.py"
            file = os.path.join("paras", file_name)
            if os.path.exists(file):
                shutil.copy(file, os.path.join(path, file_name))
                print_ongoing_process(
                    f"Saved default para file {os.path.join(path,file_name)}", True
                )
            else:
                print_warning(f"Tried copying default parameter file {file}. Not found")
        if self.call_para("storage", "save_original_call"):
            print_ongoing_process("Saving original call")
            with open(os.path.join(path, "Call.txt"), "w+") as file:
                print(f"Original call: {self.args.get('full_call','N/A')}", file=file)
            print_ongoing_process(
                f'Saved original call at {os.path.join(path,"Call.txt")}', True
            )
        # create temp folder
        self.temp_dir = os.path.join(self.storage_dir, "temp")
        os.mkdir(self.temp_dir)
    def save_main(self):
        """The main saving function that every command goes through
        By default, pickles and saves the `.info` dictionary only (all other
        things are saved by the modules corresponding to the chosen command)
        """
        # Embed the parameters and args so info.p is self-describing.
        self.info["para"] = self.para
        self.info["args"] = self.args
        print_ongoing_process("Saving info file")
        info_path = os.path.join(self.storage_dir, "info.p")
        with open(info_path, "wb") as file:
            pickle.dump(self.info, file)
        print_ongoing_process("Saved info file", True)
    def load_info_file(self, path):
        """
        Loads the info file, used for resuming tasks
        """
        print_ongoing_process("Loading info file")
        with open(path, "rb") as file:
            info = pickle.loads(file.read())
        self.info = info
        # Promote commonly used entries to attributes for convenience.
        if "cluster_indices" in info:
            self.cluster_indices = info["cluster_indices"]
        if "errors" in info:
            self.errors = info["errors"]
        # Current CLI args override whatever was stored.
        info["args"] = self.args
        print_ongoing_process("Loaded info file", True)
        summary_table = {}
        for k, v in info.items():
            summary_table[k] = f"{type(v)}"
        print_table("Items found:", "Key", "Value", summary_table, width=22)
if __name__ == "__main__":
    args = parse_arguments(sys.argv[1:])
    command = args["command"]
    # Handlers are imported lazily so only the chosen module (and its
    # dependencies) gets loaded. argparse restricts `command` to this set.
    if command == "cluster":
        from modules.cluster import ClusterHandler
        hand = ClusterHandler(args)
    elif command == "train":
        from modules.train import TrainHandler
        hand = TrainHandler(args)
    elif command == "cluster_error":
        from modules.clustererror import ClusterErrorHandler
        hand = ClusterErrorHandler(args)
    elif command == "plot_error":
        from modules.plotclustererror import PlotClusterErrorHandler
        # plot_error works purely from an info file; no dataset needed.
        hand = PlotClusterErrorHandler(args, needs_dataset=False)
    elif command == "xyz":
        from modules.cluster_xyz import ClusterXYZHandler
        hand = ClusterXYZHandler(args)
    # actually run the friggin' thing
    hand.run()
    print_successful_exit("run.py exited successfully")
|
import socket
import threading
import os
import datetime

print('')
# Two UDP sockets: socket1 is bound locally for receiving, socket2 sends to
# the peer's address (entered interactively below).
family = socket.AF_INET
protocol = socket.SOCK_DGRAM
socket1 = socket.socket(family , protocol)
server_ip1 = input('enter your ip : ')
server_port1 = int(input('enter your port number : '))
socket1.bind((server_ip1 , server_port1))
socket2 = socket.socket(family , protocol)
server_ip2 = input('enter other end ip : ')
server_port2 = int(input('enter other end port number : '))
print('')
def time():
    """Return the current wall-clock time as an 'HH:MM' string."""
    # str(datetime) looks like 'YYYY-MM-DD HH:MM:SS.ffffff'; keep 'HH:MM'.
    return str(datetime.datetime.now()).split(' ')[1][:5]
def receive():
    """Loop forever: receive UDP datagrams on socket1 and print them."""
    while 1:
        buffer_size = 1024
        msg = socket1.recvfrom(buffer_size)
        # tput setaf 32 / 7: switch terminal colour for the remote message,
        # then back (assumes a tput-capable terminal).
        os.system('tput setaf 32')
        print('\U0001F464' + ' ' + msg[0].decode())
        os.system('tput setaf 7')
        t_ = time()
        # Separator line with a timestamp after each message.
        print('__'*(36) + t_ + '\n')
def send():
    """Loop forever: read lines from stdin and send them to the peer."""
    while 1:
        msg = input()
        socket2.sendto(msg.encode() , (server_ip2 , server_port2))
        t_ = time()
        # Separator line with a timestamp after each sent message.
        print('__'*(36) + t_ + '\n')
# Banner, then run receive and send concurrently; both loop forever, so the
# program is ended by the user (e.g. Ctrl+C).
print('~'*(32) + 'TERMINAL-CHAT' + '~'*(32) + '\n')
receiveThread = threading.Thread(target = receive)
sendThread = threading.Thread(target = send)
receiveThread.start()
sendThread.start()
|
# encoding:utf-8
'''
Created on 2015-6-8
@author: jianfeizhang
'''
'''
car run in road
'you can use extends implement below'
taxi run in road
bus run in road
'you should use bridge design pattern'
taxi run in street
bus run in highway
'''
import sys
class Car:
    """Abstraction side of the bridge: a vehicle that runs on some Road."""
    def __init__(self):
        # Implementor object (a Road); assigned by the client before run().
        self.road = None
    def run(self):
        sys.stdout.write("I am car ")
        self.road.run()
class Taxi(Car):
    """Refined abstraction: prints its own identity, then delegates to road."""
    def run(self):
        sys.stdout.write("I am taxi ")
        self.road.run()
class Bus(Car):
    """Refined abstraction: prints its own identity, then delegates to road."""
    def run(self):
        sys.stdout.write("I am bus ")
        self.road.run()
class Road:
    """Implementor side of the bridge: where a vehicle runs (Python 2 print)."""
    def run(self):
        print "run in road"
class Street(Road):
    """Concrete implementor: running happens in a street."""
    def run(self):
        print "run in street"
class Highway(Road):
    """Concrete implementor: running happens on a highway."""
    def run(self):
        print "run in highway"
def main():
    """Demo: combine each Car abstraction with each Road implementor at runtime."""
    car1 = Taxi()
    car2 = Bus()
    car1.road = Street()
    car1.run()
    car1.road = Highway()
    car1.run()
    car2.road = Street()
    car2.run()
    car2.road = Highway()
    car2.run()
if __name__ == '__main__':
    main()
|
lista = [1, 2, 3, 4, 5]
tupla = ("viernes", "sabado", "domingo")
diccionario = {'nombre': "computacion", 'edad': 20}

# Show the list before and after growing it in place.
print(lista)
lista += [6, 7, 8, 9]
print(lista)

# Tuples are immutable; print the whole thing.
print(tupla)

# Dict access by key, then insertion of a new key/value pair.
print(diccionario['nombre'])
diccionario['pelicula'] = "mi pobre angelito"
print(diccionario)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.