Dataset schema (one record per source file; column name: type and observed stats):

blob_id: string (length 40) | directory_id: string (length 40) | path: string (length 2 to 616) | content_id: string (length 40)
detected_licenses: list (length 0 to 69) | license_type: string (2 classes)
repo_name: string (length 5 to 118) | snapshot_id: string (length 40) | revision_id: string (length 40) | branch_name: string (length 4 to 63)
visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us]
github_id: int64 (2.91k to 686M, nullable) | star_events_count: int64 (0 to 209k) | fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes) | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: string (213 classes)
src_encoding: string (30 classes) | language: string (1 class) | is_vendor: bool (2 classes) | is_generated: bool (2 classes)
length_bytes: int64 (2 to 10.3M) | extension: string (246 classes) | content: string (length 2 to 10.3M)
authors: list (length 1) | author_id: string (length 0 to 212)

blob_id: 418d9168458420a60d8cfac86770de662bf6654f | directory_id: fe2b094102fe46abcdcd71d37adb6724b2b97c5f | path: /skxray/core/fitting/funcs.py | content_id: 05b07cc1cd0f42e81ca82a042f4a1ae7d8d80aac
detected_licenses: [] | license_type: no_license | repo_name: scikit-xray/scikit-beam
snapshot_id: 5e8ae09a406dbf7137e05e2eb09af6b782dba55d | revision_id: 488de03741fbcf743ac96cea5079df8eee01eca9 | branch_name: refs/heads/master
visit_date: 2021-01-18T18:55:27.169831 | revision_date: 2015-12-09T22:08:39 | committer_date: 2015-12-09T22:08:39
github_id: 47,767,405 | star_events_count: 0 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: 2015-12-10T14:35:17 | gha_created_at: 2015-12-10T14:35:16 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,095 | extension: py
content:
from __future__ import absolute_import, division, print_function
import six
import numpy as np


def fit_quad_to_peak(x, y):
    """
    Fits a quadratic of the form y = b[0](x - b[1])**2 + b[2]
    to the data points handed in, and computes R2
    (a measure of goodness of fit).

    Parameters
    ----------
    x : ndarray
        locations
    y : ndarray
        values

    Returns
    -------
    b : tuple
        coefficients of form y = b[0](x - b[1])**2 + b[2]
    R2 : float
        R2 value
    """
    lenx = len(x)
    # some sanity checks
    if lenx < 3:
        raise ValueError('insufficient points handed in')
    # set up fitting array
    X = np.vstack((x ** 2, x, np.ones(lenx))).T
    # use linear least squares fitting
    beta, _, _, _ = np.linalg.lstsq(X, y)
    SSerr = np.sum((np.polyval(beta, x) - y) ** 2)
    SStot = np.sum((y - np.mean(y)) ** 2)
    # re-map the returned value to match the form we want
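    # background note (added, not from the original source): completing the square gives
    #   y = a*x**2 + b*x + c  =  a*(x + b/(2*a))**2 + (c - b**2/(4*a)),
    # so the peak sits at x = -b/(2*a), which is exactly what ret_beta encodes below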
    ret_beta = (beta[0],
                -beta[1] / (2 * beta[0]),
                beta[2] - beta[0] * (beta[1] / (2 * beta[0])) ** 2)
    return ret_beta, 1 - SSerr / SStot

authors: ["edill@bnl.gov"] | author_id: edill@bnl.gov

blob_id: 7f68a16e5d51f3eee977e0c90686267e068bbed1 | directory_id: c50a97d71f1103162b5577d8f16c4ef17faa0386 | path: /testapp/urls.py | content_id: de16235afe87858038ad64e1b5f1cc9bc78bd2c4
detected_licenses: [] | license_type: no_license | repo_name: mfantonidas/CoreNet
snapshot_id: e3913829f50891395d1b38bd3e278b9c1f98e694 | revision_id: af8b96dfc17a2dfb30e5d7a4c27d013bfbf0933f | branch_name: refs/heads/master
visit_date: 2020-12-24T17:36:08.078185 | revision_date: 2014-09-09T12:42:30 | committer_date: 2014-09-09T12:42:30
github_id: 21,345,116 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 116 | extension: py
content:
from django.conf.urls import *
from testapp.views import hello1

urlpatterns = patterns('',
    url(r'', hello1),
)

authors: ["root@localhost.localdomain"] | author_id: root@localhost.localdomain

blob_id: 8349e5a184400657b2562a3f810e49de0eb18d9b | directory_id: ed2636c9cdd12555308238af108bbe670a3cda50 | path: /part 1/preCogTeam.py | content_id: d35422c934873f7b9597e44425e8549d6134e5f4
detected_licenses: [] | license_type: no_license | repo_name: zzeleznick/contest1
snapshot_id: b4c0706047268d77d83f3b87fcbf832020d26044 | revision_id: 93ba5288d25b467d21b009419b620b4c5060f402 | branch_name: refs/heads/master
visit_date: 2021-01-17T07:52:51.054125 | revision_date: 2015-07-21T09:37:54 | committer_date: 2015-07-21T09:37:54
github_id: 38,462,903 | star_events_count: 0 | fork_events_count: 4
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 15,522 | extension: py
content:
# myTeam.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

from captureAgents import CaptureAgent
import random, time, util
from game import Directions
from game import Actions
from util import nearestPoint
import itertools
import copy

"""
print list(itertools.product([1,2,3], [4,5,6]))
[(1, 4), (1, 5), (1, 6),
 (2, 4), (2, 5), (2, 6),
 (3, 4), (3, 5), (3, 6)]
"""

#################
# Team creation #
#################

def createTeam(firstIndex, secondIndex, isRed,
               first = 'DummyAgent', second = 'DummyAgent'):
    """
    This function should return a list of two agents that will form the
    team, initialized using firstIndex and secondIndex as their agent
    index numbers. isRed is True if the red team is being created, and
    will be False if the blue team is being created.

    As a potentially helpful development aid, this function can take
    additional string-valued keyword arguments ("first" and "second" are
    such arguments in the case of this function), which will come from
    the --redOpts and --blueOpts command-line arguments to capture.py.
    For the nightly contest, however, your team will be created without
    any extra arguments, so you should make sure that the default
    behavior is what you want for the nightly contest.
    """
    # The following line is an example only; feel free to change it.
    return [eval(first)(firstIndex), eval(second)(secondIndex)]

##########
# Agents #
##########

class DummyAgent(CaptureAgent):
    """
    A Dummy agent to serve as an example of the necessary agent structure.
    You should look at baselineTeam.py for more details about how to
    create an agent as this is the bare minimum.
    """

    def registerInitialState(self, gameState):
        """
        This method handles the initial setup of the
        agent to populate useful fields (such as what team
        we're on).

        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)

        IMPORTANT: This method may run for at most 15 seconds.
        """
        '''
        Make sure you do not delete the following line. If you would like to
        use Manhattan distances instead of maze distances in order to save
        on initialization time, please take a look at
        CaptureAgent.registerInitialState in captureAgents.py.
        '''
        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)

        "G A M E   K E Y   L O C A T I O N S   D E T E R M I N A T I O N"
        if self.red:
            leftEdge = gameState.data.layout.width / 2
            rightEdge = gameState.data.layout.width - 2  # don't need the last wall
            self.safeColumn = leftEdge - 2  # -1 doesn't always seem to work
        else:
            leftEdge = 1
            rightEdge = gameState.data.layout.width / 2
            self.safeColumn = rightEdge + 2
        self.safeSpaces = []
        for h in xrange(1, gameState.data.layout.height - 1):
            if not gameState.data.layout.isWall((self.safeColumn, h)):
                self.safeSpaces += [(self.safeColumn, h)]

        "S T A T E   A S S I G N M E N T"
        pos = gameState.getAgentState(self.index).getPosition()
        self.friend = min(2 + int(not self.red), 2 - self.index + 2 * int(not self.red))
        friendPos = gameState.getAgentState(self.friend).getPosition()
        opps = [gameState.getAgentState(el).getPosition() for el in [1 - int(not self.red), 3 - int(not self.red)]]
        print "I am agent", self.index, "at position ", pos
        # print "agent 0:", gameState.getAgentState(0).getPosition()
        print "My friend agent", self.friend, "is at position ", friendPos
        print "My first enemy agent is at position ", opps[0]
        print "My second enemy agent is at position ", opps[1]
        self.top = False
        self.undecided = False
        if pos[1] > friendPos[1]:
            print "My friend is lower on the map, and I will take top Quad"
            self.top = True
        elif pos[1] < friendPos[1]:
            print "My friend is higher on the map, and I will take bottom Quad"
        else:
            self.undecided = True

        "F O O D   A S S I G N M E N T"
        self.initFood = self.getFood(gameState).asList()
        self.myFood = self.initFood[:]  # this will be updated during our A* Search for theoretical consumption

        "I N I T I A L   F O O D   A S S I G N M E N T S"
        start = time.time()
        ourMoves = aStarSearch(self, gameState, heuristic = baseHeuristic, alternateStart = (self.safeSpaces[-int(self.top)], self.safeSpaces[-int(not self.top)]))
        self.moves = ourMoves[0]
        print 'eval time for moves: %.4f' % (time.time() - start)
        print "Optimal Moves: ", self.moves

        "D E B U G G I N G"
        print "Coloring my safe column white"
        self.debugDraw([(self.safeColumn, el) for el in xrange(0, gameState.data.layout.height)], [1, 1, 1], clear=False)
        print "Coloring my safe spaces", self.safeSpaces, "blue"
        self.debugDraw(self.safeSpaces, [0, 0, 1], clear=False)

    def chooseAction(self, gameState):
        """
        Picks among actions randomly.
        """
        # actions = gameState.getLegalActions(self.index)
        # You can profile your evaluation time by uncommenting these lines
        # start = time.time()
        # values = [self.evaluate(gameState, a) for a in actions]  # no evaluation currently
        # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)
        # return random.choice(actions)
        while self.moves:
            move = self.moves[0]
            self.moves = self.moves[1:]
            print "Using predetermined move:", move
            return move

def euc(xy1, xy2):
    return ((xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2) ** 0.5

def man(xy1, xy2):
    return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])

'''
def debugDraw(self, cells, color, clear=False):
Draws a colored box on each of the cells you specify. If clear is True, will clear all old drawings before drawing on the specified cells. This is useful for debugging the locations that your code works with. color: list of RGB values between 0 and 1 (i.e. [1,0,0] for red) cells: list of game positions to draw on (i.e. [(20,5), (3,22)])
'''

class Node:
    def __init__(self, states = [], directions = [[], []], cost = 0):
        """
        :param states: an array containing tuples of position and food remaining
        :param directions: an array of two lists containing North, East, South, West, or Stop directions for both agents
        :param cost: an integer representing the cost of directions
        :return: a node
        """
        self.states = states  # [ ((1,2),(1,1)), 20), ... ]
        self.directions = directions  # [ ['North'], ['North'] ]
        self.cost = cost
        self.hCost = 0

    def getLastState(self):
        return self.states[-1]

    def getStates(self):
        return self.states

    def getDir(self):
        return self.directions

    def getCost(self):
        return self.cost

    def addState(self, state):
        self.states += [state]

    def addDir(self, newDirection, playerID):  # 0 for first, or 1 for second ['N', 'S']
        self.directions[playerID] += [newDirection]
        print "My moves as player", playerID, ":", self.directions[playerID]

    def sillyCost(actions):
        return 1

    def addNodeImmutable(self, state, dir, cost):  # ['North', 'West']
        s = self.states[:]
        print "current stored directions: ", self.directions[:]
        print "new directions: ", dir
        d = copy.deepcopy(self.directions)
        # can't use shallow copy e.g. copy.copy or [:] for lists in list
        c = self.cost
        output = Node(s, d, c)
        # for lists, if you reference one list to a second, the second is a pointer, not a copy; use slice for lists
        output.addState(state)
        output.addDir(dir[0], 0)
        output.addDir(dir[1], 1)
        # print output.getDir()
        output.cost = cost
        return output

def agentsAtGoalState(agent, gameState, positions, fg = None):
    if not fg:
        fg = agent.getFood(gameState).asList()
    unvisited = {}
    for el in fg:
        unvisited.update({el: False})
    for visit in positions:
        if visit in unvisited:
            unvisited.pop(visit)
    myPos = gameState.getAgentState(agent.index).getPosition()
    friendPos = gameState.getAgentState(agent.friend).getPosition()
    if agent.red:
        meHome = myPos[0] <= agent.safeColumn
        friendHome = friendPos[0] <= agent.safeColumn
    else:
        meHome = myPos[0] >= agent.safeColumn
        friendHome = friendPos[0] >= agent.safeColumn
    # return len(unvisited) == 0
    # print "There are", len(unvisited), "dots remaining", "cutoff is ", len(agent.initFood) / 5
    return len(unvisited) <= 2 and meHome and friendHome

def getSuccessorsAlt(gameState, pos):
    """
    Returns successor states, the actions they require, and a cost of 1.

    As noted in search.py:
    For a given state, this should return a list of triples,
    (successor, action, stepCost), where 'successor' is a
    successor to the current state, 'action' is the action
    required to get there, and 'stepCost' is the incremental
    cost of expanding to that successor
    """
    successors = []
    for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
        x, y = pos
        dx, dy = Actions.directionToVector(action)
        nextx, nexty = int(x + dx), int(y + dy)
        if not gameState.data.layout.walls[nextx][nexty]:
            nextState = (nextx, nexty)
            cost = 1  # self.costFn(nextState)
            successors.append((nextState, action, cost))
    return successors

def getCostOfActions(gameState, pos, actions):
    """
    Returns the cost of a particular sequence of actions. If those actions
    include an illegal move, return 999999.
    """
    if actions == None: return 999999
    x, y = pos
    cost = 0
    print "ACTIONS Performed: ", actions
    for action in actions:
        # figure out the next state and see whether it's legal
        dx, dy = Actions.directionToVector(action)
        x, y = int(x + dx), int(y + dy)
        print "Checking", (x, y)
        if gameState.data.layout.walls[x][y]: print "Wall at", (x, y); return 999999
        cost += 1  # self.costFn((x,y))
    return cost

def baseHeuristic(node, myFood, gameState, distanceFn = 1):
    print "Passed in", myFood
    if not myFood:
        return 0
    allPositions = []
    # [ ( ((x,y), (x2,y2)), 10 ), ( ((x3,y3), (x4,y4)), 9 ) ...
    for tup in node.getStates():
        allPositions += [tup[0][0]]
        allPositions += [tup[0][1]]
    unvisited = {}
    for el in myFood:
        unvisited.update({el: False})
    for visit in allPositions:
        if visit in unvisited:
            print "Found dot:", visit
            unvisited.pop(visit)
    print "Visited:", allPositions
    return len(unvisited)
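
# Background note (added): A* orders its fringe by f(n) = g(n) + h(n), where g is the
# path cost accumulated so far (here getCostOfActions over both agents' action lists)
# and h is the heuristic estimate of remaining cost (here baseHeuristic, the count of
# still-uneaten dots). The fringe.push(modnode, h + g) call below implements exactly
# this ordering.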
def aStarSearch(agent, gameState, heuristic = baseHeuristic, alternateStart = False):
    """Search the node that has the lowest combined cost and heuristic first."""
    if not alternateStart:
        myStart = gameState.getAgentState(agent.index).getPosition()
        friendStart = gameState.getAgentState(agent.friend).getPosition()
    else:
        myStart = alternateStart[0]
        friendStart = alternateStart[1]
    closed = {}
    initNumFood = len(agent.getFood(gameState).asList())
    fringe = util.PriorityQueue()
    init = Node([((myStart, friendStart), initNumFood)], [[], []], 0)
    init.hCost = initNumFood
    fringe.push(init, init.hCost)  # PriorityQueue.push(item, priority)
    while True:
        if fringe.isEmpty():
            print "Fringe is empty"
            return None
        node = fringe.pop()
        state = node.getLastState()
        print "State passed as", state
        allPositions = []
        # [ ( ((x,y), (x2,y2)), 10 ), ( ((x3,y3), (x4,y4)), 9 ) ...
        for tup in node.getStates():
            allPositions += [tup[0]]
        if agentsAtGoalState(agent, gameState, allPositions, fg = agent.myFood):  # TODO Resolve Redundancy in Goal State and Heuristic
            print "reached goal!"
            return node.getDir()
        if state not in closed:
            print "Unvisited:", state
            closed.update({state: node.hCost})
            # state -> ( ( (x,y), (x2,y2) ), 10 )
            myPos = state[0][0]
            friendPos = state[0][1]
            # curNumFood = state[1]
            # note that getSuccessorsAlt returns ((5,4), 'South', 1) --> direction encoded
            mySucs = getSuccessorsAlt(gameState, myPos)
            friendSucs = getSuccessorsAlt(gameState, friendPos)
            myNextPositions, myPotentialActions = [], []
            for tup in mySucs:
                myNextPositions += [tup[0]]
                myPotentialActions += [tup[1]]
            friendNextPositions, friendPotentialActions = [], []
            for tup in friendSucs:
                friendNextPositions += [tup[0]]
                friendPotentialActions += [tup[1]]
            allActionPairs = list(itertools.product(myPotentialActions, friendPotentialActions))
            # >>> zz = ['N', 'S']
            # >>> zzz = ['W', 'N', 'S', 'E']
            # >>> z = list(itertools.product(zz, zzz))
            # >>> z
            # [('N', 'W'), ('N', 'N'), ('N', 'S'), ('N', 'E'), ('S', 'W'), ('S', 'N'), ('S', 'S'), ('S', 'E')]
            # print "walls at", gameState.data.layout.walls.asList()
            for child in allActionPairs:
                print "Action Pair:", child
                myActionList = node.getDir()[0] + [child[0]]
                friendActonList = node.getDir()[1] + [child[1]]
                print "my next action", child[0]
                print "friend next action", child[1]
                dx, dy = Actions.directionToVector(child[0])
                myNextPos = (int(myPos[0] + dx), int(myPos[1] + dy))
                fdx, fdy = Actions.directionToVector(child[1])
                friendNextPos = (int(friendPos[0] + fdx), int(friendPos[1] + fdy))
                # note that the food encoding in the state of each node will be that of its parent
                modnode = node.addNodeImmutable(((myNextPos, friendNextPos), node.hCost), (child[0], child[1]), getCostOfActions(gameState, myStart, myActionList) + getCostOfActions(gameState, friendStart, friendActonList))
                # addNodeImmutable will add the new action and state, and then recalculate input cost
                # the getCost() of the returned node will be updated
                # h = heuristic(modnode.getLastState(), agent.myFood, gameState, distanceFn = agent.getMazeDistance)
                h = heuristic(modnode, agent.myFood, gameState, distanceFn = agent.getMazeDistance)
                g = modnode.getCost()
                modnode.hCost = h
                print "Heuristic cost at", h, "; Step cost at", g, "from ", myPos, "and ", friendPos
                print "Intended Destinations: ", myNextPos, "and ", friendNextPos, "\n"
                fringe.push(modnode, h + g)

authors: ["zzeleznick@gmail.com"] | author_id: zzeleznick@gmail.com

blob_id: 56da948fc6caac73637afb3a249d57bed39a5b9c | directory_id: 2135b82781924597cf3ac9ad6b36663fecba1744 | path: /Day 5/S9-10.py | content_id: 0cd696aefe8255a757d60aca78558b05e3e47c50
detected_licenses: [] | license_type: no_license | repo_name: TJWeiten/Advent-of-Code-2020
snapshot_id: 4266198bc95418f5f2d6874d5b3f8473d29e8b85 | revision_id: 04ba25b93ac21a7856a9a0db839369591ca71115 | branch_name: refs/heads/main
visit_date: 2023-01-29T20:57:09.955241 | revision_date: 2020-12-13T09:46:18 | committer_date: 2020-12-13T09:46:18
github_id: 317,803,958 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,681 | extension: py
content:
import enum
import os, sys
import math
import re


def startup_code():
    current_working_directory = __file__.rsplit('/', 1)[0]
    with open(current_working_directory + '/D5_Input.txt', 'r') as file:
        input_list = file.read().splitlines()
    return list(input_list)


'''
Binary search off of a coded string,
given an index range we're searching and
return the index we end up at
'''
def binary_search(binary_str, low, high):
    for i, c in enumerate(binary_str):
        if c == "F" or c == "L":
            high = high - ((high - low) // 2) - 1
        else:
            low = low + ((high - low) // 2) + 1
    return low
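
# Worked example (added; from the Advent of Code 2020 day 5 statement): for the pass
# "FBFBBFFRLR", the row part "FBFBBFF" halves the 0..127 range down to row 44, the
# column part "RLR" halves 0..7 down to column 5, and the seat id is 44 * 8 + 5 = 357.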

'''
Figure out for all the boarding passes we have access to,
what all their seat id's are
'''
def build_seat_ids(input_list):
    seat_ids = []
    for boarding_pass in input_list:
        row_str, seat_str = boarding_pass[0:7], boarding_pass[7:]
        seat_row = binary_search(row_str, 0, 127)
        seat_column = binary_search(seat_str, 0, 7)
        seat_ids.append(seat_row * 8 + seat_column)
    return seat_ids


'''
Just find the max boarding pass
'''
def star_9_solution(seat_ids):
    return max(seat_ids)


'''
Find the gap seat by using the mathematical property that
our missing seat must be equal to the sum of all seats minus
the sum of all seats present except ours
'''
def star_10_solution(seat_ids):
    return sum(range(min(seat_ids), max(seat_ids) + 1)) - sum(seat_ids)


if __name__ == "__main__":
    input_list = startup_code()
    seat_ids = build_seat_ids(input_list)
    print("Star 9 Solution: {}".format(star_9_solution(seat_ids)))
    print("Star 10 Solution: {}".format(star_10_solution(seat_ids)))

authors: ["me@tjweiten.com"] | author_id: me@tjweiten.com

blob_id: c70aa0b43782f72bcc444591df7880c3e0f778b2 | directory_id: 9128fa598cc7a3e1d494243b7da26adaed098412 | path: /distributed_gp/utility/event_feature_sparse.py | content_id: 61a7fac8b6141e09629e8a72d90ed9706e342fed
detected_licenses: [] | license_type: no_license | repo_name: daifanxiang/CityBeat
snapshot_id: ff45967f48fc7a65337300fc32cf9f8088471fe3 | revision_id: 6b7bbb4fc50446f7718dd456e6cd4fcd8082fca3 | branch_name: refs/heads/master
visit_date: 2021-01-15T21:19:35.069155 | revision_date: 2013-04-11T02:27:43 | committer_date: 2013-04-11T02:27:43
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,978 | extension: py
content:
from event_interface import EventInterface
from event_feature import EventFeature
from photo_interface import PhotoInterface
from photo import Photo
from region import Region
from event import Event
from caption_parser import CaptionParser
from stopwords import Stopwords
from corpus import Corpus
from _kl_divergence import kldiv
import kl_divergence as KLDivergence

import sys
import operator
import string
import types
import random
import math


class EventFeatureSparse(EventFeature):
    # this class is the extension of class Event, especially for feature extraction,
    # to prevent the class Event from being too long to read

    def __init__(self, event, corpus=None, representor=None):
        super(EventFeatureSparse, self).__init__(event, corpus, representor)

    def getAllWordTFIDF(self):
        index_list, word_list, tfidf_list = self._representor.getTfidfVector(self._event)
        feature_list = []
        for i in xrange(0, len(index_list)):
            feature_list.append([index_list[i], word_list[i], tfidf_list[i]])
        return feature_list

    def printFeatures(self, word_index):
        print '{',
        tfidf_list = self.getAllWordTFIDF()
        sorted_tfidf_list = []
        for ind, word, freq in tfidf_list:
            assert word in word_index
            sorted_tfidf_list.append([word_index[word], freq])
        sorted_tfidf_list.sort(key=operator.itemgetter(0))
        start = True
        for ind, freq in sorted_tfidf_list:
            if start:
                start = False
                print ind, freq,
            else:
                print ',', ind, freq,
        print ',', len(word_index), int(self.getLabel()),
        print '}'

    @staticmethod
    def GenerateArffFileHeader(word_list):
        print '@relation CityBeatEvents'
        for word in word_list:
            print '@attribute tfidf_' + word.encode('utf8') + ' real'
        print '@attribute label {1,-1}'
        print '@data'


if __name__ == '__main__':
    generateData()
    # ei = EventInterface()
    # ei.setDB('historic_alarm')
    # ei.setCollection('labeled_event')
    # event = ei.getDocument()
    # e = EventFeature(event)
    # e.getHistoricFeatures()

authors: ["sasnzy13@163.com"] | author_id: sasnzy13@163.com

blob_id: a7922a052863ee4e38d070e04d430cdadf728cef | directory_id: 861bc07f24c17811d6a0e73f441016d02cd55900 | path: /src/uebungen/gpio/led-an.py | content_id: 19cd349863abbd74445cba360df8fce1f2003f5c
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: mobi3006/pico
snapshot_id: ac05c3eda17d78aba906aceb11f79d484556abf9 | revision_id: a94225415fec1308a9b728d6750b19a406509ec8 | branch_name: refs/heads/master
visit_date: 2022-07-19T03:50:39.878730 | revision_date: 2022-07-05T12:15:42 | committer_date: 2022-07-05T12:15:42
github_id: 188,662,683 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 335 | extension: py
content:
from RPi import GPIO

# initialize GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(37, GPIO.OUT)

while True:
    print("an oder aus?")  # "on or off?"
    state = input()
    if state == "an":  # "an" = on
        # build up the voltage potential (connected to +3.3 V) => let current flow
        GPIO.output(37, False)
    else:
        GPIO.output(37, True)

GPIO.cleanup()

authors: ["mobi3006@googlemail.com"] | author_id: mobi3006@googlemail.com

blob_id: 340d5f828a3eb62c081cbab6d61d7d9e6124a415 | directory_id: b6f92007e9ca0a9cf5e7bb91e1f31ecb7241b224 | path: /day18/day18home/app01/migrations/0010_auto_20170601_2258.py | content_id: 816a1c4094c4e82f9cb603fc5908dff01ae7b730
detected_licenses: [] | license_type: no_license | repo_name: NoSongmaster/home_project
snapshot_id: 291fe98879ad87541f4c2426d723918d70402e99 | revision_id: 2e95823315532e3173f8a513cb4cc4006cb1b70d | branch_name: refs/heads/master
visit_date: 2021-01-25T04:42:53.245646 | revision_date: 2017-11-23T01:53:48 | committer_date: 2017-11-23T01:53:48
github_id: 93,472,755 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 581 | extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-01 14:58
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('app01', '0009_auto_20170601_2256'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='firm',
        ),
        migrations.RemoveField(
            model_name='userinfo',
            name='m',
        ),
        migrations.DeleteModel(
            name='Host',
        ),
    ]

authors: ["stu142601@71e50119-498d-4a86-9c23-cb20dfe762fb"] | author_id: stu142601@71e50119-498d-4a86-9c23-cb20dfe762fb

blob_id: 8d8935debebe2fddce3908d8769a895a02af0e7f | directory_id: c51623de835fad70e704b5050ca48551b89b3fe1 | path: /bend-flux.py | content_id: 2658376d62e89ee9b18b0f3aea6dd9e19bc99382
detected_licenses: [] | license_type: no_license | repo_name: deyh2020/meepDEMO
snapshot_id: e254140f04a59776000dfd2bcf8a3463691d8a94 | revision_id: 5188cf7477195e7a44244ddccaa2a58a45d76bb0 | branch_name: refs/heads/master
visit_date: 2023-03-22T14:40:28.164875 | revision_date: 2019-11-16T18:18:38 | committer_date: 2019-11-16T18:18:38
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,627 | extension: py
content:
# -*- coding: utf-8 -*-
# transmission around a 90-degree waveguide bend in 2d
from __future__ import division

import meep as mp
import numpy as np
import matplotlib.pyplot as plt

resolution = 10  # pixels/um

sx = 16  # size of cell in X direction
sy = 32  # size of cell in Y direction
cell = mp.Vector3(sx, sy, 0)

dpml = 1.0
pml_layers = [mp.PML(dpml)]

pad = 4  # padding distance between waveguide and cell edge
w = 1    # width of waveguide

wvg_xcen = 0.5 * (sx - w - 2 * pad)   # x center of vert. wvg
wvg_ycen = -0.5 * (sy - w - 2 * pad)  # y center of horiz. wvg

geometry = [mp.Block(size=mp.Vector3(mp.inf, w, mp.inf),
                     center=mp.Vector3(0, wvg_ycen, 0),
                     material=mp.Medium(epsilon=12))]

fcen = 0.15  # pulse center frequency
df = 0.1     # pulse width (in frequency)
sources = [mp.Source(mp.GaussianSource(fcen, fwidth=df),
                     component=mp.Ez,
                     center=mp.Vector3(-0.5 * sx + dpml, wvg_ycen, 0),
                     size=mp.Vector3(0, w, 0))]

sim = mp.Simulation(cell_size=cell,
                    boundary_layers=pml_layers,
                    geometry=geometry,
                    sources=sources,
                    resolution=resolution)

nfreq = 100  # number of frequencies at which to compute flux

# reflected flux
refl_fr = mp.FluxRegion(center=mp.Vector3(-0.5 * sx + dpml + 0.5, wvg_ycen, 0),
                        size=mp.Vector3(0, 2 * w, 0))
refl = sim.add_flux(fcen, df, nfreq, refl_fr)

# transmitted flux
tran_fr = mp.FluxRegion(center=mp.Vector3(0.5 * sx - dpml, wvg_ycen, 0),
                        size=mp.Vector3(0, 2 * w, 0))
tran = sim.add_flux(fcen, df, nfreq, tran_fr)

pt = mp.Vector3(0.5 * sx - dpml - 0.5, wvg_ycen)
sim.run(until_after_sources=mp.stop_when_fields_decayed(50, mp.Ez, pt, 1e-3))

# for normalization run, save flux fields data for reflection plane
straight_refl_data = sim.get_flux_data(refl)
# save incident power for transmission plane
straight_tran_flux = mp.get_fluxes(tran)

sim.reset_meep()

geometry = [mp.Block(mp.Vector3(sx - pad, w, mp.inf), center=mp.Vector3(-0.5 * pad, wvg_ycen), material=mp.Medium(epsilon=12)),
            mp.Block(mp.Vector3(w, sy - pad, mp.inf), center=mp.Vector3(wvg_xcen, 0.5 * pad), material=mp.Medium(epsilon=12))]

sim = mp.Simulation(cell_size=cell,
                    boundary_layers=pml_layers,
                    geometry=geometry,
                    sources=sources,
                    resolution=resolution)

# reflected flux
refl = sim.add_flux(fcen, df, nfreq, refl_fr)

tran_fr = mp.FluxRegion(center=mp.Vector3(wvg_xcen, 0.5 * sy - dpml - 0.5, 0),
                        size=mp.Vector3(2 * w, 0, 0))
tran = sim.add_flux(fcen, df, nfreq, tran_fr)

# for normal run, load negated fields to subtract incident from refl. fields
sim.load_minus_flux_data(refl, straight_refl_data)

pt = mp.Vector3(wvg_xcen, 0.5 * sy - dpml - 0.5)
sim.run(until_after_sources=mp.stop_when_fields_decayed(50, mp.Ez, pt, 1e-3))

bend_refl_flux = mp.get_fluxes(refl)
bend_tran_flux = mp.get_fluxes(tran)

flux_freqs = mp.get_flux_freqs(refl)

wl = []
Rs = []
Ts = []
for i in range(nfreq):
    wl = np.append(wl, 1 / flux_freqs[i])
    Rs = np.append(Rs, -bend_refl_flux[i] / straight_tran_flux[i])
    Ts = np.append(Ts, bend_tran_flux[i] / straight_tran_flux[i])
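# Note (added): with the lossless dielectric used here, R + T should be close to 1 at
# each frequency; the residual 1 - Rs - Ts plotted below is power scattered away at
# the bend. The minus sign on bend_refl_flux accounts for reflected flux flowing in
# the -x direction.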
if mp.am_master():
    plt.figure()
    plt.plot(wl, Rs, 'bo-', label='reflectance')
    plt.plot(wl, Ts, 'ro-', label='transmittance')
    plt.plot(wl, 1 - Rs - Ts, 'go-', label='loss')
    plt.axis([5.0, 10.0, 0, 1])
    plt.xlabel("wavelength (μm)")
    plt.legend(loc="upper right")
    plt.show()

authors: ["tnakaicode@gmail.com"] | author_id: tnakaicode@gmail.com

blob_id: e7217d4b86f2ae132f9c4a314daf2121c60d224e | directory_id: 8ee8457045d2a73899b630f896c19f839782c2c2 | path: /prac_05/state_names.py | content_id: 1fa12994546f6d143487d4f7c7841176b3933083
detected_licenses: [] | license_type: no_license | repo_name: ChrisPrince99/Practicals_CP1804_2019
snapshot_id: 44b65b57a9a86986ca6a19489cfcdfc6acaa61bd | revision_id: efaeadcd047295bb49d8e60cc83c7120c837e71c | branch_name: refs/heads/master
visit_date: 2020-07-04T10:15:53.848666 | revision_date: 2019-11-02T03:48:09 | committer_date: 2019-11-02T03:48:09
github_id: 202,253,613 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-09-04T03:24:36 | gha_created_at: 2019-08-14T01:56:04 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 647 | extension: py
content:
"""
CP1404/CP5632 Practical
State names in a dictionary
File needs reformatting
"""
STATE_NAMES = {"QLD": "Queensland", "NSW": "New South Wales", "NT": "Northern Territory", "WA": "Western Australia",
"ACT": "Australian Capital Territory", "VIC": "Victoria", "TAS": "Tasmania"}
# print(STATE_NAMES)
# for name, long_name in STATE_NAMES.items():
# print("{} is {}".format(name, long_name))
state = input("Enter short state: ").upper()
while state != "":
if state in STATE_NAMES:
print(state, "is", STATE_NAMES[state])
else:
print("Invalid short state")
state = input("Enter short state: ").upper()

authors: ["Christopher.Prince@my.jcu.edu.au"] | author_id: Christopher.Prince@my.jcu.edu.au

blob_id: 7494e4c7157f0fc992ffa6cb0882c644c86a41c9 | directory_id: 59339fbfab6ef6f65236719e1e32345381a90a88 | path: /1. Algorithm Toolbox/Programming Assignment 1 Sum of Two Digits/sum.py | content_id: 7bd4828e1e04eeca4191c490d56588d6dd7cea35
detected_licenses: [] | license_type: no_license | repo_name: craigpauga/Data-Structure-and-Algorithms
snapshot_id: 24cce4540129821de0f0b390bb86520c7d9033ea | revision_id: be236e02d0a617636bb2c5d3df1072db4ccfc8f7 | branch_name: refs/heads/master
visit_date: 2020-05-19T16:02:11.622400 | revision_date: 2019-05-06T00:28:47 | committer_date: 2019-05-06T00:28:47
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 89 | extension: py
content:
# python 3
import sys
[a, b] = map(int, sys.stdin.readline().split())
c = a + b
print(c)

authors: ["craig.m.pauga@gmail.com"] | author_id: craig.m.pauga@gmail.com

blob_id: bd1457cf855018859a1f0ea37a49f192eaf8c7ed | directory_id: bafccce6b5c7f2e50768af3cfe6559f33536225d | path: /hw4pr4/hw4pr4.py | content_id: 7e68fb545216001a29ee4aae92b948fd9037ed05
detected_licenses: [] | license_type: no_license | repo_name: oobazuaye/CS-Research-Summer-2011
snapshot_id: 3e7b7820e3c84ce0eea9ab1fe5b90055bd14b35b | revision_id: 046a535d11e9028d7f0e88269c91640b79d6b367 | branch_name: refs/heads/master
visit_date: 2021-01-20T15:44:27.944947 | revision_date: 2013-02-16T10:42:52 | committer_date: 2013-02-16T10:42:52
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,194 | extension: py
content:
# createBinaryImages.py

from cs5png import *


def testBinaryImage():
    """ run this function to create an 8x8 alien image
        named binary.png
    """
    ALIEN = "0"*8 + "11011011"*2 + "0"*8 + "00001000" + \
            "01000010" + "01111110" + "0"*8
    # this function is imported from cs5png.py
    NUM_ROWS = 8
    NUM_COLS = 8
    binaryIm(ALIEN, NUM_COLS, NUM_ROWS)
    # that should create a file, binary.png, in this
    # directory with the 8x8 image...


def change(p):
    """ change takes in a pixel (an [R,G,B] list)
        and returns a new pixel to take its place!
    """
    red = p[0]
    green = p[1]
    blue = p[2]
    return [255-red, 255-green, 255-blue]


def testImageProcessing():
    """ run this function to read in the in.png image,
        change it, and write out the result to out.png
    """
    Im_pix = getRGB('in.png')  # read in the in.png image
    print "The first two pixels of the first row are",
    print Im_pix[0][0:2]
    # remember that Im_pix is a list (the image)
    # of lists (each row) of lists (each pixel is [R,G,B])
    New_pix = [[[255 - num for num in p] for p in row] for row in Im_pix]
    # now, save to the file 'out.png'
    saveRGB(New_pix, 'out.png')


def flipper(row):
    row.reverse()
    return row


def flipHoriz(image):
    Im_pix = getRGB(image)
    New_pix = [flipper(row) for row in Im_pix]
    saveRGB(New_pix, 'out.png')


def mirrorHoriz(image):
    Im_pix = getRGB(image)
    # keep the left half of each row and mirror it onto the right half
    New_pix = [[row[i] if i < len(row)/2 else row[len(row) - 1 - i] for i in range(len(row))] for row in Im_pix]
    saveRGB(New_pix, 'out.png')


def flipVert(image):
    Im_pix = getRGB(image)
    New_pix = [Im_pix[len(Im_pix) - 1 - i] for i in range(len(Im_pix))]
    saveRGB(New_pix, 'out.png')


def mirrorVert(image):
    Im_pix = getRGB(image)
    # keep the top half and mirror it onto the bottom half
    New_pix = [Im_pix[i] for i in range(len(Im_pix)/2)] + [Im_pix[len(Im_pix)/2 - 1 - i] for i in range(len(Im_pix)/2)]
    saveRGB(New_pix, 'out.png')


def scale(image):
    Im_pix = getRGB(image)
    # keep every second row and every second column
    New_pix = [[Im_pix[i][j] for j in range(len(Im_pix[i])) if j % 2 == 0] for i in range(len(Im_pix)) if i % 2 == 0]
    saveRGB(New_pix, 'out.png')

authors: ["oobazuaye@g.hmc.edu"] | author_id: oobazuaye@g.hmc.edu

blob_id: 0cb1630d22f89669c738d8606078d8d105f2671c | directory_id: 172da6fd31bace8be454f3b605a22ce7b3785676 | path: /setup.py | content_id: 5477f1419af79a0f5b34859b78e19ed8a2c32d8a
detected_licenses: ["MIT"] | license_type: permissive | repo_name: mailtostanley/gaft
snapshot_id: e6dde9d657a658bfc7ca0f884f7e5675fafd8252 | revision_id: eaffb6943d8f37eb4b6c7c803dd21a196673c779 | branch_name: refs/heads/master
visit_date: 2021-01-02T08:45:05.284165 | revision_date: 2017-08-01T03:09:54 | committer_date: 2017-08-01T03:09:54
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,477 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup, find_packages
from gaft import __version__ as version

maintainer = 'Shao-Zheng-Jiang'
maintainer_email = 'shaozhengjiang@gmail.com'
author = maintainer
author_email = maintainer_email
description = "A Genetic Algorithm Framework in Python"
long_description = '''
====
GAFT
====

A **G**\ enetic **A**\ lgorithm **F**\ ramework in py\ **T**\ hon

.. image:: https://travis-ci.org/PytLab/gaft.svg?branch=master
    :target: https://travis-ci.org/PytLab/gaft
    :alt: Build Status

.. image:: https://codecov.io/gh/PytLab/gaft/branch/master/graph/badge.svg
    :target: https://codecov.io/gh/PytLab/gaft

.. image:: https://landscape.io/github/PytLab/gaft/master/landscape.svg?style=flat
    :target: https://landscape.io/github/PytLab/gaft/master
    :alt: Code Health

.. image:: https://img.shields.io/badge/python-3.5-green.svg
    :target: https://www.python.org/downloads/release/python-351/
    :alt: platform

.. image:: https://img.shields.io/badge/pypi-v0.2.0-blue.svg
    :target: https://pypi.python.org/pypi/gaft/
    :alt: versions

Introduction
------------

**gaft** is a Python framework for genetic algorithm computation. It provides built-in genetic operators for genetic algorithm optimization, plugin interfaces for users to define their own genetic operators, and on-the-fly analysis for algorithm testing.

**gaft** is now accelerated using MPI parallelization interfaces. You can run it on your cluster in parallel with an MPI environment.

Installation:
-------------

1. Via pip::

    pip install gaft

2. From source::

    python setup.py install
'''

install_requires = [
    'mpi4py',
]

license = 'LICENSE'
name = 'gaft'
packages = [
    'gaft',
]
platforms = ['linux', 'windows', 'macos']
url = 'https://github.com/PytLab/gaft'
download_url = 'https://github.com/PytLab/gaft/releases'
classifiers = [
    'Development Status :: 3 - Alpha',
    'Topic :: Utilities',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
]

setup(author=author,
      author_email=author_email,
      description=description,
      license=license,
      long_description=long_description,
      install_requires=install_requires,
      maintainer=maintainer,
      name=name,
      packages=find_packages(),
      platforms=platforms,
      url=url,
      download_url=download_url,
      version=version)

authors: ["shaozhengjiang@gmail.com"] | author_id: shaozhengjiang@gmail.com

blob_id: faa0579dc83a0beee418e9f855984e65f2a2f514 | directory_id: a03303e46f21697c9da87d0bb0f7b0a3077aba5c | path: /siswa_psb_ocb11/wizards/wizard_report_calon_siswa.py | content_id: c8856a1a350f8fcd17acd55177141e2075782768
detected_licenses: [] | license_type: no_license | repo_name: butirpadi/flectra_app_sek
snapshot_id: fccd3e47ef261e116478e6da7f0cc544ee67f127 | revision_id: 00fa36d9176511f8ffe3c7636a8434ee2ed8c756 | branch_name: refs/heads/master
visit_date: 2020-04-06T10:26:37.053024 | revision_date: 2018-11-19T23:59:34 | committer_date: 2018-11-20T00:17:02
github_id: 157,380,460 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,988 | extension: py
content:
from flectra import models, fields, api
from pprint import pprint


class wizard_report_calon_siswa(models.TransientModel):
    _name = 'siswa_psb_ocb11.wizard_report_calon_siswa'

    name = fields.Char('Name', default='0')
    tahunajaran_id = fields.Many2one('siswa_ocb11.tahunajaran', string="Tahun Ajaran", default=lambda self: self.env['siswa_ocb11.tahunajaran'].search([('active', '=', True)]), required=True)
    jenjang_id = fields.Many2one('siswa_ocb11.jenjang', string='Jenjang')
    calon_siswa_ids = fields.Many2many('siswa_psb_ocb11.calon_siswa', relation='siswa_psb_ocb11_wizard_report_calon_siswa_rel', column1='wizard_id', column2='calon_siswa_id', string="Calon Siswa")

    @api.multi
    def action_save(self):
        self.ensure_one()

        # update name
        self.write({
            'name': 'Data Calon Siswa'
        })

        # add calon siswa (registration candidates)
        calon_siswas = self.env['siswa_psb_ocb11.calon_siswa'].search(['&', '&',
            ('tahunajaran_id', '=', self.tahunajaran_id.id),
            ('jenjang_id', '=', self.jenjang_id.id),
            ('state', '=', 'reg'),
        ])
        reg_cs = []
        for cs in calon_siswas:
            self.write({
                'calon_siswa_ids': [(4, cs.id)]
            })

        # show wizard form view
        # return {
        #     'view_type': 'form',
        #     'view_mode': 'form',
        #     'res_model': 'siswa_psb_ocb11.wizard_report_calon_siswa',
        #     'target': 'current',
        #     'res_id': self.id,
        #     'type': 'ir.actions.act_window'
        # }

        # show html report
        return self.env.ref('siswa_psb_ocb11.report_calon_siswa_action').report_action(self)

    def action_print(self):
        return self.env.ref('siswa_psb_ocb11.report_calon_siswa_action').report_action(self)

authors: ["butirpadi@gmail.com"] | author_id: butirpadi@gmail.com

blob_id: 481acf4aa61cf301b609dfb3c9972a0ff29a8b6c | directory_id: 48388cdcdab994395f6df4fc577701e2ebd38ced | path: /apps/job/migrations/0005_rename_location_job_city.py | content_id: 73bcbf26f35b37ae104a577f420316a7ed263194
detected_licenses: [] | license_type: no_license | repo_name: darenceT/Artificial-Squad
snapshot_id: 7c21e66dc22bb9e751ef94a407c34eb9d17e5773 | revision_id: e52836a29bd6324ab2a7c84b6f3346a774c3aea9 | branch_name: refs/heads/main
visit_date: 2023-09-02T22:18:38.858921 | revision_date: 2021-11-04T21:02:08 | committer_date: 2021-11-04T21:02:08
github_id: 415,767,165 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-10-11T03:32:03 | gha_created_at: 2021-10-11T03:32:02 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 359 | extension: py
content:
# Generated by Django 3.2.6 on 2021-09-04 01:20

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('job', '0004_alter_job_description'),
    ]

    operations = [
        migrations.RenameField(
            model_name='job',
            old_name='location',
            new_name='city',
        ),
    ]

authors: ["mickykumar510@gmail.com"] | author_id: mickykumar510@gmail.com

blob_id: 9f72f4971591b30c70c14b81fc1f4b5f3192fb1a | directory_id: a2065fec7cfae2d96c173fcd0afbd822c3d74580 | path: /Variable/uocSoChungNguyenTo.py | content_id: f2a26a7a0ae4c1560275ac3d7b5f092f024211db
detected_licenses: [] | license_type: no_license | repo_name: DinhVanGioi2001/LearnPython
snapshot_id: 6505e1bc5f13ba9cae68e04842e4e3837787e8c3 | revision_id: d7f8d66b1c3f362ca8b3a07a3d0817510d7d760d | branch_name: refs/heads/main
visit_date: 2023-07-28T01:09:50.971445 | revision_date: 2021-09-19T13:51:33 | committer_date: 2021-09-19T13:51:33
github_id: 399,729,173 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 462 | extension: py
content:
import math


def isPrime(n):
    if n < 2:
        return 0
    for i in range(2, n):
        if (n % i == 0):
            return 0
    return 1
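
# Note (added): trial division only needs to check divisors up to int(math.sqrt(n));
# the range(2, n) loop above is correct but does more work than necessary.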

test = int(input())
while (test):
    test -= 1
    line = input()
    list = line.split(" ")
    number1 = int(list[0])
    num2 = int(list[1])
    tmp = math.gcd(number1, num2)
    x = str(tmp)
    sum = 0
    for i in x:
        sum += int(i)
    if (isPrime(sum)):
        print("YES")
    else:
        print("NO")

authors: ["89461091+DinhVanGioi2001@users.noreply.github.com"] | author_id: 89461091+DinhVanGioi2001@users.noreply.github.com

blob_id: 6fd6461dfa3ac4262311c02d6ca9391c80c84a87 | directory_id: ade39c9be46ccff0cefb1c80a93df086d4c78cad | path: /SnakeGame/snake.py | content_id: ba68ea1f21a3aeb72558d43e61241f693703f04a
detected_licenses: [] | license_type: no_license | repo_name: rohira-dhruv/Python
snapshot_id: 257b1c471a7717f31100f53463c0f19692e10d54 | revision_id: 379a010b4057f4541d605389975647752cc473a0 | branch_name: refs/heads/main
visit_date: 2023-07-09T11:26:03.513718 | revision_date: 2021-08-13T14:24:19 | committer_date: 2021-08-13T14:24:19
github_id: 391,022,466 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,259 | extension: py
content:
from turtle import Turtle

MOVE_DISTANCE = 20
STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
UP = 90
LEFT = 180
DOWN = 270
RIGHT = 0


class Snake:

    def __init__(self):
        self.segments = []
        self.create_snake()
        self.head = self.segments[0]

    def create_snake(self):
        for position in STARTING_POSITIONS:
            self.add_segment(position)

    def add_segment(self, position):
        new_segment = Turtle("square")
        new_segment.penup()
        new_segment.color("white")
        new_segment.goto(position)
        self.segments.append(new_segment)

    def extend(self):
        self.add_segment(self.segments[-1].pos())

    def move(self):
        # walk from tail to head so each segment steps into the spot
        # its predecessor just vacated, then advance the head
        for seg_num in range(len(self.segments) - 1, 0, -1):
            self.segments[seg_num].goto(self.segments[seg_num - 1].pos())
        self.head.forward(MOVE_DISTANCE)

    def up(self):
        if self.head.heading() != DOWN:
            self.head.setheading(UP)

    def down(self):
        if self.head.heading() != UP:
            self.head.setheading(DOWN)

    def right(self):
        if self.head.heading() != LEFT:
            self.head.setheading(RIGHT)

    def left(self):
        if self.head.heading() != RIGHT:
            self.head.setheading(LEFT)

authors: ["f20201725@goa.bits-pilani.ac.in"] | author_id: f20201725@goa.bits-pilani.ac.in

blob_id: 47fb9e01b9adc880a3c4e3ef01825885ff111f97 | directory_id: 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | path: /Exps_7_v3/doc3d/W_w_Mgt_to_Cx_Cy_focus_Z_ok/Sob_k15_s001_EroM_Mae_s001/pyr_Tcrop255_pad20_jit15/pyr_1s/L4/step10_a.py | content_id: feca52a6a51d9140c70a6ffe6b24aaf03eb0bdcf
detected_licenses: [] | license_type: no_license | repo_name: KongBOy/kong_model2
snapshot_id: 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | revision_id: 1af20b168ffccf0d5293a393a40a9fa9519410b2 | branch_name: refs/heads/master
visit_date: 2022-10-14T03:09:22.543998 | revision_date: 2022-10-06T11:33:42 | committer_date: 2022-10-06T11:33:42
github_id: 242,080,692 | star_events_count: 3 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,849 | extension: py
content:
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                           ### path of the step10_b.py currently being executed
code_exe_path_element = code_exe_path.split("\\")                    ### split the path; we need to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")              ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("   code_exe_path:", code_exe_path)
# print("   code_exe_path_element:", code_exe_path_element)
# print("   code_dir:", code_dir)
# print("   kong_layer:", kong_layer)
# print("   kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer       ### the -1 in the middle converts a length into an index
# print("   kong_to_py_layer:", kong_to_py_layer)
if   (kong_to_py_layer == 0): template_dir = ""
elif (kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] used to strip the step1x_ prefix; meaningful names no longer need stripping, so it became 0
elif (kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] used to strip the mask_ prefix, which was only added because a Python module name cannot start with a digit; the automatic ordering turned out to be acceptable, so it became 0
elif (kong_to_py_layer > 3):  template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("   template_dir:", template_dir)  ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_1side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the name of the parent folder that determines result_dir; a nested exp_dir is fine too.
For example, with exp_dir = "6_mask_unet/your_own_name", every result_dir lives under:
    6_mask_unet/your_own_name/result_a
    6_mask_unet/your_own_name/result_b
    6_mask_unet/your_own_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_W_and_I_gt_F
use_loss_obj = [mae_s001_sobel_k15_s001_EroseM_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k15_s001_EroseM_loss_info_builder.set_loss_target("UNet_Cy").copy()]  ### the z, y, x order here follows step07_b_0b_Multi_UNet
#############################################################
### build an empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="an empty Exp_builder for result_analyze to draw blank figures")
#############################################################
ch032_1side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if (__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### run this file directly (press F5, or type `python step10_b1_exp_obj_load_and_train_and_test.py` with nothing after it) so that execution never reaches the code below, which is meant for step10_b_subprocess.py
        ch032_1side_1.build().run()
        # print('no argument')
        sys.exit()

    ### the code below is for step10_b_subprocess.py; it is equivalent to typing `python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()` at the cmd prompt
    eval(sys.argv[1])

authors: ["s89334roy@yahoo.com.tw"] | author_id: s89334roy@yahoo.com.tw

blob_id: bd978ce5d339a8aeda5f79bf6e300c587ba6607e | directory_id: 12f81ac90a9df29ba442ca0be64a157f3678294d | path: /demo/main/models.py | content_id: d37ae80c0a5b0548e1e993443b57f2a4cb3316c2
detected_licenses: ["MIT"] | license_type: permissive | repo_name: 9dev/django-flags
snapshot_id: add11599158dbc1602eb7b79a74ef76ba8312957 | revision_id: b4553ebd3f7750594da5abac4d73b6bc15c6bc30 | branch_name: refs/heads/master
visit_date: 2021-01-13T00:41:10.900887 | revision_date: 2016-05-12T17:55:23 | committer_date: 2016-05-12T17:55:23
github_id: 54,309,064 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 369 | extension: py
content:
from django.db import models
from django.core.urlresolvers import reverse


class Article(models.Model):
    title = models.CharField(max_length=100)
    content = models.CharField(max_length=1000)

    def get_absolute_url(self):
        return reverse('article_detail', kwargs={'pk': self.pk})

    def __str__(self):
        return '<Article id={}>'.format(self.pk)

authors: ["9devmail@gmail.com"] | author_id: 9devmail@gmail.com

blob_id: 5b1104a16aa5fe5d2d5141faa1e029640e496f02 | directory_id: 9ba4eeb8c44e9ece5c85499853df115293077840 | path: /learning_utils.py | content_id: 72bb1fb636b2e0248447a0585f7b41bc5a07d8ce
detected_licenses: [] | license_type: no_license | repo_name: kiminh/Weighted-Empirical-Risk-Minimization
snapshot_id: f8251233d77ae3396e873ce3e2c4e3ba947925a7 | revision_id: a85c0b57b3c61428fb34c6a0e55f8e2f4745eefd | branch_name: refs/heads/master
visit_date: 2022-04-03T11:41:58.879496 | revision_date: 2020-02-21T17:40:34 | committer_date: 2020-02-21T17:40:34
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,581 | extension: py
content:
"""
Contains a function that executes the steps of learning a model.
"""
import os
import logging
import json
import data_utils as du
import model_utils as mu
import plot_utils as pu
import matplotlib.pyplot as plt
DEFAULT_RATIO_FIGURE_MNIST = (3, 3) # (6, 3)
DEFAULT_RATIO_FIGURE_IMAGENET = (3, 3)
def run_exp(base_params,
out_folder="19_01_11_exps", db_name="MNIST", figsize=(3, 3)):
"""
Runs an experiment in the out_folder chosen.
Does 4 steps:
1. Loads the data,
2. Learns the model,
3. Saves the weights and graphs,
4. Plots the results.
"""
params = base_params.copy()
if db_name == "MNIST":
train_data, test_data = du.load_preprocess_MNIST(params)
elif db_name == "ImageNet":
train_data, test_data = du.load_preprocess_ImageNet(params)
run_exp_loaded_data(train_data, test_data, params,
out_folder=out_folder, db_name=out_folder,
figsize=out_folder)
def run_exp_loaded_data(train_data, test_data, base_params,
out_folder="19_01_11_exps", db_name="MNIST",
figsize=(3, 3)):
"""
Runs an experiment in the out_folder chosen.
Does 3 steps:
1. Learns the model,
2. Saves the weights and graphs,
3. Plots the results.
"""
assert base_params["type_weight"] in ["uniform", "prediction", "stratum"]
assert base_params["model_type"] in ["LINEAR", "MLP"]
if not os.path.exists(out_folder):
os.makedirs(out_folder)
logging.basicConfig(filename='{}/learning_process.log'.format(out_folder),
format='%(asctime)s - %(message)s', # - %(levelname)s
level=logging.INFO, datefmt='%m/%d/%y %I:%M:%S %p',
filemode="w")
logging.info("Starting exp...")
params = base_params.copy()
if db_name == "MNIST":
default_ratio_figure = DEFAULT_RATIO_FIGURE_MNIST
elif db_name == "ImageNet":
default_ratio_figure = DEFAULT_RATIO_FIGURE_IMAGENET
message = "p_z_train : " + " | ".join(["%d : %5.4f" % (i, w)
for i, w in
enumerate(params["p_z_train"])])
logging.info(message)
message = "p_z_test : " + " | ".join(["%d : %5.4f" % (i, w)
for i, w in
enumerate(params["p_z_test"])])
message = "Params: " + " | ".join(["%s : %s" % (k, v)
for k, v in params.items()])
logging.info(message)
X_train, Z_train, Y_train = train_data
X_test, Z_test, Y_test = test_data
params["X_test"] = X_test
params["Z_test"] = Z_test
params["Y_test"] = Y_test
plt.figure(figsize=default_ratio_figure)
pu.plot_class_probas(params, with_ticklabels=(db_name == "MNIST"))
plt.savefig("{}/{}.pdf".format(out_folder, "class_probas"), format="pdf")
plt.figure(figsize=default_ratio_figure)
pu.plot_strata_probas(params, with_ticklabels=(db_name == "MNIST"))
plt.savefig("{}/{}.pdf".format(out_folder, "strata_ratios"), format="pdf")
logging.info("Start learning.")
if params["model_type"] == "LINEAR":
model = mu.LinearModel()
elif params["model_type"] == "MLP":
model = mu.MLP()
model.fit(X_train, Z_train, Y_train, params)
logging.info("Saving the elements.")
# Transform the asymmetry weights to serializable objects.
params["p_z_train"] = list(params["p_z_train"])
params["p_z_test"] = list(params["p_z_test"])
# We get rid of the testing numpy arrays.
keys_to_delete = list(filter(lambda x: x in [x + "_test"
for x in ["X", "Z", "Y"]],
params.keys()))
# keys_to_delete = keys_to_delete + ["cost_train", "acc_train",
# "cost_test", "acc_test", "final_weights", "final_bias"]
params["p_y_train"] = list(params["p_y_train"])
params["p_y_test"] = list(params["p_y_test"])
for x in keys_to_delete:
params.pop(x)
# print(params)
json.dump(params, open("{}/params.json".format(out_folder), "wt"))
logging.info("Plotting the results.")
plt.figure(figsize=figsize)
pu.plot_cost_acc(params, lim_acc=params["dynamics_lim_acc"],
lim_cost=params["dynamics_lim_cost"])
plt.savefig("{}/{}.pdf".format(out_folder, "dynamics"), format="pdf")

authors: ["robvogel0@gmail.com"] | author_id: robvogel0@gmail.com

blob_id: 37a924260b7e9bd5d8cfdd5ad35937b611f8e43b | directory_id: 9b8641e0b5d03cca9561f65bb621279188600c45 | path: /mysite/settings.py | content_id: 39582219cdd9a69c9617b6919dbc5fbb94b9d1ae
detected_licenses: [] | license_type: no_license | repo_name: memod/django-tutorial
snapshot_id: 55a24bad6eb720a2597def0195df70825d017e3f | revision_id: 7cbfe811f781313b06bc700febacb27918436676 | branch_name: refs/heads/master
visit_date: 2020-12-14T16:29:08.850242 | revision_date: 2020-01-18T22:35:29 | committer_date: 2020-01-18T22:35:29
github_id: 234,806,969 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,229 | extension: py
content:
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5^(521-_l!@sk-*+6n7o%@%0t3)#86rcr$_x=53k(h64-v9==#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# ALLOWED_HOSTS = ['127.0.0.1', '0.0.0.0', '.pythonanywhere.com']
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

authors: ["carlos.durazo@thermofisher.com"] | author_id: carlos.durazo@thermofisher.com

blob_id: 777cb7f74ee3c7be90c8760b311faec02591177e | directory_id: 7a87aaf1cfff5dfc636c8c164d190c9a18a7f449 | path: /Litespam/call.py | content_id: 4bd4c5d74dacc2e7be6e2e9dc8a750642b399177
detected_licenses: [] | license_type: no_license | repo_name: bangnaga1/Litespam
snapshot_id: 7c971bd06277f423f563729f082c84e940f3e755 | revision_id: e622957fa2d7b3eafea61dd07ec655c43fa5d315 | branch_name: refs/heads/master
visit_date: 2020-04-03T03:20:48.665099 | revision_date: 2018-10-27T17:19:47 | committer_date: 2018-10-27T17:19:47
github_id: 154,983,158 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,451 | extension: py
content:
print(""" =======================================
___ _ _
| _ \| |__ _ __ ___ __ _| | _____ _ __
| |_) | '_ \| '__/ _ \/ _` | |/ / _ \ '__|
| __/| | | | | | __/ (_| | < __/ |
|_| |_| |_|_| \___|\__,_|_|\_\___|_|
=======================================
""")
import thread
import requests
import sys
try:
file = sys.argv[1]
except:
print("usage: {} <numbers_list>".format(sys.argv[0]))
sys.exit()
numbers = open(sys.argv[1], "r").readlines()
count = 0
processc = 0
running_threads = 0
print_used = False
max_threads = 100
print("[-----info-----]: read {} numbers from {}".format(len(numbers), file))
def process(number):
global running_threads
global processc
global print_used
running_threads += 1
number = number.rstrip()
url = "https://www.tokocash.com/oauth/otp"
data = {"msisdn": number.rstrip(), "accept": "call"}
headers = {"X-Requested-With": "XMLHttpRequest"}
r = requests.post(url, data=data, headers=headers)
while print_used:
pass
print_used = True
print("\r[0x" + str(thread.get_ident()) + "]: " + number + " (status: " + r.json()['code'] + ")")
print_used = False
processc += 1
running_threads -= 1
return 1
for number in numbers:
while running_threads >= max_threads:
pass
if number == "" or number[0] == ";": continue
count += 1
thread.start_new_thread(process, ( number, ))
while processc != count:
pass
print("[-----done-----]: Success Send Semua Ke No List")
sys.exit()
|
[
"bangnaga183@gmail.com"
] |
bangnaga183@gmail.com
|
054f9416c1af9b8332a84b3b30575750e8ead587
|
568a7c7553d064b0968fdc449785ff781449f628
|
/ERIS.py
|
6108ed46ef7adbff09c59b9177a6cf5273befcbd
|
[] |
no_license
|
Sanjeev2021/ERIS
|
235a642f58d1e0a59622d963fd0c20663fad8aa6
|
ace56010b2d40777dcf0db4609934375f22d6f4e
|
refs/heads/main
| 2023-02-25T20:48:17.311576
| 2021-01-29T07:30:03
| 2021-01-29T07:30:03
| 334,069,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,647
|
py
|
import pyttsx3
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import pyaudio
import os
import smtplib
import subprocess
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
print(voices[1].id)
engine.setProperty('voice', voices[1].id)
def speak(audio):
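    # speak the given text aloud through the TTS engine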
engine.say(audio)
engine.runAndWait()
def wishMe():
    hour = int(datetime.datetime.now().hour)
    if 0 <= hour < 12:
        speak("Good morning sir")
    elif 12 <= hour < 18:
        speak("Good afternoon sir")
    else:
        speak("Good evening sir")
    speak("I am ERIS. How may I help you?")
def takeCommand():
    # takes microphone input from the user and returns it as a string
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening ...")
r.pause_threshold = 1
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
try:
print("Recognizing....")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
print(e)
print("sir please repeat that again....")
speak("sir please repeat that again")
return "None"
return query
def sendEmail(to, content):
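    # NOTE: the credentials below are hardcoded; in real use they should come from
    # environment variables or a config file rather than living in source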
server = smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.login('erisbot21@gmail.com', '9967492698')
server.sendmail('erisbot21@gmail.com',to,content)
server.close()
if __name__ == "__main__":
wishMe()
    # while True:
    if True:
        query = takeCommand().lower()
        # logic for executing tasks based on the query
        if 'wikipedia' in query:
            speak("Getting info from Wikipedia...")
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=10)
            speak("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in query:
webbrowser.open_new_tab("youtube.com")
speak("Youtube open now")
elif 'open google' in query:
webbrowser.open_new_tab("google.com")
speak("google opened ")
elif 'open stackoverflow' in query:
webbrowser.open_new_tab("stackoverflow.com")
speak("Stackoverflow opened")
elif 'open gmail' in query:
webbrowser.open_new_tab("gmail.com")
speak("Google Mail open now")
elif 'open whatsapp' in query:
webbrowser.open_new_tab("web.whatsapp.com")
speak("Whatsapp opened")
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir , the time is {strTime}")
elif 'send an email' in query:
try:
speak("what should i say?")
content = takeCommand()
to = "erisbot21@gmail.com"
sendEmail(to, content)
speak("Email has been sent")
except Exception as e :
print(e)
speak("sorry sir cannot send email")
elif 'play music' in query:
            music_dir = r'F:\music'  # raw string so the backslash is not treated as an escape
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir , songs[0]))
elif "shutdown" in query:
speak("Ok sir, your pc will log off in 10 seconds make sure you exit from all applications")
subprocess.call(["shutdown", "/l"])
|
[
"noreply@github.com"
] |
Sanjeev2021.noreply@github.com
|
eb29c7b75c47fa7aab8a8a70bff3e004b2a18359
|
d22db204c665d16847447551cedc07756d357eb2
|
/hydrus/core/networking/HydrusNetwork.py
|
c80fadbd36f4e79821703da79f3716e972dbb491
|
[
"WTFPL"
] |
permissive
|
Suika/hydrus
|
9d5070d47c328b7054a9699de310ce580e563528
|
4b2b15e152e4bed900aa972c7d4b27f7bf242f29
|
refs/heads/master
| 2023-05-28T00:32:50.364999
| 2023-05-10T20:22:34
| 2023-05-10T20:22:34
| 237,063,790
| 1
| 2
|
NOASSERTION
| 2022-10-29T22:36:54
| 2020-01-29T19:23:21
|
Python
|
UTF-8
|
Python
| false
| false
| 91,405
|
py
|
import collections
import threading
import time
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusLists
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core import HydrusTime
from hydrus.core.networking import HydrusNetworking
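# the timing constants below are expressed in seconds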
UPDATE_CHECKING_PERIOD = 240
MIN_UPDATE_PERIOD = 600
MAX_UPDATE_PERIOD = 100000 * 100 # three months or so jej
MIN_NULLIFICATION_PERIOD = 86400
MAX_NULLIFICATION_PERIOD = 86400 * 365 * 5
def GenerateDefaultServiceDictionary( service_type ):
# don't store bytes key/value data here until ~version 537
# the server kicks out a patched version 1 of serialisabledict, so it can't handle byte gubbins, lad
dictionary = HydrusSerialisable.SerialisableDictionary()
dictionary[ 'upnp_port' ] = None
dictionary[ 'bandwidth_tracker' ] = HydrusNetworking.BandwidthTracker()
if service_type in HC.RESTRICTED_SERVICES:
dictionary[ 'bandwidth_rules' ] = HydrusNetworking.BandwidthRules()
dictionary[ 'service_options' ] = HydrusSerialisable.SerialisableDictionary()
dictionary[ 'service_options' ][ 'server_message' ] = 'Welcome to the server!'
if service_type in HC.REPOSITORIES:
update_period = 100000
dictionary[ 'service_options' ][ 'update_period' ] = update_period
dictionary[ 'service_options' ][ 'nullification_period' ] = 90 * 86400
dictionary[ 'next_nullification_update_index' ] = 0
metadata = Metadata()
now = HydrusTime.GetNow()
update_hashes = []
begin = 0
end = now
next_update_due = now + update_period
metadata.AppendUpdate( update_hashes, begin, end, next_update_due )
dictionary[ 'metadata' ] = metadata
if service_type == HC.FILE_REPOSITORY:
dictionary[ 'log_uploader_ips' ] = False
dictionary[ 'max_storage' ] = None
if service_type == HC.TAG_REPOSITORY:
dictionary[ 'service_options' ][ 'tag_filter' ] = HydrusTags.TagFilter()
if service_type == HC.SERVER_ADMIN:
dictionary[ 'server_bandwidth_tracker' ] = HydrusNetworking.BandwidthTracker()
dictionary[ 'server_bandwidth_rules' ] = HydrusNetworking.BandwidthRules()
return dictionary
def GenerateService( service_key, service_type, name, port, dictionary = None ):
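    # factory: picks the ServerService subclass that matches the given service type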
if dictionary is None:
dictionary = GenerateDefaultServiceDictionary( service_type )
if service_type == HC.SERVER_ADMIN:
cl = ServerServiceAdmin
elif service_type == HC.TAG_REPOSITORY:
cl = ServerServiceRepositoryTag
    elif service_type == HC.FILE_REPOSITORY:
        cl = ServerServiceRepositoryFile
    else:
        raise NotImplementedError( 'Do not know how to generate a service of this type: {}'.format( service_type ) )
    return cl( service_key, service_type, name, port, dictionary )
def GenerateServiceFromSerialisableTuple( serialisable_info ):
( service_key_encoded, service_type, name, port, dictionary_string ) = serialisable_info
try:
service_key = bytes.fromhex( service_key_encoded )
    except ( TypeError, ValueError ):
raise HydrusExceptions.BadRequestException( 'Could not decode that service key!' )
dictionary = HydrusSerialisable.CreateFromString( dictionary_string )
return GenerateService( service_key, service_type, name, port, dictionary )
def GetPossiblePermissions( service_type ):
permissions = []
permissions.append( ( HC.CONTENT_TYPE_ACCOUNTS, [ None, HC.PERMISSION_ACTION_CREATE, HC.PERMISSION_ACTION_MODERATE ] ) )
permissions.append( ( HC.CONTENT_TYPE_ACCOUNT_TYPES, [ None, HC.PERMISSION_ACTION_MODERATE ] ) )
permissions.append( ( HC.CONTENT_TYPE_OPTIONS, [ None, HC.PERMISSION_ACTION_MODERATE ] ) )
if service_type == HC.FILE_REPOSITORY:
permissions.append( ( HC.CONTENT_TYPE_FILES, [ None, HC.PERMISSION_ACTION_PETITION, HC.PERMISSION_ACTION_CREATE, HC.PERMISSION_ACTION_MODERATE ] ) )
elif service_type == HC.TAG_REPOSITORY:
permissions.append( ( HC.CONTENT_TYPE_MAPPINGS, [ None, HC.PERMISSION_ACTION_PETITION, HC.PERMISSION_ACTION_CREATE, HC.PERMISSION_ACTION_MODERATE ] ) )
permissions.append( ( HC.CONTENT_TYPE_TAG_PARENTS, [ None, HC.PERMISSION_ACTION_PETITION, HC.PERMISSION_ACTION_MODERATE ] ) )
permissions.append( ( HC.CONTENT_TYPE_TAG_SIBLINGS, [ None, HC.PERMISSION_ACTION_PETITION, HC.PERMISSION_ACTION_MODERATE ] ) )
elif service_type == HC.SERVER_ADMIN:
permissions.append( ( HC.CONTENT_TYPE_SERVICES, [ None, HC.PERMISSION_ACTION_MODERATE ] ) )
return permissions
class Account( object ):
def __init__( self, account_key: bytes, account_type: "AccountType", created: int, expires: typing.Optional[ int ] ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._lock = threading.Lock()
self._account_key = account_key
self._account_type = account_type
self._created = created
self._expires = expires
self._message = ''
self._message_created = 0
self._banned_info = None
self._bandwidth_tracker = HydrusNetworking.BandwidthTracker()
self._dirty = False
def __repr__( self ):
return 'Account: ' + self._account_type.GetTitle()
def __str__( self ):
return self.__repr__()
def _CheckBanned( self ):
if self._IsBanned():
raise HydrusExceptions.InsufficientCredentialsException( 'This account is banned: ' + self._GetBannedString() )
def _CheckExpired( self ):
if self._IsExpired():
raise HydrusExceptions.InsufficientCredentialsException( 'This account is expired: ' + self._GetExpiresString() )
def _CheckFunctional( self ):
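        # raise an appropriate exception if this account cannot currently make requests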
if self._created == 0:
raise HydrusExceptions.ConflictException( 'account is unsynced' )
if self._IsAdmin():
# admins can do anything
return
self._CheckBanned()
self._CheckExpired()
if not self._account_type.BandwidthOK( self._bandwidth_tracker ):
raise HydrusExceptions.BandwidthException( 'account has exceeded bandwidth' )
def _GetBannedString( self ):
if self._banned_info is None:
return 'not banned'
else:
( reason, created, expires ) = self._banned_info
return 'banned ' + HydrusTime.TimestampToPrettyTimeDelta( created ) + ', ' + HydrusTime.TimestampToPrettyExpires( expires ) + ' because: ' + reason
def _GetExpiresString( self ):
return HydrusTime.TimestampToPrettyExpires( self._expires )
def _IsAdmin( self ):
return self._account_type.HasPermission( HC.CONTENT_TYPE_SERVICES, HC.PERMISSION_ACTION_MODERATE )
def _IsBanned( self ):
if self._banned_info is None:
return False
else:
( reason, created, expires ) = self._banned_info
if expires is None:
return True
else:
if HydrusTime.TimeHasPassed( expires ):
self._banned_info = None
return False
else:
return True
def _IsExpired( self ):
if self._expires is None:
return False
else:
return HydrusTime.TimeHasPassed( self._expires )
def _SetDirty( self ):
self._dirty = True
def Ban( self, reason, created, expires ):
with self._lock:
self._banned_info = ( reason, created, expires )
self._SetDirty()
def CheckAtLeastOnePermission( self, content_types_and_actions ):
with self._lock:
            if not any( self._account_type.HasPermission( content_type, action ) for ( content_type, action ) in content_types_and_actions ):
raise HydrusExceptions.InsufficientCredentialsException( 'You do not have permission to do that.' )
def CheckFunctional( self ):
with self._lock:
self._CheckFunctional()
def CheckPermission( self, content_type, action ):
with self._lock:
if self._IsAdmin():
return
self._CheckBanned()
self._CheckExpired()
if not self._account_type.HasPermission( content_type, action ):
raise HydrusExceptions.InsufficientCredentialsException( 'You do not have permission to do that.' )
def GetAccountKey( self ):
with self._lock:
return self._account_key
def GetAccountType( self ):
with self._lock:
return self._account_type
def GetBandwidthCurrentMonthSummary( self ):
with self._lock:
return self._bandwidth_tracker.GetCurrentMonthSummary()
def GetBandwidthStringsAndGaugeTuples( self ):
with self._lock:
return self._account_type.GetBandwidthStringsAndGaugeTuples( self._bandwidth_tracker )
def GetBandwidthTracker( self ):
with self._lock:
return self._bandwidth_tracker
def GetBannedInfo( self ):
with self._lock:
return self._banned_info
def GetCreated( self ):
with self._lock:
return self._created
def GetExpires( self ):
with self._lock:
return self._expires
def GetExpiresString( self ):
with self._lock:
if self._IsBanned():
return self._GetBannedString()
else:
return self._GetExpiresString()
def GetMessageAndTimestamp( self ):
with self._lock:
return ( self._message, self._message_created )
def GetSingleLineTitle( self ):
with self._lock:
text = self._account_key.hex()
text = '{}: {}'.format( self._account_type.GetTitle(), text )
if self._IsExpired():
text = 'Expired: {}'.format( text )
if self._IsBanned():
text = 'Banned: {}'.format( text )
if self._account_type.IsNullAccount():
text = 'THIS IS NULL ACCOUNT: {}'.format( text )
return text
def GetStatusInfo( self ) -> typing.Tuple[ bool, str ]:
with self._lock:
try:
self._CheckFunctional()
return ( True, 'account is functional' )
except Exception as e:
return ( False, str( e ) )
def HasPermission( self, content_type, action ):
with self._lock:
if self._IsAdmin():
return True
if self._IsBanned() or self._IsExpired():
return False
return self._account_type.HasPermission( content_type, action )
def IsBanned( self ):
with self._lock:
return self._IsBanned()
def IsDirty( self ):
with self._lock:
return self._dirty
def IsExpired( self ):
with self._lock:
return self._IsExpired()
def IsFunctional( self ):
with self._lock:
try:
self._CheckFunctional()
return True
except:
return False
def IsNullAccount( self ):
with self._lock:
return self._account_type.IsNullAccount()
def IsUnknown( self ):
with self._lock:
return self._created == 0
def ReportDataUsed( self, num_bytes ):
with self._lock:
self._bandwidth_tracker.ReportDataUsed( num_bytes )
self._SetDirty()
def ReportRequestUsed( self ):
with self._lock:
self._bandwidth_tracker.ReportRequestUsed()
self._SetDirty()
def SetBandwidthTracker( self, bandwidth_tracker: HydrusNetworking.BandwidthTracker ):
with self._lock:
self._bandwidth_tracker = bandwidth_tracker
self._SetDirty()
def SetClean( self ):
with self._lock:
self._dirty = False
def SetExpires( self, expires: typing.Optional[ int ] ):
with self._lock:
self._expires = expires
self._SetDirty()
def SetMessage( self, message, created ):
with self._lock:
self._message = message
self._message_created = created
self._SetDirty()
def ToString( self ):
if self.IsNullAccount():
return 'This is the NULL ACCOUNT. It takes possession of old content to anonymise it. It cannot be modified.'
with self._lock:
return self._account_type.GetTitle() + ' -- created ' + HydrusTime.TimestampToPrettyTimeDelta( self._created )
def Unban( self ):
with self._lock:
self._banned_info = None
self._SetDirty()
@staticmethod
def GenerateAccountFromSerialisableTuple( serialisable_info ):
( account_key_encoded, serialisable_account_type, created, expires, dictionary_string ) = serialisable_info
account_key = bytes.fromhex( account_key_encoded )
if isinstance( serialisable_account_type, list ) and isinstance( serialisable_account_type[0], str ):
            # this is a legacy account type serialisation
( encoded_account_type_key, title, account_type_dictionary_string ) = serialisable_account_type
account_type_key = bytes.fromhex( encoded_account_type_key )
from hydrus.core.networking import HydrusNetworkLegacy
account_type = HydrusNetworkLegacy.ConvertToNewAccountType( account_type_key, title, account_type_dictionary_string )
else:
account_type = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_account_type )
dictionary = HydrusSerialisable.CreateFromString( dictionary_string )
return Account.GenerateAccountFromTuple( ( account_key, account_type, created, expires, dictionary ) )
@staticmethod
def GenerateAccountFromTuple( serialisable_info ):
( account_key, account_type, created, expires, dictionary ) = serialisable_info
if 'message' not in dictionary:
dictionary[ 'message' ] = ''
dictionary[ 'message_created' ] = 0
banned_info = dictionary[ 'banned_info' ]
bandwidth_tracker = dictionary[ 'bandwidth_tracker' ]
account = Account( account_key, account_type, created, expires )
account.SetBandwidthTracker( bandwidth_tracker )
if banned_info is not None:
( reason, created, expires ) = banned_info
account.Ban( reason, created, expires )
message = dictionary[ 'message' ]
message_created = dictionary[ 'message_created' ]
account.SetMessage( message, message_created )
account.SetClean()
return account
@staticmethod
def GenerateSerialisableTupleFromAccount( account ):
( account_key, account_type, created, expires, dictionary ) = Account.GenerateTupleFromAccount( account )
account_key_encoded = account_key.hex()
serialisable_account_type = account_type.GetSerialisableTuple()
dictionary_string = dictionary.DumpToString()
return ( account_key_encoded, serialisable_account_type, created, expires, dictionary_string )
@staticmethod
def GenerateTupleFromAccount( account: "Account" ):
account_key = account.GetAccountKey()
account_type = account.GetAccountType()
created = account.GetCreated()
expires = account.GetExpires()
banned_info = account.GetBannedInfo()
bandwidth_tracker = account.GetBandwidthTracker()
( message, message_created ) = account.GetMessageAndTimestamp()
dictionary = HydrusSerialisable.SerialisableDictionary()
dictionary[ 'banned_info' ] = banned_info
dictionary[ 'bandwidth_tracker' ] = bandwidth_tracker
dictionary[ 'message' ] = message
dictionary[ 'message_created' ] = message_created
dictionary = dictionary.Duplicate()
return ( account_key, account_type, created, expires, dictionary )
@staticmethod
def GenerateUnknownAccount( account_key = b'' ):
account_type = AccountType.GenerateUnknownAccountType()
created = 0
expires = None
unknown_account = Account( account_key, account_type, created, expires )
return unknown_account
class AccountIdentifier( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_ACCOUNT_IDENTIFIER
SERIALISABLE_NAME = 'Account Identifier'
SERIALISABLE_VERSION = 1
TYPE_ACCOUNT_KEY = 1
TYPE_CONTENT = 2
def __init__( self, account_key = None, content = None ):
HydrusSerialisable.SerialisableBase.__init__( self )
if account_key is not None:
self._type = self.TYPE_ACCOUNT_KEY
self._data = account_key
elif content is not None:
self._type = self.TYPE_CONTENT
self._data = content
def __eq__( self, other ):
if isinstance( other, AccountIdentifier ):
return self.__hash__() == other.__hash__()
return NotImplemented
def __hash__( self ): return ( self._type, self._data ).__hash__()
def __repr__( self ): return 'Account Identifier: ' + str( ( self._type, self._data ) )
def _GetSerialisableInfo( self ):
if self._type == self.TYPE_ACCOUNT_KEY:
serialisable_data = self._data.hex()
elif self._type == self.TYPE_CONTENT:
serialisable_data = self._data.GetSerialisableTuple()
return ( self._type, serialisable_data )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( self._type, serialisable_data ) = serialisable_info
if self._type == self.TYPE_ACCOUNT_KEY:
self._data = bytes.fromhex( serialisable_data )
elif self._type == self.TYPE_CONTENT:
self._data = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_data )
def GetAccountKey( self ) -> bytes:
if not self.HasAccountKey():
raise Exception( 'This Account Identifier does not have an account id!' )
return self._data
def GetContent( self ) -> "Content":
if not self.HasContent():
raise Exception( 'This Account Identifier does not have content!' )
return self._data
def GetData( self ):
return self._data
def HasAccountKey( self ):
return self._type == self.TYPE_ACCOUNT_KEY
def HasContent( self ):
return self._type == self.TYPE_CONTENT
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_ACCOUNT_IDENTIFIER ] = AccountIdentifier
class AccountType( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_ACCOUNT_TYPE
SERIALISABLE_NAME = 'Account Type'
SERIALISABLE_VERSION = 2
def __init__(
self,
account_type_key = None,
title = None,
permissions = None,
bandwidth_rules = None,
auto_creation_velocity = None,
auto_creation_history = None
):
HydrusSerialisable.SerialisableBase.__init__( self )
if account_type_key is None:
account_type_key = HydrusData.GenerateKey()
if title is None:
title = 'standard user'
if permissions is None:
permissions = {}
if bandwidth_rules is None:
bandwidth_rules = HydrusNetworking.BandwidthRules()
if auto_creation_velocity is None:
auto_creation_velocity = ( 0, 86400 )
if auto_creation_history is None:
auto_creation_history = HydrusNetworking.BandwidthTracker()
self._account_type_key = account_type_key
self._title = title
self._permissions = permissions
self._bandwidth_rules = bandwidth_rules
self._auto_creation_velocity = auto_creation_velocity
self._auto_creation_history = auto_creation_history
def __repr__( self ):
return 'AccountType: ' + self._title
def __str__( self ):
return self.__repr__()
def _GetSerialisableInfo( self ):
serialisable_account_type_key = self._account_type_key.hex()
serialisable_permissions = list( self._permissions.items() )
serialisable_bandwidth_rules = self._bandwidth_rules.GetSerialisableTuple()
serialisable_auto_creation_history = self._auto_creation_history.GetSerialisableTuple()
return ( serialisable_account_type_key, self._title, serialisable_permissions, serialisable_bandwidth_rules, self._auto_creation_velocity, serialisable_auto_creation_history )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( serialisable_account_type_key, self._title, serialisable_permissions, serialisable_bandwidth_rules, self._auto_creation_velocity, serialisable_auto_creation_history ) = serialisable_info
self._account_type_key = bytes.fromhex( serialisable_account_type_key )
self._permissions = dict( serialisable_permissions )
self._bandwidth_rules = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_bandwidth_rules )
self._auto_creation_history = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_auto_creation_history )
def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
if version == 1:
( serialisable_account_type_key, title, serialisable_permissions, serialisable_bandwidth_rules, auto_creation_velocity, serialisable_auto_creation_history ) = old_serialisable_info
permissions = dict( serialisable_permissions )
# admins can do options
if HC.CONTENT_TYPE_ACCOUNT_TYPES in permissions and permissions[ HC.CONTENT_TYPE_ACCOUNT_TYPES ] == HC.PERMISSION_ACTION_MODERATE:
permissions[ HC.CONTENT_TYPE_OPTIONS ] = HC.PERMISSION_ACTION_MODERATE
serialisable_permissions = list( permissions.items() )
new_serialisable_info = ( serialisable_account_type_key, title, serialisable_permissions, serialisable_bandwidth_rules, auto_creation_velocity, serialisable_auto_creation_history )
return ( 2, new_serialisable_info )
def BandwidthOK( self, bandwidth_tracker ):
return self._bandwidth_rules.CanStartRequest( bandwidth_tracker )
def CanAutoCreateAccountNow( self ):
if not self.SupportsAutoCreateAccount():
return False
( num_accounts_per_time_delta, time_delta ) = self._auto_creation_velocity
num_created = self._auto_creation_history.GetUsage( HC.BANDWIDTH_TYPE_DATA, time_delta )
return num_created < num_accounts_per_time_delta
def GetAutoCreateAccountHistory( self ) -> HydrusNetworking.BandwidthTracker:
return self._auto_creation_history
def GetAutoCreateAccountVelocity( self ):
return self._auto_creation_velocity
def GetBandwidthRules( self ):
return self._bandwidth_rules
def GetBandwidthStringsAndGaugeTuples( self, bandwidth_tracker ):
return self._bandwidth_rules.GetBandwidthStringsAndGaugeTuples( bandwidth_tracker )
def GetAccountTypeKey( self ):
return self._account_type_key
def GetPermissions( self ):
return { k : v for ( k, v ) in self._permissions.items() if k != 'null' }
def GetPermissionStrings( self ):
if self.IsNullAccount():
return [ 'is null account, cannot do anything' ]
s = []
for ( content_type, action ) in self.GetPermissions().items():
s.append( HC.permission_pair_string_lookup[ ( content_type, action ) ] )
return s
def GetTitle( self ):
return self._title
def HasPermission( self, content_type, permission ):
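        # permissions are hierarchical: moderate implies create, and create implies petition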
if self.IsNullAccount():
return False
if content_type not in self._permissions:
return False
my_permission = self._permissions[ content_type ]
if permission == HC.PERMISSION_ACTION_MODERATE:
return my_permission == HC.PERMISSION_ACTION_MODERATE
elif permission == HC.PERMISSION_ACTION_CREATE:
return my_permission in ( HC.PERMISSION_ACTION_CREATE, HC.PERMISSION_ACTION_MODERATE )
elif permission == HC.PERMISSION_ACTION_PETITION:
return my_permission in ( HC.PERMISSION_ACTION_PETITION, HC.PERMISSION_ACTION_CREATE, HC.PERMISSION_ACTION_MODERATE )
return False
def IsNullAccount( self ):
# I had to tuck this in permissions dict because this was not during a network version update and I couldn't update the serialised object. bleargh
# ideally though, we move this sometime to a self._is_null_account boolean
return 'null' in self._permissions
def ReportAutoCreateAccount( self ):
self._auto_creation_history.ReportRequestUsed()
def SetToNullAccount( self ):
# I had to tuck this in permissions dict because this was not during a network version update and I couldn't update the serialised object. bleargh
# ideally though, we move this sometime to a self._is_null_account boolean
self._permissions[ 'null' ] = True
def SupportsAutoCreateAccount( self ):
if self.IsNullAccount():
return False
( num_accounts_per_time_delta, time_delta ) = self._auto_creation_velocity
return num_accounts_per_time_delta > 0
@staticmethod
def GenerateAdminAccountType( service_type ):
bandwidth_rules = HydrusNetworking.BandwidthRules()
permissions = {}
permissions[ HC.CONTENT_TYPE_ACCOUNTS ] = HC.PERMISSION_ACTION_MODERATE
permissions[ HC.CONTENT_TYPE_ACCOUNT_TYPES ] = HC.PERMISSION_ACTION_MODERATE
permissions[ HC.CONTENT_TYPE_OPTIONS ] = HC.PERMISSION_ACTION_MODERATE
if service_type in HC.REPOSITORIES:
for content_type in HC.SERVICE_TYPES_TO_CONTENT_TYPES[ service_type ]:
permissions[ content_type ] = HC.PERMISSION_ACTION_MODERATE
elif service_type == HC.SERVER_ADMIN:
permissions[ HC.CONTENT_TYPE_SERVICES ] = HC.PERMISSION_ACTION_MODERATE
else:
raise NotImplementedError( 'Do not have a default admin account type set up for this service yet!' )
account_type = AccountType.GenerateNewAccountType( 'administrator', permissions, bandwidth_rules )
return account_type
@staticmethod
def GenerateNewAccountType( title, permissions, bandwidth_rules ):
account_type_key = HydrusData.GenerateKey()
return AccountType( account_type_key = account_type_key, title = title, permissions = permissions, bandwidth_rules = bandwidth_rules )
@staticmethod
def GenerateNullAccountType():
account_type_key = HydrusData.GenerateKey()
title = 'null account'
permissions = {}
bandwidth_rules = HydrusNetworking.BandwidthRules()
at = AccountType( account_type_key = account_type_key, title = title, permissions = permissions, bandwidth_rules = bandwidth_rules )
at.SetToNullAccount()
return at
@staticmethod
def GenerateUnknownAccountType():
title = 'unknown account'
permissions = {}
bandwidth_rules = HydrusNetworking.BandwidthRules()
bandwidth_rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, None, 0 )
unknown_account_type = AccountType.GenerateNewAccountType( title, permissions, bandwidth_rules )
return unknown_account_type
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_ACCOUNT_TYPE ] = AccountType
class ClientToServerUpdate( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_TO_SERVER_UPDATE
SERIALISABLE_NAME = 'Client To Server Update'
SERIALISABLE_VERSION = 1
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
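        # maps a content update action (e.g. HC.CONTENT_UPDATE_PEND) to a list of ( content, reason ) pairs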
self._actions_to_contents_and_reasons = collections.defaultdict( list )
def _GetSerialisableInfo( self ):
serialisable_info = []
for ( action, contents_and_reasons ) in list(self._actions_to_contents_and_reasons.items()):
serialisable_contents_and_reasons = [ ( content.GetSerialisableTuple(), reason ) for ( content, reason ) in contents_and_reasons ]
serialisable_info.append( ( action, serialisable_contents_and_reasons ) )
return serialisable_info
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
for ( action, serialisable_contents_and_reasons ) in serialisable_info:
contents_and_reasons = [ ( HydrusSerialisable.CreateFromSerialisableTuple( serialisable_content ), reason ) for ( serialisable_content, reason ) in serialisable_contents_and_reasons ]
self._actions_to_contents_and_reasons[ action ] = contents_and_reasons
def AddContent( self, action, content, reason = None ):
if reason is None:
reason = ''
self._actions_to_contents_and_reasons[ action ].append( ( content, reason ) )
def ApplyTagFilterToPendingMappings( self, tag_filter: HydrusTags.TagFilter ):
if HC.CONTENT_UPDATE_PEND in self._actions_to_contents_and_reasons:
contents_and_reasons = self._actions_to_contents_and_reasons[ HC.CONTENT_UPDATE_PEND ]
new_contents_and_reasons = []
for ( content, reason ) in contents_and_reasons:
if content.GetContentType() == HC.CONTENT_TYPE_MAPPINGS:
( tag, hashes ) = content.GetContentData()
if not tag_filter.TagOK( tag ):
continue
new_contents_and_reasons.append( ( content, reason ) )
self._actions_to_contents_and_reasons[ HC.CONTENT_UPDATE_PEND ] = new_contents_and_reasons
def GetClientsideContentUpdates( self ):
content_updates = []
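        # a serverside PEND shows up clientside as an ADD, and a serverside PETITION as a DELETE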
for ( action, clientside_action ) in ( ( HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_ADD ), ( HC.CONTENT_UPDATE_PETITION, HC.CONTENT_UPDATE_DELETE ) ):
for ( content, reason ) in self._actions_to_contents_and_reasons[ action ]:
content_type = content.GetContentType()
content_data = content.GetContentData()
content_update = HydrusData.ContentUpdate( content_type, clientside_action, content_data )
content_updates.append( content_update )
return content_updates
def GetContentDataIterator( self, content_type, action ):
contents_and_reasons = self._actions_to_contents_and_reasons[ action ]
for ( content, reason ) in contents_and_reasons:
if content.GetContentType() == content_type:
yield ( content.GetContentData(), reason )
def GetHashes( self ):
hashes = set()
for contents_and_reasons in self._actions_to_contents_and_reasons.values():
for ( content, reason ) in contents_and_reasons:
hashes.update( content.GetHashes() )
return hashes
def HasContent( self ):
return len( self._actions_to_contents_and_reasons ) > 0
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_TO_SERVER_UPDATE ] = ClientToServerUpdate
class Content( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_CONTENT
SERIALISABLE_NAME = 'Content'
SERIALISABLE_VERSION = 1
def __init__( self, content_type = None, content_data = None ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._content_type = content_type
self._content_data = content_data
def __eq__( self, other ):
if isinstance( other, Content ):
return self.__hash__() == other.__hash__()
return NotImplemented
def __hash__( self ): return ( self._content_type, self._content_data ).__hash__()
def __repr__( self ): return 'Content: ' + self.ToString()
def _GetSerialisableInfo( self ):
def EncodeHashes( hs ):
return [ h.hex() for h in hs ]
if self._content_type == HC.CONTENT_TYPE_FILES:
hashes = self._content_data
serialisable_content = EncodeHashes( hashes )
elif self._content_type == HC.CONTENT_TYPE_MAPPING:
( tag, hash ) = self._content_data
serialisable_content = ( tag, hash.hex() )
elif self._content_type == HC.CONTENT_TYPE_MAPPINGS:
( tag, hashes ) = self._content_data
serialisable_content = ( tag, EncodeHashes( hashes ) )
elif self._content_type in ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_TYPE_TAG_SIBLINGS ):
( old_tag, new_tag ) = self._content_data
serialisable_content = ( old_tag, new_tag )
return ( self._content_type, serialisable_content )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
def DecodeHashes( hs ):
return [ bytes.fromhex( h ) for h in hs ]
( self._content_type, serialisable_content ) = serialisable_info
if self._content_type == HC.CONTENT_TYPE_FILES:
serialisable_hashes = serialisable_content
self._content_data = DecodeHashes( serialisable_hashes )
elif self._content_type == HC.CONTENT_TYPE_MAPPING:
( tag, serialisable_hash ) = serialisable_content
self._content_data = ( tag, bytes.fromhex( serialisable_hash ) )
elif self._content_type == HC.CONTENT_TYPE_MAPPINGS:
( tag, serialisable_hashes ) = serialisable_content
self._content_data = ( tag, DecodeHashes( serialisable_hashes ) )
elif self._content_type in ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_TYPE_TAG_SIBLINGS ):
( old_tag, new_tag ) = serialisable_content
self._content_data = ( old_tag, new_tag )
def GetContentData( self ):
return self._content_data
def GetContentType( self ):
return self._content_type
def GetHashes( self ):
if self._content_type == HC.CONTENT_TYPE_FILES:
hashes = self._content_data
elif self._content_type == HC.CONTENT_TYPE_MAPPING:
( tag, hash ) = self._content_data
return [ hash ]
elif self._content_type == HC.CONTENT_TYPE_MAPPINGS:
( tag, hashes ) = self._content_data
else:
hashes = []
return hashes
def GetVirtualWeight( self ):
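        # rough heuristic weights, used to estimate how much work a piece of content represents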
if self._content_type in ( HC.CONTENT_TYPE_FILES, HC.CONTENT_TYPE_MAPPINGS ):
return len( self.GetHashes() )
elif self._content_type == HC.CONTENT_TYPE_TAG_PARENTS:
return 5000
elif self._content_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
return 5
elif self._content_type == HC.CONTENT_TYPE_MAPPING:
return 1
def HasHashes( self ):
return self._content_type in ( HC.CONTENT_TYPE_FILES, HC.CONTENT_TYPE_MAPPING, HC.CONTENT_TYPE_MAPPINGS )
def IterateUploadableChunks( self ):
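        # split large file/mapping contents into 100-hash chunks so individual uploads stay small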
if self._content_type == HC.CONTENT_TYPE_FILES:
hashes = self._content_data
for chunk_of_hashes in HydrusLists.SplitListIntoChunks( hashes, 100 ):
content = Content( content_type = self._content_type, content_data = chunk_of_hashes )
yield content
elif self._content_type == HC.CONTENT_TYPE_MAPPINGS:
( tag, hashes ) = self._content_data
for chunk_of_hashes in HydrusLists.SplitListIntoChunks( hashes, 100 ):
content = Content( content_type = self._content_type, content_data = ( tag, chunk_of_hashes ) )
yield content
else:
yield self
def ToString( self ):
if self._content_type == HC.CONTENT_TYPE_FILES:
hashes = self._content_data
text = 'FILES: ' + HydrusData.ToHumanInt( len( hashes ) ) + ' files'
elif self._content_type == HC.CONTENT_TYPE_MAPPING:
( tag, hash ) = self._content_data
text = 'MAPPING: ' + tag + ' for ' + hash.hex()
elif self._content_type == HC.CONTENT_TYPE_MAPPINGS:
( tag, hashes ) = self._content_data
text = 'MAPPINGS: ' + tag + ' for ' + HydrusData.ToHumanInt( len( hashes ) ) + ' files'
elif self._content_type == HC.CONTENT_TYPE_TAG_PARENTS:
( child, parent ) = self._content_data
            text = 'PARENT: ' + '"' + child + '" -> "' + parent + '"'
elif self._content_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
( old_tag, new_tag ) = self._content_data
text = 'SIBLING: ' + '"' + old_tag + '" -> "' + new_tag + '"'
return text
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_CONTENT ] = Content
class ContentUpdate( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_CONTENT_UPDATE
SERIALISABLE_NAME = 'Content Update'
SERIALISABLE_VERSION = 1
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._content_data = {}
def _GetContent( self, content_type, action ):
if content_type in self._content_data:
if action in self._content_data[ content_type ]:
return self._content_data[ content_type ][ action ]
return []
def _GetSerialisableInfo( self ):
serialisable_info = []
for ( content_type, actions_to_datas ) in list(self._content_data.items()):
serialisable_actions_to_datas = list(actions_to_datas.items())
serialisable_info.append( ( content_type, serialisable_actions_to_datas ) )
return serialisable_info
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
for ( content_type, serialisable_actions_to_datas ) in serialisable_info:
actions_to_datas = dict( serialisable_actions_to_datas )
self._content_data[ content_type ] = actions_to_datas
def AddRow( self, row ):
( content_type, action, data ) = row
if content_type not in self._content_data:
self._content_data[ content_type ] = {}
if action not in self._content_data[ content_type ]:
self._content_data[ content_type ][ action ] = []
self._content_data[ content_type ][ action ].append( data )
def GetDeletedFiles( self ):
return self._GetContent( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE )
def GetDeletedMappings( self ):
return self._GetContent( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_DELETE )
def GetDeletedTagParents( self ):
return self._GetContent( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_DELETE )
def GetDeletedTagSiblings( self ):
return self._GetContent( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_DELETE )
def GetNewFiles( self ):
return self._GetContent( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADD )
def GetNewMappings( self ):
return self._GetContent( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_ADD )
def GetNewTagParents( self ):
return self._GetContent( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_ADD )
def GetNewTagSiblings( self ):
return self._GetContent( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_ADD )
def GetNumRows( self, content_types_to_count = None ):
num = 0
for content_type in self._content_data:
if content_types_to_count is not None and content_type not in content_types_to_count:
continue
for action in self._content_data[ content_type ]:
data = self._content_data[ content_type ][ action ]
if content_type == HC.CONTENT_TYPE_MAPPINGS:
num_rows = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in data ) )
else:
num_rows = len( data )
num += num_rows
return num
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_CONTENT_UPDATE ] = ContentUpdate
class Credentials( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_CREDENTIALS
SERIALISABLE_NAME = 'Credentials'
SERIALISABLE_VERSION = 1
def __init__( self, host = None, port = None, access_key = None ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._host = host
self._port = port
self._access_key = access_key
def __eq__( self, other ):
if isinstance( other, Credentials ):
return self.__hash__() == other.__hash__()
return NotImplemented
def __hash__( self ):
return ( self._host, self._port, self._access_key ).__hash__()
def __repr__( self ):
if self._access_key is None:
access_key_str = 'no access key'
else:
access_key_str = self._access_key.hex()
return 'Credentials: ' + str( ( self._host, self._port, access_key_str ) )
def _GetSerialisableInfo( self ):
if self._access_key is None:
serialisable_access_key = self._access_key
else:
serialisable_access_key = self._access_key.hex()
return ( self._host, self._port, serialisable_access_key )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( self._host, self._port, serialisable_access_key ) = serialisable_info
if serialisable_access_key is None:
self._access_key = serialisable_access_key
else:
self._access_key = bytes.fromhex( serialisable_access_key )
def GetAccessKey( self ):
return self._access_key
def GetAddress( self ):
return ( self._host, self._port )
def GetConnectionString( self ):
connection_string = ''
if self.HasAccessKey():
connection_string += self._access_key.hex() + '@'
connection_string += self._host + ':' + str( self._port )
return connection_string
def GetPortedAddress( self ):
if self._host.endswith( '/' ):
host = self._host[:-1]
else:
host = self._host
        if '/' in host:
            ( actual_host, gubbins ) = host.split( '/', 1 )
            address = '{}:{}/{}'.format( actual_host, self._port, gubbins )
        else:
            address = '{}:{}'.format( host, self._port )
return address
def HasAccessKey( self ):
return self._access_key is not None
def SetAccessKey( self, access_key ):
        if access_key in ( '', b'' ):
            access_key = None
self._access_key = access_key
def SetAddress( self, host, port ):
self._host = host
self._port = port
@staticmethod
def GenerateCredentialsFromConnectionString( connection_string ):
( host, port, access_key ) = Credentials.ParseConnectionString( connection_string )
return Credentials( host, port, access_key )
@staticmethod
def ParseConnectionString( connection_string ):
if connection_string is None:
return ( 'hostname', 80, None )
        if '@' in connection_string:
            ( access_key_encoded, address ) = connection_string.split( '@', 1 )
            try:
                access_key = bytes.fromhex( access_key_encoded )
            except ( TypeError, ValueError ):
                raise HydrusExceptions.DataMissing( 'Could not parse that access key! It should be a 64 character hexadecimal string!' )
            if access_key == b'':
                access_key = None
        else:
            access_key = None
            address = connection_string
        if ':' in address:
            ( host, port ) = address.split( ':', 1 )
            try:
                port = int( port )
                if port < 0 or port > 65535:
                    raise ValueError()
            except ValueError:
                raise HydrusExceptions.DataMissing( 'Could not parse that port! It should be an integer between 0 and 65535!' )
        else:
            raise HydrusExceptions.DataMissing( 'Could not parse that address! It should look like "host:port"!' )
        if host == 'localhost':
            host = '127.0.0.1'
        return ( host, port, access_key )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_CREDENTIALS ] = Credentials
class DefinitionsUpdate( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_DEFINITIONS_UPDATE
SERIALISABLE_NAME = 'Definitions Update'
SERIALISABLE_VERSION = 1
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._hash_ids_to_hashes = {}
self._tag_ids_to_tags = {}
def _GetSerialisableInfo( self ):
serialisable_info = []
if len( self._hash_ids_to_hashes ) > 0:
serialisable_info.append( ( HC.DEFINITIONS_TYPE_HASHES, [ ( hash_id, hash.hex() ) for ( hash_id, hash ) in list(self._hash_ids_to_hashes.items()) ] ) )
if len( self._tag_ids_to_tags ) > 0:
serialisable_info.append( ( HC.DEFINITIONS_TYPE_TAGS, list(self._tag_ids_to_tags.items()) ) )
return serialisable_info
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
for ( definition_type, definitions ) in serialisable_info:
if definition_type == HC.DEFINITIONS_TYPE_HASHES:
self._hash_ids_to_hashes = { hash_id : bytes.fromhex( encoded_hash ) for ( hash_id, encoded_hash ) in definitions }
elif definition_type == HC.DEFINITIONS_TYPE_TAGS:
self._tag_ids_to_tags = { tag_id : tag for ( tag_id, tag ) in definitions }
def AddRow( self, row ):
( definitions_type, key, value ) = row
if definitions_type == HC.DEFINITIONS_TYPE_HASHES:
self._hash_ids_to_hashes[ key ] = value
elif definitions_type == HC.DEFINITIONS_TYPE_TAGS:
self._tag_ids_to_tags[ key ] = value
def GetHashIdsToHashes( self ):
return self._hash_ids_to_hashes
def GetNumRows( self ):
return len( self._hash_ids_to_hashes ) + len( self._tag_ids_to_tags )
def GetTagIdsToTags( self ):
return self._tag_ids_to_tags
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_DEFINITIONS_UPDATE ] = DefinitionsUpdate
class Metadata( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_METADATA
SERIALISABLE_NAME = 'Metadata'
SERIALISABLE_VERSION = 1
def __init__( self, metadata = None, next_update_due = None ):
if metadata is None:
metadata = {}
if next_update_due is None:
next_update_due = 0
HydrusSerialisable.SerialisableBase.__init__( self )
self._lock = threading.Lock()
        self._metadata = metadata
self._next_update_due = next_update_due
self._update_hashes = set()
self._update_hashes_ordered = []
self._biggest_end = self._CalculateBiggestEnd()
def _CalculateBiggestEnd( self ):
if len( self._metadata ) == 0:
return None
else:
biggest_index = max( self._metadata.keys() )
( update_hashes, begin, end ) = self._GetUpdate( biggest_index )
return end
def _GetNextUpdateDueTime( self, from_client = False ):
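        # pad the due time with a grace period; clients wait longer so they do not hit the server the moment an update is due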
delay = 10
if from_client:
delay = UPDATE_CHECKING_PERIOD * 2
return self._next_update_due + delay
def _GetSerialisableInfo( self ):
serialisable_metadata = [ ( update_index, [ update_hash.hex() for update_hash in update_hashes ], begin, end ) for ( update_index, ( update_hashes, begin, end ) ) in list(self._metadata.items()) ]
return ( serialisable_metadata, self._next_update_due )
def _GetUpdateHashes( self, update_index ):
( update_hashes, begin, end ) = self._GetUpdate( update_index )
return update_hashes
def _GetUpdate( self, update_index ):
if update_index not in self._metadata:
raise HydrusExceptions.DataMissing( 'That update does not exist!' )
return self._metadata[ update_index ]
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( serialisable_metadata, self._next_update_due ) = serialisable_info
self._metadata = {}
for ( update_index, encoded_update_hashes, begin, end ) in serialisable_metadata:
update_hashes = [ bytes.fromhex( encoded_update_hash ) for encoded_update_hash in encoded_update_hashes ]
self._metadata[ update_index ] = ( update_hashes, begin, end )
self._RecalcHashes()
self._biggest_end = self._CalculateBiggestEnd()
def _RecalcHashes( self ):
self._update_hashes = set()
self._update_hashes_ordered = []
for ( update_index, ( update_hashes, begin, end ) ) in sorted( self._metadata.items() ):
self._update_hashes.update( update_hashes )
self._update_hashes_ordered.extend( update_hashes )
def AppendUpdate( self, update_hashes, begin, end, next_update_due ):
with self._lock:
update_index = len( self._metadata )
self._metadata[ update_index ] = ( update_hashes, begin, end )
self._update_hashes.update( update_hashes )
self._update_hashes_ordered.extend( update_hashes )
self._next_update_due = next_update_due
self._biggest_end = end
def CalculateNewNextUpdateDue( self, update_period ):
with self._lock:
if self._biggest_end is None:
self._next_update_due = 0
else:
self._next_update_due = self._biggest_end + update_period
def GetEarliestTimestampForTheseHashes( self, hashes ):
hashes = set( hashes )
with self._lock:
for ( update_index, ( update_hashes, begin, end ) ) in sorted( self._metadata.items() ):
if HydrusData.SetsIntersect( hashes, update_hashes ):
return end
return 0
def GetNextUpdateIndex( self ):
with self._lock:
return len( self._metadata )
def GetNextUpdateBegin( self ):
with self._lock:
if self._biggest_end is None:
return HydrusTime.GetNow()
else:
return self._biggest_end + 1
def GetNextUpdateDueString( self, from_client = False ):
with self._lock:
if self._next_update_due == 0:
return 'have not yet synced metadata'
elif self._biggest_end is None:
return 'the metadata appears to be uninitialised'
else:
update_due = self._GetNextUpdateDueTime( from_client )
if HydrusTime.TimeHasPassed( update_due ):
s = 'checking for updates imminently'
else:
s = 'checking for updates {}'.format( HydrusTime.TimestampToPrettyTimeDelta( update_due ) )
return 'metadata synced up to {}, {}'.format( HydrusTime.TimestampToPrettyTimeDelta( self._biggest_end ), s )
def GetNumUpdateHashes( self ):
with self._lock:
return len( self._update_hashes )
def GetSlice( self, from_update_index ):
with self._lock:
metadata = { update_index : row for ( update_index, row ) in self._metadata.items() if update_index >= from_update_index }
return Metadata( metadata, self._next_update_due )
def GetUpdateHashes( self, update_index = None ):
with self._lock:
if update_index is None:
return set( self._update_hashes )
else:
return set( self._GetUpdateHashes( update_index ) )
def GetUpdateIndexBeginAndEnd( self, update_index ):
with self._lock:
if update_index in self._metadata:
( update_hashes, begin, end ) = self._metadata[ update_index ]
return ( begin, end )
raise HydrusExceptions.DataMissing( 'That update index does not seem to exist!' )
def GetUpdateIndicesAndTimes( self ):
with self._lock:
result = []
for ( update_index, ( update_hashes, begin, end ) ) in self._metadata.items():
result.append( ( update_index, begin, end ) )
return result
def GetUpdateIndicesAndHashes( self ):
with self._lock:
result = []
for ( update_index, ( update_hashes, begin, end ) ) in self._metadata.items():
result.append( ( update_index, update_hashes ) )
return result
def HasDoneInitialSync( self ):
with self._lock:
return self._next_update_due != 0
def HasUpdateHash( self, update_hash ):
with self._lock:
return update_hash in self._update_hashes
def SortContentHashesAndContentTypes( self, content_hashes_and_content_types ):
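        # return the given ( hash, content type ) pairs sorted by their position in the update sequence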
with self._lock:
content_hashes_to_content_types = dict( content_hashes_and_content_types )
content_hashes_and_content_types = [ ( update_hash, content_hashes_to_content_types[ update_hash ] ) for update_hash in self._update_hashes_ordered if update_hash in content_hashes_to_content_types ]
return content_hashes_and_content_types
def UpdateASAP( self ):
with self._lock:
# not 0, that's reserved
self._next_update_due = 1
def UpdateDue( self, from_client = False ):
with self._lock:
next_update_due_time = self._GetNextUpdateDueTime( from_client )
return HydrusTime.TimeHasPassed( next_update_due_time )
def UpdateFromSlice( self, metadata_slice: "Metadata" ):
with self._lock:
self._metadata.update( metadata_slice._metadata )
new_next_update_due = metadata_slice._next_update_due
if HydrusTime.TimeHasPassed( new_next_update_due ):
new_next_update_due = HydrusTime.GetNow() + 100000
self._next_update_due = new_next_update_due
self._biggest_end = self._CalculateBiggestEnd()
self._RecalcHashes()
def UpdateIsEmpty( self, update_index ):
with self._lock:
return len( self._GetUpdateHashes( update_index ) ) == 0
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_METADATA ] = Metadata
class Petition( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_PETITION
SERIALISABLE_NAME = 'Petition'
SERIALISABLE_VERSION = 2
def __init__( self, petitioner_account = None, reason = None, actions_and_contents = None ):
if actions_and_contents is None:
actions_and_contents = []
HydrusSerialisable.SerialisableBase.__init__( self )
self._petitioner_account = petitioner_account
self._reason = reason
self._actions_and_contents = [ ( action, HydrusSerialisable.SerialisableList( contents ) ) for ( action, contents ) in actions_and_contents ]
def _GetSerialisableInfo( self ):
serialisable_petitioner_account = Account.GenerateSerialisableTupleFromAccount( self._petitioner_account )
serialisable_actions_and_contents = [ ( action, contents.GetSerialisableTuple() ) for ( action, contents ) in self._actions_and_contents ]
return ( serialisable_petitioner_account, self._reason, serialisable_actions_and_contents )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( serialisable_petitioner_account, self._reason, serialisable_actions_and_contents ) = serialisable_info
self._petitioner_account = Account.GenerateAccountFromSerialisableTuple( serialisable_petitioner_account )
self._actions_and_contents = [ ( action, HydrusSerialisable.CreateFromSerialisableTuple( serialisable_contents ) ) for ( action, serialisable_contents ) in serialisable_actions_and_contents ]
def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
if version == 1:
( action, serialisable_petitioner_account, reason, serialisable_contents ) = old_serialisable_info
contents = [ HydrusSerialisable.CreateFromSerialisableTuple( serialisable_content ) for serialisable_content in serialisable_contents ]
actions_and_contents = [ ( action, HydrusSerialisable.SerialisableList( contents ) ) ]
serialisable_actions_and_contents = [ ( action, contents.GetSerialisableTuple() ) for ( action, contents ) in actions_and_contents ]
new_serialisable_info = ( serialisable_petitioner_account, reason, serialisable_actions_and_contents )
return ( 2, new_serialisable_info )
def GetContents( self, action ):
actions_to_contents = dict( self._actions_and_contents )
if action in actions_to_contents:
return actions_to_contents[ action ]
else:
return []
def GetActionsAndContents( self ):
return self._actions_and_contents
def GetPetitionerAccount( self ):
return self._petitioner_account
def GetReason( self ):
return self._reason
@staticmethod
def GetApproval( action, contents, reason ):
update = ClientToServerUpdate()
content_updates = []
if action == HC.CONTENT_UPDATE_PEND:
content_update_action = HC.CONTENT_UPDATE_ADD
elif action == HC.CONTENT_UPDATE_PETITION:
content_update_action = HC.CONTENT_UPDATE_DELETE
else:
raise Exception( 'Petition came with unexpected action: {}'.format( action ) )
for content in contents:
update.AddContent( action, content, reason )
content_type = content.GetContentType()
row = content.GetContentData()
content_update = HydrusData.ContentUpdate( content_type, content_update_action, row )
content_updates.append( content_update )
return ( update, content_updates )
@staticmethod
def GetDenial( action, contents, reason ):
update = ClientToServerUpdate()
if action == HC.CONTENT_UPDATE_PEND:
denial_action = HC.CONTENT_UPDATE_DENY_PEND
elif action == HC.CONTENT_UPDATE_PETITION:
denial_action = HC.CONTENT_UPDATE_DENY_PETITION
else:
raise Exception( 'Petition came with unexpected action: {}'.format( action ) )
for content in contents:
update.AddContent( denial_action, content, reason )
return update
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_PETITION ] = Petition
class ServerService( object ):
def __init__( self, service_key, service_type, name, port, dictionary ):
self._service_key = service_key
self._service_type = service_type
self._name = name
self._port = port
self._lock = threading.Lock()
self._LoadFromDictionary( dictionary )
self._dirty = False
def _GetSerialisableDictionary( self ):
dictionary = HydrusSerialisable.SerialisableDictionary()
dictionary[ 'upnp_port' ] = self._upnp_port
dictionary[ 'bandwidth_tracker' ] = self._bandwidth_tracker
return dictionary
def _LoadFromDictionary( self, dictionary ):
self._upnp_port = dictionary[ 'upnp_port' ]
self._bandwidth_tracker = dictionary[ 'bandwidth_tracker' ]
def _SetDirty( self ):
self._dirty = True
def AllowsNonLocalConnections( self ):
with self._lock:
return True
def BandwidthOK( self ):
with self._lock:
return True
def Duplicate( self ):
with self._lock:
dictionary = self._GetSerialisableDictionary()
dictionary = dictionary.Duplicate()
duplicate = GenerateService( self._service_key, self._service_type, self._name, self._port, dictionary )
return duplicate
def GetName( self ):
with self._lock:
return self._name
def GetPort( self ):
with self._lock:
return self._port
def GetUPnPPort( self ):
with self._lock:
return self._upnp_port
def GetServiceKey( self ):
with self._lock:
return self._service_key
def GetServiceType( self ):
with self._lock:
return self._service_type
def IsDirty( self ):
with self._lock:
return self._dirty
def LogsRequests( self ):
return True
def ReportDataUsed( self, num_bytes ):
with self._lock:
self._bandwidth_tracker.ReportDataUsed( num_bytes )
self._SetDirty()
def ReportRequestUsed( self ):
with self._lock:
self._bandwidth_tracker.ReportRequestUsed()
self._SetDirty()
def SetClean( self ):
with self._lock:
self._dirty = False
def SetName( self, name ):
with self._lock:
self._name = name
self._SetDirty()
def SetPort( self, port ):
with self._lock:
self._port = port
self._SetDirty()
def SupportsCORS( self ):
return False
def ToSerialisableTuple( self ):
with self._lock:
dictionary = self._GetSerialisableDictionary()
dictionary_string = dictionary.DumpToString()
return ( self._service_key.hex(), self._service_type, self._name, self._port, dictionary_string )
def ToTuple( self ):
with self._lock:
dictionary = self._GetSerialisableDictionary()
dictionary = dictionary.Duplicate()
return ( self._service_key, self._service_type, self._name, self._port, dictionary )
def UseNormieEris( self ):
with self._lock:
return False
class ServerServiceRestricted( ServerService ):
def _GetSerialisableDictionary( self ):
dictionary = ServerService._GetSerialisableDictionary( self )
dictionary[ 'bandwidth_rules' ] = self._bandwidth_rules
dictionary[ 'service_options' ] = self._service_options
dictionary[ 'server_message' ] = self._server_message
return dictionary
def _LoadFromDictionary( self, dictionary ):
ServerService._LoadFromDictionary( self, dictionary )
if 'service_options' not in dictionary:
dictionary[ 'service_options' ] = HydrusSerialisable.SerialisableDictionary()
self._service_options = HydrusSerialisable.SerialisableDictionary( dictionary[ 'service_options' ] )
if 'server_message' not in self._service_options:
self._service_options[ 'server_message' ] = ''
self._server_message = self._service_options[ 'server_message' ]
self._bandwidth_rules = dictionary[ 'bandwidth_rules' ]
def BandwidthOK( self ):
with self._lock:
return self._bandwidth_rules.CanStartRequest( self._bandwidth_tracker )
class ServerServiceRepository( ServerServiceRestricted ):
def _GetSerialisableDictionary( self ):
dictionary = ServerServiceRestricted._GetSerialisableDictionary( self )
dictionary[ 'metadata' ] = self._metadata
dictionary[ 'next_nullification_update_index' ] = self._next_nullification_update_index
return dictionary
def _LoadFromDictionary( self, dictionary ):
ServerServiceRestricted._LoadFromDictionary( self, dictionary )
if 'update_period' not in self._service_options:
self._service_options[ 'update_period' ] = 100000
if 'nullification_period' in dictionary:
default_nullification_period = dictionary[ 'nullification_period' ]
del dictionary[ 'nullification_period' ]
else:
default_nullification_period = 90 * 86400
if 'nullification_period' not in self._service_options:
self._service_options[ 'nullification_period' ] = default_nullification_period
if 'next_nullification_update_index' not in dictionary:
dictionary[ 'next_nullification_update_index' ] = 0
self._next_nullification_update_index = dictionary[ 'next_nullification_update_index' ]
self._metadata = dictionary[ 'metadata' ]
def GetMetadata( self ):
with self._lock:
return self._metadata
def GetMetadataSlice( self, from_update_index ):
with self._lock:
return self._metadata.GetSlice( from_update_index )
def GetNullificationPeriod( self ) -> int:
with self._lock:
return self._service_options[ 'nullification_period' ]
def GetUpdatePeriod( self ) -> int:
with self._lock:
return self._service_options[ 'update_period' ]
def HasUpdateHash( self, update_hash ):
with self._lock:
return self._metadata.HasUpdateHash( update_hash )
def NullifyHistory( self ):
# when there is a huge amount to catch up on, we don't want to bosh the server for ages
# instead we'll hammer the server for an hour and then break (for ~three hours, should be)
MAX_WAIT_TIME_WHEN_HEAVY_UPDATES = 120
time_started_nullifying = HydrusTime.GetNow()
time_to_stop_nullifying = time_started_nullifying + 3600
while not HG.started_shutdown:
with self._lock:
next_update_index = self._metadata.GetNextUpdateIndex()
# we are caught up on a server with update times longer than nullification_period
if self._next_nullification_update_index >= next_update_index:
return
( nullification_begin, nullification_end ) = self._metadata.GetUpdateIndexBeginAndEnd( self._next_nullification_update_index )
nullification_period = self._service_options[ 'nullification_period' ]
# it isn't time to do the next yet!
if not HydrusTime.TimeHasPassed( nullification_end + nullification_period ):
return
if self._metadata.UpdateIsEmpty( self._next_nullification_update_index ):
HydrusData.Print( 'Account history for "{}" update {} was empty, so nothing to anonymise.'.format( self._name, self._next_nullification_update_index ) )
self._next_nullification_update_index += 1
self._SetDirty()
continue
service_key = self._service_key
locked = HG.server_busy.acquire( False ) # pylint: disable=E1111
if not locked:
return
try:
HydrusData.Print( 'Nullifying account history for "{}" update {}.'.format( self._name, self._next_nullification_update_index ) )
update_started = HydrusTime.GetNowFloat()
HG.server_controller.WriteSynchronous( 'nullify_history', service_key, nullification_begin, nullification_end )
update_took = HydrusTime.GetNowFloat() - update_started
with self._lock:
HydrusData.Print( 'Account history for "{}" update {} was anonymised in {}.'.format( self._name, self._next_nullification_update_index, HydrusTime.TimeDeltaToPrettyTimeDelta( update_took ) ) )
self._next_nullification_update_index += 1
self._SetDirty()
finally:
HG.server_busy.release()
if HydrusTime.TimeHasPassed( time_to_stop_nullifying ):
return
if update_took < 0.5:
continue
time_to_wait = min( update_took, MAX_WAIT_TIME_WHEN_HEAVY_UPDATES )
resume_timestamp = HydrusTime.GetNowFloat() + time_to_wait
while not HG.started_shutdown and not HydrusTime.TimeHasPassedFloat( resume_timestamp ):
time.sleep( 1 )
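    # Pacing, per the comment at the top of this method: each pass nullifies
    # one update, then sleeps for roughly as long as that update took (capped
    # at MAX_WAIT_TIME_WHEN_HEAVY_UPDATES seconds), and the loop bails out
    # after an hour so a later pass can pick up the remainder.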
def SetNullificationPeriod( self, nullification_period: int ):
with self._lock:
self._service_options[ 'nullification_period' ] = nullification_period
self._SetDirty()
HG.server_controller.pub( 'notify_new_nullification' )
def SetUpdatePeriod( self, update_period: int ):
with self._lock:
self._service_options[ 'update_period' ] = update_period
self._metadata.CalculateNewNextUpdateDue( update_period )
self._SetDirty()
HG.server_controller.pub( 'notify_new_repo_sync' )
def Sync( self ):
with self._lock:
update_due = self._metadata.UpdateDue()
update_created = False
if update_due:
locked = HG.server_busy.acquire( False ) # pylint: disable=E1111
if not locked:
return
try:
while update_due:
with self._lock:
service_key = self._service_key
begin = self._metadata.GetNextUpdateBegin()
update_period = self._service_options[ 'update_period' ]
end = begin + update_period
update_hashes = HG.server_controller.WriteSynchronous( 'create_update', service_key, begin, end )
update_created = True
next_update_due = end + update_period
with self._lock:
self._metadata.AppendUpdate( update_hashes, begin, end, next_update_due )
update_due = self._metadata.UpdateDue()
finally:
HG.server_busy.release()
if update_created:
HG.server_controller.pub( 'notify_update_created' )
with self._lock:
self._SetDirty()
class ServerServiceRepositoryTag( ServerServiceRepository ):
def _LoadFromDictionary( self, dictionary ):
ServerServiceRepository._LoadFromDictionary( self, dictionary )
if 'tag_filter' not in self._service_options:
self._service_options[ 'tag_filter' ] = HydrusTags.TagFilter()
def GetTagFilter( self ) -> HydrusTags.TagFilter:
with self._lock:
return self._service_options[ 'tag_filter' ]
def SetTagFilter( self, tag_filter: HydrusTags.TagFilter ):
with self._lock:
self._service_options[ 'tag_filter' ] = tag_filter
self._SetDirty()
class ServerServiceRepositoryFile( ServerServiceRepository ):
def _GetSerialisableDictionary( self ):
dictionary = ServerServiceRepository._GetSerialisableDictionary( self )
dictionary[ 'log_uploader_ips' ] = self._log_uploader_ips
dictionary[ 'max_storage' ] = self._max_storage
return dictionary
def _LoadFromDictionary( self, dictionary ):
ServerServiceRepository._LoadFromDictionary( self, dictionary )
self._log_uploader_ips = dictionary[ 'log_uploader_ips' ]
self._max_storage = dictionary[ 'max_storage' ]
def LogUploaderIPs( self ):
with self._lock:
return self._log_uploader_ips
def GetMaxStorage( self ):
with self._lock:
return self._max_storage
class ServerServiceAdmin( ServerServiceRestricted ):
def _GetSerialisableDictionary( self ):
dictionary = ServerServiceRestricted._GetSerialisableDictionary( self )
dictionary[ 'server_bandwidth_tracker' ] = self._server_bandwidth_tracker
dictionary[ 'server_bandwidth_rules' ] = self._server_bandwidth_rules
return dictionary
def _LoadFromDictionary( self, dictionary ):
ServerServiceRestricted._LoadFromDictionary( self, dictionary )
self._server_bandwidth_tracker = dictionary[ 'server_bandwidth_tracker' ]
self._server_bandwidth_rules = dictionary[ 'server_bandwidth_rules' ]
def ServerBandwidthOK( self ):
with self._lock:
return self._server_bandwidth_rules.CanStartRequest( self._server_bandwidth_tracker )
def ServerReportDataUsed( self, num_bytes ):
with self._lock:
self._server_bandwidth_tracker.ReportDataUsed( num_bytes )
self._SetDirty()
def ServerReportRequestUsed( self ):
with self._lock:
self._server_bandwidth_tracker.ReportRequestUsed()
self._SetDirty()
class UpdateBuilder( object ):
def __init__( self, update_class, max_rows ):
self._update_class = update_class
self._max_rows = max_rows
self._updates = []
self._current_update = self._update_class()
self._current_num_rows = 0
def AddRow( self, row, row_weight = 1 ):
self._current_update.AddRow( row )
self._current_num_rows += row_weight
if self._current_num_rows > self._max_rows:
self._updates.append( self._current_update )
self._current_update = self._update_class()
self._current_num_rows = 0
def Finish( self ):
if self._current_update.GetNumRows() > 0:
self._updates.append( self._current_update )
self._current_update = None
def GetUpdates( self ):
return self._updates
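# A minimal usage sketch of UpdateBuilder, with a hypothetical stub update
# class standing in for a real update type: rows accumulate in the current
# update until the weighted row count exceeds max_rows, and Finish() appends
# any non-empty remainder.
def _update_builder_sketch():
    class StubUpdate( object ):
        def __init__( self ):
            self._rows = []
        def AddRow( self, row ):
            self._rows.append( row )
        def GetNumRows( self ):
            return len( self._rows )
    builder = UpdateBuilder( StubUpdate, max_rows = 2 )
    for row in ( 'a', 'b', 'c', 'd', 'e' ):
        builder.AddRow( row )
    builder.Finish()
    return builder.GetUpdates() # two updates: [ 'a', 'b', 'c' ], then [ 'd', 'e' ]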
|
[
"hydrus.admin@gmail.com"
] |
hydrus.admin@gmail.com
|
577ec4622d0dc70972ccdc9e4ecaf1630d96cf34
|
50417855e1c2fd1da178446a2b31ae20b807b24c
|
/Practice/Problem Solving/Implementation/PickingNumbers.py
|
d6c9136851a1c379e63f6a5d1ed106ac740635f1
|
[] |
no_license
|
DuTogira/HackerRank
|
d5d6f6c0ef74cd31d7126759cf05529c5a09c5ff
|
e1affb5a51ed8c3beefe2fa338444b177fe02a7e
|
refs/heads/master
| 2022-11-19T21:04:02.788218
| 2020-07-16T18:55:06
| 2020-07-16T18:55:06
| 278,694,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
# problem: https://www.hackerrank.com/challenges/picking-numbers/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'pickingNumbers' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY a as parameter.
#
def pickingNumbers(a):
# Write your code here
biggest = a[0]
smallest = a[0]
for num in a:
if num > biggest:
biggest = num
if num < smallest:
smallest = num
count_nums = {i: 0 for i in range(smallest, biggest + 1)}
for num in a:
count_nums[num] += 1
big_sum = count_nums[smallest]
    # the answer is the best count over two consecutive values (max - min <= 1)
    for i in range(smallest, biggest):
        lSum = count_nums[i] + count_nums[i + 1]
        if lSum > big_sum:
            big_sum = lSum
return big_sum
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
a = list(map(int, input().rstrip().split()))
result = pickingNumbers(a)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"at.smothers@gmail.com"
] |
at.smothers@gmail.com
|
ed85b06748686be31ae6becb3e0e9f2f3191c68c
|
a704ffebe5a74cf2d1be472f03fbd050fbe6efc8
|
/src/problems/p01_find_kth_node.py
|
8309ef954db6047caa3d407c5c3dd84fd3f5bc88
|
[
"MIT"
] |
permissive
|
jbaisani/coding-interview-python
|
f642882c9860a29465d72f26c948e97143295866
|
4b9119eb6327a9fd53879b8c946f3d1068566f5a
|
refs/heads/master
| 2020-03-13T12:05:35.749594
| 2017-10-08T02:08:48
| 2017-10-08T02:08:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
"""
01_find_kth_node
~~~~~~~~~~~~~~
Problem: find the k-th node from the end of a linked list.
When traversing with a single pointer cannot solve a problem, try two
pointers: let one of them move faster (e.g. two steps at a time), or
give it a head start of several steps and walk it to the end.
For example, to find the middle node of a list, move the front pointer
two steps at a time and the back pointer one step at a time; when the
front pointer reaches the end, the back pointer is at the middle node.
:copyright: (c) 2017 by 0xE8551CCB.
:license: MIT, see LICENSE for more details.
"""
def find_kth_node(head, k):
"""思路是设置两个指针,分别为 ahead, behind,前指针先走
k-1 步,然后 ahead 和 behind 同步向后走,直到 ahead
走到最后,此时 behind 所在的位置就是倒数第 k 个节点了。
考虑边界条件:
1. 空链表
2. k 为 0
3. total_length < k
"""
if head is None:
return None
if k < 1:
return None
behind, ahead = head.next, head.next
for _ in range(k - 1):
        # the ahead pointer advances k-1 steps first
ahead = ahead.next
if ahead is None:
            # the list may be shorter than k; bail out instead of walking past the end
return None
    # advance both pointers together
while ahead.next:
ahead = ahead.next
behind = behind.next
    # behind now points at the target node
return behind
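# A companion sketch of the middle-node variant described in the module
# docstring (front pointer two steps at a time, back pointer one step).
# It assumes the same sentinel-head convention as find_kth_node, where
# head.next is the first real node.
def find_middle_node(head):
    if head is None or head.next is None:
        return None
    slow, fast = head.next, head.next
    while fast.next and fast.next.next:
        fast = fast.next.next
        slow = slow.next
    return slow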
if __name__ == '__main__':
from src.datastructures.linkedlist.single import List
l = List.fromvalues([1])
print(find_kth_node(l.head, 1))
|
[
"christopherlee199398@gmail.com"
] |
christopherlee199398@gmail.com
|
4b01ba2a61a5fd13b0abeb505238766cb21dbfcf
|
629090051b975b5814b4b48e2cb2c784fa6705e4
|
/pgsmo/objects/extension/extension.py
|
c2b146f976c4c57eb1c141ca37a9d7a68ebc2272
|
[
"MIT"
] |
permissive
|
microsoft/pgtoolsservice
|
3d3597821c7cae1d216436d4f8143929e2c8a82a
|
24a048226f7f30c775bbcbab462d499a465be5da
|
refs/heads/master
| 2023-08-28T12:55:47.817628
| 2023-08-25T22:47:53
| 2023-08-25T22:47:53
| 80,681,087
| 68
| 35
|
NOASSERTION
| 2023-09-13T21:46:55
| 2017-02-02T01:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os.path as path
from typing import List
from smo.common.node_object import NodeObject
from smo.common.scripting_mixins import ScriptableCreate, ScriptableDelete
from pgsmo.objects.server import server as s # noqa
import smo.utils.templating as templating
class Extension(NodeObject, ScriptableCreate, ScriptableDelete):
TEMPLATE_ROOT = templating.get_template_root(__file__, 'templates')
MACRO_ROOT = templating.get_template_root(__file__, 'macros')
GLOBAL_MACRO_ROOT = templating.get_template_root(__file__, '../global_macros')
@classmethod
def _from_node_query(cls, server: 's.Server', parent: NodeObject, **kwargs) -> 'Extension':
"""
        Creates an extension instance from the results of a node query
        :param server: Server that owns the extension
        :param parent: Parent object of the extension. Should be a Schema
        :param kwargs: A row from the node query
        Kwargs:
            oid int: Object ID of the extension
            name str: Name of the extension
        :return: An extension instance
"""
extension = cls(server, parent, kwargs['name'])
extension._oid = kwargs['oid']
extension._schema = kwargs['schema']
extension._scid = kwargs['schemaoid']
extension._is_system = kwargs['is_system']
return extension
def __init__(self, server: 's.Server', parent: NodeObject, name: str):
NodeObject.__init__(self, server, parent, name)
ScriptableCreate.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableDelete.__init__(self, self._template_root(server), self._macro_root(), server.version)
self._schema: str = None
self._scid: int = None
# PROPERTIES ###########################################################
@property
def schema(self):
return self._schema
@property
def scid(self):
return self._scid
@property
def extended_vars(self):
template_vars = {
'scid': self.scid,
'did': self.parent.oid
}
return template_vars
# -FULL OBJECT PROPERTIES ##############################################
@property
def owner(self):
return self._full_properties.get("owner", "")
@property
def relocatable(self):
return self._full_properties.get("relocatable", "")
@property
def version(self):
return self._full_properties.get("version", "")
# IMPLEMENTATION DETAILS ###############################################
@classmethod
def _macro_root(cls) -> List[str]:
return [cls.MACRO_ROOT, cls.GLOBAL_MACRO_ROOT]
@classmethod
def _template_root(cls, server: 's.Server') -> str:
return path.join(cls.TEMPLATE_ROOT, server.server_type)
def _create_query_data(self) -> dict:
""" Provides data input for create script """
return {"data": {
"name": self.name,
"schema": self.schema
}}
def _delete_query_data(self) -> dict:
""" Provides data input for delete script """
return {
"data": {
"name": self.name,
"schema": self.schema
},
}
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
af21bd89bb0277d82edfe4e843fc71b8e6fea705
|
da750f9ffee13f8388e0d3ac48db2eab562e5a2b
|
/examples/set_vm_lease_storage_domain.py
|
80caa27928b5a3db1aef762ee12ffadfed2c38eb
|
[
"Apache-2.0"
] |
permissive
|
fjanuska/python-ovirt-engine-sdk4
|
f86085da0d1e3044d04c41aee5b842ccec37a094
|
8d51d43c63709a2c6064a9d9b8e095874fff4f2e
|
refs/heads/main
| 2023-08-18T13:12:46.491667
| 2021-09-29T13:18:01
| 2021-09-29T13:18:01
| 411,636,996
| 0
| 0
|
Apache-2.0
| 2021-09-29T10:56:52
| 2021-09-29T10:56:51
| null |
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import ovirtsdk4 as sdk
import ovirtsdk4.types as types
logging.basicConfig(level=logging.DEBUG, filename='example.log')
# This example shows how to set the storage domain where the lease of a
# virtual machine should be created.
# Create the connection to the server:
connection = sdk.Connection(
url='https://engine40.example.com/ovirt-engine/api',
username='admin@internal',
password='redhat123',
ca_file='ca.pem',
debug=True,
log=logging.getLogger(),
)
# Get the reference to the root of the tree of services:
system_service = connection.system_service()
# Find the virtual machine:
vms_service = system_service.vms_service()
vm = vms_service.list(search='name=myvm')[0]
# Find the storage domain:
sds_service = system_service.storage_domains_service()
sd = sds_service.list(search='name=mydata')[0]
# Update the virtual machine so that high availability is enabled and
# the lease is created in the selected storage domain:
vm_service = vms_service.vm_service(vm.id)
vm_service.update(
vm=types.Vm(
high_availability=types.HighAvailability(
enabled=True
),
lease=types.StorageDomainLease(
storage_domain=types.StorageDomain(
id=sd.id
)
)
)
)
# Close the connection to the server:
connection.close()
|
[
"necas.marty@gmail.com"
] |
necas.marty@gmail.com
|
d71db23231a0eece940a3a178de915e33d5657e7
|
2ad9a73cb3e2da46fb15ae56a6dee11407fe8845
|
/ports/kodi/addons/plugin.video.transistortv/scrapers/onemovies_scraper.py
|
6782b94bcbb40ef2ae81075222e8b0eb5cbfdfbd
|
[] |
no_license
|
hpduong/retropie_configs
|
cde596b35897a3faeedefabd742fc15820d58255
|
ed4e39146e5bebc0212dcef91108541a128d9325
|
refs/heads/master
| 2021-07-12T15:46:17.589357
| 2018-11-11T19:10:54
| 2018-11-11T19:10:54
| 157,111,040
| 1
| 2
| null | 2020-07-24T03:43:29
| 2018-11-11T18:59:52
|
Python
|
UTF-8
|
Python
| false
| false
| 10,526
|
py
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64
from transistortv_lib import cleantitle
from transistortv_lib import client
from transistortv_lib import directstream
from transistortv_lib import jsunfuck
from transistortv_lib import cache
CODE = '''def retA():
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
def my_add(x, y):
try: return x + y
except Exception: return str(x) + str(y)
x = Infix(my_add)
return %s
param = retA()'''
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['123movies.to', '123movies.ru', '123movies.is', '123movies.gs', '123-movie.ru', '123movies-proxy.ru', '123movies.moscow', '123movies.msk.ru', '123movies.msk.ru', '123movies.unblckd.me', 'gomovies.to']
self.base_link = 'https://gomovies.to'
self.search_link = '/ajax/suggest_search'
self.search_link_2 = '/movie/search/%s'
self.info_link = '/ajax/movie_load_info/%s'
self.server_link = '/ajax/movie_episodes/%s'
self.embed_link = '/ajax/load_embed/'
self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
self.sourcelink = '/ajax/movie_sources/%s?x=%s&y=%s'
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def searchShow(self, title, season, aliases, headers):
try:
title = cleantitle.normalize(title)
u = urlparse.urljoin(self.base_link, self.search_link)
p = urllib.urlencode({'keyword': ('%s - Season %s' % (title, season))})
r = client.request(u, post=p, XHR=True)
r = json.loads(r)['content']
r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
return url
except:
return
def searchMovie(self, title, year, aliases, headers):
try:
title = cleantitle.normalize(title)
u = urlparse.urljoin(self.base_link, self.search_link)
p = urllib.urlencode({'keyword': title})
r = client.request(u, post=p, XHR=True)
r = json.loads(r)['content']
r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
url = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
aliases = eval(data['aliases'])
headers = {}
if 'tvshowtitle' in data:
episode = int(data['episode'])
url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
else:
episode = 0
url = self.searchMovie(data['title'], data['year'], aliases, headers)
mid = re.findall('-(\d+)', url)[-1]
try:
headers = {'Referer': url}
u = urlparse.urljoin(self.base_link, self.server_link % mid)
r = client.request(u, headers=headers, XHR=True)
r = json.loads(r)
r = client.parseDOM(r['html'], 'div', attrs = {'class': 'les-content'})
ids = client.parseDOM(r, 'a', ret='data-id')
servers = client.parseDOM(r, 'a', ret='data-server')
labels = client.parseDOM(r, 'a', ret='title')
r = zip(ids, servers, labels)
for eid in r:
try:
try:
ep = re.findall('episode.*?(\d+):.*?',eid[2].lower())[0]
except:
ep = 0
if (episode == 0) or (int(ep) == int(episode)):
url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
script = client.request(url)
if '$_$' in script:
params = self.uncensored1(script)
elif script.startswith('[]') and script.endswith('()'):
params = self.uncensored2(script)
elif '_x=' in script:
x = re.search('''_x=['"]([^"']+)''', script).group(1)
y = re.search('''_y=['"]([^"']+)''', script).group(1)
params = {'x': x, 'y': y}
else:
raise Exception()
u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
r = client.request(u)
url = json.loads(r)['playlist'][0]['sources']
url = [i['file'] for i in url if 'file' in i]
url = [directstream.googletag(i) for i in url]
url = [i[0] for i in url if i]
for s in url:
sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
'url': s['url'], 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
if self.embed_link in url:
result = client.request(url, XHR=True)
url = json.loads(result)['embed_url']
return url
try:
if not url.startswith('http'):
url = 'http:' + url
for i in range(3):
u = directstream.googlepass(url)
if not u == None: break
return u
except:
return
except:
return
def uncensored(a, b):
x = '' ; i = 0
for i, y in enumerate(a):
z = b[i % len(b) - 1]
y = int(ord(str(y)[0])) + int(ord(str(z)[0]))
x += chr(y)
x = base64.b64encode(x)
return x
def uncensored1(self, script):
try:
script = '(' + script.split("(_$$)) ('_');")[0].split("/* `$$` */")[-1].strip()
script = script.replace('(__$)[$$$]', '\'"\'')
script = script.replace('(__$)[_$]', '"\\\\"')
script = script.replace('(o^_^o)', '3')
script = script.replace('(c^_^o)', '0')
script = script.replace('(_$$)', '1')
script = script.replace('($$_)', '4')
vGlobals = {"__builtins__": None, '__name__': __name__, 'str': str, 'Exception': Exception}
vLocals = {'param': None}
exec (CODE % script.replace('+', '|x|'), vGlobals, vLocals)
data = vLocals['param'].decode('string_escape')
x = re.search('''_x=['"]([^"']+)''', data).group(1)
y = re.search('''_y=['"]([^"']+)''', data).group(1)
return {'x': x, 'y': y}
except:
pass
def uncensored2(self, script):
try:
js = jsunfuck.JSUnfuck(script).decode()
x = re.search('''_x=['"]([^"']+)''', js).group(1)
y = re.search('''_y=['"]([^"']+)''', js).group(1)
return {'x': x, 'y': y}
except:
pass
|
[
"henryduong@gmail.com"
] |
henryduong@gmail.com
|
09b053f2a46cc53d68dd5156e610bb19811c1c9c
|
734f8168bbae6be49bede38ed0ac98b8c1783c9f
|
/app/model.py
|
8add97c8971a87b8511a3a9a738c4a170a4ba2a7
|
[
"MIT"
] |
permissive
|
cwhy/discubot_server
|
952590879ab2d3addbd1a12c83aebf5170c6fed9
|
b33db3a911b8b1a7d2e104cd19c7544896ff6de8
|
refs/heads/master
| 2021-08-02T01:19:50.850339
| 2021-07-27T14:39:41
| 2021-07-27T14:39:41
| 94,058,608
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,881
|
py
|
class VerboseObj:
def __str__(self):
sb = []
for key in self.__dict__:
sb.append("{key}='{value}'".format(key=key,
value=self.__dict__[key]))
return '{' + ', '.join(sb) + '}'
def __repr__(self):
return self.__str__()
class Player:
def __init__(self, name, index=None):
self.name = name
self.index = index
self.is_dead = False
self.profile_pic = f'/assets/images/profile_pics/{name}.jpg'
self.is_shreff_cadidate = False
# TODO
# self.id = 0
def __str__(self):
return f'\'{self.name}, index={self.index}\''
def __repr__(self):
return self.__str__()
class Comment(VerboseObj):
def __init__(self, player, day, content, is_suicide=False):
self.player = player
self.day = day
self.is_shreff_run = False
self.is_suicide = is_suicide
self.paragraphs = []
self.id = None
if content.strip():
self.paragraphs.append(content)
def append(self, content):
self.paragraphs.append(content.strip())
class Game(VerboseObj):
def __init__(self):
self.reset()
def start(self):
self.started = True
def simulate_start(self):
self.reset()
self.started = True
self.recruiting = False
player = Player('test', 23)
self.add_player(player)
self.set_current_speaker(player)
comment = Comment(player, self.day, "kasdfa asdfkl")
comment.append('asdf')
self.add_comment(comment)
def set_director(self, name):
self.director_name = name
def reset(self):
self.comments = []
self.day = 0
self.isNight = False
self.started = False
self.players = []
self.n_players = 0
self.director_name = ''
self.current_speaker = None
self.shreff = None
self.recruiting = False
def dawn(self):
self.isNight = False
self.day += 1
def dusk(self):
self.isNight = True
def add_player(self, player):
# This will throw ValueError when player not exist
self.players.append(player)
self.n_players += 1
def get_player(self, name=None, index=None):
for player in self.players:
if player.index == index:
return player
elif player.name == name:
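                # an index of '?' is a placeholder; backfill it once the caller supplies the real index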
if player.index == '?' and index != '?':
player.index = index
return player
if name:
raise Exception(f'Player {name} not found!')
elif index:
raise Exception(f'Player {index} not found!')
else:
raise Exception('Bad player request')
def init_player(self, name, index):
try:
player = self.get_player(name=name, index=index)
except:
player = Player(name, index)
self.add_player(player)
finally:
return player
def remove_player(self, name, index):
player = self.get_player(name=name, index=index)
self.players.remove(player)
self.n_players -= 1
def add_comment(self, comment):
comment.id = len(self.comments)
self.comments.append(comment)
def set_current_speaker(self, player):
self.current_speaker = player
def append_comment(self, content):
self.comments[-1].append(content)
def get_comments_by_player(self, player):
cs = []
for c in self.comments:
if c.player == player:
cs.append(c)
return cs
def get_comments_by_day(self, day):
cs = []
for c in self.comments:
if c.day == day:
cs.append(c)
return cs
|
[
"chenyu.nus@gmail.com"
] |
chenyu.nus@gmail.com
|
27c9fbca93d386a872085ae69bd2295ced07561e
|
b156c2f5ee7417dfa1f6cdcf14e9773a25397544
|
/GeneVisualization/venv2/Lib/site-packages/itk/ITKIOTIFFPython.py
|
7cb363c878759595f07d0d3d80d9a742aefdec84
|
[] |
no_license
|
PinarTurkyilmaz/Vis
|
1115d9426e9c8eeb5d07949241713d6f58a7721b
|
4dd4426a70c0bd0a6e405ffe923afee29630aa67
|
refs/heads/master
| 2022-11-18T13:16:18.668065
| 2020-07-06T21:04:10
| 2020-07-06T21:04:10
| 226,217,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,077
|
py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _ITKIOTIFFPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_ITKIOTIFFPython', [dirname(__file__)])
except ImportError:
import _ITKIOTIFFPython
return _ITKIOTIFFPython
if fp is not None:
try:
_mod = imp.load_module('_ITKIOTIFFPython', fp, pathname, description)
finally:
fp.close()
return _mod
_ITKIOTIFFPython = swig_import_helper()
del swig_import_helper
else:
import _ITKIOTIFFPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import ITKPyBasePython
import ITKIOImageBasePython
from itkTIFFImageIOPython import *
|
[
"pinar.turkyilmaz@estudiant.upc.edu"
] |
pinar.turkyilmaz@estudiant.upc.edu
|
6ab2e0a7445ce131f56926aa0b1def1d3ba8d540
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04020/s954657742.py
|
35d923c411950b2ed65ad518cb35af7337a85f80
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
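# Greedy pairing: each pile contributes A[idx] // 2 pairs on its own; a single
# leftover item can form one more pair with an item borrowed from the next
# non-empty pile.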
def solve(N, A):
ret = 0
for idx in range(N):
ret += A[idx] // 2
A[idx] %= 2
if A[idx] and idx + 1 < N and A[idx + 1]:
ret += 1
A[idx + 1] -= 1
return ret
if __name__ == "__main__":
N = int(input())
A = [0 for _ in range(N)]
for idx in range(N):
A[idx] = int(input())
print(solve(N, A))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
28919256b9942c2e71ddf0ed8efd759b2f3739d3
|
55a795e4833364208324c164ae91b8917675464f
|
/10-3.py
|
661341bbdbc27f17fd2b3fedb0e3df0b9e1d3fb6
|
[] |
no_license
|
ggodol/Python-practice
|
f632d3825052b4f1fff486b4437cdcb65cdaf716
|
4fa27180da1b955cc6033fd93ddbcb840e01acb1
|
refs/heads/main
| 2023-05-10T22:37:17.004969
| 2021-06-02T13:07:58
| 2021-06-02T13:07:58
| 368,192,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
"""
10-3 User-defined exception handling
https://youtu.be/kWiCuklohdY?t=18065
"""
class BigNumberError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
try:
print("한 자리 숫자 나누기 전용 계산기입니다.")
num1 = int(input("첫 번째 숫자를 입력하세요 : "))
num2 = int(input("두 번째 숫자를 입력하세요 : "))
if num1 >= 10 or num2 >=10:
raise BigNumberError("입력값 : {0}, {1}".format(num1, num2))
print("{0} / {1} = {2}".format(num1, num2, int(num1/num2)))
except ValueError:
print("잘못된 값을 입력 하였습니다. 한 자리 숫자만 입력하세요.")
except BigNumberError as err:
print("에러가 발생하였습니다. 한 자리 숫자만 입력하세요.")
print(err)
|
[
"gghost10@naver.com"
] |
gghost10@naver.com
|
de83d850ca23b520d5553b11b7384a31b6381963
|
8a2adf71698f1ffdfce2b0a698296ba50e5074e9
|
/flask_split/__init__.py
|
9e136ebbe51a03a018519e449b005431a1168162
|
[
"MIT"
] |
permissive
|
AngusP/flask-split
|
ee7f52b84dfeeabd4c06a43b372259dbea8f8942
|
eecb6cb15de69e93613bd23d729d2ca8f4280ccb
|
refs/heads/master
| 2020-03-31T20:52:45.845576
| 2018-10-11T13:50:39
| 2018-10-11T13:50:39
| 152,559,008
| 0
| 0
|
MIT
| 2018-10-11T08:39:51
| 2018-10-11T08:39:50
| null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# -*- coding: utf-8 -*-
"""
flask.ext.split
~~~~~~~~~~~~~~~
A/B testing for your Flask application.
:copyright: (c) 2012-2015 by Janne Vanhala.
:license: MIT, see LICENSE for more details.
"""
from .core import ab_test, finished
from .views import split
__all__ = ('ab_test', 'finished', 'split')
try:
__version__ = __import__('pkg_resources')\
.get_distribution('flask_split').version
except Exception:
__version__ = 'unknown'
|
[
"janne.vanhala@gmail.com"
] |
janne.vanhala@gmail.com
|
2481a3678bffe6c87b75f6d8987110ae08848cf2
|
b315dfb5532c1bad50ba778bab17f63b0e360b0a
|
/trace_facebook.py
|
2eac9ddb7bfea2dc84111528a8c91d10a0f690fe
|
[] |
no_license
|
Dongjin-Dongdor/SNSTracer
|
94c51ee55258fe6cc7d90e59bce75b97048691e3
|
95b0a616f84b5775c187b434b159c40cf42fd2cd
|
refs/heads/master
| 2021-04-09T14:53:19.109576
| 2016-07-21T13:08:40
| 2016-07-21T13:08:40
| 62,938,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'gimdongjin'
import oauth2 as oauth
import string
import random
from bottle import request, run, route, jinja2_template, post, get, hook
import bottle
from beaker.middleware import SessionMiddleware
import urlparse
import tweepy
from pymongo import MongoClient
from json import dumps
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
import sys
import urllib2
reload(sys)
sys.setdefaultencoding('utf-8')
session_opts = {
'session.type': 'memory',
'session.cookie_expires': 300,
'session.auto': True
}
app = SessionMiddleware(bottle.app(),session_opts)
AppID = "1719277154971838"
AppSecret = "8bd66003086da2df2a23fbfe378df2cc"
FACEBOOK_ACCESS_TOKEN_URL = 'https://graph.facebook.com/oauth/access_token'
FACEBOOK_REQUEST_TOKEN_URL = 'https://www.facebook.com/dialog/oauth'
FACEBOOK_CHECK_AUTH = 'https://graph.facebook.com/me'
code = request.GET.get('code')
consumer = oauth.Consumer(AppID,AppSecret)
client = oauth.Client(consumer)
redirect_uri = 'http://localhost:8081/get_auth'
request_url = FACEBOOK_REQUEST_TOKEN_URL + '?client_id=%s&redirect_uri=%s&client_secret=%s&response_type=%s' %(AppID,redirect_uri,AppSecret,'code')
access_token_regular = ''
print request_url
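# OAuth code flow: the user authorizes at request_url, Facebook redirects back
# to /get_auth with ?code=..., and the handler below exchanges that code for an
# access token.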
@route('/get_auth')
def get_oauth():
    global access_token_regular
    code = request.query['code']
redirect = "http://localhost:8081/get_auth"
access_request = FACEBOOK_ACCESS_TOKEN_URL+"?client_id=%s&redirect_uri=%s&client_secret=%s&code=%s" %(AppID,redirect,AppSecret,code)
client = oauth.Client(consumer)
resp,content = client.request(access_request,"POST")
access_token = dict(urlparse.parse_qsl(content))
access_token_regular = access_token['access_token']
# req = urllib2.Request(access_request)
# response = urllib2.urlopen(req)
# str = response.read()
# print str
@route('/get_access_detail')
def get_access_detail():
print 'good!!!'
def main():
run(app=app,host='0',port=8081,server='tornado')
if __name__ == "__main__":
main()
|
[
"djinkim0413@gmail.com"
] |
djinkim0413@gmail.com
|
2b745f2ac461e18172b3961380d6a2c707dd0aac
|
c4cc9a7406ebf2e46535cd29080f302cf2f6c01d
|
/main.py
|
3ae3ce9ade5563e21fc8cd1b645b0bae666a6fe5
|
[
"MIT"
] |
permissive
|
OscarEReyes/Modding-Tool
|
64660aa0348072ae97b01bab3828b65cd9e72645
|
2edcb6d3423f22b8720d5cb135043d72af31c44f
|
refs/heads/master
| 2021-01-17T14:17:11.181533
| 2016-12-20T00:45:34
| 2016-12-20T00:45:34
| 51,173,694
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,440
|
py
|
import sys
from os import path
from PyQt4 import QtGui
import design
from modEditor import *
from modCreator import create_mod
from softwareModClasses import *
import qdarkstyle
from Categories import Categories
from genericThread import *
from threads import *
class MainWindow(QtGui.QMainWindow, design.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.set_up_buttons(self)
self.set_feature_buttons(self)
self.set_category_buttons(self)
self.connect_state_changes(self)
self.fLineFields = [
self.feature_category,
self.feature_description,
self.feature_unlock,
self.feature_dev_time,
self.feature_innovation,
self.feature_usability,
self.feature_stability,
self.feature_code_art,
self.feature_server
]
self.cLineFields = [
self.category_description,
self.category_unlock,
self.category_popularity,
self.category_retention,
self.category_time_scale,
self.category_iterative,
self.category_name_generator
]
Feature.set_field_dicts(self.fLineFields)
Category.set_field_dicts(self.cLineFields)
self.define_actions(self)
self.dialog = QtGui.QFileDialog()
self.categories = None
self.threadPool = []
@staticmethod
def set_up_buttons(window):
window.openButton.clicked.connect(window.open_file_handler)
window.newModButton.clicked.connect(window.create_mod_handler)
window.saveSoftwareButton.clicked.connect(window.update_software_fields)
@staticmethod
def set_feature_buttons(window):
window.savechanges.clicked.connect(window.update_feature)
window.renameFeatureButton.clicked.connect(window.rename_feature)
window.addFeatureButton.clicked.connect(window.add_feature)
window.deleteFeatureButton.clicked.connect(window.delete_feature)
window.addDependencyButton.clicked.connect(window.add_dependency)
window.add_software_category_button.clicked.connect(window.add_software_category)
window.load_dependency_button.clicked.connect(window.load_dependency)
window.load_category_button.clicked.connect(window.load_software_category)
window.del_dep_button.clicked.connect(window.delete_dependency)
window.del_categ_button.clicked.connect(window.delete_software_category)
@staticmethod
def set_category_buttons(window):
window.saveCategoryChanges.clicked.connect(window.update_category)
window.addCategoryButton.clicked.connect(window.add_category)
window.removeCategoryButton.clicked.connect(window.delete_category)
@staticmethod
def connect_state_changes(window):
window.feature_box.activated.connect(window.load_feature)
window.category_box.activated.connect(window.load_category)
window.categoryCheckbox.stateChanged.connect(window.check_categories)
@staticmethod
def define_actions(window):
main_menu = window.menuBar()
file_menu = window.add_menu_to_menu_bar(main_menu, '&File')
window.define_action(file_menu, "&Close", "Ctrl+Q", window.close)
window.define_action(file_menu, "&Save As", "", window.save_as)
window.define_action(file_menu, "&Save", "Ctrl+S", window.save)
window.define_action(file_menu, "&New Mod", "Ctrl+N", window.create_mod)
@staticmethod
def add_menu_to_menu_bar(menu, name):
return menu.addMenu(name)
@staticmethod
def add_menu_bar_action(file_menu, action):
file_menu.addAction(action)
def define_action(self, file_menu, action_name, shortcut, function):
action = QtGui.QAction(action_name, self)
if shortcut:
action.setShortcut(shortcut)
action.triggered.connect(function)
self.add_menu_bar_action(file_menu, action)
def closeEvent(self, event):
"""
* Override closeEvent method to ask user if he/she wishes
* to exit
"""
message = 'Unsaved progress will be lost.\nDo you wish to continue?'
message_box = QtGui.QMessageBox()
choice = QtGui.QMessageBox.question(message_box, "Warning",
message, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
def get_file_name(self, save_type):
"""
* Prompt the user for a file_name
"""
if save_type == 'save':
file_name = QtGui.QFileDialog.getSaveFileName(self.dialog, 'Save', '', '*.xml')
elif save_type == 'saveAs':
file_name = QtGui.QFileDialog.getSaveFileName(self.dialog, 'Save As', '', '*.xml')
else:
file_name = QtGui.QFileDialog.getOpenFileName(self.dialog, 'Open File', '', '*.xml')
return file_name
def create_mod(self):
"""
* Create mod
* Display message 'New Mod Created'
"""
create_mod(self.featureNum, self.file_name, self.feature_box)
self.statusBar().showMessage('New Mod Created', 1500)
def create_mod_handler(self):
"""
* Create an xml file with the required fields.
"""
self.file_name = self.get_file_name('save')
self.feature_num = self.featureNum.value()
if self.file_name and self.feature_num:
thread = WorkThread()
self.connect(thread, QtCore.SIGNAL('create_mod()'), self.create_mod)
thread.start()
self.open_file()
def open_file_handler(self):
"""
* Get file_name
* Set self.directory to current working directory
* Open file
"""
self.categories = None
self.file_name = self.get_file_name('open')
self.directory = path.dirname(self.file_name)
combo_boxes = [
self.feature_box,
self.category_box,
self.dependency_box,
self.soft_category_box
]
for box in combo_boxes:
box.clear()
self.open_file()
def open_file(self):
"""
* Open and parse xml file.
"""
try:
self.tree = parse_tree(self.file_name)
extract_data(self)
self.statusBar().showMessage('File Opened', 1500)
except FileNotFoundError:
pass
def find_feature(self):
"""
* Try to:
* Get current text of feature_box (Qt Combobox) & store it in var
* Use Feature class function find_by_name to find Feature object
* Store it in window's instance var feature.
* Display message if no features are found. (Causes IndexError)
"""
try:
name = str(self.feature_box.currentText())
self.feature = Feature.find_by_name(name)
except IndexError:
self.statusBar().showMessage('No features found', 1500)
def save(self):
"""
* Saves the file to current directory.
"""
try:
with open(self.file_name, 'wb+') as f:
self.tree.write(f, pretty_print=True)
self.statusBar().showMessage('Saved', 1500)
except AttributeError:
self.statusBar().showMessage('Error, no file opened or created', 1500)
def save_as(self):
"""
* Save As Function. Allows new file_name and new directory
* Get file_name
* Get directory
* Save
"""
try:
self.file_name = self.get_file_name('saveAs')
self.directory = QtGui.QFileDialog.getExistingDirectory(self.dialog, 'Save As')
self.save()
except FileNotFoundError:
self.statusBar().showMessage('Failed to Save', 1500)
def load_category(self):
"""
* Loads currently selected category from category combobox
* Set name to self.category_box current text
* Find_by_name and store result in var
* Set line_field text
"""
if self.category_box.count():
name = str(self.category_box.currentText())
self.c_category = Category.find_by_name(name)
self.c_category.set_line_field_text()
def check_categories(self):
"""
* Check if the categories checkbox is selected
* Add Categories object if it is checked
* Delete Categories object if it is not checked
* Update category fields
* Display error message if AttributeError occurs
"""
try:
status = self.categoryCheckbox.isChecked()
if status:
root = self.tree.getroot()
self.categories = Categories.add(root, self.category_box, False)
else:
self.categories.delete()
self.categories = None
set_software_tags(self)
update_category_fields(self, status)
except AttributeError:
self.statusBar().showMessage('No file selected.', 1500)
def add_category(self):
"""
* Add new Category object
* If name is not an empty string
* Add category to self.categories
"""
name = str(self.category_name.text())
if name:
self.categories.add_category(name)
def update_category(self):
"""
* Applies changes to currently selected category
* then repopulates its fields on the mainWindow
*
* Try to execute self.category method set_etree_element_text
* Set line field text
* Display error message if AttributeError occurs.
"""
try:
if self.category_box.count():
self.c_category.set_etree_element_text()
self.c_category.set_line_field_text()
except AttributeError:
pass
def delete_category(self):
"""
* Deletes currently selected category object
* Delete category
"""
if self.category_box.count():
self.categories.delete_category(self.c_category)
if self.category_box.count():
self.load_category()
def load_feature(self):
"""
* Finds the currently selected feature and loads field values
* to GUI's fields
*
* Find Feature object and store it in self.feature
* Set line field text
"""
self.feature = find_feature(self)
self.feature.set_line_field_text()
self.load_dependencies()
self.load_software_categories()
self.load_feature_checkboxes()
def load_feature_checkboxes(self):
"""
* Check for attributes in feature
* If attribute in feature.attrib, check box
"""
attributes = {
'Research': self.research_check_box,
'Vital': self.vital_radio_button,
'Forced': self.forced_radio_button,
'From': self.from_radio_button
}
feature = Feature.inventory[self.feature]
self.research_check_box.setChecked(False)
self.none_radio_button.setChecked(True)
for attribute, box in attributes.items():
if attribute in feature.attrib:
box.setChecked(True)
def load_dependencies(self):
"""
* Load dependencies to combobox
"""
self.dependency_box.clear()
feature = Feature.inventory[self.feature]
for child in feature:
if child.tag == 'Dependency':
self.dependency_box.addItem(child.text)
def load_software_categories(self):
"""
* Load software categories to combobox
"""
self.soft_category_box.clear()
feature = Feature.inventory[self.feature]
for child in feature:
if child.tag == 'SoftwareCategory':
                self.soft_category_box.addItem(child.text)
def load_dependency(self):
"""
* Loads dependency
"""
try:
dependency_feature = self.dependency_box.currentText()
dependency = self.feature.dependencies[dependency_feature]
self.dependency_software.setText(dependency.software)
self.dependency_feature.setText(dependency.feature)
except KeyError:
pass
def load_software_category(self):
"""
* Loads Software_category
"""
try:
category = self.soft_category_box.currentText()
software_category = self.feature.dependencies[category]
self.software_category.setText(software_category.category)
self.unlock_year.setText(software_category.unlock)
except KeyError:
pass
def update_software_fields(self):
"""
* Sets the tag text for each tag in software.
*
* Try set software tags method
* Show message 'Changes made'
* Display error message if Attribute Error occurs
"""
# try:
set_software_tags(self)
set_main_fields(self)
self.statusBar().showMessage('Changes made', 1500)
# except AttributeError:
# self.statusBar().showMessage('Error, Have you opened or created a mod?', 1500)
def update_feature(self):
"""
* Updates attributes and etree_element_text
* for currently selected feature
*
* Try to execute self.feature methods
* set_etree_element_text and check_attribute
* Display message if AttributeError occurs
"""
attributes = {
'Research': self.research_check_box.isChecked(),
'Vital': self.vital_radio_button.isChecked(),
'Forced': self.forced_radio_button.isChecked(),
'From': self.from_radio_button.isChecked()
}
from_text = str(self.fromEdit.text())
try:
self.feature.set_etree_element_text()
self.feature.check_attribute(attributes, from_text)
except AttributeError:
self.statusBar().showMessage('Error, Have you opened or created a mod?', 1500)
def rename_feature(self):
"""
* Renames currently selected feature
*
* Rename self.feature
"""
name = str(self.newNameEdit.text())
if name:
self.feature.rename(name)
def add_feature(self):
"""
* Adds feature to tree
*
* Create Feature Object
"""
name = str(self.featureNameEdit.text())
features = self.tree.find('Features')
if name:
Feature.add(features, name, self.feature_box)
def delete_feature(self):
"""
* Deletes current feature
*
* Delete currently selected feature
"""
feature_name = str(self.feature_box.currentText())
if feature_name:
self.feature.delete(self.feature.combobox)
def add_dependency(self):
"""
* Create dependency object for self.feature
* Add software dependency
* Display messages in case of NameError or AttributeError
"""
try:
dependency_feature = str(self.dependency_feature.text())
software = str(self.dependency_software.text())
if dependency_feature and software:
self.feature.add_special_tag('d', software, dependency_feature, self.dependency_box)
self.load_feature()
except NameError:
self.statusBar().showMessage('Error adding dependency', 1750)
except AttributeError:
self.statusBar().showMessage('Load Feature')
def add_software_category(self):
"""
* Create software category object for self.feature
* Add software category
* Display messages in case of NameError or AttributeError
"""
try:
category = str(self.software_category.text())
unlock = str(self.unlock_year.text())
if category and unlock:
self.feature.add_special_tag('sc', category, unlock, self.soft_category_box)
self.load_feature()
except NameError:
self.statusBar().showMessage('Error adding software category', 1750)
except AttributeError:
self.statusBar().showMessage('Load Feature')
def delete_dependency(self):
"""
* Deletes dependency
* Clear dependency_software and dependency_feature line edits
"""
dependency_feature = self.dependency_box.currentText()
if dependency_feature:
            self.feature.delete_special_tag('d', dependency_feature, self.dependency_box)
index = self.dependency_box.currentIndex()
self.dependency_box.removeItem(index)
self.dependency_software.clear()
self.dependency_feature.clear()
def delete_software_category(self):
"""
* Deletes software category
* Clear software_category and unlock_year line edits
"""
software_category = self.soft_category_box.currentText()
if software_category:
self.feature.delete_special_tag('sc', software_category, self.soft_category_box)
index = self.soft_category_box.currentIndex()
self.soft_category_box.removeItem(index)
self.software_category.clear()
self.unlock_year.clear()
def main():
app = QtGui.QApplication(sys.argv)
form = MainWindow()
app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
form.show()
app.exec_()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
OscarEReyes.noreply@github.com
|
c72448aeecb8ba1ac63124a5cc055890cf59f857
|
2c8dcd76752729de2d071836260c0fe87c87d49e
|
/mul.py
|
a49e89ed00e93ea6d5fb4219911f6efa03c0811d
|
[] |
no_license
|
iam-krj/Hackoctober-2021-2
|
7f6bbcf5e916b5b923180b7f0b8ab39c824f9f76
|
cb951f0b4105f75fb8a3e0dfc81a49ae9f6a4c3f
|
refs/heads/main
| 2023-08-13T05:25:31.027576
| 2021-10-11T17:32:09
| 2021-10-11T17:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
num_1 = input("Enter the first number pls")
num_2 = input("Enter the second number pls")
product = float(num_1) * float(num_2)
print("Product of {} and {} is {}".format(num_1, num_2,product))
|
[
"noreply@github.com"
] |
iam-krj.noreply@github.com
|
bb519b11be3a2a62bb2b0df454c39d98dc04e57a
|
8206009d82abae857c890d224a21a814ddcd1d54
|
/docs/conf.py
|
83a2152c9b55229377bd4fd5f72eb66f2d5530a8
|
[
"MIT"
] |
permissive
|
nurulrita/umn_hackathon
|
9647591d381a21ecca6937ca9917dbd5f7a7df06
|
a5e1a861ff6989edbdb25dce695312d2f7e8ca8c
|
refs/heads/master
| 2021-06-05T16:30:45.681164
| 2016-11-19T02:59:51
| 2016-11-19T02:59:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,904
|
py
|
# -*- coding: utf-8 -*-
#
# umn_hackathon documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'umn_hackathon'
copyright = """2016, Nurul Rita Mustika"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'umn_hackathondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'umn_hackathon.tex',
'umn_hackathon Documentation',
"""Nurul Rita Mustika""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'umn_hackathon', 'umn_hackathon Documentation',
["""Nurul Rita Mustika"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'umn_hackathon', 'umn_hackathon Documentation',
"""Nurul Rita Mustika""", 'umn_hackathon',
"""UMN Hackathon""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
[
"nuthanuu@gmail.com"
] |
nuthanuu@gmail.com
|
2a4469af9c01f1950395a53bd34cc19299b3bcc7
|
188490d20334d98926d2a1300799783dbe2d213c
|
/catalyst/callbacks/metric.py
|
7802cd7b63bb0a2ca09fc7883766f303a6577a27
|
[
"Apache-2.0"
] |
permissive
|
ammaddd/catalyst
|
cd2d41e0a9182f31a5c6b2893d5289f8294c99ad
|
e158bef16f8f8265517fb69918a8c1bffda43032
|
refs/heads/master
| 2023-04-25T12:49:36.293225
| 2021-03-22T05:55:23
| 2021-03-22T05:55:23
| 350,424,202
| 0
| 0
|
Apache-2.0
| 2021-03-23T13:12:02
| 2021-03-22T17:03:51
| null |
UTF-8
|
Python
| false
| false
| 8,815
|
py
|
from typing import Dict, Iterable, Optional, Tuple, Union
from abc import ABC, abstractmethod
import torch
from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
from catalyst.core.runner import IRunner
from catalyst.metrics import ICallbackBatchMetric, ICallbackLoaderMetric, IMetric
class IMetricCallback(Callback, ABC):
"""Metric callback interface, abstraction over metric step."""
@abstractmethod
def on_loader_start(self, runner: "IRunner") -> None:
"""
On loader start action
Args:
runner: current runner
"""
pass
@abstractmethod
def on_batch_end(self, runner: "IRunner") -> None:
"""
On batch end action
Args:
runner: current runner
"""
pass
@abstractmethod
def on_loader_end(self, runner: "IRunner") -> None:
"""
On loader end action
Args:
runner: current runner
"""
pass
class MetricCallback(IMetricCallback):
"""
    MetricCallback is a base callback implementation that updates metrics over a batch or a loader.
Args:
metric: metric to calculate in callback
input_key: keys of tensors that should be used as inputs in metric calculation
target_key: keys of tensors that should be used as targets in metric calculation
"""
def __init__(
self,
metric: Union[ICallbackBatchMetric, ICallbackLoaderMetric],
input_key: Union[str, Iterable[str], Dict[str, str]],
target_key: Union[str, Iterable[str], Dict[str, str]],
):
"""Init MetricCallback"""
super().__init__(order=CallbackOrder.metric, node=CallbackNode.all)
self.metric = metric
assert isinstance(metric, IMetric)
self._metric_update_method = self.metric.update
kv_types = (dict, list, tuple)
is_value_input = isinstance(input_key, str)
is_value_targets = isinstance(target_key, str)
is_key_value_input = isinstance(input_key, kv_types)
is_key_value_targets = isinstance(target_key, kv_types)
if is_value_input and is_value_targets:
self._get_inputs = self._get_value_inputs
self._update_metric = self._update_value_metric
elif is_key_value_input and is_key_value_targets:
self._get_inputs = self._get_key_value_inputs
self._update_metric = self._update_key_value_metric
else:
raise NotImplementedError()
self.input_key = input_key
self.target_key = target_key
self._keys = {
**self._convert_keys_to_kv(input_key),
**self._convert_keys_to_kv(target_key),
}
@staticmethod
def _convert_keys_to_kv(keys: Union[str, Iterable[str], Dict[str, str]]) -> Dict[str, str]:
"""
Convert keys to key-value format
Args:
keys: keys to convert
Returns:
dict of keys like {"a": "b"} where "a" is a field name of field in batch,
"b" is a name of the same data for metric
"""
kv_keys = {}
if isinstance(keys, dict):
kv_keys.update(keys)
elif isinstance(keys, str):
kv_keys[keys] = keys
else:
for key in keys:
kv_keys[key] = key
return kv_keys
def _get_value_inputs(self, runner: "IRunner") -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get data from batch in value input case
Args:
runner: current runner
Returns:
tuple of tensor of inputs and tensor of targets
"""
return runner.batch[self.input_key], runner.batch[self.target_key]
def _get_key_value_inputs(self, runner: "IRunner") -> Dict[str, torch.Tensor]:
"""
Get data from batch in key-value input case
Args:
runner: current runner
Returns:
dict of inputs and targets tensors
"""
kv_inputs = {}
for key in self._keys:
kv_inputs[self._keys[key]] = runner.batch[key]
return kv_inputs
def _update_value_metric(
self, value_inputs: Tuple[torch.Tensor, torch.Tensor]
) -> Optional[Dict[str, float]]:
"""
Update metric in value input case
Args:
value_inputs: tuple of input tensor and target tensor
Returns:
result of metric update: None or metric values
"""
return self._metric_update_method(*value_inputs)
def _update_key_value_metric(
self, kv_inputs: Dict[str, torch.Tensor]
) -> Optional[Dict[str, float]]:
"""
Update metric in key-value input case
Args:
kv_inputs: input tensors in key-value format
Returns:
result of metric update: None or metric values
"""
return self._metric_update_method(**kv_inputs)
class BatchMetricCallback(MetricCallback):
"""BatchMetricCallback implements batch-based metrics update and computation over loader
Args:
metric: metric to calculate in callback
input_key: keys of tensors that should be used as inputs in metric calculation
target_key: keys of tensors that should be used as targets in metric calculation
log_on_batch: boolean flag to log computed metrics every batch
"""
def __init__(
self,
metric: ICallbackBatchMetric,
input_key: Union[str, Iterable[str], Dict[str, str]],
target_key: Union[str, Iterable[str], Dict[str, str]],
log_on_batch: bool = True,
) -> None:
"""Init BatchMetricCallback"""
super().__init__(metric=metric, input_key=input_key, target_key=target_key)
assert isinstance(metric, ICallbackBatchMetric)
self.log_on_batch = log_on_batch
self._metric_update_method = self.metric.update_key_value
def on_loader_start(self, runner: "IRunner") -> None:
"""On loader start action: reset metric values
Args:
runner: current runner
"""
self.metric.reset()
def on_batch_end(self, runner: "IRunner") -> None:
"""On batch end action: update metric with new batch data and log it's value if necessary
Args:
runner: current runner
"""
metrics_inputs = self._get_inputs(runner=runner)
metrics = self._update_metric(metrics_inputs)
if self.log_on_batch:
runner.batch_metrics.update(metrics)
def on_loader_end(self, runner: "IRunner") -> None:
"""On loader end action: compute metric values and update runner's loader metrics with it
Args:
runner: current runner
"""
metrics = self.metric.compute_key_value()
metrics = {
k: runner.engine.sync_tensor(torch.tensor(v, device=runner.device), "mean")
for k, v in metrics.items()
}
runner.loader_metrics.update(metrics)
class LoaderMetricCallback(MetricCallback):
"""LoaderMetricCallback implements loader-based metrics update and computation over loader
Args:
metric: metric to calculate in callback
input_key: keys of tensors that should be used as inputs in metric calculation
target_key: keys of tensors that should be used as targets in metric calculation
"""
def __init__(
self,
metric: ICallbackLoaderMetric,
input_key: Union[str, Iterable[str], Dict[str, str]],
target_key: Union[str, Iterable[str], Dict[str, str]],
):
super().__init__(metric=metric, input_key=input_key, target_key=target_key)
assert isinstance(metric, ICallbackLoaderMetric)
def on_loader_start(self, runner: "IRunner") -> None:
"""On loader star action: reset metric values in case of ICallbackLoaderMetric metric
Args:
runner: current runner
"""
self.metric.reset(
num_batches=runner.loader_batch_len, num_samples=runner.loader_sample_len,
)
def on_batch_end(self, runner: "IRunner") -> None:
"""On batch end action: get data from runner's batch and update metrics with it
Args:
runner: current runner
"""
metrics_inputs = self._get_inputs(runner=runner)
self._update_metric(metrics_inputs)
def on_loader_end(self, runner: "IRunner") -> None:
"""On loader end action: compute metric values and update runner's loader metrics with it
Args:
runner: current runner
"""
metrics = self.metric.compute_key_value()
runner.loader_metrics.update(metrics)
__all__ = [
"IMetricCallback",
"BatchMetricCallback",
"LoaderMetricCallback",
]
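# Illustrative usage (a sketch, not part of this module): wiring a batch
# metric callback into a runner. AccuracyMetric and the "logits"/"targets"
# batch keys are assumptions chosen for the example.
#
#   from catalyst.metrics import AccuracyMetric
#   callback = BatchMetricCallback(
#       metric=AccuracyMetric(),
#       input_key="logits",
#       target_key="targets",
#       log_on_batch=True,
#   )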
|
[
"noreply@github.com"
] |
ammaddd.noreply@github.com
|
618fceedc335c21924b67b7086c765fa6ec126f9
|
8783cea4bd7110d51092d355634108dba56ed4c8
|
/GitHub.py
|
8a9dc276e46248dba4f2029ff40470d5da2b72f4
|
[] |
no_license
|
MU-hadi-hub/GitHub_Finder
|
16647f0066196e8e87d6b33b6c76b5a4fed10e8e
|
40bf08675d6e5c976369c4e8aa2d5f20dcd22d1e
|
refs/heads/master
| 2022-12-15T11:14:15.310805
| 2020-09-14T07:10:49
| 2020-09-14T07:10:49
| 295,332,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
from flask import Flask, render_template, request
import requests
app = Flask(__name__)
base_url = "https://api.github.com/users/"
@app.route("/", methods=["GET", "POST"])
def home():
    if request.method == "POST":
        githubname = request.form.get("githubname")
        # Query the GitHub users API for the submitted username
        response = requests.get(base_url + githubname)
        userinfo = response.json()
        return render_template("index.html", profile=userinfo)
    else:
        # Plain GET: render the search form without profile data
        return render_template("index.html")
if __name__ == "__main__":
    app.run(debug=True)
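# Note (illustrative): the GitHub users endpoint returns profile JSON with
# fields such as "login", "avatar_url" and "public_repos", which index.html
# can render from the "profile" context variable.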
|
[
"MU.hadi.hub@gmail.com"
] |
MU.hadi.hub@gmail.com
|
f3fdc8c5013d0d689078773dcf4add4a3e543588
|
59956091467f90c8cbba2ea8c624297e710a18b3
|
/cw10/randomqueue_test.py
|
11071c4f9debc96ad16b77b0f9e8ee6298e0e1fd
|
[] |
no_license
|
gedlewska/Python
|
21fa726b2206c0ca9c1f57d03c5ec41177a234c6
|
8464ef6e4c79261a3ec8c670718d5702610ca25a
|
refs/heads/main
| 2023-03-10T12:43:39.135185
| 2021-02-22T18:16:28
| 2021-02-22T18:16:28
| 307,842,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
from cw10.randomqueue import RandomQueue
import unittest
class TestRandomQueue(unittest.TestCase):
    def setUp(self):
        self.randomqueue = RandomQueue()
    def test_is_empty(self):
        self.assertTrue(self.randomqueue.is_empty())
    def test_is_full(self):
        self.assertFalse(self.randomqueue.is_full())
        self.randomqueue.insert(11)
        self.assertTrue(self.randomqueue.is_full())
    def test_insert(self):
        self.randomqueue.insert(11)
        self.assertFalse(self.randomqueue.is_empty())
    def test_remove(self):
        self.randomqueue.insert(11)
        self.assertEqual(self.randomqueue.remove(), 11)
        self.assertTrue(self.randomqueue.is_empty())
    def test_clear(self):
        self.randomqueue.insert(11)
        self.randomqueue.insert(12)
        self.randomqueue.insert(15)
        self.randomqueue.clear()
        self.assertTrue(self.randomqueue.is_empty())
if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
gedlewska.noreply@github.com
|
da03bbb8b6031b58861a2e4d552f9ed9d547faa0
|
d7f85d2d913c95230b514c54ba1146b31357e066
|
/plumed/wall_0.236/analyse_hills.py
|
3b84d5785ad3622bfab3e2ee155b37a4763e94d8
|
[] |
no_license
|
maxastyler/ProteinSimulation
|
f73b2958404a131c16cc3ae67a9354c684b2662f
|
21549542311d7d41e27693599a585f8cb0285c6f
|
refs/heads/master
| 2021-06-16T07:37:46.429904
| 2017-03-29T19:40:21
| 2017-03-29T19:40:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
my_files = ["fes_124_{}.dat".format(i) for i in range(70, 79)]
def extract_1d_file_data(f_path):
x_data=[]
y_data=[]
with open(f_path) as my_file:
for line in my_file:
ls = line.split()
if ls[0]!='#!':
x_data.append(float(ls[0]))
y_data.append(float(ls[1]))
return x_data, y_data
def extract_2d_file_data(f_path):
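    # The 2D .dat files use a gnuplot-style grid layout: blocks of rows with
    # constant y separated by blank lines, columns = (x, y, value). Collect
    # the blocks, then rebuild the x/y axes and the z grid below.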
sections=[[]]
with open(f_path) as my_file:
for line in my_file:
sp=line.split()
if sp==[]: sections.append([])
else:
if sp[0]!='#!':
sections[-1].append((float(sp[0]), float(sp[1]), float(sp[2])))
xs=[]
ys=[]
for line in sections[0]:
xs.append(line[0])
for sec in sections:
ys.append(sec[0][1])
xs = np.array(xs)
ys = np.array(ys)
xs, ys = np.meshgrid(xs, ys)
zs=[]
for sec in sections:
zs.append([])
for line in sec:
zs[-1].append(line[2])
zs = np.array(zs)
print(zs)
return xs, ys, zs
def show_1d():
xs=[]
ys=[]
for path in my_files:
x,y=extract_1d_file_data(path)
xs.append(x)
ys.append(y)
for i in range(len(xs)):
plt.plot(xs[i], ys[i])
plt.show()
def show_2d():
fig=plt.figure()
ax = fig.gca(projection='3d')
#xs, ys, zs = extract_2d_file_data('fes_124_53.dat')
xs, ys, zs = extract_2d_file_data('histo_all_core.dat')
surf = ax.plot_surface(xs, ys, zs, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.show()
show_1d()
|
[
"mtyler88@gmail.com"
] |
mtyler88@gmail.com
|
7f91092ce3750a44a3d2b5e2fd706151f2379331
|
62a8b1cb7d33a343a2c27b67ba114595c9fedadc
|
/blog/migrations/0001_initial.py
|
a4a9bfe28d3ad06002c63b15c2d0f99728fa1c59
|
[] |
no_license
|
chriswillis1211/my-first-blog
|
4970b26555d38757e0e370759e1c018e00d105d1
|
b7b1d56ac81556ddf84838cd8b429b18fe73366b
|
refs/heads/main
| 2021-12-08T07:24:38.372725
| 2021-12-07T17:49:47
| 2021-12-07T17:49:47
| 209,944,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.2.5 on 2021-12-07 17:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"chriswillis1211@hotmail.co.uk"
] |
chriswillis1211@hotmail.co.uk
|
4d827f134bb66995943a60660d462792a22ebaf9
|
6f8e108c621f384f4b28c47a37878827a9379792
|
/mysite/settings.py
|
5ed3ac128e710adfbd3dab273886ebac179a554e
|
[] |
no_license
|
NiranDx/3SB04
|
d9f4493468a1480618b7980972c2a4d229db8bb9
|
e41e50e83aa89bd16656ff29b9b582db193a9901
|
refs/heads/master
| 2020-04-22T14:12:46.539341
| 2019-02-13T03:47:16
| 2019-02-13T03:47:16
| 170,435,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p5pkypb-8ayzfk994n)awpbggnx8v=yrglc%h)=5ms&dy(0_7&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"niran3688@gmail.com"
] |
niran3688@gmail.com
|
8d35394be58ff6a3bc5775363238dcdd3c4b8683
|
77b865b10ad1c662b4367ec273c3a74fed1c718c
|
/twarc/command.py
|
f28cf0f5b7c75aa182ed41eebef1589b01dccc09
|
[
"MIT"
] |
permissive
|
digilabhh/twarc
|
ddcb8958fc89cd790d189f9a5dcdfb7a4734fbd9
|
c4806765ca4aa50676fe97688b2c2b4b93f93418
|
refs/heads/master
| 2021-08-26T09:26:25.037814
| 2017-11-22T22:34:07
| 2017-11-22T22:34:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,582
|
py
|
from __future__ import print_function
import os
import re
import sys
import json
import codecs
import logging
import datetime
import argparse
import fileinput
from twarc import __version__
from twarc.client import Twarc
from twarc.json2csv import csv, get_headings, get_row
if sys.version_info[:2] <= (2, 7):
# Python 2
get_input = raw_input
str_type = unicode
import ConfigParser as configparser
else:
# Python 3
get_input = input
str_type = str
import configparser
commands = [
"configure",
'dehydrate',
'filter',
'followers',
'friends',
'help',
'hydrate',
'replies',
'retweets',
'sample',
'search',
'timeline',
'trends',
'tweet',
'users',
'version',
]
def main():
parser = get_argparser()
args = parser.parse_args()
command = args.command
query = args.query or ""
logging.basicConfig(
filename=args.log,
level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s"
)
if command == "version":
print("twarc v%s" % __version__)
sys.exit()
elif command == "help" or not command:
parser.print_help()
print("\nPlease use one of the following commands:\n")
for cmd in commands:
print(" - %s" % cmd)
print("\nFor example:\n\n twarc search blacklivesmatter")
sys.exit(1)
t = Twarc(
consumer_key=args.consumer_key,
consumer_secret=args.consumer_secret,
access_token=args.access_token,
access_token_secret=args.access_token_secret,
connection_errors=args.connection_errors,
http_errors=args.http_errors,
config=args.config,
profile=args.profile,
tweet_mode=args.tweet_mode
)
# calls that return tweets
if command == "search":
things = t.search(
query,
since_id=args.since_id,
max_id=args.max_id,
lang=args.lang,
result_type=args.result_type,
geocode=args.geocode
)
elif command == "filter":
things = t.filter(
track=query,
follow=args.follow,
locations=args.locations
)
elif command == "dehydrate":
input_iterator = fileinput.FileInput(
query,
mode='rU',
openhook=fileinput.hook_compressed,
)
things = t.dehydrate(input_iterator)
elif command == "hydrate":
input_iterator = fileinput.FileInput(
query,
mode='rU',
openhook=fileinput.hook_compressed,
)
things = t.hydrate(input_iterator)
elif command == "tweet":
things = [t.tweet(query)]
elif command == "sample":
things = t.sample()
elif command == "timeline":
kwargs = {"max_id": args.max_id, "since_id": args.since_id}
if re.match('^[0-9]+$', query):
kwargs["user_id"] = query
else:
kwargs["screen_name"] = query
things = t.timeline(**kwargs)
elif command == "retweets":
things = t.retweets(query)
elif command == "users":
if os.path.isfile(query):
iterator = fileinput.FileInput(
query,
mode='rU',
openhook=fileinput.hook_compressed,
)
things = t.user_lookup(iterator=iterator)
elif re.match('^[0-9,]+$', query):
things = t.user_lookup(user_ids=query.split(","))
else:
things = t.user_lookup(screen_names=query.split(","))
elif command == "followers":
things = t.follower_ids(query)
elif command == "friends":
things = t.friend_ids(query)
elif command == "trends":
# lookup woeid for geo-coordinate if appropriate
geo = re.match('^([0-9\-\.]+),([0-9\-\.]+)$', query)
if geo:
lat, lon = map(float, geo.groups())
if lat > 180 or lat < -180 or lon > 180 or lon < -180:
parser.error('LAT and LONG must be within [-180.0, 180.0]')
places = list(t.trends_closest(lat, lon))
if len(places) == 0:
parser.error("Couldn't find WOE ID for %s" % query)
query = places[0]["woeid"]
if not query:
things = t.trends_available()
else:
trends = t.trends_place(query)
if trends:
things = trends[0]['trends']
elif command == "replies":
tweet = t.tweet(query)
if not tweet:
parser.error("tweet with id %s does not exist" % query)
things = t.replies(tweet, args.recursive)
elif command == "configure":
t.input_keys()
sys.exit()
else:
parser.print_help()
print("\nPlease use one of the following commands:\n")
for cmd in commands:
print(" - %s" % cmd)
print("\nFor example:\n\n twarc search blacklivesmatter")
sys.exit(1)
# get the output filehandle
if args.output:
fh = codecs.open(args.output, 'wb', 'utf8')
else:
fh = sys.stdout
# optionally create a csv writer
csv_writer = None
if args.format == "csv" and command not in ["filter", "hydrate", "replies",
"retweets", "sample", "search", "timeline", "tweet"]:
parser.error("csv output not available for %s" % command)
elif args.format == "csv":
csv_writer = csv.writer(fh)
csv_writer.writerow(get_headings())
line_count = 0
file_count = 0
for thing in things:
# rotate the files if necessary
if args.output and args.split and line_count % args.split == 0:
file_count += 1
fh = codecs.open(numbered_filepath(args.output, file_count), 'wb', 'utf8')
if csv_writer:
csv_writer = csv.writer(fh)
csv_writer.writerow(get_headings())
line_count += 1
# ready to output
kind_of = type(thing)
if kind_of == str_type:
# user or tweet IDs
print(thing, file=fh)
logging.info("archived %s" % thing)
elif 'id_str' in thing:
# tweets and users
if (args.format == "json"):
print(json.dumps(thing), file=fh)
elif (args.format == "csv"):
csv_writer.writerow(get_row(thing))
logging.info("archived %s", thing['id_str'])
elif 'woeid' in thing:
# places
            print(json.dumps(thing), file=fh)
elif 'tweet_volume' in thing:
# trends
            print(json.dumps(thing), file=fh)
elif 'limit' in thing:
# rate limits
t = datetime.datetime.utcfromtimestamp(
float(thing['limit']['timestamp_ms']) / 1000)
t = t.isoformat("T") + "Z"
logging.warn("%s tweets undelivered at %s",
thing['limit']['track'], t)
if args.warnings:
                print(json.dumps(thing), file=fh)
elif 'warning' in thing:
# other warnings
logging.warn(thing['warning']['message'])
if args.warnings:
                print(json.dumps(thing), file=fh)
def get_argparser():
"""
Get the command line argument parser.
"""
parser = argparse.ArgumentParser("twarc")
parser.add_argument('command', choices=commands)
parser.add_argument('query', nargs='?', default=None)
parser.add_argument("--log", dest="log",
default="twarc.log", help="log file")
parser.add_argument("--consumer_key",
default=None, help="Twitter API consumer key")
parser.add_argument("--consumer_secret",
default=None, help="Twitter API consumer secret")
parser.add_argument("--access_token",
default=None, help="Twitter API access key")
parser.add_argument("--access_token_secret",
default=None, help="Twitter API access token secret")
parser.add_argument('--config',
help="Config file containing Twitter keys and secrets")
parser.add_argument('--profile', default='main',
help="Name of a profile in your configuration file")
parser.add_argument('--warnings', action='store_true',
help="Include warning messages in output")
parser.add_argument("--connection_errors", type=int, default="0",
help="Number of connection errors before giving up")
parser.add_argument("--http_errors", type=int, default="0",
help="Number of http errors before giving up")
parser.add_argument("--max_id", dest="max_id",
help="maximum tweet id to search for")
parser.add_argument("--since_id", dest="since_id",
help="smallest id to search for")
parser.add_argument("--result_type", dest="result_type",
choices=["mixed", "recent", "popular"],
default="recent", help="search result type")
parser.add_argument("--lang", dest="lang",
help="limit to ISO 639-1 language code"),
parser.add_argument("--geocode", dest="geocode",
help="limit by latitude,longitude,radius")
parser.add_argument("--locations", dest="locations",
help="limit filter stream to location(s)")
parser.add_argument("--follow", dest="follow",
help="limit filter to tweets from given user id(s)")
parser.add_argument("--recursive", dest="recursive", action="store_true",
help="also fetch replies to replies")
parser.add_argument("--tweet_mode", action="store", default="extended",
dest="tweet_mode", choices=["compat", "extended"],
help="set tweet mode")
parser.add_argument("--output", action="store", default=None,
dest="output", help="write output to file path")
parser.add_argument("--format", action="store", default="json",
dest="format", choices=["json", "csv"],
help="set output format")
parser.add_argument("--split", action="store", type=int, default=0,
help="used with --output to split into numbered files")
return parser
def numbered_filepath(filepath, num):
path, ext = os.path.splitext(filepath)
return os.path.join('{}-{:0>3}{}'.format(path, num, ext))
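# Example invocations (illustrative, using the flags defined in
# get_argparser above):
#   twarc search blacklivesmatter
#   twarc hydrate ids.txt --output tweets.jsonl
#   twarc timeline nasa --format csv --output nasa.csv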
|
[
"ehs@pobox.com"
] |
ehs@pobox.com
|
77d413c5c0eef082794af937e65645f5aa30638a
|
382df78024f588acea08039a0b0a9e24f297b6a3
|
/python/pandas/extract_tables.py
|
f92de68c5c33a5ca5aea8d8b383f49f542427ed3
|
[] |
no_license
|
id774/sandbox
|
c365e013654790bfa3cda137b0a64d009866d19b
|
aef67399893988628e0a18d53e71e2038992b158
|
refs/heads/master
| 2023-08-03T05:04:20.111543
| 2023-07-31T14:01:55
| 2023-07-31T14:01:55
| 863,038
| 4
| 1
| null | 2020-03-05T06:18:03
| 2010-08-26T01:05:11
|
TeX
|
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
import os
import sys
import pandas as pd
import psycopg2
def connection_config(owner):
return {
'host' : '127.0.0.1',
'port' : '5432',
'database' : owner,
'user' : owner,
'password' : owner
}
def read_by_sql(sql, owner):
connect = connection_config(owner)
conn = psycopg2.connect(**connect)
conn.autocommit = True
df = pd.read_sql(sql=sql, con=conn)
conn.close()
return df
def select_from_db(sid, owner, table_name):
sql = "select * from %s where sid = '%s' order by id asc;" % (table_name, sid)
return read_by_sql(sql, owner)
def main(args):
sid = args[1]
owner = 's' + sid
table_name = args[2]
sys_columns = args[3]
df = select_from_db(sid, owner, table_name)
filename = (table_name + '.csv')
if not sys_columns == 'full':
df = df.drop('id', axis=1)
df = df.drop('created_at', axis=1)
df = df.drop('updated_at', axis=1)
df.to_csv(filename, date_format='%Y/%m/%d %H:%M:%S', index=False, encoding="utf8")
if __name__ == "__main__":
argsmin = 3
version = (3, 0)
    if sys.version_info > version:
if len(sys.argv) > argsmin:
main(sys.argv)
else:
print("This program needs at least %(argsmin)s arguments" %
locals())
else:
print("This program requires python > %(version)s" % locals())
|
[
"idnanashi@gmail.com"
] |
idnanashi@gmail.com
|
4a38e77ca92c404101021135b1f0e1fab42bff26
|
d5fbfeabfc98973493c4a440f877f9f1ea28082c
|
/apps/profesores/urls.py
|
e46f5e2590f0dba9e194850837222706944d4fd5
|
[
"MIT"
] |
permissive
|
eduardoagreda/requisiciones
|
f50621ab8b43a69366df826a4971a0af250f2c52
|
62e68e038d8ce3171b2e3d96194967fa6f93aca0
|
refs/heads/master
| 2023-04-30T12:54:19.614156
| 2022-07-21T19:09:49
| 2022-07-21T19:09:49
| 239,076,724
| 0
| 0
|
MIT
| 2023-04-21T20:46:57
| 2020-02-08T05:46:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
from django.urls import path
from apps.profesores.views import add_profesores, edit_profesores, delete_profesores, lista_profesores, ProfesoresList, DetalleProfesor, DeleteProfesor
urlpatterns = [
path('profesor/crear/', add_profesores, name='add_profesores'),
path('profesor/<int:pk>/editar/', edit_profesores, name='edit_profesores'),
path('profesor/<int:pk>/eliminar/', DeleteProfesor.as_view(), name='delete_profesores'),
path('profesor/<int:pk>/detalle/', DetalleProfesor.as_view(), name='read_profesores'),
path('profesores/listar/', lista_profesores, name='lista_profesores'),
path('api/profesores/listar/', ProfesoresList.as_view()),
]
|
[
"eduardoagreda25@gmail.com"
] |
eduardoagreda25@gmail.com
|
e179a12367d16ae439902f5aa415a644669e6351
|
5182897b2f107f4fd919af59c6762d66c9be5f1d
|
/.history/src/Simulador_20200707153625.py
|
7c3580bee6ea4ec2d8877641f9b5c1abbf1a185d
|
[
"MIT"
] |
permissive
|
eduardodut/Trabalho_final_estatistica_cd
|
422b7e702f96291f522bcc68d2e961d80d328c14
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
refs/heads/master
| 2022-11-23T03:14:05.493054
| 2020-07-16T23:49:26
| 2020-07-16T23:49:26
| 277,867,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,485
|
py
|
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo
import random
class Simulador():
    def __init__(
        self,
        tamanho_matriz, # number of rows and columns of the spherical matrix
        densidade_populacional_inicial, # initial occupancy percentage of the matrix
        percentual_inicial_tipo1, # initial share of the population infected as type 1
        percentual_inicial_tipo2, # initial share of the population infected as type 2
        chance_infeccao, # chance a type 2 infected individual has of infecting a healthy one
        chance_infeccao_tipo2, # chance of an infected individual becoming contagious
        chance_morte, # chance of a type 2 individual dying at the end of an update
        atualizacoes_cura): # number of updates required to cure a type 1 or 2 individual
self.num_atualizacoes = 0
self.indices_infectados_tipo_2 = []
self.indices_infectados_tipo_1 = []
self.matriz_individuos = []
self.fabrica_individuo = Fabrica_individuo(
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,
atualizacoes_cura)
        # object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
self.populacao_inicial = int(densidade_populacional_inicial * tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        # dataframe that will store the results of each update
self.dataframe = pd.DataFrame(dict, index = [0])
class Fabrica_individuo():
def __init__(
self,
        chance_infeccao, # chance a type 2 infected individual has of infecting a healthy one
        chance_infeccao_tipo2, # chance of an infected individual becoming contagious
        chance_morte, # chance of a type 2 individual dying at the end of an update
        atualizacoes_cura): # number of updates required to cure a type 1 or 2 individual
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
def criar_individuo(self, status_inicial):
return Individuo(
status_inicial,
self.chance_infeccao,
self.chance_infeccao_tipo2,
self.chance_morte,
self.atualizacoes_cura)
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.05
percentual_inicial_tipo2 = 0.01
sim = Simulador(
1000,
1,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
ind = sim.fabrica_individuo.criar_individuo(Individuo.MORTO)
dict = {'num_sadios':1,
'num_infect_t1':2,
'num_infect_t2':3,
'num_curados':4,
'num_mortos':5}
s = pd.Series(dict)
sim.dataframe = sim.dataframe.append(s, ignore_index=True)
print(sim.dataframe)
#print(sim.num_inicial_tipo2)
list1 = list(range(10))
random.shuffle(list1)
list2 = list(range(10))
random.shuffle(list2)
print(list1)
print(list2)
print(list(zip(list1,list2)))
zipado = list(zip(list1,list2))
for target_list in zipado[1:4]:
print(target_list)
|
[
"eduardo_dut@edu.unifor.br"
] |
eduardo_dut@edu.unifor.br
|
fbcc55e61dc062eb2d120bc8b093a8ee1a52c4d2
|
b384d8d6043dd7a02dad787603d7d32755167f06
|
/bookAuthorsProj/settings.py
|
feb509473bf40477bf06973cc21eaf503fa71115
|
[] |
no_license
|
paulsmalze/bookAuthorsProj
|
0be9d6cf4c60cedd21e7bf900986bebd305a7637
|
9e24879ce6f0ed343d0b48b50595fae7d3daa400
|
refs/heads/main
| 2023-05-02T03:09:58.284503
| 2021-05-12T23:29:18
| 2021-05-12T23:29:18
| 366,878,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,113
|
py
|
"""
Django settings for bookAuthorsProj project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!8t8-!b)2)x7ild@d9vyv^6+r__wgy=!uyy!^qfp2bd4w^lyjw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookAuthorsProj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookAuthorsProj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"waffwegs@yahoo.com"
] |
waffwegs@yahoo.com
|
a97d176669cdd943083404b6fc57bb5bfb1d37c7
|
e2805f1a6683206ea887f69cca5ed191b1e5e46b
|
/vizdoomaze/envs/vizdoomazetwo1.py
|
1aedef74bfbb0602f42095a5f13043719322061d
|
[
"MIT"
] |
permissive
|
clay-fang/ViZDoomaze
|
2f63972c8fdc816770d02e438e241fa4bed49835
|
5b444f2d861c908c4d96ae374bcce660d364f22e
|
refs/heads/main
| 2023-01-23T15:06:02.625511
| 2020-11-21T16:27:17
| 2020-11-21T16:27:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from vizdoomaze.envs.vizdoomenv import VizdoomEnv
class vizdoomazeTwo1(VizdoomEnv):
def __init__(self):
super(vizdoomazeTwo1, self).__init__(34)
|
[
"zengfanyu_cn@163.com"
] |
zengfanyu_cn@163.com
|
49f9531c53a61c6d08acbffb88b7b874188f7181
|
200b52c1fe17cd47e20522b989adb05bd9cc3cfa
|
/tests/core/test_otel_logger.py
|
75f4ebdc802d5cb7a3f8449881116b90ed90161d
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
fritzwijaya/airflow
|
cbb70fd8832b23e31e4426e72652c76e0df9fba8
|
5a4106d96e670b82004dd6fa221cf3c29b2496a9
|
refs/heads/master
| 2023-07-07T18:46:09.854983
| 2023-06-22T00:52:20
| 2023-06-22T00:52:20
| 206,764,905
| 1
| 0
|
Apache-2.0
| 2019-09-06T09:53:33
| 2019-09-06T09:53:33
| null |
UTF-8
|
Python
| false
| false
| 11,684
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import time
from unittest import mock
from unittest.mock import ANY
import pytest
from opentelemetry.metrics import MeterProvider
from pytest import param
from airflow.exceptions import InvalidStatsNameException
from airflow.metrics.otel_logger import (
OTEL_NAME_MAX_LENGTH,
UP_DOWN_COUNTERS,
MetricsMap,
SafeOtelLogger,
_generate_key_name,
_is_up_down_counter,
full_name,
)
from airflow.metrics.validators import BACK_COMPAT_METRIC_NAMES, MetricNameLengthExemptionWarning
INVALID_STAT_NAME_CASES = [
(None, "can not be None"),
(42, "is not a string"),
("X" * OTEL_NAME_MAX_LENGTH, "too long"),
("test/$tats", "contains invalid characters"),
]
@pytest.fixture
def name():
return "test_stats_run"
class TestOtelMetrics:
def setup_method(self):
self.meter = mock.Mock(MeterProvider)
self.stats = SafeOtelLogger(otel_provider=self.meter)
self.map = self.stats.metrics_map.map
self.logger = logging.getLogger(__name__)
def test_is_up_down_counter_positive(self):
udc = next(iter(UP_DOWN_COUNTERS))
assert _is_up_down_counter(udc)
def test_is_up_down_counter_negative(self):
assert not _is_up_down_counter("this_is_not_a_udc")
def test_exemption_list_has_not_grown(self):
assert len(BACK_COMPAT_METRIC_NAMES) <= 23, (
"This test exists solely to ensure that nobody is adding names to the exemption list. "
"There are 23 names which are potentially too long for OTel and that number should "
"only ever go down as these names are deprecated. If this test is failing, please "
"adjust your new stat's name; do not add as exemption without a very good reason."
)
@pytest.mark.parametrize(
"invalid_stat_combo",
[
*[param(("prefix", name), id=f"Stat name {msg}.") for (name, msg) in INVALID_STAT_NAME_CASES],
*[param((prefix, "name"), id=f"Stat prefix {msg}.") for (prefix, msg) in INVALID_STAT_NAME_CASES],
],
)
def test_invalid_stat_names_are_caught(self, invalid_stat_combo):
prefix = invalid_stat_combo[0]
name = invalid_stat_combo[1]
self.stats.prefix = prefix
with pytest.raises(InvalidStatsNameException):
self.stats.incr(name)
self.meter.assert_not_called()
def test_old_name_exception_works(self, caplog):
name = "task_instance_created-OperatorNameWhichIsSuperLongAndExceedsTheOpenTelemetryCharacterLimit"
assert len(name) > OTEL_NAME_MAX_LENGTH
with pytest.warns(MetricNameLengthExemptionWarning):
self.stats.incr(name)
self.meter.get_meter().create_counter.assert_called_once_with(
name=(full_name(name)[:OTEL_NAME_MAX_LENGTH])
)
def test_incr_new_metric(self, name):
self.stats.incr(name)
self.meter.get_meter().create_counter.assert_called_once_with(name=full_name(name))
def test_incr_new_metric_with_tags(self, name):
tags = {"hello": "world"}
key = _generate_key_name(full_name(name), tags)
self.stats.incr(name, tags=tags)
self.meter.get_meter().create_counter.assert_called_once_with(name=full_name(name))
self.map[key].add.assert_called_once_with(1, attributes=tags)
def test_incr_existing_metric(self, name):
# Create the metric and set value to 1
self.stats.incr(name)
# Increment value to 2
self.stats.incr(name)
assert self.map[full_name(name)].add.call_count == 2
self.meter.get_meter().create_counter.assert_called_once_with(name=full_name(name))
@mock.patch("random.random", side_effect=[0.1, 0.9])
def test_incr_with_rate_limit_works(self, mock_random, name):
# Create the counter and set the value to 1
self.stats.incr(name, rate=0.5)
# This one should not increment because random() will return a value higher than `rate`
self.stats.incr(name, rate=0.5)
# This one should raise an exception for a negative `rate` value
with pytest.raises(ValueError):
self.stats.incr(name, rate=-0.5)
assert mock_random.call_count == 2
assert self.map[full_name(name)].add.call_count == 1
def test_decr_existing_metric(self, name):
expected_calls = [
mock.call(1, attributes=None),
mock.call(-1, attributes=None),
]
# Create the metric and set value to 1
self.stats.incr(name)
# Decrement value to 0
self.stats.decr(name)
self.map[full_name(name)].add.assert_has_calls(expected_calls)
assert self.map[full_name(name)].add.call_count == len(expected_calls)
@mock.patch("random.random", side_effect=[0.1, 0.9])
def test_decr_with_rate_limit_works(self, mock_random, name):
expected_calls = [
mock.call(1, attributes=None),
mock.call(-1, attributes=None),
]
# Create the metric and set value to 1
self.stats.incr(name)
# Decrement the counter to 0
self.stats.decr(name, rate=0.5)
# This one should not decrement because random() will return a value higher than `rate`
self.stats.decr(name, rate=0.5)
# This one should raise an exception for a negative `rate` value
with pytest.raises(ValueError):
self.stats.decr(name, rate=-0.5)
assert mock_random.call_count == 2
# add() is called once in the initial stats.incr and once for the decr that passed the rate check.
self.map[full_name(name)].add.assert_has_calls(expected_calls)
        assert self.map[full_name(name)].add.call_count == 2
def test_gauge_new_metric(self, name):
self.stats.gauge(name, value=1)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
assert self.map[full_name(name)].value == 1
def test_gauge_new_metric_with_tags(self, name):
tags = {"hello": "world"}
key = _generate_key_name(full_name(name), tags)
self.stats.gauge(name, value=1, tags=tags)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
        assert self.map[key].attributes == tags
def test_gauge_existing_metric(self, name):
self.stats.gauge(name, value=1)
self.stats.gauge(name, value=2)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
assert self.map[full_name(name)].value == 2
def test_gauge_existing_metric_with_delta(self, name):
self.stats.gauge(name, value=1)
self.stats.gauge(name, value=2, delta=True)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
assert self.map[full_name(name)].value == 3
@mock.patch("random.random", side_effect=[0.1, 0.9])
@mock.patch.object(MetricsMap, "set_gauge_value")
def test_gauge_with_rate_limit_works(self, mock_set_value, mock_random, name):
# Create the gauge and set the value to 1
self.stats.gauge(name, value=1, rate=0.5)
# This one should not increment because random() will return a value higher than `rate`
self.stats.gauge(name, value=1, rate=0.5)
with pytest.raises(ValueError):
self.stats.gauge(name, value=1, rate=-0.5)
assert mock_random.call_count == 2
assert mock_set_value.call_count == 1
def test_gauge_value_is_correct(self, name):
self.stats.gauge(name, value=1)
assert self.map[full_name(name)].value == 1
def test_timing_new_metric(self, name):
self.stats.timing(name, dt=123)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
def test_timing_new_metric_with_tags(self, name):
tags = {"hello": "world"}
key = _generate_key_name(full_name(name), tags)
self.stats.timing(name, dt=1, tags=tags)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
        assert self.map[key].attributes == tags
def test_timing_existing_metric(self, name):
self.stats.timing(name, dt=1)
self.stats.timing(name, dt=2)
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
assert self.map[full_name(name)].value == 2
# For the four test_timer_foo tests below:
# time.perf_count() is called once to get the starting timestamp and again
# to get the end timestamp. timer() should return the difference as a float.
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_with_name_returns_float_and_stores_value(self, mock_time, name):
with self.stats.timer(name) as timer:
pass
assert isinstance(timer.duration, float)
assert timer.duration == 3.14
assert mock_time.call_count == 2
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_no_name_returns_float_but_does_not_store_value(self, mock_time, name):
with self.stats.timer() as timer:
pass
assert isinstance(timer.duration, float)
assert timer.duration == 3.14
assert mock_time.call_count == 2
self.meter.get_meter().create_observable_gauge.assert_not_called()
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_start_and_stop_manually_send_false(self, mock_time, name):
timer = self.stats.timer(name)
timer.start()
# Perform some task
timer.stop(send=False)
assert isinstance(timer.duration, float)
assert timer.duration == 3.14
assert mock_time.call_count == 2
self.meter.get_meter().create_observable_gauge.assert_not_called()
@mock.patch.object(time, "perf_counter", side_effect=[0.0, 3.14])
def test_timer_start_and_stop_manually_send_true(self, mock_time, name):
timer = self.stats.timer(name)
timer.start()
# Perform some task
timer.stop(send=True)
assert isinstance(timer.duration, float)
assert timer.duration == 3.14
assert mock_time.call_count == 2
self.meter.get_meter().create_observable_gauge.assert_called_once_with(
name=full_name(name), callbacks=ANY
)
|
[
"noreply@github.com"
] |
fritzwijaya.noreply@github.com
|
9b6e1b9ed2222b06ad1c5d29f31f626787e5d320
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/numenta_nupic/nupic-master/tests/swarming/nupic/swarming/experiments/simpleV2/permutations.py
|
523415e121b159a08d2da8e71e0579c92643ec9e
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,481
|
py
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'gym': PermuteEncoder(fieldName='gym', encoderClass='SDRCategoryEncoder', w=7, n=100),
'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteChoices([1, 3]), w=7),
'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteChoices([1, 8]), w=7),
'consumption': PermuteEncoder(fieldName='consumption', encoderClass='ScalarEncoder', maxval=PermuteInt(100, 300, 25), n=PermuteInt(13, 500, 20), w=7, minval=0),
'address': PermuteEncoder(fieldName='address', encoderClass='SDRCategoryEncoder', w=7, n=100),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*consumption.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'prediction:rmse:field=consumption')
minimize = 'prediction:rmse:field=consumption'
def dummyModelParams(perm):
""" This function can be used for Hypersearch algorithm development. When
present, Hypersearch doesn't actually run the CLA model in the OPF, but
instead runs a dummy model. This function returns the dummy model params that
will be used. See the OPFDummyModelRunner class source code (in
nupic.swarming.ModelRunner) for a description of the schema
for the dummy model params.
"""
errScore = 50
errScore += abs(perm['modelParams']['sensorParams']['encoders']\
['consumption']['maxval'] - 250)
errScore += abs(perm['modelParams']['sensorParams']['encoders']\
['consumption']['n'] - 53)
if perm['modelParams']['sensorParams']['encoders']['address'] is not None:
errScore -= 20
if perm['modelParams']['sensorParams']['encoders']['gym'] is not None:
errScore -= 10
# Make models that contain the __timestamp_timeOfDay encoder run a bit
# slower so we can test that we successfully kill running models
waitTime = None
if eval(os.environ.get('NTA_TEST_variableWaits', 'False')):
if perm['modelParams']['sensorParams']['encoders']\
['timestamp_timeOfDay'] is not None:
waitTime = 0.01
dummyModelParams = dict(
metricValue = errScore,
iterations = int(os.environ.get('NTA_TEST_numIterations', '1')),
waitTime = waitTime,
sysExitModelRange = os.environ.get('NTA_TEST_sysExitModelRange',
None),
errModelRange = os.environ.get('NTA_TEST_errModelRange',
None),
jobFailErr = bool(os.environ.get('NTA_TEST_jobFailErr', False))
)
return dummyModelParams
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
    of the variables in the permutations dict. It should return True for a valid
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
limit = int(os.environ.get('NTA_TEST_maxvalFilter', 300))
if perm['modelParams']['sensorParams']['encoders']['consumption']['maxval'] > limit:
return False
return True
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
0af374c75858dd6a5656c55150d2b971842c5bb7
|
9fd1e88ae01342d21e8fca85fae94c5cb3823b88
|
/python/tvm/relay/backend/graph_runtime_factory.py
|
a21a4a89f82a48dd9c648b76bc9791c355a84eaf
|
[
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
gussmith23/tvm
|
1bf32224275e0242287c6b23cc7649d878bf40c3
|
e02dc69fef294eb73dd65d18949ed9e108f60cda
|
refs/heads/master
| 2022-06-13T17:38:50.709735
| 2020-09-14T14:51:06
| 2020-09-14T14:51:06
| 157,422,354
| 3
| 0
|
Apache-2.0
| 2018-11-13T17:51:08
| 2018-11-13T17:51:07
| null |
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph runtime factory."""
import warnings
from tvm._ffi.base import string_types
from tvm._ffi.registry import get_global_func
from tvm.runtime import ndarray
class GraphRuntimeFactoryModule(object):
"""Graph runtime factory module.
This is a module of graph runtime factory
Parameters
----------
graph_json_str : str
The graph to be deployed in json format output by graph compiler.
The graph can contain operator(tvm_op) that points to the name of
PackedFunc in the libmod.
libmod : tvm.Module
The module of the corresponding function
libmod_name: str
The name of module
params : dict of str to NDArray
The parameters of module
"""
def __init__(self, graph_json_str, libmod, libmod_name, params):
assert isinstance(graph_json_str, string_types)
fcreate = get_global_func("tvm.graph_runtime_factory.create")
args = []
for k, v in params.items():
args.append(k)
args.append(ndarray.array(v))
self.module = fcreate(graph_json_str, libmod, libmod_name, *args)
self.graph_json = graph_json_str
self.lib = libmod
self.libmod_name = libmod_name
self.params = params
self.iter_cnt = 0
def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
return self.module.export_library(file_name, fcompile, addons, **kwargs)
# Sometimes we want to get params explicitly.
# For example, we want to save its params value to
# an independent file.
def get_params(self):
return self.params
def get_json(self):
return self.graph_json
def get_lib(self):
return self.lib
def __getitem__(self, item):
return self.module.__getitem__(item)
def __iter__(self):
warnings.warn(
"legacy graph runtime behaviour of producing json / lib / params will be "
"removed in the next release ",
DeprecationWarning,
2,
)
return self
def __next__(self):
if self.iter_cnt > 2:
raise StopIteration
objs = [self.graph_json, self.lib, self.params]
obj = objs[self.iter_cnt]
self.iter_cnt += 1
return obj
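# --- Usage sketch (added illustration, not part of the original file) ---
# In TVM versions that ship this class, relay.build() returns a factory module
# of this kind; the names below are illustrative assumptions:
#
#   factory = tvm.relay.build(mod, target="llvm", params=params)
#   factory.export_library("deploy.so")       # package graph, lib and params into one artifact
#   graph_json, lib, params = factory         # legacy 3-tuple unpacking (deprecated, see __iter__)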
|
[
"noreply@github.com"
] |
gussmith23.noreply@github.com
|
510c11051cb29e8e52223988ca08602d27bd5b68
|
cf9cd951163a2fac9d593c2448e53c0356ed1f67
|
/credentials_test.py
|
03ef27df2d7f89f1043efe8b899a32de72d99517
|
[
"MIT"
] |
permissive
|
sheillanjoroge/Password-Locker
|
e295b512be17e72c62a074ee1b2596d325176be7
|
0c7125b44002751960bbab7590bab51c3423123c
|
refs/heads/master
| 2023-01-19T02:01:52.652003
| 2020-11-23T12:28:19
| 2020-11-23T12:28:19
| 314,545,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,899
|
py
|
import unittest # Importing the unittest module
from credentials import Credential # Importing the Credential class (matches the usage below)
from user import User
import csv, os
class TestCredentials(unittest.TestCase):
    '''
    Test class that defines test cases for the Credential class.
    '''
    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
self.test_user_credential = Credential('sheillan.njoroge@gmail.com', 'instagram', 'zay22')
self.test_other_user_credential = Credential('sheillan.njoroge@gmail.com', 'twitter', 'zay22')
def tearDown(self):
'''
Delete the credentials database after every test.
'''
if os.path.isfile(Credential.database):
os.remove(Credential.database)
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual('sheillan.njoroge@gmail.com', self.test_user_credential.get_email())
self.assertEqual('Instagram', self.test_user_credential.get_account())
self.assertEqual('zay22', self.test_user_credential.get_password())
def test_create_credentials_file(self):
'''
Test that the db is created successfully.
'''
with open(Credential.database, 'w+') as test_file:
file_exists = os.path.isfile(Credential.database)
self.assertTrue(file_exists)
def test_record_a_credential(self):
'''
Test that a credential is always created and stored in the db successfully
'''
self.test_user_credential.create_credential()
self.test_other_user_credential.create_credential()
with open(Credential.database, 'r')as read_file:
fields = ['account', 'email', 'password']
data_read = csv.DictReader(read_file, fields, lineterminator='\n')
self.add_success = False
counter = 0
for line in data_read:
counter += 1
if line['email'] == 'sheillan.njoroge@gmail.com' and line['account'] == 'Twitter' and line['password'] == 'zay22':
self.add_success = True
self.assertTrue(self.add_success)
def test_check_account_exist(self):
'''
From the instance method. Search the db and ensure an account exists
'''
self.test_user_credential.create_credential()
account_exist = self.test_user_credential.check_account_exist()
self.assertTrue(account_exist)
def test_check_an_account_exist(self):
'''
From the class method. Search the db and ensure an account exists
'''
self.test_user_credential.create_credential()
account_exist = Credential.check_an_account_exist(self.test_user_credential.get_email(), self.test_user_credential.get_account())
self.assertTrue(account_exist)
def test_check_account_exist_without_db(self):
'''
Tests and ensures check_an_account_exist() does not return True if the db is nonexistent.
'''
account_exist = Credential.check_an_account_exist(self.test_user_credential.get_email(), self.test_user_credential.get_account())
self.assertFalse(account_exist)
def test_randomizer(self):
'''
Tests and ensures that randomizer() generates legitimate passwords
'''
random_password = Credential.randomizer()
self.assertGreater(len(random_password), 7)
self.assertLess(len(random_password), 9)
def test_display_accounts(self):
self.test_other_user_credential.create_credential()
self.test_user_credential.create_credential()
list_of_credentials = Credential.display_accounts(self.test_user_credential.get_email())
for a_credential in list_of_credentials:
a_credential_exist = Credential.check_an_account_exist(a_credential['email'], a_credential['account'] )
if not a_credential_exist:
return False
self.assertTrue(a_credential_exist)
def test_delete_account(self):
'''
Tests and ensures delete op occurs successfully.
'''
self.test_user_credential.create_credential()
account_created = Credential.check_an_account_exist(self.test_user_credential.get_email(), self.test_user_credential.get_account())
is_deleted = Credential.delete_account(self.test_other_user_credential.get_email(), self.test_other_user_credential.get_account())
list_of_credentials = Credential.display_accounts(self.test_user_credential.get_email())
account_exist = False
for account in list_of_credentials:
if account['account'] == self.test_user_credential.get_account():
account_exist = False
else:
account_exist = True
self.assertTrue(account_created)
self.assertTrue(is_deleted)
self.assertFalse(account_exist)
if __name__ == '__main__':
unittest.main()
|
[
"sheillan.njoroge@gmail.com"
] |
sheillan.njoroge@gmail.com
|
64aa900a7c594d4a3c56b09ff3fc0c6c91cffed0
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/csrgxtu/maxent/src/soccer/TeamTechStatisticsParser12.py
|
be627e95cd17a03c757988d05f2995d37c93b599
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664
| 2016-09-12T12:38:32
| 2016-09-12T12:38:32
| 65,951,766
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,682
|
py
|
#!/usr/bin/env python
#coding=utf-8
#
# Author: Archer Reilly
# Date: 12/Aug/2014
# File: TeamTechStatisticsParser12.py
# Description: parser for 12 season soccer data on QQ sports
# Website: http://csrgxtu.blog.com
#
# Produced By CSRGXTU
from Parser import Parser
class TeamTechStatisticsParser12(Parser):
def __init__(self, html):
Parser.__init__(self, html)
# getLeagues
# get leauges of the game
#
# @return leagues (list(string)) or None
def getLeagues(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[1]/text()'
leagues = self.getTree().xpath(xpathExp)
if len(leagues) == 0:
return None
else:
return leagues
# getWins
# get the number of games won
#
# @return rtvs (list(string)) or None
def getWins(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[2]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getFlats
# get the number of flats of the game
#
# @return rtvs (list(string)) or None
def getFlats(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[3]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getLoses
# get the number of loses
#
# @return rtvs (list(string)) or None
def getLoses(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[4]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getGoals
# get number of goals
#
# @return rtvs (list(string)) or None
def getGoals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[5]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getFumbles
# get number of fumbles
#
# @return rtvs (list(string)) or None
def getFumbles(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[6]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getAssistances
# get number of assistances
#
# @return rtvs (list(string)) or None
def getAssistances(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[7]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getPasses
# get number of passes
#
# @return rtvs (list(string)) or None
def getPasses(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[8]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getSteals
# get number of steals
#
# @return rtvs (list(string)) or None
def getSteals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[9]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getOffsides
# get number of offsides
#
# @return rtvs (list(string)) or None
def getOffsides(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[10]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getFouls
# get number of fouls
#
# @return rtvs (list(string)) or None
def getFouls(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[11]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getRedCards
# get number of red cards
#
# @return rtvs (list(string)) or None
def getRedCards(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[12]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getYellowCards
# get number of yellow cards
#
# @return rtvs (list(string)) or None
def getYellowCards(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[1]/table/tbody/tr/td[13]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getShoots
# get number of shoots
#
# @return rtvs (list(string)) or None
def getShoots(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[2]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getShootOnGoals
# get number of shoot on goals
#
# @return rtvs (list(string)) or None
def getShootOnGoals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[3]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getShootOnGoalRates
# get number of rate of shoot on goal
#
# @return rtvs (list(string)) or None
def getShootOnGoalRates(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[4]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getSuccessRates
# get rates of success
#
# @return rtvs (list(string)) or None
def getSuccessRates(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[5]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getHeadGoals
# get number of head goals
#
# @return rtvs (list(string)) or None
def getHeadGoals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[6]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getDirectFreeGoals
# get number of direct free goals
#
# @return rtvs (list(string)) or None
def getDirectFreeGoals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[7]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getPenaltyKicks
# get number of penalty kicks
#
# @return rtvs (list(string)) or None
def getPenaltyKicks(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[8]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getPenaltyKickGoals
# get number of penalty kick goals
#
# @return rtvs (list(string)) or None
def getPenaltyKickGoals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[2]/table/tbody/tr/td[9]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getIntercepts
# get number of intercepts
#
# @return rtvs (list(string)) or None
def getIntercepts(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[2]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getRescues
# get number of rescues
#
# @return rtvs (list(string)) or None
def getRescues(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[3]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getHeadRescues
# get number of head rescues
#
# @return rtvs (list(string)) or None
def getHeadRescues(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[4]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getBackFieldRescues
# get number of back field rescues
#
# @return rtvs (list(string)) or None
def getBackFieldRescues(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[5]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getSuccessHeaders
# get number of success headers
#
# @return rtvs (list(string)) or None
def getSuccessHeaders(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[6]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getFailHeaders
# get number of fail headers
#
# @return rtvs (list(string)) or None
def getFailHeaders(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[7]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
# getOwnGoals
# get number of own goals
#
# @return rtvs (list(string)) or None
def getOwnGoals(self):
xpathExp = '//*[@id="table1_con_1" and @class="table"]/div[2]/div[3]/table/tbody/tr/td[8]/text()'
rtvs = self.getTree().xpath(xpathExp)
if len(rtvs) == 0:
return None
else:
return rtvs
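# Refactoring sketch (added illustration, not part of the original class): the
# getters above differ only in the div/td indices of the XPath, so they could
# be generated from one helper, e.g.:
#
#   def _column(self, div_index, td_index):
#       exp = ('//*[@id="table1_con_1" and @class="table"]/div[2]/div[%d]'
#              '/table/tbody/tr/td[%d]/text()') % (div_index, td_index)
#       rtvs = self.getTree().xpath(exp)
#       return rtvs if rtvs else None
#
# so that, for instance, getWins() becomes self._column(1, 2).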
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
2a04a27974207d0fc38a3c7f60d7127baf69a5c9
|
108ec2b047320973e288acd8ed000623809e16a8
|
/spider_2.py
|
24108839c33f2b732348c7e0295f33e02898534c
|
[] |
no_license
|
Drozdovsimple/project_first
|
f3bdbf7b9e621faf01ce90711c374cceb741a23c
|
56497ddebc75078b7d166a57033f3c4e08064373
|
refs/heads/main
| 2023-07-31T02:16:59.339806
| 2021-09-29T09:46:08
| 2021-09-29T09:46:08
| 411,007,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
import turtle
turtle.speed(1)
n = 12
angle = -360/n
for i in range(n):
turtle.rt(angle)
turtle.fd(100)
turtle.stamp()
turtle.rt(180)
turtle.fd(100)
turtle.rt(180)
turtle.done() # done
|
[
"andrei_drozdov@mail.ru"
] |
andrei_drozdov@mail.ru
|
b83ccba0ed57f84b1e9a53f7ec815d13c68f215a
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/materialistic/testcase/firstcases/testcase3_007.py
|
52dd404a749a28a977501b6a4e1979e9ebd31897
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'io.github.hidroh.materialistic',
'appActivity' : 'io.github.hidroh.materialistic.LauncherActivity',
'resetKeyboard' : True,
'androidCoverage' : 'io.github.hidroh.materialistic/io.github.hidroh.materialistic.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase007
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"io.github.hidroh.materialistic:id/button_more\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").instance(32)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_007\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'io.github.hidroh.materialistic'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
62d9d5745fa68b3cda5043e23597c7a34fb9d414
|
07f92805a75dc91b8be2ac14c238394245eda9ea
|
/Python生物信息学数据管理/python-for-biologists/05-biopython/19-sequences/create_seq_object.py
|
d0c56a09417a2a344a4b6659e10b0958841ddb31
|
[] |
no_license
|
08zhangyi/Some-thing-interesting-for-me
|
6ea7366ef1f0812397300259b2e9d0e7217bcba0
|
f4cbda341ada98753c57a3ba07653163522dd023
|
refs/heads/master
| 2023-01-11T22:54:03.396911
| 2023-01-06T05:47:41
| 2023-01-06T05:47:41
| 136,426,995
| 7
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
'''
Use a Seq object for a single sequence like a string.
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 19.3.1 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
from Bio import Seq
my_seq = Seq.Seq("AGCATCGTAGCATGCAC")
print my_seq[0]
print my_seq[0:3]
print my_seq.split('T')
print my_seq.count('A')
print my_seq.count('A') / float(len(my_seq))
|
[
"395871987@qq.com"
] |
395871987@qq.com
|
7aa09d855612bb3c33141e022c704ad1745546c8
|
f4505b46687929b9e749f54dc84f5d40b81ca5f2
|
/web_intlTelInput_widget/__openerp__.py
|
78d024a752a9d530425b1d0209e9303923682c4a
|
[] |
no_license
|
linyaohui/openerp-custom-widget-intlTelInput
|
49d837c888e5fc8682da578eac62143000034972
|
72d69e5d7cf6300f78441d4bcfe3f9187a4a4b28
|
refs/heads/master
| 2020-06-01T04:21:00.040191
| 2014-06-13T23:18:40
| 2014-06-13T23:18:40
| 20,798,334
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# QQ field
# Copyright 2013 wangbuke <wangbuke@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'OpenERP intlTelInput',
'version': '1.0',
'summary': 'intlTelInput',
'sequence': '19',
'category': 'Tools',
'complexity': 'easy',
'description': """
A custom widget for entering international telephone numbers. It adds a flag dropdown to any input, listing every country and its international dialling code next to its flag.
""",
'depends' : ['web'],
'js': ['static/src/js/*.js'],
'css': ['static/src/css/*.css'],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'auto_install': False,
'application': True,
'images': [
'images/int.jpg',
],
}
|
[
"191433230@qq.com"
] |
191433230@qq.com
|
181944e07a33d20875f97362f7c25701561ff33d
|
dbb1dbdb6816fa7a81122688bde7434d5cadb4c3
|
/endgamelivraria/settings.py
|
d0489147a250615c0c3a051be562369e41852ed3
|
[] |
no_license
|
ClaudioGuitta/Livrary
|
ab291c85aecf8acd9fdeea7483dc125bc707e47c
|
d8714a491fca4c57ebf200aca78b8316610d42ef
|
refs/heads/master
| 2023-04-27T22:21:45.898580
| 2019-06-19T19:03:21
| 2019-06-19T19:03:21
| 192,788,646
| 0
| 0
| null | 2023-04-21T20:34:42
| 2019-06-19T19:00:45
|
CSS
|
UTF-8
|
Python
| false
| false
| 3,615
|
py
|
"""
Django settings for endgamelivraria project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'en^efdane%su+t)h-254i*f!#gus%w-m%&^3rw#5t0%c)0c4vk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'enter',
'bootstrapform',
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'endgamelivraria.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'endgamelivraria.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'Livraria',
'USER': 'postgres',
'PASSWORD': 'hcss1995.',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
import django_heroku
django_heroku.settings(locals())
|
[
"h.claudio17@gmail.com"
] |
h.claudio17@gmail.com
|
7c0ee367adbc616dcbbcd9d86e1681ba6efad77b
|
ed153015653fd3daaf6de8cd92eecc7c7ab1079b
|
/tests/test_wikipedia.py
|
15ffb21984a82d10a6a8f4b7032a97783d25d367
|
[
"MIT"
] |
permissive
|
kersulis/python-test
|
a77b726236e6a1db42a7c95b30c9e584eecaf877
|
6ae135d232c261a3e577338160467b4b4dd17d0d
|
refs/heads/master
| 2022-09-29T03:36:33.428852
| 2020-06-04T16:11:24
| 2020-06-04T16:11:24
| 269,168,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
"""Test wikipedia module."""
from unittest.mock import Mock
import click
import pytest
from python_test import wikipedia
def test_random_page_uses_given_language(mock_requests_get: Mock) -> None:
"""It uses the given language."""
wikipedia.random_page(language="de")
args, _ = mock_requests_get.call_args
assert "de.wikipedia.org" in args[0]
def test_random_page_returns_page(mock_requests_get: Mock) -> None:
"""It returns a Page resource."""
page = wikipedia.random_page()
assert isinstance(page, wikipedia.Page)
def test_random_page_handles_validation_errors(mock_requests_get: Mock) -> None:
"""It handles validation errors."""
mock_requests_get.return_value.__enter__.return_value.json.return_value = None
with pytest.raises(click.ClickException):
wikipedia.random_page()
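# Note (added comment): the mock_requests_get fixture is assumed to come from the
# project's conftest.py (not shown here); it patches the requests.get call made
# inside wikipedia.random_page so that no real HTTP request is issued.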
|
[
"kersulis@umich.edu"
] |
kersulis@umich.edu
|
9ee0203cedd7090f2d22180978595571a0bfac0e
|
9c65f078879089c625c489a955a3f7a31c877264
|
/test/ising1d_version1.py
|
d426c4834bcd933fc43080f6e051c26849a1f441
|
[] |
no_license
|
Icespellcaster/Amateur_Exerxise
|
62d400e693fd6fb2b19677af643bde8cc5db46ee
|
056e97c2e2db480ff4e4e834535f4faddf06f9bb
|
refs/heads/master
| 2021-01-10T17:56:20.727129
| 2019-11-29T05:46:27
| 2019-11-29T05:46:27
| 54,821,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,617
|
py
|
"""
尝试使用强化学习求解一维横场ising模型的基态能
H = -J * sigma_z^i sigma_z^{i+1} - h * sigma_x^i 的基态能
"""
import os
os.chdir('./test')
import numpy as np
import pandas as pd
import time
np.random.seed(2)
L = 10 # The length of chain
CHANNEL = 2
ACTIONS = ['up', 'down']
MAX_EPISODES = 130
FRESH_TIME = 0.3
ITER = 0
class TFI_1d:
def __init__(self, L, channel, iter):
self.L = L
self.channel = channel
self.iter = iter
self.init_config()
def init_config(self):
self.config = np.zeros((self.L, self.channel))
x = np.random.randint(2, size=(self.L))
self.config[:,0] = x
self.config[:,1] = 1 - x
def local_E_Ising(self, h=1):
"""
H = -J sz_i sz_j - h sx_i
"""
L, channel = self.config.shape
localE = 0.
for i in range(L-1):
temp = self.config[i, :].dot(self.config[i+1, :])
localE -= 2 * (temp - 0.5)
# Periodic Boundary condition
temp = self.config[0, :].dot(self.config[-1, :])
localE -= 2 * (temp - 0.5)
#########################################
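# NOTE (added comment): only the diagonal sigma_z sigma_z coupling is summed
# above; the transverse-field term (h * sigma_x) is not evaluated, so the
# parameter h is currently unused.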
return localE
def init_env(self):
env_list = []
for i in range(self.config.shape[0]):
if self.config[i, 0] == 1:
env_list += ['↑']
else:
env_list += ['↓']
return env_list
def choose_action(self, state):
# This is how to choose an action
if self.config[state, 0] == 1:
action_name = ACTIONS[1]
else:
action_name = ACTIONS[0]
return action_name
def get_env_feedback(self, S, A):
S_ = ((S + 1) % self.L)
oldlocalE = self.local_E_Ising()  # local_E_Ising reads self.config; its only argument is the field strength h
if A == 'up':
self.config[S, 0] = 1
self.config[S, 1] = 0
localE = self.local_E_Ising()
if localE < oldlocalE:
pass
elif localE == oldlocalE:
if self.iter % 2 == 1:
self.config[S, 0] = 0
self.config[S, 1] = 1
localE = oldlocalE
self.iter += 1
else:
self.config[S, 0] = 0
self.config[S, 1] = 1
localE = oldlocalE
else:
self.config[S, 0] = 0
self.config[S, 1] = 1
localE = self.local_E_Ising()
if localE < oldlocalE:
pass
else:
self.config[S, 0] = 1
self.config[S, 1] = 0
localE = oldlocalE
return S_, localE
def update_env(self, episode):
# This is how environment be updated
env_list = self.init_env()
interaction = ''.join(env_list)
print('\r{}'.format(interaction), end='')
time.sleep(FRESH_TIME)
def main():
ising = TFI_1d(L, CHANNEL, ITER)
localE = ising.local_E_Ising()
print('init localE: ', localE)
# main part of RL loop
init_env_list = ising.init_env()
print(''.join(init_env_list))
S = 0
for episode in range(MAX_EPISODES):
A = ising.choose_action(S)
S_, localE = ising.get_env_feedback(S, A)
S = S_
print('##########################')
print('episode: %d' % episode)
ising.update_env(episode)
print()
print('localE: ', localE)
if episode % 10 == 0:
input()
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
Icespellcaster.noreply@github.com
|
90a5ed2030fd2ea016d9a0b41c7f2a9aca52c1fb
|
c3c563657b58032495a504870250459d9df1b633
|
/pyptv/text_box_overlay.py
|
cd6a4806f794b5ef620f7108795a80d4b73bfa00
|
[] |
no_license
|
research-software-company/pyptv
|
0e058ed46202dad593fee703d8bf6bea2f45ac42
|
14381e15cf77af6ce96cabac13d29691175f26d9
|
refs/heads/master
| 2020-08-28T22:51:48.699588
| 2019-10-23T18:15:29
| 2019-10-23T18:15:29
| 217,844,792
| 0
| 1
| null | 2019-10-27T11:27:28
| 2019-10-27T11:27:28
| null |
UTF-8
|
Python
| false
| false
| 4,341
|
py
|
""" Defines the TextBoxOverlay class.
"""
# Enthought library imports
from enable.api import ColorTrait, AbstractOverlay, Label, black_color_trait
from kiva.trait_defs.kiva_font_trait import KivaFont
from traits.api import Any, Enum, Int, Str, Float, Trait
# Local, relative imports
class TextBoxOverlay(AbstractOverlay):
""" Draws a box with a text in it
"""
#### Configuration traits ##################################################
# The text to display in the box.
text = Str
# The font to use for the text.
font = KivaFont("swiss 12")
# The background color for the box (overrides AbstractOverlay).
bgcolor = ColorTrait("transparent")
# The alpha value to apply to **bgcolor**
alpha = Trait(1.0, None, Float)
# The color of the outside box.
border_color = ColorTrait("dodgerblue")
# The color of the text in the tooltip
text_color = black_color_trait
# The thickness of box border.
border_size = Int(1)
# Number of pixels of padding around the text within the box.
padding = Int(5)
# Alignment of the text in the box:
#
# * "ur": upper right
# * "ul": upper left
# * "ll": lower left
# * "lr": lower right
align = Enum("ur", "ul", "ll", "lr")
# This allows subclasses to specify an alternate position for the root
# of the text box. Must be a sequence of length 2.
alternate_position = Any
#### Public 'AbstractOverlay' interface ####################################
def overlay(self, component, gc, view_bounds=None, mode="normal"):
""" Draws the box overlaid on another component.
Overrides AbstractOverlay.
"""
if not self.visible:
return
# draw the label on a transparent box. This allows us to draw
# different shapes and put the text inside it without the label
# filling a rectangle on top of it
label = Label(text=self.text, font=self.font, bgcolor="transparent",
color=self.text_color, margin=5)
width, height = label.get_width_height(gc)
valign, halign = self.align
if self.alternate_position:
x, y = self.alternate_position
if valign == "u":
y += self.padding
else:
y -= self.padding + height
if halign == "r":
x += self.padding
else:
x -= self.padding + width
else:
if valign == "u":
y = component.y2 - self.padding - height
else:
y = component.y + self.padding
if halign == "r":
x = component.x2 - self.padding - width
else:
x = component.x + self.padding
# attempt to get the box entirely within the component
if x + width > component.width:
x = max(0, component.width - width)
if y + height > component.height:
y = max(0, component.height - height)
elif y < 0:
y = 0
# apply the alpha channel
color = self.bgcolor_
if self.bgcolor != "transparent":
if self.alpha:
color = list(self.bgcolor_)
if len(color) == 4:
color[3] = self.alpha
else:
color += [self.alpha]
gc.save_state()
try:
gc.translate_ctm(x, y)
gc.set_line_width(self.border_size)
gc.set_stroke_color(self.border_color_)
gc.set_fill_color(color)
# draw a rounded rectangle
x = y = 0
end_radius = 8.0
gc.begin_path()
gc.move_to(x + end_radius, y)
gc.arc_to(x + width, y,
x + width,
y + end_radius, end_radius)
gc.arc_to(x + width,
y + height,
x + width - end_radius,
y + height, end_radius)
gc.arc_to(x, y + height,
x, y,
end_radius)
gc.arc_to(x, y,
x + width + end_radius,
y, end_radius)
gc.draw_path()
label.draw(gc)
finally:
gc.restore_state()
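# --- Usage sketch (added illustration; the `plot` object is an assumption) ---
# A TextBoxOverlay is typically appended to an enable/chaco component's
# overlay list:
#
#   overlay = TextBoxOverlay(component=plot, text="hello", align="ur")
#   plot.overlays.append(overlay)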
|
[
"alex.liberzon@gmail.com"
] |
alex.liberzon@gmail.com
|
3e6da70e68d1cfa5c124924ea3a40a6de7ca802f
|
09cc2951898133eb44814d69f7aa023830d146bc
|
/mainapp/migrations/0001_initial.py
|
0216240137f814be63c8391dea174924bbb3e91f
|
[] |
no_license
|
malfin/kpk_django
|
0c82c80e32cd0195c293bd2c77b1789e7e2c30e3
|
c36a669c7b3b83f349e9cdd65bf904fcd0b22075
|
refs/heads/master
| 2023-02-14T20:13:15.236281
| 2021-01-14T04:14:39
| 2021-01-14T04:14:39
| 305,098,243
| 0
| 0
| null | 2020-11-14T12:36:05
| 2020-10-18T12:37:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,359
|
py
|
# Generated by Django 2.2 on 2020-10-24 05:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
options={
'verbose_name': 'Категорию',
'verbose_name_plural': 'Категории',
},
),
migrations.CreateModel(
name='Web',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('desc', models.TextField(blank=True)),
('prise', models.IntegerField(default=0)),
('disk', models.CharField(max_length=128)),
('site', models.CharField(max_length=128)),
('db', models.CharField(max_length=128)),
('cpu', models.CharField(max_length=128)),
('traffic', models.CharField(max_length=128)),
('location', models.CharField(max_length=128)),
('ddos', models.CharField(max_length=128)),
('is_active', models.BooleanField(default=False)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Category')),
],
options={
'verbose_name': 'Веб',
'verbose_name_plural': 'Веб',
},
),
migrations.CreateModel(
name='Vds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('desc', models.TextField(blank=True)),
('prise', models.IntegerField(default=0)),
('cpu', models.CharField(max_length=128)),
('core', models.CharField(max_length=128)),
('ram', models.CharField(max_length=128)),
('disk', models.CharField(max_length=128)),
('ipv4', models.CharField(max_length=128)),
('traffic', models.CharField(max_length=128)),
('location', models.CharField(max_length=128)),
('ddos', models.CharField(max_length=128)),
('is_active', models.BooleanField(default=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Category')),
],
options={
'verbose_name': 'Виртуальный сервер',
'verbose_name_plural': 'Виртуальные сервера',
},
),
migrations.CreateModel(
name='Dedic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('desc', models.TextField(blank=True)),
('prise', models.IntegerField(default=0)),
('prise_day', models.IntegerField(default=0)),
('cpu', models.CharField(max_length=128)),
('core', models.CharField(max_length=128)),
('ram', models.CharField(max_length=128)),
('disk', models.CharField(max_length=128)),
('ipv4', models.CharField(max_length=128)),
('traffic', models.CharField(max_length=128)),
('location', models.CharField(max_length=128)),
('ddos', models.CharField(max_length=128)),
('is_active', models.BooleanField(default=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Category')),
],
options={
'verbose_name': 'Выделенный сервер',
'verbose_name_plural': 'Выделенные сервера',
},
),
]
|
[
"aiex.0115970@gmail.com"
] |
aiex.0115970@gmail.com
|
7c7662b9b3719388a203703992543a961c451132
|
14b519323f3b80ed021e463cac26c227f7e6409f
|
/Outlier_detection/venv/Include/count_dist.py
|
70471aa8f2612a713dad0315569c6aa08b26ad88
|
[] |
no_license
|
zmzmk11/shangquan_cluster_dbscan
|
1b24a2ae0f94c567aba894c601d0c399057d743e
|
273cb0a4e1c45f8a40970bc148a82f717decb1c5
|
refs/heads/master
| 2020-05-01T13:48:04.748370
| 2019-03-25T02:53:19
| 2019-03-25T02:53:19
| 177,502,295
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# -*- coding: gb18030 -*-
import numpy
import math
R = 6371.004
def count_dist(latA, lonA, latB, lonB):
    # Spherical law of cosines; the sample inputs below are in degrees, so
    # convert to radians before taking sines/cosines.
    latA, lonA, latB, lonB = [math.radians(v) for v in (latA, lonA, latB, lonB)]
    C = math.sin(latA) * math.sin(latB) + math.cos(latA) * math.cos(latB) * math.cos(lonA - lonB)
    C = min(1.0, max(-1.0, C))  # guard against floating-point values just outside [-1, 1]
    Distance = R * numpy.arccos(C)
    return Distance
if __name__ == '__main__':
dist = count_dist(31.547256, 120.439034, 31.547392, 120.438675)
print(dist)
|
[
"413957463@qq.com"
] |
413957463@qq.com
|
f411d75b1539b1f48e8ccdc3052744235353af54
|
6a805efec898005ad623c795729d233e2bc6a6e3
|
/python/examples/example13_overwrite_topic_model.py
|
385424cf971dcdf9546b9de299097c999dd28de5
|
[
"BSD-3-Clause"
] |
permissive
|
mindis/bigartm
|
8efb61c2d93d8b4e81aff368e86368f272fcf9fd
|
d14b051e368286baa1afe4ce6e723e30ad2ae039
|
refs/heads/master
| 2021-01-18T07:30:48.678763
| 2015-09-04T18:55:17
| 2015-09-04T18:55:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,655
|
py
|
# This advanced example demonstrates how to upload existing Phi matrix back into BigARTM.
# There are several gotchas that you need to be aware about:
# 1. You should learn the difference between pwt-requests and nwt-requests.
# This is explained in the comments further below.
# 2. ArtmOverwriteTopicModel only schedules the update of the topic model.
# To apply it you need to call WaitIdle() and Synchronize().
# 3. For pwt-requests you should use InvokeRegularizers=False in Synchronize() call at step#2.
# For nwt-requests you should use InvokeRegularizers=True in Synchronize() call at step#2.
import glob
import artm.messages_pb2
import artm.library
# Load one test batch
batches_disk_path = 'kos'
batches = glob.glob(batches_disk_path + "/*.batch")
test_batch = artm.library.Library().LoadBatch(batches[0])
# Pwt and Nwt requests represent two different ways of retrieving a topic model.
# - Pwt represents a probability distribution p(w|t) over words for each topic.
#   These values are normalized so that sum_w p(w|t) = 1.
#   This data is sufficient to infer the topic distribution p(t|d) for new documents,
#   however it is not sufficient to continue tuning the topic model.
# - Nwt represents the internal counters (n_wt) of the topic model.
#   This data has the same layout as the p(w|t) distribution,
#   e.g. there is one scalar n_wt value for each token and topic.
#   These values are not normalized and do not include regularization coefficients.
#   However, by placing the n_wt counters back into BigARTM you fully recover the topic model state.
pwt_request = artm.library.GetTopicModelArgs_RequestType_Pwt
nwt_request = artm.library.GetTopicModelArgs_RequestType_Nwt
# Change request_type to nwt_request and re-run this example
# Then you will get the same p(t|d) distributions from 'master' and 'test_master'.
request_type = pwt_request
# Split 10 topics into two groups - even topics and odd topics.
topic_names = []
topic_names_odd = []
topic_names_even = []
for topic_index in range(0, 10):
topic_name = "Topic" + str(topic_index)
topic_names.append(topic_name)
if topic_index % 2 == 0:
topic_names_odd.append(topic_name)
else:
topic_names_even.append(topic_name)
# Create topic model in 'master', then upload it into 'test_master'.
with artm.library.MasterComponent() as master, artm.library.MasterComponent() as test_master:
model = master.CreateModel(topic_names=topic_names)
for iteration in range(0, 2):
master.InvokeIteration(disk_path=batches_disk_path)
master.WaitIdle()
model.Synchronize()
topic_model_odd = master.GetTopicModel(model=model, request_type=request_type, topic_names=topic_names_odd,
use_matrix=False)
topic_model_even = master.GetTopicModel(model=model, request_type=request_type, topic_names=topic_names_even,
use_matrix=False)
theta_matrix, numpy_matrix = master.GetThetaMatrix(model=model, batch=test_batch)
print "Theta distribution for one test document: "
print "For the original topic model: ",
for value in numpy_matrix[0, :]:
print "%.5f" % value,
test_model = test_master.CreateModel(topic_names=topic_names)
test_model.Overwrite(topic_model_odd, commit=False)
test_model.Overwrite(topic_model_even, commit=False)
test_master.WaitIdle()
invoke_regularizers = (request_type != pwt_request)
test_model.Synchronize(decay_weight=0.0, apply_weight=1.0, invoke_regularizers=invoke_regularizers)
test_theta, numpy_matrix = test_master.GetThetaMatrix(model=test_model, batch=test_batch)
print "\nFor topic model copied into test_master: ",
for value in numpy_matrix[0, :]:
print "%.5f" % value,
print '(the same result is expected)'
# Continue topic model inference and compare new topic models
master.InvokeIteration(disk_path=batches_disk_path)
master.WaitIdle()
model.Synchronize(decay_weight=0.5, apply_weight=1.0)
theta_matrix, numpy_matrix = master.GetThetaMatrix(model=model, batch=test_batch)
print "After updating original topic model: ",
for value in numpy_matrix[0, :]:
print "%.5f" % value,
test_master.InvokeIteration(disk_path=batches_disk_path)
test_master.WaitIdle()
test_model.Synchronize(decay_weight=0.5, apply_weight=1.0)
test_theta, numpy_matrix = test_master.GetThetaMatrix(model=test_model, batch=test_batch)
print "\nAfter updating topic model in test_master:",
for value in numpy_matrix[0, :]:
print "%.5f" % value,
print '(depends on nwt vs pwt request)'
|
[
"sashafrey@gmail.com"
] |
sashafrey@gmail.com
|
12daf65b15a7f7f3a9850aeda63c6721498d4191
|
8b120f5babd6f0dc9de436684de1aa2844cb70f7
|
/Bert_weibo.py
|
a1d719c7875b12829437d35b1a9f30bc9cda450b
|
[] |
no_license
|
01070/virus_sentiment
|
382b2b8ab464f32970b2817a2eb2cff211d80048
|
5bd55d54c1333ba0cf089547326d8231ad39b34f
|
refs/heads/master
| 2023-02-10T09:24:48.487299
| 2021-01-13T08:05:39
| 2021-01-13T08:05:39
| 329,234,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,156
|
py
|
import torch
import torch.nn as nn
from tcn import TemporalConvNet
from transformers import BertModel, AutoModel
import torch.nn.functional as F
class BertOnly(nn.Module):
def __init__(self, opt):
super(BertOnly, self).__init__()
self.bert = BertModel.from_pretrained('bert-base-chinese')
self.dense = nn.Linear(768, 6)
self.dropout = nn.Dropout(0.1)
def forward(self, inputs):
outputs = self.bert(input_ids=inputs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.dense(pooled_output)
return logits
class TinyBertOnly(nn.Module):
def __init__(self, opt):
super(TinyBertOnly, self).__init__()
self.bert = AutoModel.from_pretrained("voidful/albert_chinese_tiny")
self.dense = nn.Linear(312, 6)
self.dropout = nn.Dropout(0.1)
def forward(self, inputs):
outputs = self.bert(input_ids=inputs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.dense(pooled_output)
return logits
class TinyBertLSTM(nn.Module):
def __init__(self, opt):
super(TinyBertLSTM, self).__init__()
self.bert = AutoModel.from_pretrained("voidful/albert_chinese_tiny")
self.bilstm = nn.LSTM(312, 128, 1, bidirectional=True)
self.dense1 = nn.Linear(312, 256)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(0.1)
self.dense2 = nn.Linear(256, 6)
def init_hidden(self, batch_size):
return torch.randn(2, batch_size, 128).cuda(), torch.randn(2, batch_size, 128).cuda()
def forward(self, inputs):
outputs = self.bert(input_ids=inputs)
hidden_x, pooled_output = outputs
h0, c0 = self.init_hidden(hidden_x.size(0))
lstm_out_x, (h, c) = self.bilstm(hidden_x.permute(1, 0, 2), (h0.cuda(), c0.cuda()))
logits = self.dense2(F.relu(lstm_out_x[-1]))
return logits
class TinyBertLSTMAttention(nn.Module):
def __init__(self, opt):
super(TinyBertLSTMAttention, self).__init__()
self.bert = AutoModel.from_pretrained("voidful/albert_chinese_tiny")
self.bilstm = nn.LSTM(opt.input_dim, 128, 1, bidirectional=True)
self.dense1 = nn.Linear(opt.input_dim, 256)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(0)
self.dense2 = nn.Linear(256, opt.output_dim)
def init_hidden(self, batch_size):
return torch.randn(2, batch_size, 128).cuda(), torch.randn(2, batch_size, 128).cuda()
def forward(self, inputs):
outputs = self.bert(input_ids=inputs)
hidden_x, pooled_output = outputs
h0, c0 = self.init_hidden(hidden_x.size(0))
lstm_out_x, (h, c) = self.bilstm(hidden_x.permute(1, 0, 2), (h0.cuda(), c0.cuda()))
pool_drop = self.dense1(self.dropout(pooled_output))
a = self.softmax(torch.matmul(pool_drop.view(-1, 1, 256), lstm_out_x.permute(1, 2, 0)))
a_vec = torch.matmul(a, lstm_out_x.permute(1, 0, 2))
a_vec_dropout = self.dropout(F.relu(a_vec.view(-1, 256)))
logits = self.dense2(a_vec_dropout)
return logits
class TinyBertCNN(nn.Module):
def __init__(self, opt):
super(TinyBertCNN, self).__init__()
self.bert = AutoModel.from_pretrained("voidful/albert_chinese_tiny")
self.kernel = 2
self.layer1 = nn.Sequential(nn.Conv1d(312, self.kernel, 3),
nn.MaxPool1d(3),
nn.ReLU()
)
self.layer2 = nn.Sequential(nn.Conv1d(312, self.kernel, 4),
nn.MaxPool1d(3),
nn.ReLU()
)
self.layer3 = nn.Sequential(nn.Conv1d(312, self.kernel, 5),
nn.MaxPool1d(3),
nn.ReLU()
)
self.maxpool = nn.MaxPool1d(3)
self.dense = nn.Linear(128, 6)
def forward(self, inputs):
layers = [self.layer1, self.layer2, self.layer3]
hidden_x, pooled_output = self.bert(input_ids=inputs)
x1, x2, x3 = [layer(hidden_x.permute(0, 2, 1)) for layer in layers]
max_out4 = F.relu(torch.cat((self.maxpool(x1), self.maxpool(x2), self.maxpool(x3)), dim=-1))
logits = self.dense(max_out4.view(-1, 128))
return logits
class TinyBertTCN(nn.Module):
def __init__(self, opt, embedding_matrix, input_size, output_size, num_channels,
kernel_size=2, dropout=0.3, emb_dropout=0.1, tied_weights=False):
super(TinyBertTCN, self).__init__()
self.output_size = output_size
self.bert = AutoModel.from_pretrained("voidful/albert_chinese_tiny")
self.tcn = TemporalConvNet(312, num_channels, kernel_size, dropout=dropout)
self.dense = nn.Linear(num_channels[-1], output_size)
if tied_weights:
if num_channels[-1] != input_size:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
# self.dense.weight = self.decode.weight
print("Weight tied")
self.drop = nn.Dropout(emb_dropout)
self.emb_dropout = emb_dropout
# self.init_weights()
# def init_weights(self):
# self.encoder.weight.data.normal_(0, 0.01)
# self.decoder.bias.data.fill_(0)
# self.decoder.weight.data.normal_(0, 0.01)
def forward(self, inputs):
hidden_x, pooled_output = self.bert(input_ids=inputs)
y = self.tcn(hidden_x.permute(0, 2, 1))
y = self.dense(y.permute(0, 2, 1))
logits = y[:, -1].view(-1, self.output_size)
return logits.contiguous()
class TCN(nn.Module):
def __init__(self, opt, embedding_matrix, input_size, output_size, num_channels,
kernel_size=2, dropout=0.3, emb_dropout=0.1, tied_weights=False):
super(TCN, self).__init__()
self.encoder = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
self.output_size = output_size
self.tcn = TemporalConvNet(opt.input_dim, num_channels, kernel_size, dropout=dropout)
if tied_weights:
if num_channels[-1] != input_size:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder = nn.Linear(num_channels[-1], opt.decoder_dim)
print("Weight tied")
self.drop = nn.Dropout(emb_dropout)
self.mhsa = nn.MultiheadAttention(opt.decoder_dim, opt.num_head)
self.lin_q = nn.Linear(opt.decoder_dim, opt.decoder_dim)
self.lin_k = nn.Linear(opt.decoder_dim, opt.decoder_dim)
self.lin_v = nn.Linear(opt.decoder_dim, opt.decoder_dim)
self.emb_dropout = emb_dropout
self.relu = nn.ReLU()
self.pool_way = opt.pool
self.init_weights()
self.dense = nn.Linear(opt.decoder_dim, output_size)
def init_weights(self):
self.encoder.weight.data.normal_(0, 0.01)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.normal_(0, 0.01)
def forward(self, inputs):
hidden_x = self.encoder(inputs)
x = self.tcn(hidden_x.permute(0, 2, 1))
x = self.decoder(x.permute(0, 2, 1))
query = self.lin_q(x.permute(1, 0, 2))
key = self.lin_k(x.permute(1, 0, 2))
value = self.lin_v(x.permute(1, 0, 2))
output, _ = self.mhsa(query, key, value)
if self.pool_way == 'max':
output = torch.max(output, dim=0)[0]
elif self.pool_way == 'mean':
output = torch.mean(output, dim=0)
else:
output = (torch.mean(output, dim=0) + torch.max(output, dim=0)[0])/2
logits = self.dense(self.relu(output))
return logits.contiguous()
class TinyBertLSTMAttentionSST(nn.Module):
def __init__(self, opt):
super(TinyBertLSTMAttentionSST, self).__init__()
self.bert = AutoModel.from_pretrained("sentence-transformers/ce-ms-marco-TinyBERT-L-6")
self.bilstm = nn.LSTM(opt.input_dim, 128, 1, bidirectional=True)
self.dense1 = nn.Linear(opt.input_dim, 256)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(0)
self.dense2 = nn.Linear(256, opt.output_dim)
def init_hidden(self, batch_size):
return torch.randn(2, batch_size, 128).cuda(), torch.randn(2, batch_size, 128).cuda()
def forward(self, inputs):
outputs = self.bert(input_ids=inputs)
hidden_x, pooled_output = outputs
h0, c0 = self.init_hidden(hidden_x.size(0))
lstm_out_x, (h, c) = self.bilstm(hidden_x.permute(1, 0, 2), (h0.cuda(), c0.cuda()))
pool_drop = self.dense1(self.dropout(pooled_output))
a = self.softmax(torch.matmul(pool_drop.view(-1, 1, 256), lstm_out_x.permute(1, 2, 0)))
a_vec = torch.matmul(a, lstm_out_x.permute(1, 0, 2))
a_vec_dropout = self.dropout(F.relu(a_vec.view(-1, 256)))
logits = self.dense2(a_vec_dropout)
return logits
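# --- Usage sketch (added illustration; all names below are assumptions) ---
# These models unpack self.bert(...) into (hidden_states, pooled_output), which
# matches older transformers releases (newer ones need return_dict=False), and
# init_hidden() assumes CUDA is available.
#
#   import types
#   opt = types.SimpleNamespace(input_dim=312, output_dim=6, decoder_dim=256,
#                               num_head=4, pool='max')
#   model = TinyBertLSTMAttention(opt).cuda()
#   token_ids = torch.randint(0, 21128, (2, 64)).cuda()   # (batch, seq_len) token ids
#   logits = model(token_ids)                              # (2, opt.output_dim)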
|
[
"346023218@qq.com"
] |
346023218@qq.com
|
9b896e046f64d8dee6aa6a4f4c66efeece2b6a9e
|
1a6dc8f7883af7dd55d55a83bc3e6f78914aaef1
|
/beats.py
|
b2827a03ffd4ec7ef0dcb82cd3a3aba56f590ccd
|
[] |
no_license
|
ditcham-park-coding-club/dpu-beats
|
14c501e829c08c803935d11828b7361d9d502a7d
|
8fd4beb2712b49aebb0bff8aadb116e469e8cd66
|
refs/heads/master
| 2020-03-28T08:05:01.804290
| 2019-01-13T10:24:01
| 2019-01-13T10:24:01
| 147,943,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
from math import floor
from os import listdir
from pygame import mixer
# Smaller than default buffer size increases timing accuracy
mixer.init(buffer=1024)
bpm = 120 # 2 beats per second
tick = 32 # 32 ticks per second
tick_duration = 1 / tick
beat_duration = 60 / bpm
ticks_per_beat = beat_duration / tick_duration
score = []
position = 0
drums = {fn.rsplit('.', 1)[0]: mixer.Sound('kit/' + fn) for fn in listdir('kit')}
def beat(beats=1):
play('Snr-01', beats)
def play(drum, beats=1):
_score(lambda: drums[drum].play())
_score(lambda: None, floor(ticks_per_beat * beats) - 2)
# Stop the sound to prevent hogging channels
_score(lambda: drums[drum].stop())
def rest(beats=1):
_score(lambda: None, floor(ticks_per_beat * beats))
def together(*parts):
global position
startPos = position
for part in parts:
position = startPos
part()
def repeat(part, count):
for i in range(count):
part()
def _score(action, repeat=1):
global position
if position == len(score):
score.extend([action] * repeat)
position += repeat
elif repeat == 1:
also = score[position]
score[position] = lambda: _together(action, also)
position += 1
else:
_score(action, 1)
_score(action, repeat - 1)
def _together(*fns):
for fn in fns:
fn()
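# --- Usage sketch (added illustration; drum names depend on the files in ./kit,
# and the score list is assumed to be consumed by a separate player) ---
#
#   repeat(beat, 4)                          # four snare hits, one per beat
#   together(lambda: play('Snr-01', 2),      # layer two parts over the same bars
#            lambda: repeat(beat, 2))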
|
[
"gsvarovsky@gmail.com"
] |
gsvarovsky@gmail.com
|
7c53f7aabd010e4667b46d9439520356c35838ec
|
ac98e1c554525fe1c4729aa4cdb3084245259d98
|
/tensorpack/graph_builder/__init__.py
|
6ed6c1ad9e4fffe0deb34f9ec91d53e8cffa13a0
|
[
"Apache-2.0"
] |
permissive
|
yunhuiguo/tensorpack
|
e85803b871a52f8115af145c054e098a6ab1c02c
|
91ce2260e5dc41b802b1a39b8b65ae6bee7ac719
|
refs/heads/master
| 2021-04-27T10:55:44.544209
| 2018-04-01T16:55:02
| 2018-04-01T16:55:02
| 122,549,690
| 0
| 0
|
Apache-2.0
| 2018-02-22T23:50:30
| 2018-02-22T23:50:30
| null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
# -*- coding: UTF-8 -*-
# File: __init__.py
if False:
from .model_desc import *
from .training import *
from .distributed import *
from .predict import *
from .utils import *
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else []
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
_SKIP = []
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
if module_name not in _SKIP:
global_import(module_name)
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
c5c253457cdac3e1dc8d24ea569ddc86e3ba6492
|
e01ff1ed99078e146ec9ac24cced7bdc6db8b43d
|
/Commit 5/pd110/src/src/urls.py
|
cb5deb2ebf1ff8ed7dd7bf36b258943f2acd23ae
|
[] |
no_license
|
abrahamrome/CURSO-DJANGO
|
d11449369f1dc5e29f2b9942f82306563c4de4fa
|
8aad6bde26285a940cea1bb832732f21d83c0f65
|
refs/heads/master
| 2022-12-08T05:32:40.778019
| 2020-09-01T21:04:22
| 2020-09-01T21:04:22
| 262,312,975
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
"""src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"abrahamrodrimedina@gmail.com"
] |
abrahamrodrimedina@gmail.com
|
367183c4bd47f79fe6d046f11c9cafe43357f1b7
|
b6bb740ca8318ca987cb814ad16a997414999149
|
/Classes_And_Objects.py
|
bce8e4ef770da04d718a8a21ba67ddeec054b59f
|
[] |
no_license
|
afaubion/Python-Tutorials
|
e3c9027390199b2ca80e00c566ce348a04012115
|
212e79ebaa7e79e66acf95cfe538c83bbf8192fa
|
refs/heads/master
| 2022-09-09T03:35:22.334827
| 2020-05-23T19:26:42
| 2020-05-23T19:26:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
# a very basic class would look something like this:
class MyClass:
variable = "blah"
def function(self):
print("This is a message inside the class.")
# to assign the above class (a template) to an object, you would do the following:
myobjectx = MyClass()
# the variable "myobjectx" now holds an instance of the class "MyClass", which
# contains the variable "variable" and the function "function" defined within "MyClass"
# to access an object's variable:
myobjectx.variable
# ex:
print(myobjectx.variable)
# each object is an independent instance of the class, and can be modified independently
myobjecty = MyClass()
myobjecty.variable = "yackity"
print(myobjecty.variable)
# accessing function inside an object
myobjectx.function()
# EXERCISE
# define the Vehicle class
class Vehicle:
name = ""
kind = "car"
color = ""
value = 100.00
def description(self):
desc_str = "%s is a %s %s worth $%.2f." % (self.name, self.color, self.kind, self.value)
return desc_str
# your code goes here
car1 = Vehicle()
car1.name = "Fer"
car1.color = "red"
car1.kind = "convertible"
car1.value = 60000.00
car2 = Vehicle()
car2.name = "Jump"
car2.color = "blue"
car2.kind = "van"
car2.value = 10000.00
# test code
print(car1.description())
print(car2.description())
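
# EXTRA EXAMPLE (added): since car1 and car2 are independent objects, we can
# collect them in a list and call the shared method on each one
for car in [car1, car2]:
    print(car.description())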
|
[
"noreply@github.com"
] |
afaubion.noreply@github.com
|
23b32041d38d8ba15d1d81dcb495a7b17d7b6482
|
017e95ce76e7676b8a05b63f72f5978a243bddde
|
/predictive_analytics_with_ensembled_learning/random_extremely_random_forest/app.py
|
b845f915efb1305b469a4240c8c923f7ca21532b
|
[
"MIT"
] |
permissive
|
dustrider/python_ai
|
7139d94338561481c7ff7afe8ee76a108ada4db6
|
16d9806a06ed0f4ba1fe638458caa37343482e0a
|
refs/heads/master
| 2020-03-21T18:05:03.326121
| 2018-07-31T16:11:52
| 2018-07-31T16:11:52
| 138,871,781
| 0
| 0
|
MIT
| 2018-06-27T11:22:21
| 2018-06-27T11:22:21
| null |
UTF-8
|
Python
| false
| false
| 3,329
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from predictive_analytics_with_ensembled_learning.random_extremely_random_forest.utilities import visualize_classifier
# Argument parser
# def build_arg_parser():
# parser = argparse.ArgumentParser(description='Classify data using \
# Ensemble Learning techniques')
# parser.add_argument('--classifier-type', dest='classifier_type', required=True, choices=['rf', 'erf'],
# help="Type of classifier to use; can be either 'rf' or 'erf'")
# return parser
if __name__ == '__main__':
# Parse the input arguments
# args = build_arg_parser().parse_args()
# classifier_type = args.classifier_type
    # Classifier type: 'rf' (random forest) or 'erf' (extremely randomized trees)
    classifier_type = 'rf'
# Load input data
input_file = 'data_random_forests.txt'
data = np.loadtxt(input_file, delimiter=',')
X, y = data[:, :-1], data[:, -1]
# Separate input data into three classes based on labels
class_0 = np.array(X[y == 0])
class_1 = np.array(X[y == 1])
class_2 = np.array(X[y == 2])
# Visualize input data
plt.figure()
plt.scatter(class_0[:, 0], class_0[:, 1], s=75, facecolors='white', edgecolors='black', linewidth=1, marker='s')
plt.scatter(class_1[:, 0], class_1[:, 1], s=75, facecolors='white', edgecolors='black', linewidth=1, marker='o')
plt.scatter(class_2[:, 0], class_2[:, 1], s=75, facecolors='white', edgecolors='black', linewidth=1, marker='^')
plt.title('Input data')
# Split data into training and testing datasets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=5)
# Ensemble Learning classifier
params = {'n_estimators': 100, 'max_depth': 4, 'random_state': 0}
if classifier_type == 'rf':
classifier = RandomForestClassifier(**params)
else:
classifier = ExtraTreesClassifier(**params)
classifier.fit(X_train, y_train)
visualize_classifier(classifier, X_train, y_train, 'Training dataset')
y_test_pred = classifier.predict(X_test)
visualize_classifier(classifier, X_test, y_test, 'Test dataset')
# Evaluate classifier performance
class_names = ['Class-0', 'Class-1', 'Class-2']
print("\n" + "#" * 40)
print("\nClassifier performance on training dataset\n")
print(classification_report(y_train, classifier.predict(X_train), target_names=class_names))
print("#" * 40 + "\n")
print("#" * 40)
print("\nClassifier performance on test dataset\n")
print(classification_report(y_test, y_test_pred, target_names=class_names))
print("#" * 40 + "\n")
# Compute confidence
test_datapoints = np.array([[5, 5], [3, 6], [6, 4], [7, 2], [4, 4], [5, 2]])
print("\nConfidence measure:")
for datapoint in test_datapoints:
probabilities = classifier.predict_proba([datapoint])[0]
predicted_class = 'Class-' + str(np.argmax(probabilities))
print('\nDatapoint:', datapoint)
print('Predicted class:', predicted_class)
# Visualize the datapoints
visualize_classifier(classifier, test_datapoints, [0] * len(test_datapoints), 'Test datapoints')
plt.show()
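
# Hypothetical helper sketch (added; not the project's actual code). The real
# visualize_classifier lives in utilities.py and is not shown here; this sketch
# shows one common way such a helper is written: predict over a mesh grid and
# overlay the data points. The name is changed so it does not shadow the
# imported function.
def _visualize_classifier_sketch(classifier, X, y, title=''):
    # Build a grid covering the data with a small margin
    x_min, x_max = X[:, 0].min() - 1.0, X[:, 0].max() + 1.0
    y_min, y_max = X[:, 1].min() - 1.0, X[:, 1].max() + 1.0
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                         np.arange(y_min, y_max, 0.01))
    # Classify every grid point and shade the decision regions
    Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.figure()
    plt.title(title)
    plt.contourf(xx, yy, Z, alpha=0.3)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='black')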
|
[
"jude.tan@accuity.com"
] |
jude.tan@accuity.com
|
f42bfbaa8a8b1ed3d5220d7a4759c7987fc35cb7
|
6dfdc65b1d633847763fc5012aeb4ca1438fe6ae
|
/Practical Machine Learning/Arima2.py
|
3c4339cdde0d1bf5b13b64d4c98132b356fcd914
|
[] |
no_license
|
SaurabhRuikar/CdacRepo
|
71537432885c83995ea6f2248d2a20ef08b4c9d4
|
12c2a4be8141443e4a567104506a81f115baeab1
|
refs/heads/master
| 2022-05-26T02:22:02.748779
| 2021-03-03T15:33:13
| 2021-03-03T15:33:13
| 223,311,490
| 0
| 0
| null | 2022-04-12T21:57:09
| 2019-11-22T02:57:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 513
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 15:23:07 2019
@author: student
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from statsmodels.tsa.arima_model import ARIMA
plt.style.use('fivethirtyeight')
df=pd.read_csv("/home/student/Desktop/Python/dataset/movavg.csv",
index_col='Date')
df.index=pd.to_datetime(df.index)
model=ARIMA(df.Price,order=(1,2,0))
model_fit=model.fit(disp=0)
print(model_fit.summary())
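
# Forecast sketch (added): with this older statsmodels ARIMA API, the fitted
# result can produce out-of-sample forecasts; forecast() returns the point
# forecasts, their standard errors, and confidence intervals.
fc, stderr, conf_int = model_fit.forecast(steps=5)
print(fc)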
|
[
"saurabh2830076@gmail.com"
] |
saurabh2830076@gmail.com
|
f50be0615f896d40820ac91d75e2afbbc3c5a198
|
25c21626adbf8da59a009600e38158b94e61edcd
|
/leapYear
|
4117dc8f787cb5b13899357ee065a7e6c2819b3d
|
[] |
no_license
|
gsanya1003/testing
|
039710b3278d5800828881aa16177c1255d23bbd
|
6b0e8f3a870f1696497a655da46abab3f46537a9
|
refs/heads/master
| 2020-12-06T18:18:43.385560
| 2020-10-19T05:17:09
| 2020-10-19T05:17:09
| 232,524,296
| 0
| 6
| null | 2020-10-19T05:17:11
| 2020-01-08T09:14:22
|
Java
|
UTF-8
|
Python
| false
| false
| 265
|
year = int(input("Enter Year: "))
# Check divisibility by 400 first: century years (divisible by 100) are leap
# years only when they are also divisible by 400
if year % 400 == 0:
    print(year, "is a Leap Year")
elif year % 100 == 0:
    print(year, "is not a Leap Year")
elif year % 4 == 0:
    print(year, "is a Leap Year")
else:
    print(year, "is not a Leap Year")
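
# Equivalent check (added): the same rule collapses to one boolean expression,
# useful for reuse elsewhere
is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)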
|
[
"noreply@github.com"
] |
gsanya1003.noreply@github.com
|
|
4c31abeb1a6a4864f3b02b091c6db7fb58eb973e
|
780750bf61b34b05662c7b735c59a23b11b8fb0e
|
/sudoku/test_sudoku.py
|
1bcb431a10bf3e272d01887d833aabc64ab4d0e7
|
[] |
no_license
|
mmweber2/adm
|
26203bc2b2db07a215f65bc49821c0cd44700ebe
|
bf622efcfad26f95696aa0bb72edb8fadfb2f717
|
refs/heads/master
| 2020-04-03T23:30:40.085340
| 2017-12-08T10:53:00
| 2017-12-08T10:53:00
| 52,829,709
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,503
|
py
|
from sudoku import Board
from nose.tools import assert_raises
from nose.tools import assert_equals
# Test Board
# Taken from puzzle generator online
def test_is_valid_start_board_valid():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert b._is_valid_start_board()
def test_board_constructor_valid():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_equals(b.board, input_array)
def test_is_valid_start_board_wrong_outer_size():
input_array = [[1, 0], [0, 0]]
with assert_raises(ValueError) as cm:
Board(input_array)
assert_equals(str(cm.exception), "Board boxes must be square")
def test_is_valid_start_board_wrong_inner_size():
input_array = [[1, 0], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0],
[0, 0], [0, 0], [0, 0]
]
with assert_raises(ValueError) as cm:
Board(input_array)
assert_equals(str(cm.exception), "Board boxes must be square")
def test_is_valid_start_board_not_list():
input_array = "abcd"
with assert_raises(ValueError) as cm:
Board(input_array)
assert_equals(str(cm.exception), "Board must be a 2D list or tuple")
def test_is_valid_start_board_no_inner_list():
input_array = ["abcd", "efgh", "i", "j", "k", "l", "m", "n", "o"]
with assert_raises(ValueError) as cm:
Board(input_array)
assert_equals(str(cm.exception), "Board must contain only lists or tuples")
# For now, an empty board is valid
def test_is_valid_start_board_empty():
    # Constructing an all-zero board should not raise
    Board([[0] * 9 for _ in xrange(9)])
def test_is_valid_start_board_non_int():
input_array = [[0] * 9 for _ in xrange(9)]
input_array[2][2] = "abc"
with assert_raises(ValueError) as cm:
Board(input_array)
assert_equals(str(cm.exception), "Board must contain only integers")
def test_is_valid_start_board_invalid_ints():
input_array = [[0] * 9 for _ in xrange(9)]
input_array[2][2] = 10
with assert_raises(ValueError) as cm:
Board(input_array)
assert_equals(
str(cm.exception), "Board numbers must be in range 0 <= x <= board size"
)
def test_is_valid_start_board_duplicates():
input_array = [
# 2 2s in this row
[0, 0, 0, 0, 9, 2, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
assert_raises(ValueError, Board, input_array)
def test_is_valid_board_valid():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
    assert b is not None
# Duplicates are somewhat hidden in the boxes because I wanted to test
# situations where only the row, column, or box had a duplicate, and
# not multiple situations at the same time.
def test_is_valid_board_duplicate_row():
input_array = [
# 2 2s in this row
[0, 0, 0, 0, 9, 2, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
assert_raises(ValueError, Board, input_array)
def test_is_valid_board_duplicate_column():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[1, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[1, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
# second 1 in the first column
[0, 5, 0, 0, 6, 1, 0, 0, 0]
]
assert_raises(ValueError, Board, input_array)
def test_is_valid_board_duplicate_in_box():
input_array = [
# second box in first row has two 1s
[0, 0, 0, 1, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
assert_raises(ValueError, Board, input_array)
# The _numbers_in_row tests don't need checking for duplicates because
# that will be done when the board is created.
def test_numbers_in_row_valid():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
result = b._numbers_in_row(0)
# Two sets are equal iff every element of each set is contained in the other
assert_equals(result, set((2, 5, 9)))
def test_numbers_in_row_empty():
input_array = [
# Clear out non-zeroes in first row
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_equals(b._numbers_in_row(0), set())
def test_numbers_in_row_invalid_row():
input_array = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_raises(ValueError, b._numbers_in_row, 9)
def test_numbers_in_col_valid():
input_array = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_equals(b._numbers_in_column(8), set((4, 9)))
def test_numbers_in_col_invalid_col():
input_array = [
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[0, 0, 0, 5, 3, 8, 2, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 6, 0],
[0, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_raises(ValueError, b._numbers_in_column, "4")
def test_numbers_in_box_valid():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_equals(b._numbers_in_box(3, 3), set((7, 4, 6, 9, 1, 2, 5, 3)))
def test_numbers_in_box_invalid_params():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_raises(ValueError, b._numbers_in_box, 1, 1)
def test_valid_moves_valid():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
result = b.valid_moves(0, 0)
assert_equals(type(result), list)
assert_equals(set(result), set((3, 6, 7, 8)))
# Check for a 9 move (no off-by-one error)
def test_valid_moves_9():
input_array = [
[3, 4, 6, 0, 9, 0, 0, 5, 2],
[0, 1, 5, 0, 0, 0, 3, 0, 4],
[7, 8, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
result = b.valid_moves(1, 0)
assert_equals(result, [9])
# Test a "failed" board where a number has no possible remaining moves
def test_valid_moves_none_left():
input_array = [
# Second number on first row has no moves left
[3, 0, 7, 6, 9, 4, 1, 5, 2],
[5, 1, 9, 0, 7, 0, 3, 8, 4],
[8, 6, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_equals(b.valid_moves(0, 1), [])
def test_valid_moves_position_already_filled():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_raises(IndexError, b.valid_moves, 1, 1)
def test_valid_moves_invalid_input():
input_array = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_raises(ValueError, b.valid_moves, 9, 0)
def test_make_moves():
# Easy
board1 = [
[0, 0, 0, 0, 9, 0, 0, 5, 2],
[0, 1, 0, 0, 0, 0, 3, 0, 4],
[0, 0, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
# Medium
board2 = [
[4, 0, 6, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 9, 0, 0, 8, 0, 1],
[8, 0, 0, 0, 7, 5, 0, 2, 0],
[0, 0, 5, 0, 6, 0, 0, 0, 8],
[2, 6, 0, 0, 9, 0, 0, 3, 4],
[9, 0, 0, 0, 2, 0, 7, 0, 0],
[0, 1, 0, 2, 5, 0, 0, 0, 7],
[6, 0, 9, 0, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 3],
]
# Hard
board3 = [
[8, 9, 0, 0, 3, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 8, 2, 0, 0],
[4, 0, 0, 2, 0, 0, 0, 0, 9],
[0, 3, 0, 0, 0, 0, 0, 2, 5],
[0, 0, 7, 0, 6, 0, 4, 0, 0],
[5, 1, 0, 0, 0, 0, 0, 6, 0],
[1, 0, 0, 0, 0, 3, 0, 0, 4],
[0, 0, 9, 7, 0, 0, 0, 0, 0],
[0, 0, 0, 6, 1, 0, 0, 7, 2],
]
# Expert
board4 = [
[0, 6, 0, 0, 2, 0, 0, 0, 0],
[0, 0, 2, 0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 3, 5, 8, 0, 0],
[5, 0, 0, 0, 7, 4, 0, 0, 6],
[0, 0, 0, 0, 0, 0, 0, 9, 8],
[0, 3, 9, 5, 0, 0, 0, 1, 0],
[0, 5, 0, 0, 6, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 7, 0, 4, 0],
[0, 4, 8, 0, 0, 0, 2, 0, 0]
]
for input_array in (board1, board2, board3, board4):
result = Board(input_array).make_moves()
        assert result is not None
def test_make_moves_failed_board():
input_array = [
# Second number on first row has no moves left
[3, 0, 7, 6, 9, 4, 1, 5, 2],
[5, 1, 9, 0, 7, 0, 3, 8, 4],
[8, 6, 2, 3, 1, 5, 0, 0, 9],
[0, 0, 8, 7, 4, 6, 0, 3, 0],
[0, 7, 0, 9, 0, 1, 0, 2, 0],
[0, 9, 0, 2, 5, 3, 7, 0, 0],
[4, 0, 0, 5, 3, 8, 2, 0, 0],
[2, 0, 3, 0, 0, 0, 0, 6, 0],
[1, 5, 0, 0, 6, 0, 0, 0, 0]
]
b = Board(input_array)
assert_equals(b.make_moves(), None)
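
# Running these tests (added note): they use nose assertions and Python 2
# constructs (xrange above), so a typical invocation would be:
#     nosetests test_sudoku.py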
|
[
"mmweber2@gmail.com"
] |
mmweber2@gmail.com
|
8b7d108095db5abe3d979a34f593d3b39e380604
|
ed1b9afa46f8bb3c35f86daf79a1bb0de80e298e
|
/Performance-testing/01-test-tool/jmeter/startJmeter/StartJmeter.py
|
276aa1b14e3c43f7bc5af924869bdae51faba6c7
|
[
"Apache-2.0"
] |
permissive
|
DearMerlin/blog-example
|
7d60807b0e656d149c12f346048a0b00322303f7
|
b315d0555ea8ce14ace7cf6bd1c1d20558e2abae
|
refs/heads/master
| 2023-09-05T23:14:52.034936
| 2021-11-18T15:29:48
| 2021-11-18T15:29:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,681
|
py
|
# -*- coding: utf-8 -*-
# @Time    : 2019/12/11 22:07
# @Author  : 7FGroup
# @name    : JMeter launch script
# @File    : startJmeter.py

import os, sys


def jmeterNumber(caseName, num_threads, ramp_time, duration, remark, hostIps='127.0.0.1'):
    '''
    :param caseName: name of the JMeter script
    :param num_threads: number of concurrent threads
    :param ramp_time: ramp-up step controlling thread start-up
    :param duration: execution time
    :param remark: label for this run
    :param hostIps: load-generator host parameter
    :return: JMeter started successfully

    Launch command: python startJmeter.py tiaoshi 2 1 30 pythontiaoshi
    - python: run through the Python interpreter
    - startJmeter.py: name of this launch script
    - tiaoshi: name of the JMeter script
    - 2: number of concurrent threads
    - 1: ramp-up step
    - 30: execution time
    - pythontiaoshi: remark, so that several runs of one script can be told apart
    '''
    if caseName is None:
        return "Test case is empty"
    if num_threads is None:
        return "Number of concurrent virtual users is empty"
    if ramp_time is None:
        return "Ramp-up step is empty"
    if duration is None:
        return "Execution time is empty"
    # Name for this run of the script
    runJmeterFile = '%s_%s_%s_%s_%s' % (caseName, num_threads, ramp_time, duration, remark)
    print("Run name: %s" % runJmeterFile)
    thisdir = os.getcwd()
    # Original script
    newdir = os.path.join(thisdir, "testscript", caseName + ".jmx")
    print("Current script path: %s" % newdir)
    if not os.path.exists(newdir):
        print('Script does not exist! Please check the script')
        return False
    # Path where test results are saved
    resultFile = os.path.join(thisdir, 'result', runJmeterFile)
    print("Script execution path: ", resultFile)
    # Create the result path if it does not exist yet
    if not os.path.exists(resultFile):
        os.makedirs(resultFile)
    lines = open(newdir, encoding="utf-8").readlines()
    fp = open(os.path.join(thisdir, "result", resultFile, runJmeterFile) + '.jmx', 'w')  # open the output file for writing
    for s in lines:
        fp.write(s.replace('num_threads">1</stringProp>', 'num_threads">%s</stringProp>' % num_threads)  # replace thread count
                 .replace('ramp_time">1</stringProp>', 'ramp_time">%s</stringProp>' % ramp_time)  # replace ramp-up step
                 .replace('scheduler">false</boolProp>', 'scheduler">true</boolProp>')  # tick "end run by elapsed time"
                 .replace('duration"></stringProp>', 'duration">%s</stringProp>' % duration)  # replace execution time
                 .replace('name="LoopController.loops">1</stringProp>',
                          'name="LoopController.loops">-1</stringProp>'))  # loop forever; the duration ends the run
    fp.close()
    os.chdir(resultFile)
    print("Current path: ", os.getcwd())
    # Check the environment variables
    if isEvn():
        # Decide between distributed and local execution
        if len(hostIps.split(",")) > 2:
            # Adapt the execution mode below to your own needs
            Rcmd = 'jmeter -n -t %s.jmx -R %s -l %s.jtl -j %s.log' % (
                runJmeterFile, hostIps, runJmeterFile, runJmeterFile)
            # Rcmd = 'jmeter -n -t %s.jmx -R %s -l %s.jtl -j %s.log -e -o %s' % (runJmeterFile, hostIps, runJmeterFile, runJmeterFile, runJmeterFile)
            print('Executing command: %s' % Rcmd)
            # os.system(Rcmd)
        else:
            # Without generating the HTML report:
            # cmd = 'jmeter -n -t %s.jmx -l %s.jtl -j %s.log' % (runJmeterFile, runJmeterFile, runJmeterFile)
            # Generate the HTML report automatically:
            cmd = 'jmeter -n -t %s.jmx -l %s.jtl -j %s.log -e -o %s' % (
                runJmeterFile, runJmeterFile, runJmeterFile, runJmeterFile)
            print('Executing command: %s' % cmd)
            os.system(cmd)


def isEvn():
    '''
    Check that JMeter is available on the PATH
    :return: True/False
    '''
    # Scan every output line; the banner line appears only once, so do not
    # give up after the first non-matching line
    for line in os.popen('jmeter -v'):
        if 'The Apache Software Foundation' in line:
            print("JMeter environment variable configured successfully")
            return True
    print("JMeter environment variable configuration failed")
    return False


if __name__ == '__main__':
    # Distributed IPs: separate multiple addresses with commas
    hostIps = '127.0.0.1'
    if len(sys.argv[1:]) == 5:
        print('Number of arguments:', len(sys.argv), 'arguments.')
        print('Argument list:', str(sys.argv[1:]))
        param = sys.argv[1:]
        print("Script name: %s, threads: %s, ramp-up: %s, duration: %s, remark: %s" % (param[0], param[1], param[2], param[3], param[4]))
        jmeterNumber(param[0], param[1], param[2], param[3], param[4], hostIps)
    else:
        print("Invalid arguments")
        pass
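
# Direct-call example (added): 'login' and 'baseline' are placeholder values;
# any .jmx file under testscript/ works the same way.
#     jmeterNumber('login', 50, 10, 600, 'baseline')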
|
[
"zuozewei@hotmail.com"
] |
zuozewei@hotmail.com
|
568a366f82f3a465bdc5357e25268025e5547105
|
885f70baec88ff6420144d9bafaffe9307b3e32d
|
/workspace/backend/application/api/v1/drinks.py
|
b1069b60b127825a823e3add81a0f355f46a0600
|
[] |
no_license
|
AlexGeControl/Full-Stacker--09-flask-social-blogging
|
738ea199006b87b043cbe63753f2d5ba23dbd2fd
|
10223a267d9a6acd80c1781d05082ae06da72259
|
refs/heads/master
| 2022-12-15T20:24:26.152644
| 2020-03-09T03:28:03
| 2020-03-09T03:28:03
| 245,116,415
| 0
| 0
| null | 2022-12-08T03:45:50
| 2020-03-05T09:11:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,042
|
py
|
from application import db
from application.auth.models import User
from application.models import Drink
import json
from flask import g
from flask import abort, request, jsonify
from . import bp
from flask_httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
# AUTH
# ----------------------------------------------------------------
@auth.verify_password
def verify_password(email, password):
# if account is not provided:
if email == '':
return False
# select user:
user = User.query.filter(
User.email == email
).first()
# if user doesn't exist:
if user is None:
return False
g.current_user = user
return user.verify_password(password)
@auth.error_handler
def unauthorized():
response = jsonify(
{
"success": False,
"error": 401,
"message": 'Authentication is needed to access this API.'
}
)
return response, 401
# CREATE
# ----------------------------------------------------------------
@bp.route('/drinks', methods=['POST'])
@auth.login_required
def create_drink():
"""
POST /drinks
it should create a new row in the drinks table
it should require the 'post:drinks' permission
it should contain the drink.long() data representation
return
status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the newly created drink
or appropriate status code indicating reason for failure
"""
# parse POSTed json:
drink_created = request.get_json()
# serialize recipe as one string:
drink_created["recipe"] = json.dumps(drink_created["recipe"])
error = True
try:
drink = Drink(**drink_created)
# insert:
db.session.add(drink)
db.session.commit()
error = False
# prepare response:
drink = drink.long()
except:
# rollback:
db.session.rollback()
error = True
finally:
db.session.close()
if error:
abort(500, description="Failed to create new Drink")
# format:
response = jsonify(
{
"success": True,
"drinks": [drink]
}
)
return response, 200
# READ
# ----------------------------------------------------------------
@bp.route('/drinks', methods=['GET'])
def get_drinks():
"""
GET /drinks
it should be a public endpoint
it should contain only the drink.short() data representation
return
status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
"""
error = True
try:
# select all drinks:
drinks = Drink.query.all()
error = False
except:
error = True
finally:
db.session.close()
if error:
abort(500, description="Failed to load drinks")
# format:
response = jsonify(
{
"success": True,
"drinks": [drink.short() for drink in drinks],
}
)
return response, 200
@bp.route('/drinks-detail', methods=['GET'])
@auth.login_required
def get_drinks_detail():
"""
GET /drinks-detail
it should require the 'get:drinks-detail' permission
it should contain the drink.long() data representation
return
status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
"""
error = True
try:
# select all drinks:
drinks = Drink.query.all()
error = False
except:
error = True
finally:
db.session.close()
if error:
abort(500, description="Failed to load drinks")
# format:
response = jsonify(
{
"success": True,
"drinks": [drink.long() for drink in drinks],
}
)
return response, 200
# PATCH
# ----------------------------------------------------------------
@bp.route('/drinks/<int:id>', methods=['PATCH'])
@auth.login_required
def edit_drink(id):
"""
PATCH /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'patch:drinks' permission
it should contain the drink.long() data representation
return
status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the updated drink
or appropriate status code indicating reason for failure
"""
# parse POSTed json:
drink_edited = request.get_json()
# serialize recipe as one string:
drink_edited["recipe"] = json.dumps(drink_edited["recipe"])
error = True
try:
# select:
drink = Drink.query.get(id)
# if resource is not found:
if drink is None:
abort(404, description="Drink with id={} not found".format(id))
# update:
drink.title = drink_edited["title"]
drink.recipe = drink_edited["recipe"]
# insert:
db.session.add(drink)
db.session.commit()
error = False
# prepare response:
drink = drink.long()
except:
# rollback:
db.session.rollback()
error = True
finally:
db.session.close()
if error:
abort(500, description="Failed to edit the Drink with id={}".format(id))
# format:
response = jsonify(
{
"success": True,
"drinks": [drink]
}
)
return response, 200
# DELETE
# ----------------------------------------------------------------
@bp.route('/drinks/<int:id>', methods=['DELETE'])
@auth.login_required
def delete_drink(id):
"""
DELETE /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:drinks' permission
return
status code 200 and json {"success": True, "delete": id} where id is the id of the deleted record
or appropriate status code indicating reason for failure
"""
error = True
try:
# select:
drink = Drink.query.get(id)
# if resource is not found:
if drink is None:
abort(404, description="Drink with id={} not found".format(id))
# delete:
db.session.delete(drink)
db.session.commit()
error = False
except:
# rollback:
db.session.rollback()
error = True
finally:
db.session.close()
if error:
abort(500, description="Failed to delete Drink with id={}".format(id))
# format:
response = jsonify(
{
"success": True,
"delete": id
}
)
return response, 200
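
# Client sketch (added, not part of the original module). It assumes the app
# serves this blueprint at http://localhost:5000/api/v1 -- both host and prefix
# are assumptions -- and uses HTTP Basic credentials as required by the auth
# handler above.
#
#     import requests
#     base = 'http://localhost:5000/api/v1'
#     print(requests.get(base + '/drinks').json())                     # public
#     print(requests.get(base + '/drinks-detail',
#                        auth=('user@example.com', 'secret')).json())  # auth'd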
|
[
"alexgecontrol@qq.com"
] |
alexgecontrol@qq.com
|
e92627db11f06b01bcfc12ef4c20578c4b9c09ba
|
e1e7bdc8e471860eb214056f10c1f94119dd71f7
|
/src/client.py
|
13cf26eaa983d2d424ce727669bc66eb18fd4e6f
|
[] |
no_license
|
mcarri01/Sharks-and-Minnows
|
5650356cdd7a91c84cef67c36317d143f578b014
|
d40ade79b08bf3ef7bfd12765e8bd9eb77451091
|
refs/heads/master
| 2021-04-30T12:52:48.423432
| 2018-01-20T17:00:48
| 2018-01-20T17:00:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,630
|
py
|
# client.py
# Client code for playing the game, organized by an initial
# startup process, and then splitting off into two threads.
#
# The DisplayThread class handles displaying the current iteration
# of the board to the user, while the FishThread handles accepting
# user input for control over the movement of their fish object.
#
import sys
import os
import threading
from fish import *
from title import *
import Pyro4
import socket
import time
import random
from datetime import datetime
import curses
from curses import wrapper
import signal
import re
import argparse
board = []
dead = False
class DisplayThread(threading.Thread):
""" The DisplayThread class handles displaying the current game status
to the user. It derives its methods from Python's threading.Thread
object, allowing it to run as a thread but giving flexibility with
class specific variables and state.
"""
def __init__(self, stdscr, username):
""" Initializes display parameters, especially the shutdown_flag
to check for exit signals from the user.
"""
threading.Thread.__init__(self)
self.user = username
self.shutdown_flag = threading.Event()
self.stdscr = stdscr
def run(self):
""" Main thread exec function. Loops until the user exits with a
SIGINT or SIGTERM.
Utilizes Python curses to handle effective terminal output,
printing out the current board from the Pyro remote object,
and then other specific strings indicating game status.
"""
global board
lastTime = datetime.now()
counter = 0
# until clean exit
while not self.shutdown_flag.is_set():
# currTime = datetime.now()
# delta = currTime - lastTime
# lastTime = currTime
# counter += delta.microseconds
# draw current board
b = board.readBoard()
wave = board.getWave()
string = ''
for line in b:
for c in line:
string += c
string += '\n'
self.stdscr.addstr(string, curses.color_pair(1))
players = board.getPlayers()
# if in a lobby
if not board.gameStarted():
board.clearBoard()
self.stdscr.addstr("Waiting for players...\n")
# if the player dies
elif self.user not in players:
self.stdscr.addstr("Game Over...you died!\n")
else:
s = "Current wave: " + str(wave) + ", Players alive: " + \
" ".join(players) + "\n"
self.stdscr.addstr(s, curses.A_BOLD)
self.stdscr.addstr(titleString)
self.stdscr.move(0, 0)
class FishThread(threading.Thread):
""" The FishThread class manages receiving user input and updating
the player's position.
"""
    def __init__(self, stdscr, username):
        """ Initializes the player's fish and terminal screen manager """
        threading.Thread.__init__(self)
        self.shutdown_flag = threading.Event()
        self.username = username
        self.stdscr = stdscr
def run(self):
"""
Main loop through for Fish thread.
Upon either the user signally SIGTERM/SIGINT
the main loop terminates and exits to the main program.
If a collision occurs between a player and a shark, sets the
user as dead to prevent them from playing (but they can
still watch their friends)!
"""
global board
global dead
fish = Fish("models/fish.txt", 15, 75, self.username)
while not self.shutdown_flag.is_set():
# gets key press from user
key = self.stdscr.getch()
            curses.flushinp()  # discard any buffered key presses
currCol = fish.getCol()
currRow = fish.getRow()
if not dead:
boardWidth = board.getWidth()
boardHeight = board.getHeight()
fishWidth = fish.getFishWidth()
fishHeight = fish.getFishHeight()
# depending on direction moves fish
if key == ord('w') and currRow != 1:
fish.setRow(currRow - 1)
elif key == ord('d') and currCol != boardWidth-fishWidth:
diff = boardWidth-currCol-1
if fish.getDisplayNameLen() > diff:
fish.setDisplayName(fish.getDisplayName()[:diff])
fish.setCol(currCol + 1)
elif key == ord('s') and currRow != boardHeight-fishHeight-2:
fish.setRow(currRow + 1)
elif key == ord('a') and currCol != 1:
if fish.getDisplayNameLen() < fish.getNameLen():
fish.oneMoreChar()
fish.setCol(currCol - 1)
# checks if there was collision on write
collision = board.writeBoardFish(fish.getRow(), fish.getCol(), \
fish.getFish(), fish.getDisplayName())
if collision:
dead = True
board.decrementPlayer(self.username)
class ServiceExit(Exception):
""" Custom exception to safely end threads before exitting """
pass
def receive_sig(signum, stack):
""" Signal handler to raise exception, allowing the shutdown_flag for
each thread to be set.
"""
raise ServiceExit
def initializeGame(ip):
""" Initializes the game by connecting to the nameserver and retrieving
the board Pyro4 object.
Retrieves username and game mode the user wishes to play.
"""
# locate nameserver
NS = Pyro4.locateNS(host=ip, port=9090, broadcast=True)
uri = NS.lookup("example.board")
global board
# retrieve board object
board = Pyro4.Proxy(uri)
username = raw_input("Please choose your username: ")
username = re.sub(r'[^a-zA-Z]', '', username)
# only allow unique usernames
while username in board.getPlayers():
username = raw_input("Username already taken. Choose another: ")
username = re.sub(r'[^a-zA-Z]', '', username)
board.addPlayer(username)
waiting = raw_input("Wait for more players? (y?): ")
# check if we need to put player in a lobby or not
if waiting != 'y':
board.startGame()
elif board.numPlayers() > 1:
board.startGame()
return username
def parseArgs(argv):
""" Parses IP command line argument """
parser = argparse.ArgumentParser(description='Client program for SharksAndMinnows game!')
parser.add_argument('-i', dest='ip', type=str,
help='IPv4 Address of Name Server')
return parser.parse_args().ip
def main(stdscr, username, ip):
""" Main for client, initializing signal handlers and launching threads.
Waits for signal to begin shutdown process.
"""
# signal handlers for clean exit
signal.signal(signal.SIGTERM, receive_sig)
signal.signal(signal.SIGINT, receive_sig)
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
stdscr.nodelay(True)
dispThread = DisplayThread(stdscr, username)
fishThread = FishThread(stdscr, username)
b = board.readBoard()
# until signal exit just start threads and loop
try:
dispThread.start()
fishThread.start()
while True:
time.sleep(0.5)
except ServiceExit:
# set flags for threads to clean up and finish
dispThread.shutdown_flag.set()
fishThread.shutdown_flag.set()
if username in board.getPlayers():
board.decrementPlayer(username)
dispThread.join()
fishThread.join()
if __name__ == "__main__":
ip = parseArgs(sys.argv)
username = initializeGame(ip)
wrapper(main, username, ip)
|
[
"mepstein9@yahoo.com"
] |
mepstein9@yahoo.com
|
9f8d5645cb2e47fe17cd264eff39c73878f75b22
|
cab894cb54bf2820b04eaf910aa2384e387273fd
|
/users/migrations/0001_initial.py
|
b4878aed699dafaa485dabc0a93ae0760ab9bd39
|
[] |
no_license
|
UNREALre/PollsMaster
|
3d09bcf2577a3b5c95abc311ab5faf8a67cc84b6
|
40352a2b7671766814b98145f3cdd67c2fc0b9bb
|
refs/heads/master
| 2023-03-06T08:05:34.681796
| 2021-02-16T13:59:45
| 2021-02-16T13:59:45
| 339,416,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
# Generated by Django 2.2.10 on 2021-02-15 13:47
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
[
"a.podrabinovich@cashoff.ru"
] |
a.podrabinovich@cashoff.ru
|
f83ed41091ce9a5201897377d90e2de30b3b35fc
|
b86f6367e0f725ff884270075829e1dfaf90a7eb
|
/Spammer.py
|
753bc9e48e4b721dcd881989c64e9911eab64fa8
|
[
"MIT"
] |
permissive
|
pk8742/Python-Projects
|
4b858d108740626c0424e462fb1d6fe887a6095b
|
df4b00a0c46150c4561063ccfcdffa17cf41dd54
|
refs/heads/main
| 2023-03-04T09:25:26.692460
| 2021-02-15T10:06:48
| 2021-02-15T10:06:48
| 339,025,572
| 0
| 0
| null | 2021-02-15T09:28:20
| 2021-02-15T09:28:20
| null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
'''
Our Task Is To Create A Python Program That Spams Messages On A Text Messenger
'''
import pyautogui
import time

time.sleep(5)  # time to click into the target chat window

with open("data.txt", 'r') as f:
    for word in f:
        # typewrite is pyautogui's typing function (the original called a
        # non-existent "tywriter"); strip the newline so only the explicit
        # "enter" below sends the message
        pyautogui.typewrite(word.strip())
        pyautogui.press("enter")
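
# Input format note (added): data.txt is read line by line, so it should hold
# one message per line, e.g.:
#     hello
#     hello again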
|
[
"sharmapravesh655@gmail.com"
] |
sharmapravesh655@gmail.com
|
c4cbf6f89ad8c87e9c134ae0e0af1dee875fb396
|
cf4ab5f196f00eacf2f3312fd3045f26e09b1e64
|
/main.py
|
49bdac9ac55ee19535ed4beb91c35436e637c7f2
|
[] |
no_license
|
sb-lviv/7_crypto_1
|
ec7a287be7261458df0658b98bea45fb2e989bf4
|
f7bb746578d077bb20f07e2246e7896620f2b25e
|
refs/heads/master
| 2020-07-27T01:44:34.684105
| 2019-09-16T14:53:59
| 2019-09-17T12:27:15
| 208,826,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,549
|
py
|
#!/usr/bin/env python3
import argparse
import random
class Crypto(object):
ACTIONS = [
'enc', # encrypt
'dec', # decrypt
]
ALGORITHMS = [
'sub', # substitution
'per', # permutation
'sca', # scaling
]
FIRST_CHAR = ' '
LAST_CHAR = '~'
def __init__(self):
print('__init__(self)')
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', default='sample.txt',
help='path to source file')
parser.add_argument('--output', '-o', default='encrypted.txt',
help='path to resulting file(s)')
parser.add_argument('--action', '-a', default=Crypto.ACTIONS[0],
help='action to perform', choices=Crypto.ACTIONS)
parser.add_argument('--algorithm', '-l', default=Crypto.ALGORITHMS[0],
help='available encryption algorithms',
choices=Crypto.ALGORITHMS)
parser.add_argument('--seed', '-s', default=0, type=int,
help='used for random generators')
parser.add_argument('--key', '-k', default='',
help='used for scaling algorithm')
args = parser.parse_args()
self.input_file_name = args.input
self.output_file_name = args.output
self.key_file_name = args.key
self.__input = ''
self.__output = ''
self.__key = ''
self.__action = args.action
self.__algorithm = args.algorithm
self.__seed = args.seed
        with open(self.input_file_name) as f:
            self.__input = f.read()
def run(self):
print('run(self)')
if self.__algorithm == Crypto.ALGORITHMS[0]:
self.substitution()
elif self.__algorithm == Crypto.ALGORITHMS[1]:
self.permutation()
elif self.__algorithm == Crypto.ALGORITHMS[2]:
self.scaling()
self.save_to_file()
@staticmethod
def get_alphabet():
print('get_alphabet()')
return [
[chr(x)
for x
in range(ord(Crypto.FIRST_CHAR), ord(Crypto.LAST_CHAR) + 1)],
{chr(x): x
for x
in range(ord(Crypto.FIRST_CHAR), ord(Crypto.LAST_CHAR) + 1)},
]
def get_substitution_alphabet(self):
print('get_substitution_alphabet(self)')
[alpha, alpha_rev] = Crypto.get_alphabet()
shuffled = alpha[:]
self.reset_generator()
random.shuffle(shuffled)
return [
{key: value for key, value in zip(alpha, shuffled)},
{key: value for value, key in zip(alpha, shuffled)}
]
def substitution(self):
print('substitution(self)')
alpha, alpha_rev = self.get_substitution_alphabet()
if self.__action == Crypto.ACTIONS[0]:
self.__output = ''.join([alpha[x] for x in self.__input])
elif self.__action == Crypto.ACTIONS[1]:
self.__output = ''.join([alpha_rev[x] for x in self.__input])
else:
self.__output = ''
def permutation(self):
print('permutation(self)')
reference = list(range(len(self.__input)))
self.reset_generator()
random.shuffle(reference)
if self.__action == Crypto.ACTIONS[0]:
self.__output = ''.join([self.__input[x] for x in reference])
elif self.__action == Crypto.ACTIONS[1]:
self.__output = list(range(len(self.__input)))
for x, char in zip(reference, self.__input):
self.__output[x] = char
self.__output = ''.join(self.__output)
else:
self.__output = ''
def scaling(self):
[alpha, alpha_rev] = Crypto.get_alphabet()
self.__get_key()
self.__output = []
_min = ord(Crypto.FIRST_CHAR)
_max = ord(Crypto.LAST_CHAR)
_range = _max - _min + 1
if self.__action == Crypto.ACTIONS[0]:
for index, char in enumerate(self.__input):
key_index = index % len(self.__key)
char_code = ord(char) + alpha_rev[self.__key[key_index]] - _min
char_code %= _range
char_code += _min
self.__output.append(chr(char_code))
self.__output = ''.join(self.__output)
elif self.__action == Crypto.ACTIONS[1]:
for index, char in enumerate(self.__input):
key_index = index % len(self.__key)
char_code = ord(char) - alpha_rev[self.__key[key_index]] - _min
char_code %= _range
char_code += _min
self.__output.append(chr(char_code))
self.__output = ''.join(self.__output)
else:
self.__output = ''
def __get_key(self):
try:
with open(self.key_file_name, 'r') as f:
self.__key = f.read()
if len(self.__key) == 0:
raise FileNotFoundError
except FileNotFoundError:
raise argparse.ArgumentTypeError(
'key "{}" not found'.format(self.key_file_name))
def save_to_file(self):
print('save_to_file(self)')
with open(self.output_file_name, 'w') as f:
f.write(self.__output)
def reset_generator(self):
print('reset_generator(self)')
random.seed(self.__seed)
if __name__ == "__main__":
Crypto().run()
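
# Usage examples (added), matching the argparse flags defined above:
#     python3 main.py -i sample.txt -o encrypted.txt -a enc -l sub -s 42
#     python3 main.py -i encrypted.txt -o decrypted.txt -a dec -l sub -s 42
#     python3 main.py -i sample.txt -o encrypted.txt -a enc -l sca -k key.txt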
|
[
"serhiy.boiko@plvision.eu"
] |
serhiy.boiko@plvision.eu
|
0f304f9d2c5697279489097b2f55a267627938d2
|
e89c10b364c1e4c13ae0df864640c72dca319d0a
|
/Modules/EikonDatabase.py
|
0d00c9e783c8df88028e6e368ca38235a792fad1
|
[] |
no_license
|
yongks/news
|
50049207b88f0f3b411700bc5412aa710cc78d33
|
4afce6737a900b6894aad7043c31347021c3e3ae
|
refs/heads/master
| 2022-11-07T21:09:18.547447
| 2020-06-28T07:43:36
| 2020-06-28T07:43:36
| 260,725,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,352
|
py
|
#######################################################################################
### Module: EikonDatabase
### Date: 5 Jan 2020
### Features: - Fetch Stock Price, Outstanding Stocks Info from Thomson Reuters Eikon
### - Support Corporate Action Records
### - Minimize Eikon API call to Single call per Symbol
### - Support selective Symbol update, with optional save to disk
### - Price and Outstanding Stocks are Adjusted
### - Support date range fetch and filter
### - Save to Local Disk
#######################################################################################
import pandas as pd
import datetime as dt
import configparser as cp
import eikon as ek
#import warnings
#warnings.simplefilter(action='ignore', category=FutureWarning)
import warnings
warnings.filterwarnings("ignore")
## Reading Directory Path From Config
config = cp.ConfigParser()
config.read('../settings.cfg')
## Reading File Path From Config
instruments_file = config['data']['instruments_file']
listing_db = config['data']['listing_db']
eod_db_adjusted = config['data']['eod_db_adjusted']
eod_db_unadjusted = config['data']['eod_db_unadjusted']
corp_act_db = config['data']['corp_act_db']
financial_db = config['data']['financial_db']
eikon_api = config['eikon']['api_key']
#%%
class EikonDatabase():
UNIVERSE = None
EOD_DF = pd.DataFrame()
EOD_DF_UNADJUSTED = pd.DataFrame()
FINANCIAL_DF = pd.DataFrame()
CORPACT_DF = pd.DataFrame()
LISTING_DF = pd.DataFrame()
TR_COLUMNS = ['COMPANYSHARESOUTSTANDING','PE','PRICETOBVPERSHARE','VOLUME','OPENPRICE','HIGHPRICE','LOWPRICE','CLOSEPRICE']
CALC_COLUMNS = ['MARKETCAP']
ALL_COLUMNS = TR_COLUMNS + CALC_COLUMNS
################################################################
### INIT
################################################################
def __init__(self):
try:
print('EikonDatabase: Loading Data From Hard Disk. Please Wait ...')
#self.EOD_DF = pd.read_csv(eod_db_adjusted , header=[0,1], index_col=0, parse_dates=True)
#self.EOD_DF_UNADJUSTED = pd.read_csv(eod_db_unadjusted, header=[0,1], index_col=0, parse_dates=True)
self.FINANCIAL_DF= pd.read_csv(financial_db , header=[0,1], index_col=0, parse_dates=True)
self.UNIVERSE = self.FINANCIAL_DF.columns.get_level_values(0).unique().to_list()
self.CORPACT_DF = pd.read_csv(corp_act_db, index_col=0, parse_dates=['CACORPACTDATE'])
self.LISTING_DF = pd.read_csv(listing_db, index_col=0)
self.ReportDatabaseStatus()
        except Exception:
            print('EikonDatabase: Unable to load local database files')
################################################################
### Report Database Overall Status
################################################################
def ReportDatabaseStatus(self):
print('')
# sym_count = len(self.EOD_DF.columns.levels[0])
# min_date = self.EOD_DF.index.min().date().isoformat()
# max_date = self.EOD_DF.index.max().date().isoformat()
# print('EikonDatabase: Adjusted EOD (EOD_DF) - Symbols #: {:>5d} {} > {}'.format(sym_count, min_date, max_date))
# sym_count = len(self.EOD_DF_UNADJUSTED.columns.levels[0])
# min_date = self.EOD_DF_UNADJUSTED.index.min().date().isoformat()
# max_date = self.EOD_DF_UNADJUSTED.index.max().date().isoformat()
# print('EikonDatabase: UnAdjusted EOD (EOD_DF_UNADJUSTED) - Symbols #: {:>5d} {} > {}'.format(sym_count, min_date, max_date))
sym_count = len(self.FINANCIAL_DF.columns.levels[0])
min_date = self.FINANCIAL_DF.index.min().date().isoformat()
max_date = self.FINANCIAL_DF.index.max().date().isoformat()
print('EikonDatabase: Financial Data (FINANCIAL_DF) - Symbols #: {:>5d} {} > {}'.format(sym_count, min_date, max_date))
sym_count = len(self.CORPACT_DF.RIC.unique())
min_date = self.CORPACT_DF['CACORPACTDATE'].min().date().isoformat()
max_date = self.CORPACT_DF['CACORPACTDATE'].max().date().isoformat()
print('EikonDatabase: Corp Act Data (CORPACT_DF) - Symbols #: {:>5d} {} > {}'.format(sym_count, min_date, max_date))
sym_count = len(self.LISTING_DF.index.unique())
exc_count = len(self.LISTING_DF.EXCHANGEMARKETIDCODE.unique())
print('EikonDatabase: Stock Listing (LISTING_DF) - Symbols #: {:>5d} Exchange #: {}'.format(sym_count, exc_count))
################################################################
### Refresh CorpAct Data (CORPACT_DF)
################################################################
def RefreshCorpAct(self,
symbols = [],
date_from = '2000-01-01',
date_to = dt.date.today().isoformat(),
save=False,
overwrite=False):
## If single stock, convert to list first
if (not type(symbols)==list): symbols = [symbols]
current_universe = self.FINANCIAL_DF.columns.get_level_values(0).unique().to_list() +\
self.CORPACT_DF.RIC.to_list()
## Default to all Universe
if len(symbols)==0:
symbols = current_universe
## Consider current universe for update
elif (not overwrite):
symbols = list(set(symbols + current_universe))
print('EikonDatabase: Getting Corp Act Data.')
df, err = ek.get_data( symbols,
['TR.CACorpActDate','TR.CAExDate','TR.CAEffectiveDate',
'TR.CACorpActDesc','TR.CACorpActEventType','TR.CAAdjustmentType','TR.CAAdjustmentFactor',
'TR.CATermsOldShares','TR.CATermsNewShares','TR.CAOfferPrice'],
{'SDate':'%s'%date_from,'EDate':'%s'%date_to},
field_name=True)
## Tidy Up Column Names. Remove 'TR.'
df.columns = [ x[3:] if 'TR.' in x else x for x in df.columns]
df.rename(columns={'Instrument':'RIC'}, inplace=True)
## Convert Date Columns, otherwise Report will fail
date_cols = [ x for x in df.columns.to_list() if 'DATE' in x]
df.loc[:, date_cols ] = df.loc[:, date_cols].applymap(pd.to_datetime)
if (save==True):
print('EikonDatabase: Saved Corp Act.')
self.CORPACT_DF = df
self.CORPACT_DF.to_csv(corp_act_db)
self.ReportDatabaseStatus()
else:
return df
################################################################
### Get Corp Act Data (CORPACT_DF)
################################################################
def GetCorpAct(self,
symbols=None,
date_from = (dt.date.today()-dt.timedelta(days=365*20)).isoformat(),
date_to = dt.date.today().isoformat()):
## Return Entire DataFrame
if (symbols==None):
return self.CORPACT_DF
## If single stock, convert to list first
if (not type(symbols)==list): symbols = [symbols]
return self.CORPACT_DF.query('RIC == @symbols')
################################################################
    ### Refresh Listing Data For An Exchange (LISTING_DF)
### Included Dual Listing Counters
################################################################
def RefreshListing(
self, exchanges=['XKLS'],
save=True,
overwrite=False):
tr_company_fields = ['TR.RIC','TR.CommonName','TR.ISIN','OFFCL_CODE','TR.SEDOLCODE','TR.ExchangeMarketIdCode','TR.ExchangeTicker',
'TR.TRBCEconomicSector', 'TR.TRBCBusinessSector','TR.TRBCIndustry',
'TR.GICSSector','TR.GICSIndustryGroup','TR.GICSIndustry','TR.GICSSubindustry',
'TR.TRBCIndustryGroup', 'TR.TRBusinessSummary', 'CF_EXCHNG',
'TR.CompanyIncorpDate','TR.IPODate','TR.CompanyPublicSinceDate','TR.FirstTradeDate',
'TR.CompanySharesOutstanding','TR.CompanyMarketCap','TR.SharesFreeFloat', 'CF_CLOSE','CF_DATE']
if (not type(exchanges)==list):
exchanges = [exchanges]
## If not overwriting, Fetch Listing for Existing Exchanges Too
if (not overwrite):
exchanges = list(set(exchanges + self.LISTING_DF.EXCHANGEMARKETIDCODE.unique().tolist()))
## Fetch Listing for Each Exchange
df = pd.DataFrame()
for exc in exchanges:
print('EikonDatabase: Fetching Listing Data - Exchange: {}'.format(exc))
temp_df, err = ek.get_data('SCREEN(U(IN(Equity(active,public,primary,countryprimaryquote))/*UNV:Public*/), \
IN(TR.ExchangeMarketIdCode,"{}"))'.format(exc),
tr_company_fields, field_name = True)
df = pd.concat([df, temp_df] , axis=0)
## Remove 'TR.' from column names
df.columns = [ x[3:] if 'TR.' in x else x for x in df.columns]
## remove First column 'Instrument', as it is redundant at 'RIC' column, make it index
df = df.iloc[:, 1:].set_index('RIC')
if(save):
## Assign to Memory then save
print('EikonDatabase: Saving Listing Data.')
self.LISTING_DF = df
df.to_csv(listing_db)
self.ReportDatabaseStatus()
else:
return df
################################################################
### Refresh Financial Data
### overwrite : discard all existing data, fetch new one
################################################################
def RefreshFinancial(self,
symbols = None,
date_from = '2010-01-01',
date_to = dt.date.today().isoformat(),
adjusted = '1',
save = False,
overwrite = False):
## Connect to Eikon
ek.set_app_key(eikon_api)
## If single stock, convert to list first
if (not type(symbols)==list): symbols = [symbols]
## Loop Through Every Symbol
df = pd.DataFrame()
i = 1
for sym in symbols:
            print('EikonDatabase: Fetching Financial Data {:>4d}/{:<4d} : {:<7} From {} > {}'.format(i, len(symbols), sym, date_from, date_to))
i = i + 1
## Fetch The Data
temp_df, err = ek.get_data([sym],
['TR.CompanySharesOutstanding','TR.CompanySharesOutstanding.date',
'TR.PE.date', 'TR.PE',
'TR.PriceToBVPerShare', 'TR.PriceToBVPerShare.date',
'TR.VOLUME.date', 'TR.VOLUME',
'TR.OPENPRICE.date', 'TR.OPENPRICE',
'TR.HIGHPRICE.date', 'TR.HIGHPRICE',
'TR.LOWPRICE.date', 'TR.LOWPRICE',
'TR.CLOSEPRICE.date', 'TR.CLOSEPRICE'],
{'SDate':date_from,'EDate':date_to, 'Adjusted':adjusted}, field_name=True)
## Fix Header: Remove 'TR'
temp_df.columns = [ x[3:] if 'TR.' in x else x for x in temp_df.columns]
#temp_df.columns = [ x[1:] if '.'== x[0] else x for x in temp_df.columns]
## Loop Through Each Columns For This Stock
temp_df1 = pd.DataFrame()
for col in self.TR_COLUMNS:
temp_df2 = temp_df.loc[ : , [col+'.DATE', col]] ## Get the Data For This Column
temp_df2 = temp_df2.rename(columns={col+'.DATE':'Date'}) ## Rename Column 'Date'
if (temp_df2.isna().sum()[0] == len(temp_df2)):
## This Symbol does not have this column
continue
else:
temp_df2['Date'] = temp_df2['Date'].apply(lambda x: x[:10]) ## Keep Date String Only - Ommit Time
temp_df2 = temp_df2.dropna().drop_duplicates().groupby('Date').first() ## Remove duplicates
temp_df1 = temp_df1.merge(temp_df2, ## Append To Overall DataFrame For This Stock
left_index = True,
right_index = True, how='outer')
## All Comlumns For This Tock Has Been Added. Remove Rows without Pricing
clean_mask = temp_df1.loc[:, 'OPENPRICE':'CLOSEPRICE'].dropna(how='all').index
temp_df1 = temp_df1.loc[clean_mask, : ]
## Treat Missing Value, and Calculate Additional Columns
if 'COMPANYSHARESOUTSTANDING' in temp_df1.columns:
temp_df1['COMPANYSHARESOUTSTANDING'] = temp_df1.COMPANYSHARESOUTSTANDING.ffill()
temp_df1['MARKETCAP'] = temp_df1.CLOSEPRICE * temp_df1.COMPANYSHARESOUTSTANDING
## Add RIC code into Header, Then Merge Into Final DataFrame
#full_columns = self.TR_COLUMNS + self.CALC_COLUMNS
full_columns = temp_df1.columns
temp_df1.columns = pd.MultiIndex.from_arrays([ [sym]*len(full_columns), full_columns])
df = df.merge(temp_df1, left_index=True, right_index=True, how='outer')
## All Stocks Data Are Complete
df.columns = pd.MultiIndex.from_tuples(df.columns, names=('RIC','COLUMN'))
        ## If overwrite, discard all existing data; otherwise merge the new symbols into the existing DataFrame
if (overwrite):
print('EikonDatabase: Financial Data - Removing All Existing Symbols From Memory')
self.FINANCIAL_DF = df
else:
print('EikonDatabase: Financial Data - Removing Overlapping Symbols From Memory')
## remove overlapping symbols from existing DataFrame
existing_set = set(self.FINANCIAL_DF.columns.get_level_values(0).to_list())
remove_symbols = list(existing_set.intersection(symbols))
## Remove Overlapping Symbols if detected
if len(remove_symbols)>0:
self.FINANCIAL_DF = self.FINANCIAL_DF.drop(columns=remove_symbols, level=0)
#df = pd.merge(temp_df, df, how='outer', left_index=True, right_index=True)
self.FINANCIAL_DF = self.FINANCIAL_DF.merge(df, how='outer',left_index=True, right_index=True)
## If Save, update the DataFrame and save to Local File
## Else return as DataFrame
if(save):
## Assign to Memory
print('EikonDatabase: Financial Data - Saving To Local Disk')
self.UNIVERSE = self.FINANCIAL_DF.columns.get_level_values(0).unique()
self.FINANCIAL_DF.to_csv(financial_db)
self.ReportFinancialStatus()
################################################################
### Return Financial By Symbol(s))
################################################################
def GetFinancialBySymbols(self,
symbols = [],
date_from = '2000-01-01',
date_to = dt.date.today().isoformat(),
columns = []):
## Convert Symbols to List if it is not
if (not type(symbols)==list): symbols = [symbols]
## Default Symbols to All Tickers if not specified
if len(symbols)==0:
symbols = self.FINANCIAL_DF.columns.get_level_values(0).unique().to_list()
## Convert Columns to List if it is not
if (not type(columns)==list): columns = [columns]
## Default Columns if not specified
if len(columns)==0:
columns = self.ALL_COLUMNS
## Reorder Symbols and Return
df = self.FINANCIAL_DF.loc[date_from:date_to, pd.IndexSlice[symbols,columns]]\
.reindex(symbols, level=0, axis=1)\
.reindex(columns, level=1, axis=1)\
.dropna(how='all')
## return final data
return df
################################################################
### Return Financial By Column(s)
################################################################
def GetFinancialByColumns(self,
columns = [],
date_from = '2000-01-01',
date_to = dt.date.today().isoformat(),
symbols = []):
## Get The Data
df = self.GetFinancialBySymbols(symbols, date_from, date_to, columns)
## Swap Columns 0,1 and Return
df.columns = df.columns.swaplevel(0,1)
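        ## e.g. columns become ('CLOSEPRICE', 'AAA.XX') instead of ('AAA.XX', 'CLOSEPRICE')
        ## (hypothetical RICs, for illustration only)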
        ## Reorder Columns and Return; skip reindexing when the defaults were used,
        ## otherwise an empty list would drop every column
        if columns:
            df = df.reindex(columns, level=0, axis=1)
        if symbols:
            df = df.reindex(symbols, level=1, axis=1)
        return df
################################################################
### Return All Financial Symbols
################################################################
def GetFinancialSymbols(self):
## Get The Data
symbols = self.FINANCIAL_DF.columns.get_level_values(0).to_list()
print('EikonDatabase: Financial Data - Total Symbols: {:4d}'.format(len(symbols)))
return symbols
################################################################
    ### Report Financial Data Status
################################################################
def ReportFinancialStatus(self):
## Calculate First and Last Date
df1 = self.FINANCIAL_DF.stack(level=0).swaplevel().reset_index().groupby('RIC').agg(
min_date= ('Date', lambda x: x[x.dropna().index[0]]),
max_date= ('Date', lambda x: x[x.dropna().index[-1]]))
        ## Calculate Non-NA Row Counts
df2 = self.FINANCIAL_DF.stack(level=0).swaplevel().reset_index().groupby('RIC')\
.apply(lambda x: (~x.isna()).sum())\
.filter(items=self.ALL_COLUMNS)
## return merged data
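        ## e.g. one row per RIC holding min_date, max_date and a non-NA count per data column (sketch)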
return df1.merge(df2, left_index=True, right_index=True, how='inner')
    ## Update the Database with a list of symbols, for both Adjusted and Unadjusted prices
    ## defaults to the full range from 2000-01-01 up to the current date
    ##
def RefreshPricing(self,
symbols = None,
date_from = '2000-01-01',
date_to = dt.date.today().isoformat(),
save = False,
replace = False):
## Connect to Eikon
ek.set_app_key(eikon_api)
## If single stock, convert to list first
if (not type(symbols)==list): symbols = [symbols]
## Loop through each symbol to get the final DataFrame
df_adjusted = pd.DataFrame()
df_unadjusted = pd.DataFrame()
        ## Build The Pairs Of Start/End Dates (In String Form) With 10-Year Gaps;
        ## replace the last end date with date_to, to avoid a Count=-1 column in the output
decades = (dt.date.fromisoformat(date_to).year - 2000) // 10 + 1 ## how many periods of 10 years, since 2000
dt_range1 = pd.date_range('2000-01-01', freq='10YS', periods=decades).format()
dt_range2 = pd.date_range('2000-01-01', freq='10Y', periods=decades).shift(periods=9, freq='Y').format()
dt_range2[-1] = date_to
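        ## Worked example, assuming date_to = '2023-12-31':
        ##   dt_range1 = ['2000-01-01', '2010-01-01', '2020-01-01']
        ##   dt_range2 = ['2009-12-31', '2019-12-31', '2023-12-31']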
## Initialize
unrecoverable_symbols = []
i = 0
for sym in symbols:
i = i+1
df_range_adjusted = pd.DataFrame()
df_range_unadjusted = pd.DataFrame()
            ## Loop N times for each symbol, where N is the number of 10-year windows
for decade in range(0,decades):
dt1 = dt_range1[decade]
dt2 = dt_range2[decade]
## Attempt 10 times getting data from Eikon, catering for error Retry
temp_range_adjusted = pd.DataFrame()
temp_range_unadjusted = pd.DataFrame()
## Try To Fetch Data 10x on this symbol
for attempt in range(10):
try:
                        print('EikonDatabase: Fetching EOD Data {:>4}/{:<4} : {:<7} From {} > {}'.format(i, len(symbols), sym, dt1, dt2))
temp_range_adjusted = ek.get_timeseries(sym, start_date=dt1, end_date=dt2, corax= "adjusted")
temp_range_unadjusted = ek.get_timeseries(sym, start_date=dt1, end_date=dt2, corax="unadjusted")
## If failure due to No Available Data, move to next time range, else try again
                    except Exception as err:
                        ## str(err) also covers exceptions that lack a .message attribute
                        if ('No data available for the requested date range' in str(err) or
                            'Invalid RIC' in str(err)):
break
else:
continue
## Attempt successful, acquire the data for this symbol
else:
df_range_adjusted = pd.concat([df_range_adjusted, temp_range_adjusted] , axis=0)
df_range_unadjusted = pd.concat([df_range_unadjusted, temp_range_unadjusted], axis=0)
break
                else:
                    ## This else pairs with the attempt loop: it runs only when
                    ## all 10 attempts failed, so flag the symbol and move on
                    unrecoverable_symbols += [sym]
                    print('EikonDatabase: {} Unrecoverable Error Processing Symbol - Skipping It'.format(sym))
                    continue
### Print Error Symbols if any
if len(unrecoverable_symbols)>0:
print('EikonDatabase: Symbols Failure : {}'.format(unrecoverable_symbols))
## Consolidate multiple date range for this Symbol
df_range_adjusted.columns = pd.MultiIndex.from_product([[sym], df_range_adjusted.columns])
df_range_unadjusted.columns = pd.MultiIndex.from_product([[sym], df_range_unadjusted.columns])
## Consolidate this Symbol into main dataframe
df_adjusted = pd.concat([df_adjusted, df_range_adjusted] , axis=1)
df_unadjusted = pd.concat([df_unadjusted, df_range_unadjusted], axis=1)
## All Symbols Completed
## Rename the Headers, Save to local storage if chosen, and overwrite memory
## Else return as a dictionary of both adjusted and unadjusted DF
df_adjusted.columns.rename("RIC", level=0, inplace=True)
df_unadjusted.columns.rename("COLUMN", level=1, inplace=True)
## Replace will construct A Complete DataFrame with Existing and New Data
if (replace):
## remove relevant symbols from existing DataFrame
eod_df = self.EOD_DF.drop(columns=symbols, level=0)
eod_df_unadjusted = self.EOD_DF_UNADJUSTED.drop(columns=symbols, level=0)
## merge with newly acquired dataframe columns
df_adjusted = pd.merge(eod_df, df_adjusted, how='outer', left_index=True, right_index=True)
df_unadjusted = pd.merge(eod_df_unadjusted, df_unadjusted, how='outer', left_index=True, right_index=True)
## If Save, update the DataFrame and save to Local File
## Else return as DataFrame
if(save):
## Assign to Memory
print('EikonDatabase: Saving EOD Data with Full Replacement')
self.EOD_DF = df_adjusted
self.EOD_DF_UNADJUSTED = df_unadjusted
self.UNIVERSE = self.EOD_DF.columns.get_level_values(0).unique()
df_adjusted .to_csv(eod_db_adjusted)
df_unadjusted.to_csv(eod_db_unadjusted)
self.ReportDatabaseStatus()
else:
return ({'EOD_ADJUSTED': df_adjusted, 'EOD_UNADJUSTED': df_unadjusted})
    ### Return Pricing of Symbol(s)
    ### The returned DataFrame always has the symbol as its outer (first) header level
def GetPricingBySymbols(self,
symbols=None,
date_from = '2000-01-01',
date_to = dt.date.today().isoformat(),
columns = ['OPEN','CLOSE','HIGH','LOW','VOLUME'],
adjusted = True):
## Default Symbols to All Tickers if not specified
        if (symbols is None):
if adjusted: symbols = self.EOD_DF.columns.get_level_values(0).to_list()
else: symbols = self.EOD_DF_UNADJUSTED.columns.get_level_values(0).to_list()
## Convert Symbols to List if it is not
if (not type(symbols)==list):
symbols = [symbols]
#eikon.EOD_DF.loc[:, pd.IndexSlice[:,['CLOSE','OPEN']]]
## Select Adjusted or Non-Adjusted
if adjusted:
#df = self.EOD_DF.loc[date_from:date_to, symbols]
df = self.EOD_DF.loc[date_from:date_to, pd.IndexSlice[symbols,columns]]
        else:
            df = self.EOD_DF_UNADJUSTED.loc[date_from:date_to, pd.IndexSlice[symbols, columns]]
return df
    ### Calls GetPricingBySymbols,
    ### Then Swaps The Two Header Levels
    ### The returned DataFrame always has the symbol as its inner (second) header level
def GetPricingByColumns(self,
symbols=None,
date_from = '2000-01-01',
date_to = dt.date.today().isoformat(),
columns=['OPEN','CLOSE','HIGH','LOW','VOLUME'],
adjusted = True):
## Get The Data
df = self.GetPricingBySymbols(symbols, date_from, date_to, columns, adjusted)
## Swap Columns 0,1 and Return
df.columns = df.columns.swaplevel(0,1)
df.sort_index(axis=1, inplace=True)
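        ## e.g. resulting layout (hypothetical RICs): ('CLOSE', 'AAA.XX'), ('CLOSE', 'BBB.XX'), ...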
return df
|
[
"="
] |
=
|
fb1282807c3f9e8423589cabcd735a724f04e75f
|
e638e9fda0e672fa9a414515d0c05a24ab55ad38
|
/UniqueWordAbbreviation.py
|
a9b9eddbd49fb1351c2bb4ec55d9ff5102f8d5b4
|
[] |
no_license
|
zjuzpz/Algorithms
|
8d1c7d50429aa5540eb817dc5495a20fc3f11125
|
2df1a58aa9474f2ecec2ee7c45ebf12466181391
|
refs/heads/master
| 2021-01-21T05:55:48.768728
| 2020-08-04T22:44:08
| 2020-08-04T22:44:08
| 44,586,024
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,071
|
py
|
"""
288. Unique Word Abbreviation
An abbreviation of a word follows the form <first letter><number><last letter>. Below are some examples of word abbreviations:
a) it                      --> it    (no abbreviation)

     1
b) d|o|g                   --> d1g

              1    1  1
     1---5----0----5--8
c) i|nternationalizatio|n  --> i18n

              1
     1---5----0
d) l|ocalizatio|n          --> l10n
Assume you have a dictionary and given a word, find whether its abbreviation is unique in the dictionary. A word's abbreviation is unique if no other word from the dictionary has the same abbreviation.
Example:
Given dictionary = [ "deer", "door", "cake", "card" ]
isUnique("dear") -> false
isUnique("cart") -> true
isUnique("cane") -> false
isUnique("make") -> true
"""
# Time:  O(n) for init, O(1) for lookup
# Space: O(n) for the abbreviation dictionary
class ValidWordAbbr(object):
def __init__(self, dictionary):
"""
initialize your data structure here.
:type dictionary: List[str]
"""
self.lookup = {}
for word in dictionary:
candidate = self.abbr(word)
if candidate not in self.lookup:
self.lookup[candidate] = word
elif self.lookup[candidate] != word:
self.lookup[candidate] = False
def isUnique(self, word):
"""
check if a word is unique.
:type word: str
:rtype: bool
"""
abbr = self.abbr(word)
if abbr not in self.lookup:
return True
return self.lookup[abbr] == word
    def abbr(self, word):
        # Words of length <= 2 have no abbreviation (e.g. "it" -> "it");
        # abbreviating a single letter would otherwise collide with
        # two-letter words such as "aa".
        if len(word) <= 2:
            abbr = word
        else:
            abbr = word[0] + str(len(word) - 2) + word[-1]
        return abbr
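# Sketch of expected behaviour, matching the docstring above:
#   abbr("deer") -> "d2r", abbr("it") -> "it", abbr("internationalization") -> "i18n"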
if __name__ == "__main__":
d = ["deer", "door", "cake", "card"]
a = ValidWordAbbr(d)
print(a.isUnique("dear"))
print(a.isUnique("cart"))
print(a.isUnique("cane"))
print(a.isUnique("make"))
|
[
"zjuzpz@gmail.com"
] |
zjuzpz@gmail.com
|
866c4ad1982ee56b888f762ad139b2747094dfdc
|
588df80dd8654457c60291008350297fb70c012c
|
/comparisons/model_comparison.py
|
f0c40246acb0e7ef463993e07c9de28a5702caea
|
[] |
no_license
|
danielacraciun/ml_practice
|
60d9d8ff10762367037a97fb1e29c76a5ed1b25d
|
ca638b557fdeba9ee9634dac407e552e84b1cc99
|
refs/heads/master
| 2021-01-21T05:16:52.838615
| 2017-03-21T21:15:24
| 2017-03-21T21:15:24
| 83,164,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
# Compare Algorithms
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
names2 = [
'age',
'sex',
'trestbps',
'chol',
'thalach',
'num',]
data2 = read_csv('data2.txt', names=names2, sep=' ')
array = data2.values
X = array[:,0:5]
Y = array[:,5]
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('QDA', QuadraticDiscriminantAnalysis()))
models.append(('LSVC', LinearSVC()))
models.append(('SVC', SVC(kernel='sigmoid')))
models.append(('DTC', DecisionTreeClassifier()))
models.append(('KNC', KNeighborsClassifier()))
models.append(('MLP', MLPClassifier()))
models.append(('GPC', GaussianProcessClassifier()))
models.append(('RFC', RandomForestClassifier()))
models.append(('ABC', AdaBoostClassifier()))
models.append(('GNB', GaussianNB()))
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in models:
kfold = KFold(n_splits=10)
cv_results = cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
|
[
"craciundaniela19@gmail.com"
] |
craciundaniela19@gmail.com
|
00b3df640d513087ce7d9a9858d446d5eecf0644
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/googlecloudsdk/command_lib/interactive/bindings.py
|
15cc143b2435aaf9a624a6d8f3a70ea0c641e696
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,906
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud interactive key bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
import sys
from googlecloudsdk.command_lib.interactive import browser
from prompt_toolkit import keys
from prompt_toolkit.key_binding import manager
import six
class _KeyBinding(object):
"""Key binding base info to keep registered bindings and toolbar in sync.
Attributes:
key: The keys.Key.* object.
help_text: The UX help text.
label: The short word label for the bottom toolbar.
metavar: Display this value in GetLabel(markdown=True) instead of the real
value.
status: The bool => string toggle status map.
toggle: The bool toggle state.
"""
def __init__(self, key, help_text=None, label=None, metavar=None,
status=None, toggle=True):
self.key = key
self.help_text = help_text
self.label = label
self.metavar = metavar
self.status = status
self.toggle = toggle
def GetName(self):
"""Returns the binding display name."""
return re.sub('.*<(.*)>.*', r'\1',
six.text_type(self.key)).replace('C-', 'ctrl-') # pytype: disable=wrong-arg-types
def GetLabel(self, markdown=False):
"""Returns the key binding display label containing the name and value."""
if self.label is None and self.status is None:
return None
label = []
if markdown:
label.append('*')
label.append(self.GetName())
label.append(':')
if self.label:
label.append(self.label)
if self.status:
label.append(':')
if markdown:
label.append('*')
if self.status:
if markdown:
label.append('_')
label.append(self.metavar or 'STATE')
label.append('_')
else:
label.append(self.status[self.toggle])
return ''.join(label)
def GetHelp(self, markdown=False):
"""Returns the key help text."""
if not self.help_text:
return None
key = self.GetName()
if markdown:
key = '*{}*'.format(key)
return self.help_text.format(key=key)
def SetMode(self, cli):
"""Sets the toggle mode in the cli."""
del cli
def Handle(self, event):
"""Handles a bound key event."""
self.toggle = not self.toggle
self.SetMode(event.cli)
class _WebHelpKeyBinding(_KeyBinding):
"""The web help key binding."""
def __init__(self, key):
super(_WebHelpKeyBinding, self).__init__(
key=key,
label='web-help',
help_text=(
'Opens a web browser tab/window to display the complete man page '
'help for the current command. If there is no active web browser '
'(running in *ssh*(1) for example), then command specific help or '
'*man*(1) help is attempted.'
),
)
def Handle(self, event):
doc = event.cli.current_buffer.document
browser.OpenReferencePage(event.cli, doc.text, doc.cursor_position)
class _ContextKeyBinding(_KeyBinding):
"""set context key binding."""
def __init__(self, key):
super(_ContextKeyBinding, self).__init__(
key=key,
label='context',
help_text=(
'Sets the context for command input, so you won\'t have to re-type '
'common command prefixes at every prompt. The context is the '
'command line from just after the prompt up to the cursor.'
'\n+\n'
'For example, if you are about to work with `gcloud compute` for '
'a while, type *gcloud compute* and hit {key}. This will display '
'*gcloud compute* at subsequent prompts until the context is '
'changed.'
'\n+\n'
'Hit ctrl-c and {key} to clear the context, or edit a command line '
'and/or move the cursor and hit {key} to set a different context.'
),
)
def Handle(self, event):
event.cli.config.context = (
event.cli.current_buffer.document.text_before_cursor)
class _HelpKeyBinding(_KeyBinding):
"""The help key binding."""
def __init__(self, key, toggle=True):
super(_HelpKeyBinding, self).__init__(
key=key,
label='help',
toggle=toggle, status={False: 'OFF', True: 'ON'},
help_text=(
'Toggles the active help section, *ON* when enabled, *OFF* when '
'disabled.'
),
)
class _QuitKeyBinding(_KeyBinding):
"""The quit key binding."""
def __init__(self, key):
super(_QuitKeyBinding, self).__init__(
key=key,
label='quit',
help_text=(
'Exit.'
),
)
def Handle(self, event):
del event
sys.exit(1)
class _InterruptKeyBinding(_KeyBinding):
"""The interrupt (ctrl-c) key binding.
Catches control-C and clears the prompt input buffer and completer.
"""
def __init__(self, key):
super(_InterruptKeyBinding, self).__init__(
key=key,
)
def Handle(self, event):
event.cli.current_buffer.reset()
event.cli.completer.reset()
class _StopKeyBinding(_KeyBinding):
"""The stop (^Z) key binding.
This binding's sole purpose is to ignore ^Z and prevent it from echoing
in the prompt window.
"""
def __init__(self, key):
super(_StopKeyBinding, self).__init__(
key=key,
)
class KeyBindings(object):
"""All key bindings.
Attributes:
bindings: The list of key bindings in left to right order.
help_key: The help visibility key binding. True for ON, false for
OFF.
context_key: The command prefix context key that sets the context to the
command substring from the beginning of the input line to the current
cursor position.
web_help_key: The browse key binding that pops up the full reference
doc in a browser.
quit_key: The key binding that exits the shell.
"""
def __init__(self, help_mode=True):
"""Associates keys with handlers. Toggle states are reachable from here."""
# The actual key bindings. Changing keys.Keys.* here automatically
# propagates to the bottom toolbar labels.
self.help_key = _HelpKeyBinding(keys.Keys.F2, toggle=help_mode)
self.context_key = _ContextKeyBinding(keys.Keys.F7)
self.web_help_key = _WebHelpKeyBinding(keys.Keys.F8)
self.quit_key = _QuitKeyBinding(keys.Keys.F9)
self.interrupt_signal = _InterruptKeyBinding(keys.Keys.ControlC)
self.stop_signal = _StopKeyBinding(keys.Keys.ControlZ)
# This is the order of binding label appearance in the bottom toolbar.
self.bindings = [
self.help_key,
self.context_key,
self.web_help_key,
self.quit_key,
self.interrupt_signal,
self.stop_signal,
]
def MakeRegistry(self):
"""Makes and returns a key binding registry populated with the bindings."""
m = manager.KeyBindingManager(
enable_abort_and_exit_bindings=True,
enable_system_bindings=True,
enable_search=True,
enable_auto_suggest_bindings=True,)
for binding in self.bindings:
m.registry.add_binding(binding.key, eager=True)(binding.Handle)
return m.registry
def Initialize(self, cli):
"""Initialize key binding defaults in the cli."""
for binding in self.bindings:
binding.SetMode(cli)
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
4b834a5ccb6e2d2cff1e5c2d12793d67fa346046
|
d5853dbef54a0dea3ee88d27781f5aaddc235af5
|
/Schelling/complexite.py
|
4f552d7a3305e16d0bd625cb06dd3c5d4be08091
|
[] |
no_license
|
victor77420/SimulatingSegregation
|
b8f4101c67782e6e07cfbadca5c80f6f4428b24c
|
472fcda024a797fc2be8acb5bbe00214c6747551
|
refs/heads/master
| 2023-05-01T09:40:05.858312
| 2020-05-18T15:54:48
| 2020-05-18T15:54:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,792
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 17:10:18 2020
@author: victorhuynh
"""
from time import time

import matplotlib.pyplot as plt  # used for the plots below
from sklearn.linear_model import LinearRegression

# Assumption: Paramètre is defined alongside schelling in simulationdebase
from simulationdebase import schelling, Paramètre
def cout_n_iter():
    P = Paramètre(50, 80, 1/2)
    x = []  # Will hold the different n_iter values tested.
    y = []  # Will hold the associated execution times.
    for n_iter in range(1, 5000, 5):
        x.append(n_iter)
        t = time()  # t holds the clock time just before the simulation.
        schelling(P, n_iter)
        # Run one simulation with n_iter iterations and the other chosen parameters.
        t = time() - t
        # t now holds the difference between the clock time before and after the
        # simulation, i.e. how long the simulation takes.
        y.append(t)
    # Next, perform a linear regression to find a and b such that t = a*n_iter + b:
    regressor = LinearRegression()
    X = [[i] for i in x]
    # List of singleton lists, one for each element of x.
    regressor.fit(X, y)
    # Perform the linear regression
    a = regressor.coef_[0]
    b = regressor.intercept_
    r2 = regressor.score(X, y)
    y_droite = [a*i + b for i in x]  # Build the linear regression line.
    # Then plot the points obtained from the simulations,
    # and the line obtained by linear regression.
    axes = plt.gca()
    axes.set_xlim(0, 5000)
    plt.plot(x, y, label="t(n_iter)")
    plt.plot(x, y_droite, label="t=" + str(a) + "n_iter+" + str(b) + ", r² =" + str(r2))
    plt.title("Execution time t versus the number of iterations n_iter (1000 points)")
    plt.xlabel("n_iter")
    plt.ylabel("t (in seconds)")
    plt.legend()
    plt.show()
|
[
"noreply@github.com"
] |
victor77420.noreply@github.com
|
59d75aeee1d55ab76727eb92c5a42f191912ea9e
|
9c9e531635b1f562d9d72231f8bb40448db07471
|
/test.py
|
b480d678d46cf1624f774aeda54766e80b5c1bfe
|
[
"Apache-2.0"
] |
permissive
|
vernor1/carnd_advanced_lane_finding
|
f2ea8622035ad75b15816e8ed52d3ee406ab1d7f
|
1e9d2fb7b845c647452542e9bb063781d76cd8ec
|
refs/heads/master
| 2021-07-23T17:26:17.216434
| 2017-11-03T16:12:00
| 2017-11-03T16:12:00
| 80,960,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,844
|
py
|
import argparse
import cv2
import matplotlib.pyplot as plt
import numpy as np
from binary_thresholding import GetThresholdedBinary, GetSobelBinary, GetMagnitudeBinary, GetDirectionBinary, GetChannelBinary
from lane_tracking import TLaneTracker
from lens_correction import TLensCorrector
from perspective_transform import TPerspectiveTransformer
# The following code is only used for debugging and generating test images
#------------------------------------------------------------------------------
if __name__ == '__main__':
argParser = argparse.ArgumentParser(description="Test Pipeline Components")
argParser.add_argument("type",
choices=["lens_correction", "binary_thresholding", "perspective_transform", "lane_tracking"])
argParser.add_argument("in_img",
type=str,
help="Path to the original image file")
argParser.add_argument("out_img",
type=str,
help="Path to the plot file of a side-by-side comparison of the distorted and undistorted images")
args = argParser.parse_args()
img = cv2.imread(args.in_img)
lensCorrector = TLensCorrector("camera_calibration")
undistortedImg = lensCorrector.Undistort(img)
if args.type == "lens_correction":
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,8))
fig.tight_layout()
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title("Original Image", fontsize=20)
ax2.imshow(cv2.cvtColor(undistortedImg, cv2.COLOR_BGR2RGB))
ax2.set_title("Undistorted Image", fontsize=20)
plt.subplots_adjust(left=0.05, right=0.95, top=0.995, bottom=0.005)
fig.savefig(args.out_img)
elif args.type == "binary_thresholding":
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
channelS = hls[:,:,2]
channelBinaryS = GetChannelBinary(channelS, thresh=(90, 255))
channelH = hls[:,:,0]
channelBinaryH = GetChannelBinary(channelH, thresh=(15, 100))
channelR = img[:,:,0]
channelBinaryR = GetChannelBinary(channelR, thresh=(200, 255))
combinedColors = np.zeros_like(channelBinaryS)
        # S-channel can capture shadows on the lane, so we use it in conjunction with H-channel, which captures same-color regions.
        # R-channel captures white lines only and can form a union with S and H.
combinedColors[((channelBinaryS == 1) & (channelBinaryH == 1)) | (channelBinaryR == 1)] = 1
# Use Y-channel providing the most information about lane lines as the base channel for gradients
baseChannel = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gradX = GetSobelBinary(baseChannel, orient='x', sobelKernel=5, thresh=(35, 200))
gradY = GetSobelBinary(baseChannel, orient='y', sobelKernel=5, thresh=(35, 200))
magBinary = GetMagnitudeBinary(baseChannel, sobelKernel=7, thresh=(50, 250))
dirBinary = GetDirectionBinary(baseChannel, sobelKernel=9, thresh=(0.7, 1.3))
combinedGradients = np.zeros_like(dirBinary)
combinedGradients[((gradX == 1) & (gradY == 1)) | ((magBinary == 1) & (dirBinary == 1))] = 1
combinedBinary = np.zeros_like(combinedGradients)
combinedBinary[(combinedGradients == 1) | (combinedColors == 1)] = 1
fig, (ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9, ax10, ax11, ax12, ax13, ax14) = plt.subplots(14, 1, figsize=(10, 82))
fig.tight_layout()
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title("Original Image", fontsize=20)
ax2.imshow(cv2.cvtColor(channelS, cv2.COLOR_GRAY2RGB))
ax2.set_title("S-channel", fontsize=20)
ax3.imshow(cv2.cvtColor(channelBinaryS * 255, cv2.COLOR_GRAY2RGB))
ax3.set_title("S-channel Binary", fontsize=20)
ax4.imshow(cv2.cvtColor(channelH, cv2.COLOR_GRAY2RGB))
ax4.set_title("H-channel", fontsize=20)
ax5.imshow(cv2.cvtColor(channelBinaryH * 255, cv2.COLOR_GRAY2RGB))
ax5.set_title("H-channel Binary", fontsize=20)
ax6.imshow(cv2.cvtColor(channelR, cv2.COLOR_GRAY2RGB))
ax6.set_title("R-channel", fontsize=20)
ax7.imshow(cv2.cvtColor(channelBinaryR * 255, cv2.COLOR_GRAY2RGB))
ax7.set_title("R-channel Binary", fontsize=20)
ax8.imshow(cv2.cvtColor(combinedColors * 255, cv2.COLOR_GRAY2RGB))
ax8.set_title("Combined Colors", fontsize=20)
ax9.imshow(cv2.cvtColor(gradX * 255, cv2.COLOR_GRAY2RGB))
ax9.set_title("X-gradient", fontsize=20)
ax10.imshow(cv2.cvtColor(gradY * 255, cv2.COLOR_GRAY2RGB))
ax10.set_title("Y-gradient", fontsize=20)
ax11.imshow(cv2.cvtColor(magBinary * 255, cv2.COLOR_GRAY2RGB))
ax11.set_title("Gradient Magnitude", fontsize=20)
ax12.imshow(cv2.cvtColor(dirBinary * 255, cv2.COLOR_GRAY2RGB))
ax12.set_title("Gradient Direction", fontsize=20)
ax13.imshow(cv2.cvtColor(combinedGradients * 255, cv2.COLOR_GRAY2RGB))
ax13.set_title("Combined Gradients", fontsize=20)
ax14.imshow(cv2.cvtColor(combinedBinary * 255, cv2.COLOR_GRAY2RGB))
ax14.set_title("Combined Binary", fontsize=20)
plt.subplots_adjust(left=0.05, right=0.95, top=0.995, bottom=0.005)
fig.savefig(args.out_img)
elif args.type == "perspective_transform":
perspectiveTransformer = TPerspectiveTransformer(undistortedImg.shape[1], undistortedImg.shape[0])
warpedImg = perspectiveTransformer.Warp(undistortedImg)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
fig.tight_layout()
ax1.imshow(cv2.cvtColor(undistortedImg, cv2.COLOR_BGR2RGB))
ax1.set_title("Undistorted Image", fontsize=20)
ax2.imshow(cv2.cvtColor(warpedImg, cv2.COLOR_BGR2RGB))
ax2.set_title("Warped Image", fontsize=20)
plt.subplots_adjust(left=0.05, right=0.95, top=0.995, bottom=0.005)
fig.savefig(args.out_img)
elif args.type == "lane_tracking":
perspectiveTransformer = TPerspectiveTransformer(undistortedImg.shape[1], undistortedImg.shape[0])
thresholdedBinary = GetThresholdedBinary(undistortedImg)
warpedBinary = perspectiveTransformer.Warp(thresholdedBinary)
laneTracker = TLaneTracker()
leftCoefficients, rightCoefficients, curveRad, deviation = laneTracker.ProcessLaneImage(warpedBinary)
# Generate x and y values for plotting
plotY = np.linspace(0, warpedBinary.shape[0] - 1, warpedBinary.shape[0])
leftPlotX = leftCoefficients[0] * plotY**2 + leftCoefficients[1] * plotY + leftCoefficients[2]
rightPlotX = rightCoefficients[0] * plotY**2 + rightCoefficients[1] * plotY + rightCoefficients[2]
# Fill the lane surface
laneImg = np.zeros_like(undistortedImg)
# Recast the x and y points into usable format for cv2.fillPoly()
leftPoints = np.array([np.transpose(np.vstack([leftPlotX, plotY]))])
rightPoints = np.array([np.flipud(np.transpose(np.vstack([rightPlotX, plotY])))])
lanePoints = np.hstack((leftPoints, rightPoints))
# Draw the lane onto the warped blank image
cv2.fillPoly(laneImg, np.int_([lanePoints]), (255, 0, 0))
# Draw lines
cv2.polylines(laneImg, np.int_([leftPoints]), isClosed=False, color=(0, 0, 255), thickness=32)
cv2.polylines(laneImg, np.int_([rightPoints]), isClosed=False, color=(0, 255, 0), thickness=32)
unwarpedLane = perspectiveTransformer.Unwarp(laneImg)
outImg = cv2.addWeighted(undistortedImg, 1, unwarpedLane, 0.3, 0)
if deviation < 0:
deviationDirection = "left"
else:
deviationDirection = "right"
deviation = np.absolute(deviation)
cv2.putText(outImg, "Curvature radius: %dm" % (curveRad),
(20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (255, 255, 255), 2)
cv2.putText(outImg, "Deviation: %.2fm %s" % (deviation, deviationDirection),
(20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (255, 255, 255), 2)
# Create the plot
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 20))
fig.tight_layout()
ax1.imshow(cv2.cvtColor(undistortedImg, cv2.COLOR_BGR2RGB))
ax1.set_title("Undistorted Image", fontsize=20)
ax2.imshow(cv2.cvtColor(warpedBinary * 255, cv2.COLOR_GRAY2RGB))
plt.rcParams['lines.linewidth'] = 7
ax2.plot(leftPlotX, plotY, color='red')
ax2.plot(rightPlotX, plotY, color='green')
ax2.set_title("Warped Binary with Detected Lines", fontsize=20)
ax3.imshow(cv2.cvtColor(outImg, cv2.COLOR_BGR2RGB))
ax3.set_title("Undistorted with Overlay", fontsize=20)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
fig.savefig(args.out_img)
|
[
"Yury.Melnikov@gmail.com"
] |
Yury.Melnikov@gmail.com
|
332c014db1ef05b24dc29c573e456b58fa46550d
|
85c873fddf396b079ec3d677b7bc7932542f62e2
|
/named_entity_recognition/demo.py
|
c26bf84eaac4f68e7f0c93b2cb10b4ac0b9a9ee4
|
[] |
no_license
|
HuihuiChyan/DL4NLP_practices
|
a934278c494b0b03a7d236dba25fa11e69ce17c0
|
f3b9f36b81050c8e2e24383a0b6c5b7e447d5229
|
refs/heads/master
| 2022-11-12T17:31:23.018602
| 2020-07-03T06:30:52
| 2020-07-03T06:30:52
| 276,823,612
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
import tensorflow as tf
from data import sents2id
from data import pad_sequence
from data import id2label
import pickle
def demo_one(args):
print("Please input your Chinese sentence:")
sentence = input()
vocab_file = args.vocab_file
model_path = args.model_path
with open(vocab_file, "rb") as vocab_f:
vocab = pickle.load(vocab_f)
sess = tf.InteractiveSession()
ckpt = tf.train.get_checkpoint_state(model_path)
saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path+".meta")
saver.restore(sess, ckpt.model_checkpoint_path)
graph = tf.get_default_graph()
input_sents = graph.get_operation_by_name("embedding_layer/input_sents").outputs[0]
input_labels = graph.get_operation_by_name("embedding_layer/input_labels").outputs[0]
sequence_lengths = graph.get_operation_by_name("embedding_layer/sequence_lengths").outputs[0]
#log_likelihood = graph.get_operation_by_name("loss/log_likelihood").outputs[0]
result = graph.get_operation_by_name("accuracy/result").outputs[0]
right_ner = graph.get_operation_by_name("accuracy/right_ner").outputs[0]
recog_ner = graph.get_operation_by_name("accuracy/recog_ner").outputs[0]
all_ner = graph.get_operation_by_name("accuracy/all_ner").outputs[0]
shape = graph.get_operation_by_name("accuracy/shape").outputs[0]
sentence = list(sentence)
ori_sentence = sentence
sent_len = len(sentence)
sent_labels = [[0 for _ in range(sent_len)] for _ in range(256)]
seq_len = [sent_len]
seq_len.extend([0 for _ in range(255)])
sentence = sents2id([sentence], vocab)
sentence.extend([[1 for _ in range(sent_len)] for _ in range(255)])
#print(sentence)
#print(sent_labels)
#print(seq_len)
result = sess.run(result, feed_dict={input_sents:sentence,
input_labels:sent_labels,
sequence_lengths:seq_len})
result_labels = id2label([result])
result_labels = result_labels[0][:sent_len]
for i in range(sent_len):
print(str(ori_sentence[i])+" "+str(result_labels[i]))
|
[
"noreply@github.com"
] |
HuihuiChyan.noreply@github.com
|
c33a1ed3472dbba45bc3662cd0fd882b24634a01
|
335cdc7c5e5a38699c8359e83315a89fa4d6b1a1
|
/venv/Scripts/pip3.7-script.py
|
ddeee4cbbf72baa5329f2ff10ad8f4585313709b
|
[] |
no_license
|
Crazy-Color/firsttest
|
46fd01f623d3b6df560ff12edb6ced1a41ccdbb2
|
52bbdf9c4ae23a7dd5af38590b56ab49dda7df4d
|
refs/heads/master
| 2020-06-08T00:40:44.975729
| 2019-07-03T15:04:21
| 2019-07-03T15:04:21
| 193,125,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
#!D:\0101010101\PythonLearnning\test\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"hmncumt@163.com"
] |
hmncumt@163.com
|
35013d36abe32551f34f323dc0a17f772efdf540
|
44dbddc5be734cf7fae25c85dc3f6309eebec9ef
|
/redirectory/runnables/management.py
|
0f8cf631e40f00525a19c150ee84b83af9db472f
|
[
"BSD-3-Clause"
] |
permissive
|
kumina/k8s-redirectory
|
1df99db98795344f3947abb96a52db3260e0c08c
|
1b91ec1a065f60a40c66e897d3c8dc935e00bcc6
|
refs/heads/master
| 2023-01-14T07:23:15.924964
| 2021-06-08T12:09:18
| 2021-06-08T12:09:18
| 194,644,803
| 5
| 0
|
BSD-3-Clause
| 2023-01-04T02:59:29
| 2019-07-01T09:43:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
from kubi_ecs_logger import Logger, Severity
from .runnable_service import RunnableService
from redirectory.libs_int.service import NamespaceManager
from redirectory.libs_int.hyperscan import HsManager
class ManagementService(RunnableService):
def run(self):
# Load hyperscan database because of test in UI
HsManager().database.load_database()
# Get needed namespaces
management_ns = NamespaceManager().get_namespace("management")
status_ns = NamespaceManager().get_namespace("status")
# Add the ui
from redirectory.services import ManagementUI
self.api.add_resource(ManagementUI, "/", "/<path:path>")
# Log ui folder
Logger().event(
category="ui",
action="ui loaded",
dataset=self.config.directories.ui
).out(severity=Severity.INFO)
# Add namespaces to api
self.api.add_namespace(management_ns)
self.api.add_namespace(status_ns)
# Init api with application
self.api.init_app(self.application)
# Log
Logger() \
.event(category="runnable", action="service configured") \
.service(name="management").out(severity=Severity.INFO)
# Run according to configuration
if self.config.deployment == "prod":
self._run_production()
elif self.config.deployment == "dev":
self._run_development()
elif self.config.deployment == "test":
self._run_test()
|
[
"ivaylo@kumina.nl"
] |
ivaylo@kumina.nl
|
1a8424e397e17fdf6126e184db70519c5d05aa92
|
471bd0bb613517cdeb7e658fe2834b0d2bece0bf
|
/lib/models/train_Solver_VCOCO_MultiGPU.py
|
b30a12632f40236adaefdb69199f47073a7abc35
|
[
"MIT"
] |
permissive
|
zhihou7/VCL
|
1d9dadc216565531caaaf580720e893e6f11040e
|
1bc21ec64d3bae15b8bac524cfa4beeaf08f2c48
|
refs/heads/master
| 2023-04-21T03:17:04.645416
| 2021-04-23T09:33:15
| 2021-04-23T09:33:15
| 282,781,879
| 33
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,855
|
py
|
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhi Hou
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from models.train_Solver_VCOCO import SolverWrapper
from ult.config import cfg
from ult.timer import Timer
import os
import tensorflow as tf
class VCOCOSolverWrapperMultiGPU(SolverWrapper):
"""
A wrapper class for the training process
"""
def __init__(self, sess, network, output_dir, tbdir, Restore_flag, pretrained_model):
super(VCOCOSolverWrapperMultiGPU, self).__init__(sess, network, None, None, output_dir, tbdir, 0, 0, 0, Restore_flag, pretrained_model)
self.image = None
self.image_id = None
self.spatial = None
self.H_num = None
self.blobs = None
def set_coco_data(self, image, image_id, H_num, blobs):
if image is not None: self.image = image
if image_id is not None: self.image_id = image_id
self.blobs = blobs
if H_num is not None: self.H_num = H_num
def construct_graph2(self, sess):
print("construct_graph2")
compose_type = self.compose_feature_helper.get_compose_type()
with sess.graph.as_default(), tf.device('/cpu:0'):
# Set the random seed for tensorflow
tf.set_random_seed(cfg.RNG_SEED)
init_step = self.get_init_step()
global_step = tf.Variable(init_step, trainable=False, name='global_step')
step_factor = self.get_step_factor()
lr, self.optimizer = self.get_optimzer_lr(global_step, step_factor)
tower_grads = []
V_features = []
O_features = []
num_stop_list = []
tower_losses = []
for i in range(2):
gpu_idx = i
if len(os.environ['CUDA_VISIBLE_DEVICES'].split(',')) == 1:
gpu_idx = 0
with tf.device('/gpu:%d' % gpu_idx):
with tf.name_scope('%s_%d' % ('HICO', i), ) as scope:
split_image = self.image[i]
split_image_id = self.image_id[i]
split_H_num = self.H_num[i]
blobs = self.blobs[i]
print(i , split_H_num, '----------------------')
self.net.set_ph(split_image, split_image_id, split_H_num, blobs['sp'], blobs['H_boxes'],
blobs['O_boxes'], blobs['gt_class_H'], blobs['gt_class_HO'], blobs['gt_class_sp'],
blobs['Mask_HO'], blobs['Mask_H'], blobs['Mask_sp'], blobs['gt_class_C'])
# Build the main computation graph
layers = self.net.create_architecture(True) # is_training flag: True
O_features.append(self.net.intermediate['fc7_O'][:self.net.get_compose_num_stop()])
V_features.append(self.net.intermediate['fc7_verbs'][:self.net.get_compose_num_stop()])
num_stop_list.append(self.net.get_compose_num_stop())
print('num stop:', self.net.get_compose_num_stop(), num_stop_list)
# Define the loss
loss = layers['total_loss']
tower_losses.append(loss)
if i == 1:
if not self.net.model_name.__contains__('base'):
if self.net.model_name.__contains__('_t2_')\
or self.net.model_name.__contains__('_t3_'):
key = 'gt_class_C'
else:
key = 'gt_class_HO'
new_loss = self.compose_feature_helper.merge_generate(O_features, V_features,
[self.blobs[j][key][
:num_stop_list[j]] for j in
range(2)],
compose_type)
ll = self.compose_feature_helper.get_ll()
tower_losses.append(new_loss * ll)
variables = tf.trainable_variables()
grads_and_vars = self.optimizer.compute_gradients(tf.reduce_sum(tower_losses), variables)
tower_grads.append(grads_and_vars)
if self.net.model_name.__contains__('base'):
assert len(tower_losses) == 2, tower_losses
capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in grads_and_vars if grad is not None]
# self.addition_loss(capped_gvs, layers)
# for grad, var in capped_gvs:
# print('update: {}'.format(var.name))
train_op = self.optimizer.apply_gradients(capped_gvs, global_step=global_step)
tf.summary.scalar('lr', lr)
tf.summary.scalar('merge_loss', tf.reduce_sum(tower_losses))
self.net.summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver(max_to_keep=cfg.TRAIN.SNAPSHOT_KEPT)
# Write the train and validation information to tensorboard
self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
return lr, train_op, tf.reduce_sum(tower_losses)
def snapshot(self, sess, iter):
if self.net.model_name.__contains__('VCOCO'):
snapshot_iters = cfg.TRAIN.SNAPSHOT_ITERS
elif self.net.model_name.__contains__('zs4_') and self.net.model_name.__contains__('test'):
snapshot_iters = cfg.TRAIN.SNAPSHOT_ITERS
elif self.net.model_name.__contains__('multi'):
snapshot_iters = cfg.TRAIN.SNAPSHOT_ITERS * 5 // 2
else:
snapshot_iters = cfg.TRAIN.SNAPSHOT_ITERS * 5
if (iter % snapshot_iters == 0 and iter != 0):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# Store the model snapshot
filename = 'HOI' + '_iter_{:d}'.format(iter) + '.ckpt'
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print('Wrote snapshot to: {:s}'.format(filename), iter / snapshot_iters)
def train_model(self, sess, max_iters):
lr, train_op, t_loss = self.construct_graph2(sess)
self.from_snapshot(sess)
sess.graph.finalize()
timer = Timer()
# Data_length = len(self.Trainval_GT)
iter = self.get_init_step()
while iter < max_iters + 1:
timer.tic()
if (iter % cfg.TRAIN.SUMMARY_INTERVAL == 0) or (iter < 20):
# Compute the graph with summary
# total_loss, image_id, summary = self.net.train_step_tfr_with_summary(sess, blobs, lr, train_op)
total_loss, summary, image_id, _ = sess.run([t_loss,
self.net.summary_op, self.net.image_id,
train_op])
# total_loss, summary = self.net.train_step_with_summary(sess, blobs, lr.eval(), train_op)
self.writer.add_summary(summary, float(iter))
else:
# Compute the graph without summary
total_loss, image_id, _ = sess.run([t_loss, self.net.image_id,
train_op])
# total_loss, image_id = self.net.train_step_tfr(sess, blobs, lr, train_op)
timer.toc()
# print(image_id)
# Display training information
if iter % (cfg.TRAIN.DISPLAY) == 0:
if type(image_id) == tuple:
image_id = image_id[0]
print('iter: {:d} / {:d}, im_id: {:d}, total loss: {:.6f}, lr: {:f}, speed: {:.3f} s/iter'.format(
iter, max_iters, image_id, total_loss, lr.eval(), timer.average_time), end='\r', flush=True)
# print('\rmodel: {} im_detect: {:d}/{:d} {:d}, {:.3f}s'.format(net.model_name, count, 15765, _image_id,
# _t['im_detect'].average_time), end='',
# flush=True)
# Snapshotting
self.snapshot(sess, iter)
iter += 1
self.writer.close()
|
[
"zhou9878@uni.sydney.edu.au"
] |
zhou9878@uni.sydney.edu.au
|
86fcc94d4adea80c43d2d6c351eb8ca9e8de46c2
|
66cdc54a4870514faec5a51ead316d7871e8362e
|
/main.py
|
e075f66c3ab0d3c181266953d79b71fd5f1ac444
|
[] |
no_license
|
ashwinram472/cats_vs_dogs
|
e34d933741893a127ebae3f234454f722ceca08c
|
c50ff9d4e952166f4cae62611e8d939d89f83fd5
|
refs/heads/master
| 2022-12-03T22:13:42.666214
| 2020-08-16T01:45:06
| 2020-08-16T01:45:06
| 286,925,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,587
|
py
|
# Importing Libraries
from cnn_model import cnn
from inception_model import inception_model
import tensorflow as tf
#Turning off warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import matplotlib.pyplot as plt
from eda import model_performance
# Doing Image Augmentation to prevent overfitting and capture different features
def train_model(model):
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode = 'nearest')
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255)
train_generator = train_datagen.flow_from_directory('data/dogs_cats/train/',
# There are 2000 images
batch_size=20,
target_size=(150,150),
class_mode = 'binary')
validation_generator = test_datagen.flow_from_directory('data/dogs_cats/test/',
batch_size=20,
target_size=(150,150),
class_mode = 'binary')
#Fitting model with generator.
history = model.fit_generator(train_generator,
validation_data=validation_generator,
steps_per_epoch=100,
epochs=15,
validation_steps=50,verbose = 2)
return history,model
if __name__ == "__main__":
    # Using CNN model
    cnn_model = cnn()
    print('Running CNN Model:')
    cnn_history, cnn_model = train_model(cnn_model)
    cnn_model.save('models/cnn_model.h5')
    # Using Inception model; a distinct name avoids shadowing the imported factory
    inception = inception_model()
    # Plotting Model Performance
    # CNN Model Performance
    model_performance(cnn_history)
|
[
"ashwinram472@gmail.com"
] |
ashwinram472@gmail.com
|
06bba6c82a00da9f4ad7a8290e459b69bbaa2a7c
|
61ef327bd1d5ff6db7595221db6823c947dab42b
|
/FlatData/ScenarioScriptGroup1Excel.py
|
4abaa01eb0bee3726459ade22c4860fe42b9440e
|
[] |
no_license
|
Aikenfell/Blue-Archive---Asset-Downloader
|
88e419686a80b20b57a10a3033c23c80f86d6bf9
|
92f93ffbdb81a47cef58c61ec82092234eae8eec
|
refs/heads/main
| 2023-09-06T03:56:50.998141
| 2021-11-19T12:41:58
| 2021-11-19T12:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,489
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ScenarioScriptGroup1Excel(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ScenarioScriptGroup1Excel()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsScenarioScriptGroup1Excel(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# ScenarioScriptGroup1Excel
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ScenarioScriptGroup1Excel
def GroupId(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# ScenarioScriptGroup1Excel
def SelectionGroup(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# ScenarioScriptGroup1Excel
def BGMId(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# ScenarioScriptGroup1Excel
def Sound(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def Transition(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# ScenarioScriptGroup1Excel
def BGName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# ScenarioScriptGroup1Excel
def BGEffect(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# ScenarioScriptGroup1Excel
def PopupFileName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def ScriptKr(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def TextJp(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def TextTh(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def TextTw(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def TextEn(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def TextDe(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def TextFr(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ScenarioScriptGroup1Excel
def VoiceJp(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def Start(builder): builder.StartObject(16)
def ScenarioScriptGroup1ExcelStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddGroupId(builder, GroupId): builder.PrependInt64Slot(0, GroupId, 0)
def ScenarioScriptGroup1ExcelAddGroupId(builder, GroupId):
"""This method is deprecated. Please switch to AddGroupId."""
return AddGroupId(builder, GroupId)
def AddSelectionGroup(builder, SelectionGroup): builder.PrependInt64Slot(1, SelectionGroup, 0)
def ScenarioScriptGroup1ExcelAddSelectionGroup(builder, SelectionGroup):
"""This method is deprecated. Please switch to AddSelectionGroup."""
return AddSelectionGroup(builder, SelectionGroup)
def AddBGMId(builder, BGMId): builder.PrependInt64Slot(2, BGMId, 0)
def ScenarioScriptGroup1ExcelAddBGMId(builder, BGMId):
"""This method is deprecated. Please switch to AddBGMId."""
return AddBGMId(builder, BGMId)
def AddSound(builder, Sound): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(Sound), 0)
def ScenarioScriptGroup1ExcelAddSound(builder, Sound):
"""This method is deprecated. Please switch to AddSound."""
return AddSound(builder, Sound)
def AddTransition(builder, Transition): builder.PrependUint32Slot(4, Transition, 0)
def ScenarioScriptGroup1ExcelAddTransition(builder, Transition):
"""This method is deprecated. Please switch to AddTransition."""
return AddTransition(builder, Transition)
def AddBGName(builder, BGName): builder.PrependUint32Slot(5, BGName, 0)
def ScenarioScriptGroup1ExcelAddBGName(builder, BGName):
"""This method is deprecated. Please switch to AddBGName."""
return AddBGName(builder, BGName)
def AddBGEffect(builder, BGEffect): builder.PrependUint32Slot(6, BGEffect, 0)
def ScenarioScriptGroup1ExcelAddBGEffect(builder, BGEffect):
"""This method is deprecated. Please switch to AddBGEffect."""
return AddBGEffect(builder, BGEffect)
def AddPopupFileName(builder, PopupFileName): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(PopupFileName), 0)
def ScenarioScriptGroup1ExcelAddPopupFileName(builder, PopupFileName):
"""This method is deprecated. Please switch to AddPopupFileName."""
return AddPopupFileName(builder, PopupFileName)
def AddScriptKr(builder, ScriptKr): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(ScriptKr), 0)
def ScenarioScriptGroup1ExcelAddScriptKr(builder, ScriptKr):
"""This method is deprecated. Please switch to AddScriptKr."""
return AddScriptKr(builder, ScriptKr)
def AddTextJp(builder, TextJp): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(TextJp), 0)
def ScenarioScriptGroup1ExcelAddTextJp(builder, TextJp):
"""This method is deprecated. Please switch to AddTextJp."""
return AddTextJp(builder, TextJp)
def AddTextTh(builder, TextTh): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(TextTh), 0)
def ScenarioScriptGroup1ExcelAddTextTh(builder, TextTh):
"""This method is deprecated. Please switch to AddTextTh."""
return AddTextTh(builder, TextTh)
def AddTextTw(builder, TextTw): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(TextTw), 0)
def ScenarioScriptGroup1ExcelAddTextTw(builder, TextTw):
"""This method is deprecated. Please switch to AddTextTw."""
return AddTextTw(builder, TextTw)
def AddTextEn(builder, TextEn): builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(TextEn), 0)
def ScenarioScriptGroup1ExcelAddTextEn(builder, TextEn):
"""This method is deprecated. Please switch to AddTextEn."""
return AddTextEn(builder, TextEn)
def AddTextDe(builder, TextDe): builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(TextDe), 0)
def ScenarioScriptGroup1ExcelAddTextDe(builder, TextDe):
"""This method is deprecated. Please switch to AddTextDe."""
return AddTextDe(builder, TextDe)
def AddTextFr(builder, TextFr): builder.PrependUOffsetTRelativeSlot(14, flatbuffers.number_types.UOffsetTFlags.py_type(TextFr), 0)
def ScenarioScriptGroup1ExcelAddTextFr(builder, TextFr):
"""This method is deprecated. Please switch to AddTextFr."""
return AddTextFr(builder, TextFr)
def AddVoiceJp(builder, VoiceJp): builder.PrependUOffsetTRelativeSlot(15, flatbuffers.number_types.UOffsetTFlags.py_type(VoiceJp), 0)
def ScenarioScriptGroup1ExcelAddVoiceJp(builder, VoiceJp):
"""This method is deprecated. Please switch to AddVoiceJp."""
return AddVoiceJp(builder, VoiceJp)
def End(builder): return builder.EndObject()
def ScenarioScriptGroup1ExcelEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
|
[
"rkolbe96@gmail.com"
] |
rkolbe96@gmail.com
|
1ee8a1ef21b0e7a78ce3b6ce3457194be9afdd87
|
d6bb89975fac26d4a40d4326af98ed6a33f2a922
|
/builtin-functions/print/sep.py
|
ba59df49059ec7a0fb0d14bddece73cf79e87fe4
|
[] |
no_license
|
ReneNyffenegger/about-python
|
70b51469d27df27454acfe809dae03c0c06d9ba2
|
6fb57d5eaee19f970b392127d66d53fb3ef9dc21
|
refs/heads/master
| 2023-08-19T17:17:09.143008
| 2023-08-15T07:03:28
| 2023-08-15T07:03:28
| 22,133,854
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
print('foo', 'bar', 'baz', sep=', ')
#
# foo, bar, baz
lst = [ 'one', 'two', 'three' ]
print(*lst, sep=' - ')
#
# one - two - three
|
[
"rene.nyffenegger@adp-gmbh.ch"
] |
rene.nyffenegger@adp-gmbh.ch
|
9273551330884ade4300c2e81f1eb5451aff938e
|
0ab491f953e37632c51f9c672c6dafb47f94a80d
|
/venv/bin/easy_install-2.7
|
7c3c5436a1e4f46ea405f10f846611437a57c970
|
[] |
no_license
|
NEvans85/flask_tutorial
|
12d43a482c2183b789f54bec7ef47b004333a4b8
|
d2d6dc10345845713899cadfd889d48595762a79
|
refs/heads/master
| 2020-03-07T06:13:04.768614
| 2018-04-17T20:26:03
| 2018-04-17T20:26:03
| 127,315,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
7
|
#!/Users/nicholasevans/DevProjects/Python_Projects/Flaskr/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nevans85.pro@gmail.com"
] |
nevans85.pro@gmail.com
|
78f6a7ffaa1d67a6dca1dbe0b92302c2126d58d0
|
e02b6e43b690e148e3ea619c2fc99bf515d6b24c
|
/mlpackage01/MLPractical42.py
|
b5176855d6394ac15f0bffacb612c02516b9b0b5
|
[] |
no_license
|
kashishy/MLTraining
|
35c79764ca9f3cbcf87149d5d06d4103b2876c76
|
86056ba3af6f6178c7c73a1866f77cf6a845517a
|
refs/heads/master
| 2020-12-07T04:43:24.870647
| 2020-01-16T17:21:07
| 2020-01-16T17:21:07
| 232,635,382
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
import pandas as pd
from numpy import set_printoptions
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import warnings
warnings.filterwarnings("ignore")
filename = 'indians-diabetes.data.csv'
hnames = ['preg', 'plas', 'pres',
'skin', 'test', 'mass',
'pedi', 'age', 'class'
]
dataframe = pd.read_csv(filename, names=hnames)
array = dataframe.values
X = array[:, 0:8]
Y = array[:, 8]
# feature extraction
test = SelectKBest(score_func=chi2, k=3)
fit = test.fit(X, Y)
# Summarize scores
set_printoptions(precision=3)
print(fit.scores_)
features = fit.transform(X)
# Summarize selected features
print("\n\n")
print(features[0:20, :])
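# Added sketch: map the kept columns back to their names. get_support() is
# part of SelectKBest's documented API; hnames[:8] are the feature columns.
selected = [name for name, keep in zip(hnames[:8], fit.get_support()) if keep]
print("Selected features:", selected)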
|
[
"ashishkmrydv392@gmail.com"
] |
ashishkmrydv392@gmail.com
|
9511f47b91c96757d11224a795c9dfdb289998b0
|
8eff2593ef44b3bdb8dda8678eb051e58b4b5129
|
/myenv/lib/python3.5/site-packages/IPython/core/magics/basic.py
|
6ce120779c8b278f6fc1130d386c50105b07ddf4
|
[
"MIT"
] |
permissive
|
rupeshparab/techscan
|
08391c26c7916dd397527e1da8f91b4aa78bc96e
|
ce2558602ddad31873d7129f25b1cc61895b9939
|
refs/heads/master
| 2022-12-11T03:33:07.533280
| 2017-09-03T17:48:57
| 2017-09-03T17:48:57
| 102,280,759
| 1
| 1
|
MIT
| 2022-12-08T00:36:08
| 2017-09-03T17:11:16
|
Python
|
UTF-8
|
Python
| false
| false
| 21,310
|
py
|
"""Implementation of basic magic functions."""
import argparse
import textwrap
import io
import sys
from pprint import pformat
from IPython.core import magic_arguments, page
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
from IPython.utils.text import format_screen, dedent, indent
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.ipstruct import Struct
from warnings import warn
from logging import error
class MagicsDisplay(object):
def __init__(self, magics_manager, ignore=None):
self.ignore = ignore if ignore else []
self.magics_manager = magics_manager
def _lsmagic(self):
"""The main implementation of the %lsmagic"""
mesc = magic_escapes['line']
cesc = magic_escapes['cell']
mman = self.magics_manager
magics = mman.lsmagic()
out = ['Available line magics:',
mesc + (' '+mesc).join(sorted([m for m,v in magics['line'].items() if (v not in self.ignore)])),
'',
'Available cell magics:',
cesc + (' '+cesc).join(sorted([m for m,v in magics['cell'].items() if (v not in self.ignore)])),
'',
mman.auto_status()]
return '\n'.join(out)
def _repr_pretty_(self, p, cycle):
p.text(self._lsmagic())
def __str__(self):
return self._lsmagic()
def _jsonable(self):
"""turn magics dict into jsonable dict of the same structure
replaces object instances with their class names as strings
"""
magic_dict = {}
mman = self.magics_manager
magics = mman.lsmagic()
for key, subdict in magics.items():
d = {}
magic_dict[key] = d
for name, obj in subdict.items():
try:
classname = obj.__self__.__class__.__name__
except AttributeError:
classname = 'Other'
d[name] = classname
return magic_dict
def _repr_json_(self):
return self._jsonable()
@magics_class
class BasicMagics(Magics):
"""Magics that provide central IPython functionality.
These are various magics that don't fit into specific categories but that
are all part of the base 'IPython experience'."""
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-l', '--line', action='store_true',
help="""Create a line magic alias."""
)
@magic_arguments.argument(
'-c', '--cell', action='store_true',
help="""Create a cell magic alias."""
)
@magic_arguments.argument(
'name',
help="""Name of the magic to be created."""
)
@magic_arguments.argument(
'target',
help="""Name of the existing line or cell magic."""
)
@magic_arguments.argument(
'-p', '--params', default=None,
help="""Parameters passed to the magic function."""
)
@line_magic
def alias_magic(self, line=''):
"""Create an alias for an existing line or cell magic.
Examples
--------
::
In [1]: %alias_magic t timeit
Created `%t` as an alias for `%timeit`.
Created `%%t` as an alias for `%%timeit`.
In [2]: %t -n1 pass
1 loops, best of 3: 954 ns per loop
In [3]: %%t -n1
...: pass
...:
1 loops, best of 3: 954 ns per loop
In [4]: %alias_magic --cell whereami pwd
UsageError: Cell magic function `%%pwd` not found.
In [5]: %alias_magic --line whereami pwd
Created `%whereami` as an alias for `%pwd`.
In [6]: %whereami
Out[6]: u'/home/testuser'
In [7]: %alias_magic h history -p "-l 30" --line
Created `%h` as an alias for `%history -l 30`.
"""
args = magic_arguments.parse_argstring(self.alias_magic, line)
shell = self.shell
mman = self.shell.magics_manager
escs = ''.join(magic_escapes.values())
target = args.target.lstrip(escs)
name = args.name.lstrip(escs)
params = args.params
if (params and
((params.startswith('"') and params.endswith('"'))
or (params.startswith("'") and params.endswith("'")))):
params = params[1:-1]
# Find the requested magics.
m_line = shell.find_magic(target, 'line')
m_cell = shell.find_magic(target, 'cell')
if args.line and m_line is None:
raise UsageError('Line magic function `%s%s` not found.' %
(magic_escapes['line'], target))
if args.cell and m_cell is None:
raise UsageError('Cell magic function `%s%s` not found.' %
(magic_escapes['cell'], target))
# If --line and --cell are not specified, default to the ones
# that are available.
if not args.line and not args.cell:
if not m_line and not m_cell:
raise UsageError(
'No line or cell magic with name `%s` found.' % target
)
args.line = bool(m_line)
args.cell = bool(m_cell)
params_str = "" if params is None else " " + params
if args.line:
mman.register_alias(name, target, 'line', params)
print('Created `%s%s` as an alias for `%s%s%s`.' % (
magic_escapes['line'], name,
magic_escapes['line'], target, params_str))
if args.cell:
mman.register_alias(name, target, 'cell', params)
print('Created `%s%s` as an alias for `%s%s%s`.' % (
magic_escapes['cell'], name,
magic_escapes['cell'], target, params_str))
@line_magic
def lsmagic(self, parameter_s=''):
"""List currently available magic functions."""
return MagicsDisplay(self.shell.magics_manager, ignore=[self.pip])
def _magic_docs(self, brief=False, rest=False):
"""Return docstrings from magic functions."""
mman = self.shell.magics_manager
docs = mman.lsmagic_docs(brief, missing='No documentation')
if rest:
format_string = '**%s%s**::\n\n%s\n\n'
else:
format_string = '%s%s:\n%s\n'
return ''.join(
[format_string % (magic_escapes['line'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['line'].items())]
+
[format_string % (magic_escapes['cell'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['cell'].items())]
)
@line_magic
def magic(self, parameter_s=''):
"""Print information about the magic function system.
Supported formats: -latex, -brief, -rest
"""
mode = ''
try:
mode = parameter_s.split()[0][1:]
except IndexError:
pass
brief = (mode == 'brief')
rest = (mode == 'rest')
magic_docs = self._magic_docs(brief, rest)
if mode == 'latex':
print(self.format_latex(magic_docs))
return
else:
magic_docs = format_screen(magic_docs)
out = ["""
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. There are two kinds of magics, line-oriented and cell-oriented.
Line magics are prefixed with the % character and work much like OS
command-line calls: they get as an argument the rest of the line, where
arguments are passed without parentheses or quotes. For example, this will
time the given statement::
%timeit range(1000)
Cell magics are prefixed with a double %%, and they are functions that get as
an argument not only the rest of the line, but also the lines below it in a
separate argument. These magics are called with two arguments: the rest of the
call line and the body of the cell, consisting of the lines below the first.
For example::
%%timeit x = numpy.random.randn((100, 100))
numpy.linalg.svd(x)
will time the execution of the numpy svd routine, running the assignment of x
as part of the setup phase, which is not timed.
In a line-oriented client (the terminal or Qt console IPython), starting a new
input with %% will automatically enter cell mode, and IPython will continue
reading input until a blank line is given. In the notebook, simply type the
whole cell as one entity, but keep in mind that the %% escape can only be at
the very start of the cell.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly for line
magics; cell magics always require an explicit '%%' escape. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:""",
magic_docs,
"Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
str(self.lsmagic()),
]
page.page('\n'.join(out))
@line_magic
def page(self, parameter_s=''):
"""Pretty print the object and display it through a pager.
%page [options] OBJECT
If no object is given, use _ (last output).
Options:
-r: page str(object), don't pretty-print it."""
# After a function contributed by Olivier Aubert, slightly modified.
# Process options/args
opts, args = self.parse_options(parameter_s, 'r')
raw = 'r' in opts
oname = args and args or '_'
info = self.shell._ofind(oname)
if info['found']:
txt = (raw and str or pformat)( info['obj'] )
page.page(txt)
else:
print('Object `%s` not found' % oname)
@line_magic
def profile(self, parameter_s=''):
"""Print your currently active IPython profile.
See Also
--------
prun : run code using the Python profiler
(:meth:`~IPython.core.magics.execution.ExecutionMagics.prun`)
"""
raise UsageError("The `%profile` magic has been deprecated since IPython 2.0. "
"and removed in IPython 6.0. Please use the value of `get_ipython().profile` instead "
"to see current profile in use. Perhaps you meant to use `%prun` to profile code?")
@line_magic
def pprint(self, parameter_s=''):
"""Toggle pretty printing on/off."""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.pprint = bool(1 - ptformatter.pprint)
print('Pretty printing has been turned',
['OFF','ON'][ptformatter.pprint])
@line_magic
def colors(self, parameter_s=''):
"""Switch color scheme for prompts, info system and exception handlers.
Currently implemented schemes: NoColor, Linux, LightBG.
Color scheme names are not case-sensitive.
Examples
--------
To get a plain black and white terminal::
%colors nocolor
"""
def color_switch_err(name):
warn('Error changing %s color schemes.\n%s' %
(name, sys.exc_info()[1]), stacklevel=2)
new_scheme = parameter_s.strip()
if not new_scheme:
raise UsageError(
"%colors: you must specify a color scheme. See '%colors?'")
# local shortcut
shell = self.shell
# Set shell colour scheme
try:
shell.colors = new_scheme
shell.refresh_style()
except:
color_switch_err('shell')
# Set exception colors
try:
shell.InteractiveTB.set_colors(scheme = new_scheme)
shell.SyntaxTB.set_colors(scheme = new_scheme)
except:
color_switch_err('exception')
# Set info (for 'object?') colors
if shell.color_info:
try:
shell.inspector.set_active_scheme(new_scheme)
except:
color_switch_err('object inspector')
else:
shell.inspector.set_active_scheme('NoColor')
@line_magic
def xmode(self, parameter_s=''):
"""Switch modes for the exception handlers.
Valid modes: Plain, Context and Verbose.
If called without arguments, acts as a toggle."""
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
shell = self.shell
new_mode = parameter_s.strip().capitalize()
try:
shell.InteractiveTB.set_mode(mode=new_mode)
print('Exception reporting mode:',shell.InteractiveTB.mode)
except:
xmode_switch_err('user')
@line_magic
def pip(self, args=''):
"""
Intercept usage of ``pip`` in IPython and direct user to run command outside of IPython.
"""
print(textwrap.dedent('''
The following command must be run outside of the IPython shell:
$ pip {args}
The Python package manager (pip) can only be used from outside of IPython.
Please reissue the `pip` command in a separate terminal or command prompt.
See the Python documentation for more informations on how to install packages:
https://docs.python.org/3/installing/'''.format(args=args)))
@line_magic
def quickref(self, arg):
""" Show a quick reference sheet """
from IPython.core.usage import quick_reference
qr = quick_reference + self._magic_docs(brief=True)
page.page(qr)
@line_magic
def doctest_mode(self, parameter_s=''):
"""Toggle doctest mode on and off.
This mode is intended to make IPython behave as much as possible like a
plain Python shell, from the perspective of how its prompts, exceptions
and output look. This makes it easy to copy and paste parts of a
session into doctests. It does so by:
- Changing the prompts to the classic ``>>>`` ones.
- Changing the exception reporting mode to 'Plain'.
- Disabling pretty-printing of output.
Note that IPython also supports the pasting of code snippets that have
leading '>>>' and '...' prompts in them. This means that you can paste
doctests from files or docstrings (even if they have leading
whitespace), and the code will execute correctly. You can then use
'%history -t' to see the translated history; this will give you the
input after removal of all the leading prompts and whitespace, which
can be pasted back into an editor.
With these features, you can switch into this mode easily whenever you
need to do testing and changes to doctests, without having to leave
your existing IPython session.
"""
# Shorthands
shell = self.shell
meta = shell.meta
disp_formatter = self.shell.display_formatter
ptformatter = disp_formatter.formatters['text/plain']
# dstore is a data store kept in the instance metadata bag to track any
# changes we make, so we can undo them later.
dstore = meta.setdefault('doctest_mode',Struct())
save_dstore = dstore.setdefault
# save a few values we'll need to recover later
mode = save_dstore('mode',False)
save_dstore('rc_pprint',ptformatter.pprint)
save_dstore('xmode',shell.InteractiveTB.mode)
save_dstore('rc_separate_out',shell.separate_out)
save_dstore('rc_separate_out2',shell.separate_out2)
save_dstore('rc_separate_in',shell.separate_in)
save_dstore('rc_active_types',disp_formatter.active_types)
if not mode:
# turn on
# Prompt separators like plain python
shell.separate_in = ''
shell.separate_out = ''
shell.separate_out2 = ''
ptformatter.pprint = False
disp_formatter.active_types = ['text/plain']
shell.magic('xmode Plain')
else:
# turn off
shell.separate_in = dstore.rc_separate_in
shell.separate_out = dstore.rc_separate_out
shell.separate_out2 = dstore.rc_separate_out2
ptformatter.pprint = dstore.rc_pprint
disp_formatter.active_types = dstore.rc_active_types
shell.magic('xmode ' + dstore.xmode)
# mode here is the state before we switch; switch_doctest_mode takes
# the mode we're switching to.
shell.switch_doctest_mode(not mode)
# Store new mode and inform
dstore.mode = bool(not mode)
mode_label = ['OFF','ON'][dstore.mode]
print('Doctest mode is:', mode_label)
@line_magic
def gui(self, parameter_s=''):
"""Enable or disable IPython GUI event loop integration.
%gui [GUINAME]
This magic replaces IPython's threaded shells that were activated
using the (pylab/wthread/etc.) command line flags. GUI toolkits
can now be enabled at runtime and keyboard
interrupts should work without any problems. The following toolkits
are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
%gui wx # enable wxPython event loop integration
%gui qt4|qt # enable PyQt4 event loop integration
%gui qt5 # enable PyQt5 event loop integration
%gui gtk # enable PyGTK event loop integration
%gui gtk3 # enable Gtk3 event loop integration
%gui tk # enable Tk event loop integration
%gui osx # enable Cocoa event loop integration
# (requires %matplotlib 1.1)
%gui # disable all event loop integration
WARNING: after any of these has been called you can simply create
an application object, but DO NOT start the event loop yourself, as
we have already handled that.
"""
opts, arg = self.parse_options(parameter_s, '')
if arg=='': arg = None
try:
return self.shell.enable_gui(arg)
except Exception as e:
# print simple error message, rather than traceback if we can't
# hook up the GUI
error(str(e))
@skip_doctest
@line_magic
def precision(self, s=''):
"""Set floating point precision for pretty printing.
Can set either integer precision or a format string.
If numpy has been imported and precision is an int,
numpy display precision will also be set, via ``numpy.set_printoptions``.
If no argument is given, defaults will be restored.
Examples
--------
::
In [1]: from math import pi
In [2]: %precision 3
Out[2]: u'%.3f'
In [3]: pi
Out[3]: 3.142
In [4]: %precision %i
Out[4]: u'%i'
In [5]: pi
Out[5]: 3
In [6]: %precision %e
Out[6]: u'%e'
In [7]: pi**10
Out[7]: 9.364805e+04
In [8]: %precision
Out[8]: u'%r'
In [9]: pi**10
Out[9]: 93648.047476082982
"""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.float_precision = s
return ptformatter.float_format
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-e', '--export', action='store_true', default=False,
help=argparse.SUPPRESS
)
@magic_arguments.argument(
'filename', type=str,
help='Notebook name or filename'
)
@line_magic
def notebook(self, s):
"""Export and convert IPython notebooks.
This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
The -e or --export flag is deprecated in IPython 5.2, and will be
removed in the future.
"""
args = magic_arguments.parse_argstring(self.notebook, s)
from nbformat import write, v4
cells = []
hist = list(self.shell.history_manager.get_range())
if(len(hist)<=1):
raise ValueError('History is empty, cannot export')
for session, execution_count, source in hist[:-1]:
cells.append(v4.new_code_cell(
execution_count=execution_count,
source=source
))
nb = v4.new_notebook(cells=cells)
with io.open(args.filename, 'w', encoding='utf-8') as f:
write(nb, f, version=4)
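# Added usage sketch (hedged, assuming an interactive IPython session): the
# %notebook magic defined above exports the session history as a v4 notebook:
#   In [1]: 1 + 1
#   In [2]: %notebook exported.ipynb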
|
[
"rupeshparab.rp@gmail.com"
] |
rupeshparab.rp@gmail.com
|
93dacb69fddddea04a605d78c422cc274f8277a5
|
a603a234cb3310cb8c92491204816bfc26a7b09f
|
/cnn-mnist/mnist_lenet5_test.py
|
58083e406955c750f223bd26b94b7cff968e0991
|
[] |
no_license
|
reroze/DL
|
5ae6e73ceac408ea4d37a1aafed5fce16407f3f3
|
7857e34ad89981cafc1395e63edbcc07b3e772e1
|
refs/heads/master
| 2020-07-04T22:45:06.155185
| 2019-08-15T00:41:49
| 2019-08-15T00:41:49
| 202,446,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_lenet5_forward
import mnist_lenet5_backward
import numpy as np
TEST_INTERVAL_SECS = 5
def test(mnist):
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32, [
mnist.test.num_examples,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.NUM_CHANNELS])
y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])
y = mnist_lenet5_forward.forward(x, False, None)
ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)
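        # variables_to_restore() maps each moving-average shadow name to its
        # variable, so the saver below loads the smoothed (EMA) weights.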
ema_restore = ema.variables_to_restore()
saver = tf.train.Saver(ema_restore)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
while True:
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
if(ckpt and ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
reshaped_x = np.reshape(mnist.test.images,(
mnist.test.num_examples,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.IMAGE_SIZE,
mnist_lenet5_forward.NUM_CHANNELS))
accuracy_score = sess.run(accuracy, feed_dict={x: reshaped_x, y_:mnist.test.labels})
print("After %s training step(s), test accuracy is %g" %(global_step, accuracy_score))
else:
print("No checkpoint file found")
return
time.sleep(TEST_INTERVAL_SECS)
def main():
mnist = input_data.read_data_sets("./data/", one_hot=True)
test(mnist)
if __name__ == "__main__":
main()
|
[
"47293260+reroze@users.noreply.github.com"
] |
47293260+reroze@users.noreply.github.com
|
894523bf9885a4be64fcc4f40894562d9d68783c
|
8b8909b666099aca5df1389d86b72fbd9bafdeab
|
/aula 06/desafio 04.py
|
9d25841b30bb66ad5f5747bae2adb2ef1e406855
|
[] |
no_license
|
GabrielRossin/Python
|
c694ee0c3c557f854296b3e7843b7616dec0101e
|
1780a1c760149f6d65a25c8108b8e70a35914009
|
refs/heads/master
| 2022-12-15T04:32:19.009377
| 2020-09-13T06:10:56
| 2020-09-13T06:10:56
| 206,431,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
x = input('Enter something ')
print('Are all characters numeric?', x.isnumeric())
print('-------------------------------')
x1 = input('Enter something ')
print('Are all characters alphanumeric?', x1.isalnum())
print('-------------------------------')
x2 = input('Enter something ')
print('Are all characters alphabetic?', x2.isalpha())
print('-------------------------------')
x3 = input('Enter something ')
print('Are all characters decimal?', x3.isdecimal())
print('-------------------------------')
x4 = input('Enter something ')
print('Are all characters digits?', x4.isdigit())
print('--------------------------------')
x5 = input('Enter something ')
print('Is it a valid identifier?', x5.isidentifier())
print('--------------------------------')
x6 = input('Enter something ')
print('Are all characters lowercase?', x6.islower())
print('--------------------------------')
x7 = input('Enter something ')
print('Are all characters printable?', x7.isprintable())
print('---------------------------------')
x8 = input('Enter something ')
print('Are all characters in the text whitespace?', x8.isspace())
print('--------------------------------')
x9 = input('Enter something ')
print('Does each word start with an uppercase letter?', x9.istitle())
print('-----------------------------------')
x10 = input('Enter something ')
print('Are all characters in the text uppercase?', x10.isupper())
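# Added sketch: the same drill as a loop, probing one string with every
# str predicate used above via getattr.
s = input('Enter something ')
for method in ('isnumeric', 'isalnum', 'isalpha', 'isdecimal', 'isdigit',
               'isidentifier', 'islower', 'isprintable', 'isspace',
               'istitle', 'isupper'):
    print(method + '?', getattr(s, method)())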
|
[
"gabrielfiliperossin@hotmail.com"
] |
gabrielfiliperossin@hotmail.com
|
90e8e6081190b41166105c662b759bf495c1220e
|
bbc01d657f7af4297fc650e3f956133d972fec57
|
/utils.py
|
fb087e02e6e431c4142024c5e93c33b545e48d7a
|
[] |
no_license
|
hadishamgholi/video_classification
|
4a5f4339ba97587f13d94c6e9f65b679f7a2d88d
|
1260a9e398f5844b120f9354c62d3fa5593f13d3
|
refs/heads/master
| 2021-02-11T12:37:39.306706
| 2020-03-14T17:56:49
| 2020-03-14T17:56:49
| 244,491,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
import re, os
from glob import glob
import config as con
from sklearn.utils import shuffle
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
la_enc = LabelEncoder()
c_int = la_enc.fit_transform(sorted(os.listdir(con.dataset_path)))
oh_enc = OneHotEncoder(sparse=False)
oh_enc.fit(np.array(c_int).reshape(-1, 1))
def encode_label(label):
int_encoded = la_enc.transform(label)
return oh_enc.transform(np.array(int_encoded).reshape(-1, 1)).flatten()
def get_label_from_filename(filename):
pattern = 'v_([a-zA-Z]*)_'
match = re.search(pattern, filename).groups()[0]
return match
def get_data_pathes(root_path):
train, val = [], []
clas = sorted(glob(os.path.join(root_path, '*')))
for c in clas:
vids = glob(os.path.join(c, '*'))
vids = shuffle(vids)
train += vids[:int(con.split * len(vids))]
val += vids[int(con.split * len(vids)):]
return train, val
# if __name__ == '__main__':
# print(get_label_from_filename('v_BenchPress_g01_c01.avi'))
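# Hedged usage sketch (added), in the spirit of the commented-out check above;
# it assumes config.dataset_path holds UCF101-style class subdirectories.
# if __name__ == '__main__':
#     train_paths, val_paths = get_data_pathes(con.dataset_path)
#     print(len(train_paths), 'train /', len(val_paths), 'val')
#     print(encode_label([get_label_from_filename('v_BenchPress_g01_c01.avi')]))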
|
[
"hadishamgholi74@gmail.com"
] |
hadishamgholi74@gmail.com
|
55c11d01b0c113d4367972d23e5aa53d550f4b57
|
dfa69189145a0e15e69815950aebb45f3f88738d
|
/main.py
|
5cd581a848babe75a9897fe3f7023c35ae59b655
|
[] |
no_license
|
shogo-makishima/Keras_Projects
|
1be9e6e8a7b12d316edcfc46fae3119673b2838d
|
e7e349d4e696bbfe846d7474133342c6d5268442
|
refs/heads/master
| 2022-11-17T12:49:11.109984
| 2020-07-14T13:54:53
| 2020-07-14T13:54:53
| 279,592,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
import keras
modelPath = "Models\\MNIST_Model\\model"
batch_size = 128 # number of training samples processed together in one iteration of gradient descent
num_epochs = 1 # number of passes of the training algorithm over the full training set
hidden_size = 512 # number of neurons in each hidden layer of the MLP
num_train = 60000
num_test = 10000
height, width, depth = 28, 28, 1
num_classes = 10
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(num_train, height * width)
x_test = x_test.reshape(num_test, height * width)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = (x_train / 255 * 0.99) + 0.01
x_test = (x_test / 255 * 0.99) + 0.01
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
inputLayer = keras.layers.Input(shape=(height * width,))  # shape must be a tuple, not a bare int
hiddenLayer_1 = keras.layers.Dense(hidden_size, activation='relu')(inputLayer)
# hiddenLayer_2 = keras.layers.Dense(hidden_size, activation='relu')(hiddenLayer_1)
outLayer = keras.layers.Dense(num_classes, activation='softmax')(hiddenLayer_1)
try:
model: keras.Model = keras.models.load_model(modelPath)
model.predict(x_test, verbose=1)
except Exception as exception:
print(exception)
model = keras.Model(inputLayer, outLayer)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=num_epochs, verbose=1, validation_split=0.1)
model.evaluate(x_test, y_test, verbose=1)
model.save(modelPath)
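# Added sketch: sanity-check a single prediction from the trained/loaded model.
# numpy ships alongside Keras; argmax over the softmax output gives the digit.
import numpy as np
probs = model.predict(x_test[:1])
print('predicted digit:', int(np.argmax(probs)))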
|
[
"48924544+shogo-makishima@users.noreply.github.com"
] |
48924544+shogo-makishima@users.noreply.github.com
|