blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f28f2b8d0a341418b14873cfe5a8cf5027367013 | b37c943e29f3cac3cb733ecb7a93b8f8ba070547 | /Hangman.py | d384fd1448bb58d00a578c8c037a720443da1e45 | [] | no_license | NicholasKao/Hangman | 30c5f9cde02ad9b46a41dbebf00a6426923d08b2 | 1bbe8883143a6b433d891db3a11f750ab12cb5da | refs/heads/master | 2021-01-24T16:33:41.801712 | 2018-02-28T00:17:43 | 2018-02-28T00:17:43 | 123,201,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,008 | py | def hangman():
word = input('Player 1: Please select a word or phrase: ').lower() #word to play with in lower case
letters = [] # to be used later for the letters in the selected word
### function for player 1 to select a word
def selectWord(word):
for x in range(len(word)): # iterate though each letter of the word
letters.append(word[x]) # add each letter to a list of letters
return(letters) # return the list of letters
#implementation of above function
selectWord(word)
# formatting to player2 cannot see player 1's word and showing strikes left:
print('\n' * 50)
print('Please do not scroll up.')
print('\n' * 20)
level = input('Player 2: Please select a level; easy, medium, or hard? ').lower() # selecting a level, which corresponds to allowed strikes
if level[0] == 'e': #just the first letter to account for typos
strikes = 13 # sets the easy level to miss 12 times
elif level[0] == 'm':
strikes = 10 # medium level gives 9 misses
elif level[0] == 'h':
strikes = 6 # hard level gives 6 misses
misses = strikes
print('You have ' + str(strikes) + ' strikes remaining.')
### function to display the length of the word or phrase
def showSize(letters):
size = []
for x in letters: # iterates through the characters in the word
if x.isalpha() == True: #if the character is a letter
size.append('_') # then add an underscore to the 'board'
else: #otherwise
size.append(x) # keep the character as is
return(size) #return the board
# implamentation of above function and scaffolding for man to hang from
board = showSize(letters)
print(' _____\n | |\n |\n |\n |\n_|_' + '\n')
print(board)
# list of possible letters to guess from
possibles = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
man = [' _____\n | |\n |\n |\n |\n_|_', ' _____\n | |\n | ( )\n |\n |\n_|_', ' _____\n | |\n | ( )\n | *\n |\n_|_', ' _____\n | |\n | ( )\n | \*\n | \n_|_', ' _____\n | |\n | ( )\n | \*/\n | \n_|_', ' _____\n | |\n | ( )\n | \*/\n | /\n_|_', ' _____\n | |\n | ( )\n | \*/\n | / \\\n_|_', ' _____\n | |\n | ( )\n | -\*/\n | / \\\n_|_', ' _____\n | |\n | ( )\n | -\*/-\n | / \\\n_|_', ' _____\n | |\n | ( )\n | -\*/-\n | _/ \\\n_|_', ' _____\n | |\n | ( )\n | -\*/-\n | _/ \\_\n_|_', ' _____\n | |\n | (")\n | -\*/-\n | _/ \\_\n_|_', ' _____\n | |\n | (")\n | o-\*/-\n | _/ \\_\n_|_', ' _____\n | |\n | (")\n | o-\*/-o\n | _/ \\_\n_|_']
while strikes > 0: #while player 2 has strikes left
if '_' not in board:
print('Player 2 wins!\n ')
response = input('Play Again? Y/N\n').lower()
if response == 'y':
print('\n')
hangman()
else:
break
letter = input('Please guess again: ').lower() #prompt the user to guess
print('\n'*5) #spacing for easier reading
print("Player 2's Guess: " + letter) #display player 2's guess
positions = [] # create an empty list that will correspond to the positions of the letters
if letter in possibles: #if the guess is in the letters left
possibles.remove(letter) # remove the letter from the list of letters left
if letter in letters: #if the guess is in the word
print('Good Guess!')
for x in range(len(letters)): #iterate through the word
if letter == letters[x]: #find the position of the matching letter
positions.append(x) #add the position of the matching letter to the list of positions
else: #if the guessed letter is not in the word
strikes = strikes - 1 #remove a strike
print(letter + ' is not in the word.')
print('Strikes left: ' + str(strikes))
if strikes == 0: # if player 2 has no strikes left
print(man[misses-strikes])
print('Player 1 wins! The word is ' + word)
break #end the game
else: # if the character guessed is not available
print(letter + ' is not a valid guess. ')
print(man[misses-strikes])
### displays the new board after each guess
def newBoard(letters, positions, letter):
for x in positions: # iterates through the index of letters
board[x] = letter #adds the letter guessed to the corresponding index on the board
return(board) #returns the new board
print(newBoard(letters, positions, letter)) #prints the board
hangman() #start the game when the script runs
| [
"noreply@github.com"
] | noreply@github.com |
f371da24e39dc65c8a368916d0897107542f39c0 | 652dc8c22b929b3b75bdf68fdc4c7905794f9e74 | /Reverse_Int.py | e5bc8fc2c2eff64e776421d33a779d379b57250f | [] | no_license | jaysurn/Leetcode_dailies | 29ea9cc866e0f39a5573e39dc7b4bbd4157b4ea9 | 4593e396327aa8965c130dde97975612a75cc285 | refs/heads/main | 2023-04-07T22:07:38.869051 | 2021-04-07T07:06:55 | 2021-04-07T07:06:55 | 350,246,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # Goal : Return a 32-bit int with its digits reversed, if the value is outside the 32-bit range, then return 0
# Given : 32-bit signed int
# Assumption : Assume given int is between -21^31 <= x <= 2^31 - 1
def Reverse_int( input_int ):
    """Return *input_int* with its decimal digits reversed, preserving the sign.

    The input is assumed to fit in a signed 32-bit integer. If the reversed
    value falls outside the signed 32-bit range [-2**31, 2**31 - 1], 0 is
    returned instead.
    """
    result = int( str( abs ( input_int ) ) [ ::-1 ] ) # drop the sign, reverse via string slicing
    if input_int < 0: # restore the sign before range-checking
        result = -result
    # BUG FIX: the old check (result.bit_length() > 32) let values in
    # [2**31, 2**32) slip through -- e.g. Reverse_int(1999999992) returned
    # 2999999991 instead of 0. Compare against the signed 32-bit bounds.
    if result < -2**31 or result > 2**31 - 1:
        return 0
    return result
def main():
    """Drive Reverse_int through a few representative cases, echoing each one."""
    cases = (
        123,   # base test case
        -123,  # negative input
        120,   # trailing zero
        0,     # single digit
    )
    for case in cases:
        print( "Testing case {0}".format( case ) )
        print( Reverse_int( case ) )
main() | [
"noreply@github.com"
] | noreply@github.com |
6c5aeaeaa97a6a6c63f493f4001386755a32d850 | 1e160d6f6b400a38e36e237e74f37296c8637513 | /src/colony/center/hall.py | 762ca3d6be24645d7064e11a1d21ab8c1dccc1ec | [] | no_license | max97292/Pirates | 22e3bb1df9cddd51f1bbbf13aada5668084cdb83 | 1bc3fd6f25c9a8c4c123fb60bd0850f6bfdf225c | refs/heads/master | 2022-12-25T02:35:07.171626 | 2020-09-30T20:43:36 | 2020-09-30T20:43:36 | 291,234,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # -*- coding: utf-8 -*-
import random
import time
import telebot
import sqlite3
from telebot import types
from src.config import TOKEN
bot = telebot.TeleBot(TOKEN)  # Telegram bot client authenticated with the configured token
# check_same_thread=False allows the connection to be used from telebot's
# handler threads; NOTE(review): access is not otherwise synchronized --
# confirm handlers never write concurrently.
conn = sqlite3.connect("DataBase.db", check_same_thread=False)
cursor = conn.cursor()  # module-level cursor shared by the handlers in this module
| [
"max97292@gmail.com"
] | max97292@gmail.com |
ee193694d08978ec07e06b3a43be5b022a396bef | 4b4558c6cbc3cf2a5c0517a9d388572aa5a216ec | /Computer Science/Artificial Intelligence/Pacman/search/search.py | 36c79159ff511165c9be6e3665892fbea901d891 | [] | no_license | cow-coding/School-Project | 15269e3f7078521381453871c88ed36ff1008cff | 1e8fa3347da14f381cb73f2cd59d3ea144612bf8 | refs/heads/master | 2022-06-22T01:22:42.025813 | 2022-06-08T18:32:21 | 2022-06-08T18:32:21 | 247,887,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,202 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    You do not need to change anything in this class, ever.
    """
    # Every method below is abstract: concrete problems must override it,
    # otherwise util.raiseNotDefined() aborts with an error.
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()
    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()
    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Hard-coded solution for the tinyMaze layout only: for any other maze the
    returned move sequence is incorrect, so use it for tinyMaze alone.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first (graph search).

    Returns a list of actions leading from the start state to a goal state,
    or [] when the frontier is exhausted without reaching a goal.
    """
    frontier = util.Stack()  # LIFO frontier -> the deepest node is expanded first
    visited = []             # states already expanded (list supports unhashable states)
    frontier.push((problem.getStartState(), []))
    while not frontier.isEmpty():
        state, actions = frontier.pop()
        if state not in visited:
            visited.append(state)
            if problem.isGoalState(state):
                return actions
            # getSuccessors yields (successor, action, stepCost) triples;
            # renamed locals so they no longer shadow the builtins dir/next,
            # and the unreachable util.raiseNotDefined() after return was removed.
            for successor, action, _ in problem.getSuccessors(state):
                frontier.push((successor, actions + [action]))
    return []
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first (graph search).

    Returns a list of actions leading from the start state to a goal state,
    or [] when the frontier is exhausted without reaching a goal.
    """
    frontier = util.Queue()  # FIFO frontier -> the shallowest node is expanded first
    visited = []             # states already expanded (list supports unhashable states)
    frontier.push((problem.getStartState(), []))
    while not frontier.isEmpty():
        state, actions = frontier.pop()
        if state not in visited:
            visited.append(state)
            if problem.isGoalState(state):
                return actions
            # getSuccessors yields (successor, action, stepCost) triples;
            # renamed locals so they no longer shadow the builtins dir/next,
            # and the unreachable util.raiseNotDefined() after return was removed.
            for successor, action, _ in problem.getSuccessors(state):
                frontier.push((successor, actions + [action]))
    return []
def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    "*** YOUR CODE HERE ***"
    pq = util.PriorityQueue() # frontier ordered by path cost g(n)
    visited = dict() # best known path cost for every state seen so far
    action = []
    pq.push((problem.getStartState(), action, 0), 0)
    while not pq.isEmpty():
        state, action, cost = pq.pop()
        visited[state] = cost # record the cost at which this state was expanded
        if problem.isGoalState(state): # goal test happens on expansion, as UCS requires
            return action
        for item in problem.getSuccessors(state):
            curr, dir, ncost = item # (successor, action, stepCost)
            if curr not in visited:
                visited[curr] = ncost + cost
                pq.push((curr, action + [dir], ncost + cost), ncost + cost)
            else:
                # a cheaper route to an already-seen state: record it and requeue
                if visited[curr] > cost + ncost:
                    visited[curr] = ncost + cost
                    pq.push((curr, action + [dir], ncost+cost), ncost+cost)
    return []
    util.raiseNotDefined() # unreachable: leftover from the assignment template
def nullHeuristic(state, problem=None):
    """Trivial heuristic: estimates zero cost from *state* to the nearest goal,
    which makes A* behave exactly like uniform-cost search."""
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    "*** YOUR CODE HERE ***"
    start = problem.getStartState()
    pq = util.PriorityQueue() # frontier ordered by f(n) = g(n) + h(n)
    visited = dict() # stores g(n) for states already queued or expanded
    action = []
    pq.push((start, action, 0), heuristic(start, problem))
    while not pq.isEmpty():
        state, action, cost = pq.pop()
        visited[state] = cost
        if problem.isGoalState(state): # goal test on expansion
            return action
        for item in problem.getSuccessors(state):
            curr, dir, ncost = item # (successor, action, stepCost)
            if curr not in visited:
                visited[curr] = ncost + cost
                pq.push((curr, action + [dir], ncost + cost), ncost + cost + heuristic(curr, problem))
            else:
                # NOTE(review): visited[curr] holds g(n) but is compared against
                # g(n) + h(n) here -- the units differ, so this requeue test
                # looks inconsistent; verify against the intended algorithm.
                if visited[curr] > cost + ncost + heuristic(curr, problem):
                    visited[curr] = ncost + cost
                    pq.push((curr, action + [dir], ncost + cost), ncost + cost + heuristic(curr, problem))
    return []
    util.raiseNotDefined() # unreachable: leftover from the assignment template
# Abbreviations: short aliases for the search functions defined above
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"kbp0237@gmail.com"
] | kbp0237@gmail.com |
28623b218914214e68ec618ce9c1d580488e0449 | b12f6c2e4e35cf51956c6a887dcf47eaccc8ed21 | /File Write &Read.py | 4abdc291cb0f82e7845110f4665039908616a353 | [] | no_license | VanyashreeS1/Python-flask-projects | 76873da41be0190abb54813256f3386f7d7addf6 | fcf2f4fabb3d1fa45f9e3545aa8b961408a4c944 | refs/heads/master | 2021-01-05T23:47:18.193419 | 2020-02-21T17:45:20 | 2020-02-21T17:45:20 | 241,170,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | f = open("file3.txt", "a")
f.write("Now the file has more content!")
f.close()
# open and read the file after the appending:
# BUG FIX: the read handle was never closed; a context manager guarantees it is
with open("file3.txt", "r") as f:
    print(f.read())
| [
"vanyashree.s@gmail.com"
] | vanyashree.s@gmail.com |
5c49b4a8a73336da08a895778349ad6859231a04 | e4e4ed8338a389e1e329562d253224d95d9080c6 | /ml/sample/pipelines/large_graph/subgraphs/new_subgraph_c95b.py | 2d17b3d8152ab9e09f21de757ee8366fd254f437 | [] | no_license | lalala123123/project_template | 854361ac6b9b714dc23be9a439506310e04f1952 | a1aef8b2509388e47ebf51b21066169b977286de | refs/heads/master | 2023-08-30T13:51:49.135769 | 2021-10-28T07:06:46 | 2021-10-28T07:06:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,358 | py | from datetime import datetime
from azure.ml.component import dsl
from resources import generalselectcosmos_func, generalselectcosmos_func
print(datetime.now(), "Declaring pipeline: new_subgraph_c95b")  # progress marker emitted at import time
@dsl.pipeline(name='new_subgraph_c95b', default_compute_target='cpu-cluster')
def new_subgraph_c95b(
    in1,
    in2
):
    """Auto-generated two-stage subgraph: stage 1 left-joins In2's Vec column
    onto the source entity (as Vec1); stage 2 left-joins it onto the candidate
    entity (as Vec2) and its output stream is returned."""
    # stage 1: attach In2.Vec as Vec1, keyed on the source entity's photo URL and id
    generalselectcosmos_func_36ed_instance = generalselectcosmos_func(
        extractcols1='*',
        extractcols2='ImageUrl:string, Vec:string',
        extractcols3='*',
        extractcols4='*',
        select1='In1 = SELECT DISTINCT * FROM In1',
        select2='SELECT In1.*, In2.Vec AS Vec1 FROM In1 LEFT JOIN In2 ON In1.SourceEntityPhotoUrl == In2.ImageUrl AND In1.SourceEntityId == In2.EntityId',
        select3='SELECT DISTINCT *',
        select4=';',
        select5=';',
        ssclause='CLUSTERED BY SourceEntityId,CandidateEntityId INTO 100',
        tsvclause=';',
        resourcestmts=';',
        referencestmts=';',
        comment='',
        vc='cosmos08/Outings',
        scopeparams='-tokens 35',
        in1=in1,
        in2=in2
    )
    # stage 2: attach In2.Vec as Vec2, keyed on the candidate entity, consuming
    # stage 1's output stream as its first input
    generalselectcosmos_func_7dba_instance = generalselectcosmos_func(
        extractcols1='*',
        extractcols2='ImageUrl:string, Vec:string',
        extractcols3='*',
        extractcols4='*',
        select1='SELECT In1.*, In2.Vec AS Vec2 FROM In1 LEFT JOIN In2 ON In1.CandidateEntityPhotoUrl == In2.ImageUrl AND In1.CandidateEntityId == In2.EntityId',
        select2='SELECT DISTINCT *',
        select3=';',
        select4=';',
        select5=';',
        ssclause='CLUSTERED BY SourceEntityId,CandidateEntityId INTO 100',
        tsvclause=';',
        resourcestmts=';',
        referencestmts=';',
        comment='',
        vc='cosmos08/Outings',
        scopeparams='-tokens 35',
        in1=generalselectcosmos_func_36ed_instance.outputs.outputstream_out,
        in2=in2
    )
    return {'outputstream_out': generalselectcosmos_func_7dba_instance.outputs.outputstream_out}
if __name__ == '__main__':
    # Build, validate, save and submit the pipeline when run as a script.
    # NOTE(review): new_subgraph_c95b() is called without in1/in2 -- presumably
    # the dsl.pipeline decorator tolerates unbound inputs; confirm.
    print(datetime.now(), "Creating new_subgraph_c95b")
    pipeline = new_subgraph_c95b()
    print(datetime.now(), "Validating")
    pipeline.validate()
    print(datetime.now(), "Saving")
    pipeline._save()
    print(datetime.now(), "Submitting")
    pipeline.submit(experiment_name='new_subgraph_c95b')
    print(datetime.now(), "Finish")
| [
"henu_rz@163.com"
] | henu_rz@163.com |
77bec3ab8b9e6134f5d4e804a0775b0ffb29825b | 74f382e96cc6f60092ec25878bcaeef280c7acdf | /core/api/models.py | 136ed4467bbd2eec39f63bf7caf919cf569c8585 | [
"MIT"
] | permissive | vroxo/api-flask-users | f2bfe57e1466bc0461fa35977fa766b2708ba192 | c464537b8df260e7cf4e8654f34aacd4959297ee | refs/heads/master | 2021-06-27T21:40:25.580338 | 2017-09-14T08:08:05 | 2017-09-14T08:08:05 | 103,499,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | import datetime
from core import db
class User(db.Model):
    """A registered user account stored in the ``users`` table."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # surrogate primary key
    username = db.Column(db.String(128), nullable=False)
    email = db.Column(db.String(128), nullable=False)
    active = db.Column(db.Boolean(), default=False, nullable=False)  # accounts start inactive
    created_at = db.Column(db.DateTime, nullable=False)
    def __init__(self, username, email):
        # creation timestamp is assigned in application code (UTC), not by the DB
        self.username = username
        self.email = email
        self.created_at = datetime.datetime.utcnow()
| [
"vitorroxo.dev@gmail.com"
] | vitorroxo.dev@gmail.com |
cd15183227ca013ef8df4b0a9c35e52023611ad0 | 4d332c45578246847ef2cdcdeb827ca29ab06090 | /modules/Bio/Blast/Applications.py | 634372703b80657a8c7039c95ba7de9db2e186ef | [
"MIT"
] | permissive | prateekgupta3991/justforlearn | 616cc297a2a6119fa959b9337a5e91c77a11ebf7 | 3984c64063b356cf89003e17a914272983b6cf48 | refs/heads/master | 2021-03-12T22:09:12.184638 | 2014-01-28T10:37:07 | 2014-01-28T10:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | /usr/share/pyshared/Bio/Blast/Applications.py | [
"prateekgupta.3991@gmail.com"
] | prateekgupta.3991@gmail.com |
134f1ee4050d20ac333a4b35da4222bf51a32243 | cce0f3939036f536a182d7541b636874cd8247b6 | /xicam/core/data/bluesky_utils.py | f93d436d1f70f7dd0bcf439a14fb2df9fa7ab9e4 | [
"BSD-3-Clause-LBNL"
] | permissive | Xi-CAM/Xi-cam.core | b942ab7935935b4b514cd8593afcfba83ce7b042 | f993699391439402624934daafe329024165bb0b | refs/heads/master | 2023-08-25T16:16:19.231948 | 2020-05-01T17:28:29 | 2020-05-01T17:28:29 | 111,475,839 | 0 | 0 | NOASSERTION | 2020-04-28T22:51:49 | 2017-11-20T23:55:13 | Python | UTF-8 | Python | false | false | 907 | py | from databroker.core import BlueskyRun
def ndims_from_descriptor(descriptor: dict, field: str):
    """Number of dimensions recorded for *field* in a descriptor document.
    NOTE: this doesn't include the event dimension."""
    shape = descriptor['data_keys'][field]['shape']
    return len(shape)
def shape_from_descriptor(descriptor: dict, field: str):
    """Shape declared for *field* in a descriptor document's data keys."""
    field_info = descriptor['data_keys'][field]
    return field_info['shape']
def fields_from_stream(run: BlueskyRun, stream: str):
    """All field names recorded for *stream* of *run*."""
    descriptor = descriptors_from_stream(run, stream)
    return fields_from_descriptor(descriptor)
def descriptors_from_stream(run: BlueskyRun, stream: str):
    """Descriptor documents recorded in the metadata of *stream* of *run*."""
    stream_accessor = run[stream]
    return stream_accessor.metadata['descriptors']
def fields_from_descriptor(descriptor):
    """Names of the data keys declared in a descriptor document."""
    return [key for key in descriptor['data_keys']]
def streams_from_run(run: BlueskyRun):
    """Names of every stream in *run* (iterating a run yields its stream names)."""
    return [stream for stream in run]
def xarray_from_run(run: BlueskyRun, stream: str = None, field: str = None):
    """Dask-backed data for *run*, optionally narrowed to one stream and,
    within that stream, to one field. *field* is ignored when *stream* is
    not given, matching the original behavior."""
    dataset = run.to_dask()
    if not stream:
        return dataset
    stream_data = dataset[stream]
    if not field:
        return stream_data
    return stream_data[field]
| [
"ronpandolfi@gmail.com"
] | ronpandolfi@gmail.com |
a6518773dbefe07cc7d1820e140f06c7535c0a7b | 4316f9a62c7d09fc8ff74b0b27251bce2090462b | /camelot/view/album.py | cad8592a0b5df30d74a86464d0cabb68c4a52aab | [] | no_license | SaChiBon/project-camelot | 9739a70623cbd732edbf3eff4d61acca77516ad6 | b7aa839753f534a38aebc031004091e095843bdc | refs/heads/master | 2021-04-09T13:43:32.559201 | 2018-03-18T01:23:12 | 2018-03-18T01:23:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,614 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.http import HttpResponse
from ..controllers.albumcontroller import albumcontroller, AlreadyExistsException
from ..forms import AlbumCreateForm, UploadPhotoForm
"""
Album views
"""
@login_required
def create_album(request):
    """
    Create a new album for the logged-in user.

    GET renders an empty AlbumCreateForm; POST validates it, creates the album
    via albumcontroller, and redirects to the new album's page. A duplicate
    album name is reported on an error page.
    """
    # TEST: this *might* be creating 2 of the same album on the first creation, make unit test
    # if this is a POST request we need to process the form data
    if request.method == 'POST': # this all needs to be put in the controller.. maybe
        # create a form instance and populate it with data from the request:
        form = AlbumCreateForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required
            albumname = form.cleaned_data['albumname']
            albumdescription = form.cleaned_data['description']
            albumcontrol = albumcontroller(request.user.id)
            try:
                albummodel = albumcontrol.create_album(albumname, albumdescription)
            except AlreadyExistsException as e:
                # duplicate album name: show an error page instead of creating it
                return render(request, 'camelot/messageloggedin.html', {'message': 'Album name must be unique'})
                #return HttpResponse('Album name must be unique')
            return redirect("show_album", albummodel.id)
            #return HttpResponse('Created album ' + albummodel.name)
    # if a GET (or any other method) we'll create a blank form
    # (an invalid POST falls through with the bound form so errors are shown)
    else:
        form = AlbumCreateForm()
    return render(request, 'camelot/createalbum.html', {'form': form})
@login_required
def display_albums(request, userid):
    """Render the list of albums belonging to the profile *userid*."""
    control = albumcontroller(request.user.id)
    albums = control.return_albums(userid)
    return render(request, 'camelot/showalbums.html', {'albums': albums})
# showalbums.html might be able to be made more generic, may repeat in showalbum.html
@login_required
def display_album(request, id):
    """
    Render a single album together with its photos.

    :param request: current HTTP request
    :param id: id of album (permission validation still needed)
    :return: rendered album page
    """
    control = albumcontroller(request.user.id)
    album = control.return_album(id)
    photos = control.get_photos_for_album(album)  # photos stored in this album
    return render(request, 'camelot/showalbum.html', {'photos': photos, 'album': album})
@login_required
def add_photo(request, id):
    """
    Upload one or more photos into album *id*.

    Need to check if user has permission to access this view.
    :param request: current HTTP request
    :param id: id of the album to add photo to
    :return: redirect to the album on success, upload form otherwise
    """
    # https://docs.djangoproject.com/en/2.0/topics/http/file-uploads/
    # check that user actually has permission to add to this album
    if request.method == 'POST':
        albumcontrol = albumcontroller(request.user.id) # there has to be a better way than redeclaring this every time
        # probably with class views and sessions?
        form = UploadPhotoForm(request.POST, request.FILES)
        if form.is_valid():
            photodescription = form.cleaned_data['description']
            for fname, fdat in request.FILES.items(): # fname is currently unused
                # need to sort out multiple file upload and association with description
                albumcontrol.add_photo_to_album(id, photodescription, fdat)
            return redirect("show_album", id)
    else:
        form = UploadPhotoForm()
    return render(request, 'camelot/uploadphoto.html', {'form': form, 'albumid': id}) # so maybe we make the move to class based views
@login_required  # FIX: every sibling view requires login, and request.user.id below presumes an authenticated user
def return_photo_file_http(request, photoid):
    """
    Wrapper to securely serve a photo without exposing storage paths externally.

    We must ensure the security of photo.filename: if it can be messed with,
    the whole filesystem could be vulnerable (path traversal).

    :param request: current HTTP request
    :param photoid: id of photo
    :return: HttpResponse with the image bytes, or a 1x1 red JPEG placeholder
        when the file cannot be read
    """
    albumcontrol = albumcontroller(request.user.id)
    photo = albumcontrol.return_photo(photoid)
    # TODO: add a per-photo permissions check here
    try:
        # reads the whole file into memory; acceptable for photos, revisit for large files
        with open(photo.filename, "rb") as f:
            return HttpResponse(f.read(), content_type="image/*")
    except IOError:
        # fall back to a 1x1 red placeholder when the file is missing/unreadable
        from PIL import Image
        # BUG FIX: JPEG has no alpha channel, so Pillow raises when saving an
        # RGBA image as JPEG; build an opaque RGB pixel instead.
        red = Image.new('RGB', (1, 1), (255, 0, 0))
        response = HttpResponse(content_type="image/*")
        red.save(response, "JPEG")
return response | [
"docz2a@gmail.com"
] | docz2a@gmail.com |
7fb6b02760faa33f68b8c94a97b6b44671597bd2 | 99cedc8927edc502db3270008608358fe6f507a1 | /Salasah/urls.py | f19b44bf90984327a35c5c64526922d377067945 | [] | no_license | maulanasdqn/django-salasah | b2d76735997cb5562cc1f876e8513d4a040c228b | c386b8fca75f383e66a89ea4ad27922d60aaf6ea | refs/heads/main | 2023-01-01T04:22:06.130164 | 2020-10-24T13:02:13 | 2020-10-24T13:02:13 | 306,884,437 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from django.urls import path
from .import views
# URL routes for the Salasah app: the site root is served by the home view.
urlpatterns = [
    path('', views.home, name= 'home'),
] | [
"sodiqincahyana1@gmail.com"
] | sodiqincahyana1@gmail.com |
5daabd0e42aacc1ce5f197f6154547f200ab892b | 4bb9cde4a8aa41d883a9f139ebf476993d3ab8a1 | /listing/migrations/0004_auto_20201129_1211.py | 0d371dc556f3c9188c8d97a13670faad836e042c | [] | no_license | sajalmia381/real-estate | bc380328046899406ec97524697b87cbfc0e45ec | a2602f9333e677b362020123789b68933703b3c6 | refs/heads/master | 2023-01-21T06:16:50.992361 | 2020-12-05T18:19:55 | 2020-12-05T18:19:55 | 318,859,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # Generated by Django 3.1.3 on 2020-11-29 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: adds Listing.square_feet and restricts
    # Listing.sale_type to the rent/sale choices. Avoid editing by hand.
    dependencies = [
        ('listing', '0003_auto_20201129_1205'),
    ]
    operations = [
        migrations.AddField(
            model_name='listing',
            name='square_feet',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='listing',
            name='sale_type',
            field=models.CharField(choices=[('rent', 'Rent'), ('sale', 'Sale')], default='rent', max_length=40),
        ),
    ]
| [
"sajalmia381@gmail.com"
] | sajalmia381@gmail.com |
b33dc6ed7a11b4a2e3127592c66b9d813072b574 | e62a8943ea2cc45b660b17ab10e238e7cb4642dc | /CompareMatchingMethod_Codec_ORB.py | 0a47f5d8d3a1da1bab046fd0ef44a1cec40c6526 | [] | no_license | mkjubran/HMS | 6229fd36e7f01b93be6f572e59e26a42a1058257 | 2402380d4e68c9b924303a8e1efac6af434d3a57 | refs/heads/master | 2021-04-09T15:18:07.334487 | 2020-11-10T19:49:30 | 2020-11-10T19:49:30 | 125,491,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,171 | py | #Frame1: Type POC QPoffset QPOffsetModelOff QPOffsetModelScale CbQPoffset CrQPoffset QPfactor tcOffsetDiv2 betaOffsetDiv2 temporal_id #ref_pics_active #ref_pics reference pictures predict deltaRPS #ref_idcs reference idcs
#print >> fid, 'Frame1: P 1 5 -6.5 0.2590 0 0 1.0 0 0 0 1 1 -1 0');
from __future__ import division
from numpy import *
import numpy as np
import cv2, os, sys, subprocess, pdb
import argparse
import ConfigParser
import time, re, datetime
import math
import matplotlib.pyplot as plt
FRMPERWIN = 1 ; INF = 999 # frames per window; INF is used as a large sentinel value
###--------------------------------------------------------------
## Parse configuration Parameters from the configuration file
def main(argv=None):
    """Parse command-line arguments, seeding defaults from an optional
    ConfigParser file given with -c/--conf_file, and return the parsed args."""
    # Do argv default this way, as doing it in the functional
    # declaration sets it at compile time.
    if argv is None:
        argv = sys.argv
    # NOTE(review): argv is computed above but never handed to the parsers,
    # which read sys.argv themselves -- confirm this is intentional.
    # Parse any conf_file specification
    # We make this parser with add_help=False so that
    # it doesn't parse -h and print help.
    conf_parser = argparse.ArgumentParser(
        description=__doc__, # printed with -h/--help
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Turn off help, so we print all options in response to -h
        add_help=False
        )
    conf_parser.add_argument("-c", "--conf_file",
                        help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args()
    defaults = { "option":"default"}
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read([args.conf_file])
        # section name "Parametters" matches the spelling used in the config files
        defaults.update(dict(config.items("Parametters")))
        #print(dict(config.items("Parametters")))
    # Parse rest of arguments
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser]
        )
    parser.set_defaults(**defaults)
    args = parser.parse_args(remaining_argv)
    return(args)
###--------------------------------------------------------------
def call(cmd):
    """Run *cmd* through the shell, wait for it to finish, and return
    (stdout_bytes, stderr). stderr is None because only stdout is piped.
    NOTE: shell=True means *cmd* must never contain untrusted input."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    out, err = proc.communicate()
    return (out, err)
###--------------------------------------------------------------
def call_bg(cmd):
    """Launch *cmd* through the shell without waiting and return the Popen
    handle; the caller reads the piped stdout when it needs the output."""
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
###--------------------------------------------------------------
def prepare_video(fn):
    """Rebuild the ../CodecSIFT workspace: split *fn* into lossless PNG frames,
    then produce an MP4 and a raw YUV copy named after the global `fnname`."""
    commands = (
        'rm -rf ../CodecSIFT',
        'mkdir ../CodecSIFT',
        'mkdir ../CodecSIFT/pngall',
        'ffmpeg -r 1 -i {} -r 1 -qp 0 ../CodecSIFT/pngall/%d.png'.format(fn),
        'ffmpeg -start_number 0 -i ../CodecSIFT/pngall/%d.png -c:v libx264 -vf "fps=25,format=yuv420p" -qp 0 ../CodecSIFT/{}_CodecSIFT.mp4'.format(fnname),
        'ffmpeg -y -i ../CodecSIFT/{}_CodecSIFT.mp4 -vcodec rawvideo -pix_fmt yuv420p -qp 0 ../CodecSIFT/{}_CodecSIFT.yuv'.format(fnname, fnname),
    )
    for cmd in commands:
        call(cmd)
    return
###--------------------------------------------------------------
## Building Configuration File
def Build_encoding_struct_stitch():
    # Writes ../CodecSIFT/encoder_HMS_GOP.cfg: frame 0..GOP with the stitch
    # frame encoded as the only I-frame and every other frame as a P-frame
    # referencing it. Relies on module-level globals GOP and StitchFrame.
    # (Python 2 code: `print >> fid` and list-returning map().)
    iFNums=map(int, range(GOP+1))
    ## get total number of frames
    NumFrames=round(len(iFNums))
    NumFrames=int(NumFrames)
    ##write config files header
    fid = open('../CodecSIFT/encoder_HMS_GOP.cfg','w')
    print >> fid, '#======== Coding Structure ============='
    print >> fid, 'IntraPeriod                   : -1          # Period of I-Frame ( -1 = only first)'
    print >> fid, 'DecodingRefreshType           : 2           # Random Accesss 0:none, 1:CRA, 2:IDR, 3:Recovery Point SEI'
    print >> fid, 'GOPSize                       : '+str(GOP)+'           # GOP Size (number of B slice = GOPSize-1)'
    print >> fid, 'ReWriteParamSetsFlag          : 1           # Write parameter sets with every IRAP'
    # the next line is a bare string expression -- it has no effect at runtime
    '#        Type POC QPoffset QPOffsetModelOff QPOffsetModelScale CbQPoffset CrQPoffset QPfactor tcOffsetDiv2 betaOffsetDiv2 temporal_id #ref_pics_active #ref_pics reference pictures predict deltaRPS' '#ref_idcs reference idcs'
    print >> fid,''
    ## Produce iFNums_array2 [StitchFrame; other frames ordered]
    iFNums_array = np.array(iFNums)
    #iFNums_array=iFNums_array.clip(0, 999999999)
    #indexes = np.unique(iFNums_array, return_index=True)[1]
    #iFNums_array=[iFNums_array[index] for index in sorted(indexes)]
    #iFNums_array=np.array(iFNums_array)
    ref_pics_Stitching_array=np.array([StitchFrame])
    ref_pics_RemovedStitching_array=np.array(range(0,NumFrames))
    index=np.where(np.isin(ref_pics_RemovedStitching_array,ref_pics_Stitching_array))
    ref_pics_RemovedStitching_array=np.delete(ref_pics_RemovedStitching_array,index)
    ref_pics_RemovedStitching_array.sort()
    iFNums_array2=np.concatenate((ref_pics_Stitching_array,ref_pics_RemovedStitching_array), axis=0) #Stitching Frames + Ordered remaining Frames
    #print(iFNums_array2)
    ref_pics_active_Stitching=1
    ref_pics_active_Max=1
    ## Buidling encoding structure for Stitching mode
    ref_pics_stitch_to_use=[]
    if 0 in ref_pics_Stitching_array:
        if ref_pics_active_Stitching>0:
            ref_pics_stitch_to_use=np.append(ref_pics_stitch_to_use,0)
    # the stitch frame is written first, as the lone intra frame
    ref_pics=np.array([StitchFrame])
    GOPLine='Frame' + str(1) + ':  I '+ str(StitchFrame) +' 0 -6.5 0.2590 0 0 1.0 0 0 0 '+ str(0) + ' ' + str(0)+' '+str(int(0))
    print >> fid, GOPLine
    cntin=1
    # every other frame becomes a P-frame whose references point back at the stitch frame
    for cnt in range(1,NumFrames):
        if cnt != StitchFrame:
            GOPLine='Frame' + str(cnt+cntin) + ':  P '+ str(cnt) +' 0 -6.5 0.2590 0 0 1.0 0 0 0 '+ str(len(ref_pics)) + ' ' + str(len(ref_pics))
            for cnt1 in range(len(ref_pics)):
                GOPLine=GOPLine+' '+str(int(ref_pics[cnt1]-cnt))
            GOPLine=GOPLine+' 2 0'
            print >> fid, GOPLine
        else:
            cntin=0 # skip the stitch frame itself and keep the Frame numbering dense
###--------------------------------------------------------------
def Encode_decode_video():
    """Encode the prepared YUV with the HM encoder, then decode the bitstream.

    Captures each tool's stdout into a log file under ../CodecSIFT/.
    Relies on module globals set in __main__ (fnname, Width, Height, QP, fps,
    GOP, alpha, MaxCUSize, MaxPartitionDepth, RateControl, rate) and on the
    call/call_bg helpers defined earlier in this file.
    """
    print('Encoding Video')
    InputYUV='../CodecSIFT/{}_CodecSIFT.yuv'.format(fnname)
    BitstreamFile='../CodecSIFT/{}_CodecSIFT.bin'.format(fnname)
    ReconFile='../CodecSIFT/{}_CodecSIFT_Recon.yuv'.format(fnname)
    osout = call('rm -rf {}'.format(BitstreamFile))
    osout = call('cp -f ./encoder_HMS.cfg ../CodecSIFT/encoder_HMS.cfg')
    if RateControl==0:
        osout=call_bg('./HMS/bin/TAppEncoderStatic -c ../CodecSIFT/encoder_HMS.cfg -c ../CodecSIFT/encoder_HMS_GOP.cfg --InputFile={} --SourceWidth={} --SourceHeight={} --SAO=0 --QP={} --FrameRate={} --FramesToBeEncoded={} --MaxCUSize={} --MaxPartitionDepth={} --QuadtreeTULog2MaxSize=4 --BitstreamFile="{}" --RateControl={} --TargetBitrate={} '.format(InputYUV,Width,Height,QP,fps,GOP,MaxCUSize,MaxPartitionDepth,BitstreamFile,RateControl,rate))
    else:
        osout=call_bg('./HMS/bin/TAppEncoderStatic -c ../CodecSIFT/encoder_HMS.cfg -c ../CodecSIFT/encoder_HMS_GOP.cfg --InputFile={} --SourceWidth={} --SourceHeight={} --SAO=0 --QP={} --FrameRate={} --FramesToBeEncoded={} --MaxCUSize={} --MaxPartitionDepth={} --QuadtreeTULog2MaxSize=4 --BitstreamFile="{}" --RateControl={} --TargetBitrate={} &'.format(InputYUV,Width,Height,QP,fps,GOP*alpha,MaxCUSize,MaxPartitionDepth,BitstreamFile,RateControl,rate))
    encoderlogfile='../CodecSIFT/encoderlog.dat'
    # Bug fix: the original said `fid.close` (no parens), which never closed
    # the file.  A with-block flushes and closes the log deterministically.
    with open(encoderlogfile, 'w') as fid:
        fid.write(osout.stdout.read())
    osout.stdout.read()  # drain remaining output (blocks until encoder EOF)
    print('Decoding Video')
    osout = call('rm -rf {}'.format(ReconFile))
    osout=call_bg('./HMS/bin/TAppDecoderStatic --BitstreamFile="{}" --ReconFile="{}" &'.format(BitstreamFile,ReconFile))
    decoderlogfile='../CodecSIFT/decoderlog.dat'
    # Same `fid.close` bug fixed here.
    with open(decoderlogfile, 'w') as fid:
        fid.write(osout.stdout.read())
    return
###--------------------------------------------------------------
def Measure_Rate_PSNR():
    """Run the external PSNR measurement script on the original vs. the
    reconstructed YUV and append its stdout to the encoder log.

    Uses the module-level `call` helper and globals fnname/Width/Height.
    """
    InputYUV='../CodecSIFT/{}_CodecSIFT.yuv'.format(fnname)
    ReconFile='../CodecSIFT/{}_CodecSIFT_Recon.yuv'.format(fnname)
    (osout,err)=call('python ./Quality/measure.py {} {} {} {} &'.format(InputYUV,ReconFile,Width,Height))
    encoderlogfile='../CodecSIFT/encoderlog.dat'
    # Bug fix: `fid.close` (no parens) never closed the appended log; the
    # with-block guarantees the write is flushed and the handle released.
    with open(encoderlogfile, 'a') as fid:
        fid.write(osout)
    return
###--------------------------------------------------------------
def Edit_encoder_log():
    """Parse the HM encoder log, accumulate per-frame rate and PSNR, and write
    two summary files: the edited log (Edited_encoder_log) and an "...All.dat"
    companion with the raw matched lines.

    Side effects: fills the module-global Rate_PSNR array (col 0 = frame idx,
    col 1 = bits, col 2 = Y-PSNR).  Reads module globals vid, QP, MaxCUSize,
    MaxPartitionDepth, fps, RateControl, rate, Edited_encoder_log.
    NOTE(review): if the log contains no 'POC' lines, AverageRate is never
    bound and the final write raises NameError -- confirm logs always match.
    """
    PIXEL_MAX = 255.0
    mseY=0
    mseU=0
    mseV=0
    mseYUV=0
    NumFramesPSNR=0
    NumFramesRate=0
    TotalBits=0
    CombinedLinesRateAll=[]
    CombinedLinesPSNRAll=[]
    CombinedLinesRate=[]
    CombinedLinesPSNR=[]
    encoderlogfile='../CodecSIFT/encoderlog.dat'
    with open(encoderlogfile) as f:
        Lines = f.readlines()
    cnt_col_Rate=0
    cnt_col_PSNR=0
    for cnt in range(len(Lines)):
        # Collapse runs of whitespace so split(' ') yields stable columns.
        templine=(Lines[cnt][:]).rstrip()
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        if templine.split(' ')[0] == 'POC':
            CombinedLinesRateAll.append(Lines[cnt][:])
            CombinedLinesRate.append(Lines[cnt][:])
            cnt_col_Rate=cnt_col_Rate+1
            TotalBits=TotalBits+int(templine.split(' ')[11])  # bits column
            NumFramesRate=NumFramesRate+1
            if (NumFramesRate>0):
                AverageRate=(TotalBits/NumFramesRate)*fps  # bits/s
        if (((re.split(' |:',templine)[0]) == 'Frame') and ((re.split(' |:',templine)[3]) == '[Y')):
            CombinedLinesPSNRAll.append(Lines[cnt][:])
            PSNRYFrame=re.split(' |:',templine)[4]
            PSNRUFrame=re.split(' |:',templine)[6]
            PSNRVFrame=re.split(' |:',templine)[8]
            PSNRYUVFrame=re.split(' |:',templine)[10]
            # Strip the trailing "dB" / "dB]" suffixes before float().
            PSNRYFrame=float(PSNRYFrame[0:(len(PSNRYFrame)-2)])
            PSNRUFrame=float(PSNRUFrame[0:(len(PSNRUFrame)-2)])
            PSNRVFrame=float(PSNRVFrame[0:(len(PSNRVFrame)-2)])
            PSNRYUVFrame=float(PSNRYUVFrame[0:(len(PSNRYUVFrame)-3)])
            # Convert PSNR back to MSE so per-frame values average correctly.
            mseYFrame=((PIXEL_MAX)/(10**(PSNRYFrame/20)))**2
            mseY=mseY+mseYFrame
            mseUFrame=((PIXEL_MAX)/(10**(PSNRUFrame/20)))**2
            mseU=mseU+mseUFrame
            mseVFrame=((PIXEL_MAX)/(10**(PSNRVFrame/20)))**2
            mseV=mseV+mseVFrame
            mseYUVFrame=((PIXEL_MAX)/(10**(PSNRYUVFrame/20)))**2
            mseYUV=mseYUV+mseYUVFrame
            NumFramesPSNR=NumFramesPSNR+1
            PSNRYVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseY/NumFramesPSNR)))
            PSNRUVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseU/NumFramesPSNR)))
            PSNRVVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseV/NumFramesPSNR)))
            PSNRYUVVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseYUV/NumFramesPSNR)))
            templineNew=('Frame {0:3d}: [Y {1:1.4f}dB U {2:1.4f}dB V {3:1.4f}dB YUV {4:1.4f}dB] ..... Video: [Y {5:1.4f}dB U {6:1.4f}dB V {7:1.4f}dB YUV {8:1.4f}dB]').format(NumFramesPSNR,PSNRYFrame,PSNRUFrame,PSNRVFrame,PSNRYUVFrame,PSNRYVideo,PSNRUVideo,PSNRVVideo,PSNRYUVVideo)
            CombinedLinesPSNR.append(templineNew)
            cnt_col_PSNR=cnt_col_PSNR+1
    ## write to edited log file
    fid = open(Edited_encoder_log,'w')
    fid.write('Input File (MP4) = {}\n'.format(vid))
    fid.write('QP = {}\n'.format(QP))
    fid.write('MaxCUSize = {}\n'.format(MaxCUSize))
    fid.write('MaxPartitionDepth = {}\n'.format(MaxPartitionDepth))
    fid.write('fps = {}\n'.format(fps))
    fid.write('RateControl = {}\n'.format(RateControl))
    fid.write('rate = {}\n'.format(rate))
    ## write PSNR
    for cnt in range(len(CombinedLinesPSNR)):
        templine=CombinedLinesPSNR[cnt][:].replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.rstrip()
        templine=templine.split(' ')
        fid.write('Frame {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(cnt,str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10]),str(templine[11]),str(templine[12]),str(templine[13]),str(templine[14]),str(templine[15]),str(templine[16]),str(templine[17]),str(templine[18]),str(templine[19])))
        PSNR_temp=str(templine[3])
        Rate_PSNR[cnt,0]=cnt
        Rate_PSNR[cnt,2]=float(PSNR_temp[0:(len(PSNR_temp)-2)])
    ## write Rate
    fid.write('\n\n')
    for cnt in range(len(CombinedLinesRate)):
        templine=CombinedLinesRate[cnt][:].replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.split(' ')
        fid.write('POC {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(cnt,str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10]),str(templine[11]),str(templine[12]),str(templine[13]),str(templine[14]),str(templine[15]),str(templine[16]),str(templine[17]),str(templine[18]),str(templine[19]),str(templine[20]),str(templine[21]),str(templine[22])))
        Rate_temp=str(templine[11])
        Rate_PSNR[cnt,1]=float(Rate_temp)
    fid.write('\nNumber of Frames = {}\n'.format(NumFramesRate))
    fid.write('Written bites = {}\n'.format(TotalBits))
    fid.write('Bit Rate = {} kbps\n'.format(AverageRate/1000))
    # Bug fix: `fid.close` (no parens) never closed the file.
    fid.close()
    fid = open((Edited_encoder_log[0:(len(Edited_encoder_log)-4)]+'All.dat'),'w')
    for cnt in range(len(CombinedLinesPSNRAll)):
        templine=CombinedLinesPSNRAll[cnt][:].replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.split(' ')
        # NOTE(review): one more argument than '{}' slots here (extra args are
        # silently ignored by str.format) -- kept as in the original.
        fid.write('Frame {} {} {} {} {} {} {} {} {}\n'.format(str(templine[1]),str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10])))
    fid.write('\n\n')
    for cnt in range(len(CombinedLinesRateAll)):
        templine=CombinedLinesRateAll[cnt][:].replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.replace(" "," ")
        templine=templine.split(' ')
        # NOTE(review): same slot/argument mismatch as above -- kept as-is.
        fid.write('POC {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(str(templine[1]),str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10]),str(templine[11]),str(templine[12]),str(templine[13]),str(templine[14]),str(templine[15]),str(templine[16]),str(templine[17]),str(templine[18]),str(templine[19]),str(templine[20]),str(templine[21]),str(templine[22])))
    # Bug fix: `fid.close` -> `fid.close()`.
    fid.close()
    return
###################################################################3
## check similarity using SIFT
def call_err(cmd):
    """Run *cmd* through the shell, capturing stdout.

    Returns a (stdout, stderr) pair from communicate(); stderr is not piped,
    so it comes back as None.
    """
    child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output, error = child.communicate()
    return (output, error)
def get_frames_list(fn):
    """Return the numerically sorted PNG frame paths under ../CodecSIFT/pngall.

    *fn* is accepted for interface compatibility but not used -- the listing
    is produced by shelling out to `ls -v`.
    """
    listing, _ = call_err('ls -v ../CodecSIFT/pngall/*.png')
    # Drop the trailing empty entry produced by the final newline.
    return listing.split('\n')[0:-1]
def make_windows(lfrm, numfrmwin):
    """Split *lfrm* into consecutive chunks of *numfrmwin* frames.

    The last window may be shorter when len(lfrm) is not a multiple of
    numfrmwin.  (Removed the unused `numfrm/numfrmwin` local, whose value
    also differed between Python 2 and 3 integer division.)
    """
    lwin = []
    for i in range(0, len(lfrm), numfrmwin):
        lwin.append(lfrm[i:i + numfrmwin])
    return lwin
def comp_similarity(lwin_, lwin_sc_, lwinsim):
    """Fill *lwinsim* with pairwise window similarities and return it.

    Row/column indices are parsed out of the frame file names: the window
    index is the basename with its 6- (resp. 4-) character suffix stripped,
    converted to int, 1-based.
    """
    for win in lwin_:
        timestamp = datetime.datetime.now()  # kept for parity with the original progress hook
        for win_sc in lwin_sc_:
            name = re.split('/', str(win))[-1]
            row = int(name[0:len(name) - 6])
            name = re.split('/', win_sc)[-1]
            col = int(name[0:len(name) - 4])
            lwinsim[row - 1][col - 1] = window_similarity(win, win_sc)
    return lwinsim
def window_similarity(win_0, win_1):
    """Similarity between two windows, comparing their first frames.

    Each argument is either a frame path (str) or a list of frame paths;
    only the first frame of a list is compared.  Fixed the Python-2-only
    `<>` operator (a SyntaxError on Python 3) -- `!=` / isinstance work on
    both interpreters.
    """
    first = win_0 if isinstance(win_0, str) else win_0[0]
    second = win_1 if isinstance(win_1, str) else win_1[0]
    lfrmsim = [content_similarity(first, second)]
    # np.mean on the one-element list preserves the original's float return.
    return np.mean(lfrmsim)
def content_similarity(img_0, img_1):
    """ORB-descriptor distance between two images (lower = more similar).

    Returns the module-level INF sentinel when descriptors cannot be
    matched or no match is closer than 100.
    """
    gray_a = cv2.imread(img_0, 0)
    gray_b = cv2.imread(img_1, 0)
    detector = cv2.ORB_create(nfeatures=100000)
    kp_a, desc_a = detector.detectAndCompute(gray_a, None)
    kp_b, desc_b = detector.detectAndCompute(gray_b, None)
    if type(desc_a) != type(desc_b):
        # One image yielded descriptors and the other did not.
        return INF
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    found = sorted(matcher.match(desc_a, desc_b), key=lambda m: m.distance)
    close = [m.distance for m in found if m.distance < 100]
    score = INF if not close else np.mean(close)
    if math.isnan(score):
        score = INF
    # Mirrors the original's (simind_1 + simind_2) / 2 with both terms equal.
    return (score + score) / float(2)
##################################################################
## Main Body
if __name__ == "__main__":
    # NOTE(review): threshold='nan' raises TypeError on recent numpy
    # (expects a number / np.inf) -- confirm the pinned numpy version.
    np.set_printoptions(threshold='nan')
    args=main()
    ##Inputs -- all CLI arguments become module globals used by the helpers.
    StitchFrame=int(args.stitchframe);
    vid=args.vid;
    mode=args.mode;
    fps=int(args.fps);
    GOP=int(args.gop);
    Width=int(args.w);
    Height=int(args.h);
    QP=int(args.qp);
    MaxCUSize=int(args.maxcusize);
    MaxPartitionDepth=int(args.maxpartitiondepth);
    RateControl=int(args.ratecontrol);
    rate=int(args.rate);
    Edited_encoder_log=args.edited_encoder_log
    fsr=fps
    fnname=vid.split('/')[-1]
    fnname=fnname[0:(len(fnname)-4)]  # drop the ".mp4" extension
    if GOP%2!=0:
        GOP=int(GOP/2) * 2  # force GOP even
    prepare_video(vid)
    Build_encoding_struct_stitch()
    Encode_decode_video()
    Measure_Rate_PSNR()
    # Rate_PSNR is filled in-place by Edit_encoder_log (frame, bits, Y-PSNR).
    Rate_PSNR=np.full((GOP,3), INF,float)
    Edit_encoder_log()
    Rate_PSNR=np.array(Rate_PSNR)
    #print(Rate_PSNR)
    fname=fnname
    lfrm = get_frames_list(vid);
    lfrm=lfrm[0:GOP]
    lwin = make_windows(lfrm, FRMPERWIN)
    lwinsim=np.full((len(lwin),len(lwin)), INF)
    lwin_stitch=lwin[StitchFrame-1]  # 1-based stitch-frame index
    lwinsim=comp_similarity(lwin,lwin_stitch,lwinsim)
    Rate=Rate_PSNR[:,1]
    Rate_Norm=Rate/np.max(Rate)
    PSNR=Rate_PSNR[:,2]
    PSNR_Norm=PSNR/np.max(PSNR)
    s=re.split('/',str(lwin_stitch))[-1]
    lwinsim=np.array(lwinsim)
    # Column of similarities against the stitch window.
    SIFT_score=lwinsim[:,int(s[0:(len(s)-6)])-1]
    SIFT_score=SIFT_score[0:GOP]
    SIFT_score_Norm=SIFT_score/np.max(SIFT_score)
    SIFT_score=SIFT_score.reshape(len(SIFT_score),1)
    Rate_PSNR_SIFT=np.concatenate((Rate_PSNR, SIFT_score),axis=1)
    np.save(('../savenpy/'+fnname+'_Rate_PSNR_ORB'),Rate_PSNR_SIFT)
    #print(Rate_PSNR_SIFT)
    # Four diagnostic plots: ORB score vs. rate and PSNR, raw and combined.
    fig1, ax1 =plt.subplots()
    ax1.plot(range(len(SIFT_score_Norm)),SIFT_score_Norm,'-k')
    ax1.plot(range(len(Rate)),Rate_Norm,'--b')
    ax1.plot(range(len(PSNR)),PSNR_Norm,':r')
    ax1.set_title('ORB Similarity Score & CODEC Rate PSNR')
    ax1.set_xlabel('Frame Number')
    #ax1.set_ylabel('Average SIFT Score')
    ax1.legend(['ORB','Rate','PSNR'])
    fig2, ax2 =plt.subplots()
    ax2.plot(range(len(SIFT_score_Norm)),SIFT_score_Norm,'-k')
    ax2.plot(range(len(Rate)),Rate_Norm,'--b')
    ax2.set_title('ORB Similarity Score & CODEC Rate')
    ax2.set_xlabel('Frame Number')
    #ax2.set_ylabel('Average SIFT Score')
    ax2.legend(['ORB','Rate'])
    fig3, ax3 =plt.subplots()
    ax3.plot(range(len(SIFT_score_Norm)),SIFT_score_Norm,'-k')
    ax3.plot(range(len(PSNR)),PSNR_Norm,':r')
    ax3.set_title('ORB Similarity Score & CODEC PSNR')
    ax3.set_xlabel('Frame Number')
    #ax3.set_ylabel('Average SIFT Score')
    ax3.legend(['ORB','PSNR'])
    fig4, ax4 =plt.subplots()
    ax4.plot(range(np.shape(Rate_PSNR_SIFT)[0]),(Rate_PSNR_SIFT[:,3]/np.max(Rate_PSNR_SIFT[:,3])),'-k')
    ax4.plot(range(np.shape(Rate_PSNR_SIFT)[0]),(Rate_PSNR_SIFT[:,2]/np.max(Rate_PSNR_SIFT[:,2])),':r')
    ax4.plot(range(np.shape(Rate_PSNR_SIFT)[0]),(Rate_PSNR_SIFT[:,1]/np.max(Rate_PSNR_SIFT[:,1])),'--b')
    ax4.set_title('ORB Similarity Score & CODEC PSNR')
    ax4.set_xlabel('Frame Number')
    #ax3.set_ylabel('Average SIFT Score')
    ax4.legend(['SIFT','PSNR'])
    plt.show()
| [
"mjubran@birzeit.edu"
] | mjubran@birzeit.edu |
60f501dd33bc408bb5b0ce9ae012cb0765548801 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_144/ch20_2020_03_09_20_17_14_756367.py | 0730239956a1947237866393bd5dc6de5401f7cc | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | distância=float(input("Digite a distância a percorrer:"))
multa = distancia - 200
excesso = multa * 0.45
if distância <= 200:
passagem = 0.5 * distância
else:
passagem = passagem + excesso
print("Preço da passagem: R$ %7.2f" % passagem)
| [
"you@example.com"
] | you@example.com |
cbf6bc2fa02f3077d4a2e66ac887debcce4bae36 | aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14 | /mi/dataset/driver/flort_kn/stc_imodem/flort_kn__stc_imodem_recovered_driver.py | 4fe4de3d18ce68d6534b32380e50fd98fe6bab2f | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oceanobservatories/mi-instrument | 3ad880c1366b1a8461fc9085768df0e9ddeb6ef5 | bdbf01f5614e7188ce19596704794466e5683b30 | refs/heads/master | 2023-07-23T07:28:36.091223 | 2023-07-14T15:54:49 | 2023-07-14T15:54:49 | 24,165,325 | 1 | 32 | BSD-2-Clause | 2023-07-13T01:39:22 | 2014-09-17T22:53:22 | Python | UTF-8 | Python | false | false | 877 | py | from mi.dataset.parser.flort_kn__stc_imodem import Flort_kn_stc_imodemParser,Flort_kn_stc_imodemParserDataParticleRecovered
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
@version("0.0.2")
def parse(unused, source_file_path, particle_data_handler):
    """Drive the Flort_kn STC imodem recovered parser over one source file.

    *unused* is kept for driver-interface compatibility.  Returns the
    particle_data_handler after the driver has streamed the whole file.
    """
    parser_config = {
        DataSetDriverConfigKeys.PARTICLE_MODULE: "mi.dataset.parser.flort_kn__stc_imodem",
        DataSetDriverConfigKeys.PARTICLE_CLASS: "Flort_kn_stc_imodemParserDataParticleRecovered",
    }
    with open(source_file_path, "r") as stream:
        parser = Flort_kn_stc_imodemParser(
            parser_config,
            None,
            stream,
            lambda state, f: None,
            lambda state: None)
        DataSetDriver(parser, particle_data_handler).processFileStream()
    return particle_data_handler
| [
"petercable@gmail.com"
] | petercable@gmail.com |
c6d6095b6aecf8907d6dbe353e20a0cf0c58d042 | cc36d7ba409dfc2c9b7252b3c4efa55ca829adb7 | /tests/test_split_and_export.py | 354a7a0d37f2c8667857f6c75d9617afb5048cbd | [] | no_license | shinglyu/MusicPupil-tf | e09b2615047e9b87caa797fd7108b8ae35b34cf5 | 5ae05dc23fef1f9daf9deecd378adee9353a9e66 | refs/heads/master | 2021-05-15T18:10:37.284122 | 2017-12-30T15:34:46 | 2017-12-30T15:34:46 | 107,603,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | import os
import sys
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..',
'feature_extractor'
)
)
from unittest.mock import patch, MagicMock
import music21
import csv
import split_and_export
def test_split_train_test():
    """Each split must have disjoint, non-empty training/testing partitions."""
    pool = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    # Ask for groups of half the sample count.
    variants = split_and_export.split_train_test(pool, int(len(pool) / 2))
    assert len(variants) > 1  # more than one way to split
    first = variants[0]
    assert len(first['training']) > 0
    assert len(first['testing']) > 0
    assert len(first['training']) > len(first['testing'])
    assert all(item not in first['training'] for item in first['testing'])
def test_export_to_csv():
    """export_to_csv pairs score/perf feature columns row by row."""
    def sample(score, perf):
        return {"score_features": {"foo": score},
                "perf_features": {"bar": perf}}

    samples = [sample([1, 2, 3], [7, 8, 9]),
               sample([4, 5, 6], [10, 11, 12])]
    split_and_export.export_to_csv(samples, "tests/test_export_training.csv")
    with open('tests/test_export_training.csv', newline='') as csvfile:
        rows = list(csv.reader(csvfile, delimiter=',', quotechar='"'))
    assert rows[0] == ["foo", "bar"]
    assert rows[1] == ["1", "7"]
def test_export_all_to_csv():
    """export_all_to_csv writes one training and one testing CSV per split."""
    splits = [{"training": "training_%d" % i, "testing": "testing_%d" % i}
              for i in range(3)]
    with patch("split_and_export.export_to_csv") as mock_export:
        split_and_export.export_all_to_csv(splits, "tests/test_export")
        for i in range(3):
            mock_export.assert_any_call("testing_%d" % i,
                                        "tests/test_export_%d_testing.csv" % i)
            mock_export.assert_any_call("training_%d" % i,
                                        "tests/test_export_%d_training.csv" % i)
| [
"shing.lyu@gmail.com"
] | shing.lyu@gmail.com |
42e44d36df2d8995690e0ac00535e4955d8b3472 | 94d5467b1315791fa75165eb862fdd8fef300958 | /yunyan_baotou/src/business_ultra/init_data.py | 61600784148a3a71b91b0ae55c58a09ba84d4b62 | [] | no_license | scmsqhn/code | e31926174c247d49c1db8f121e3ec1b82f8a2d9d | b389d7dc5fafad8a4185a03cd6d5519ccf8f99df | refs/heads/master | 2022-12-09T05:37:07.065840 | 2019-05-14T01:55:07 | 2019-05-14T01:55:07 | 185,903,771 | 1 | 0 | null | 2022-12-08T05:05:51 | 2019-05-10T02:22:28 | Python | UTF-8 | Python | false | false | 5,621 | py | #!/usr/bin/env python3
import datetime
import pandas as pd
from datetime import datetime
import json
import os
import codecs
import numpy as np
import traceback
import sys
sys.path.append(os.environ['YUNYAN'])
sys.path.append(os.environ['ROOT'])
sys.path.append(os.environ['WORKBENCH'])
#import gensim
#from gensimplus.source.gensim_plus_config import FLAGS
#from gensimplus.source.model_save_load_helper import ModelSaveLoadHelper
#from gensim.models import LsiModel
#from gensim.models import LdaModel
#from gensim.models import TfidfModel
import myconfig
import src
from src import myjieba_posseg
from myjieba_posseg import posseg as posseg
import user_prob
from user_prob.test import new_cut
import re
import numpy as np
import pdb
import codecs
import function_ultra.trie_tree as trie_tree
import function_ultra.utils as utils
#DEBUG = False
# Module feature flags.
DICT = False#$True
DEBUG = True
JIEBACUT= True
# NOTE(review): `global` at module scope is a no-op; these declarations
# have no effect here.
global r_cnt
global w_cnt
r_cnt = 1
w_cnt = 0
standard_addr = {}
# Loads the standard address book from disk at import time (I/O side effect).
load_json = lambda x:json.load(open(x,'r',encoding='utf-8'))
standard_addr = load_json(myconfig.STDADD)
standard_dct = {}
ks = []
vs = []
# Collect the distinct address types (keys) and names (values).
for item in standard_addr['RECORDS']:
    v = item['name']
    k = item['type']
    ks.append(k)
    vs.append(v)
keys = list(set(ks))
values = list(set(vs))
# Administrative address-hierarchy labels, coarsest to finest, plus the
# bookkeeping fields "sent" and "rw".
level_keys = ["省","市","区","社区","村居委会","街路巷名","自然村组",\
        "门牌号","小区名","建筑物名称","组团名称","栋号",\
        "单元号","楼层","户室号","sent","rw"]
out_keys = ["省","市","区","社区","村居委会","街路巷名","自然村组","门牌号","小区名","组团名称","栋号","单元号","楼层","户室号"]
global global_cnt
def read_standard_data(self,docpath='standard_address.json'):
    """Load the standard-address JSON file and return it as a dict.

    *self* is unused -- the function is defined at module level but kept
    its method-style signature for caller compatibility.

    Everything after the original early `return info` (building
    standard_kvs/standard_num index dicts) was unreachable dead code and
    has been removed; the file handle is now closed via a with-block.
    """
    with open(docpath, 'r', encoding='utf-8') as fl:
        info = json.load(fl)
    return info  # the raw standard address book
def gen_word_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_WORD):
    """Build a trie of the individual '/'-separated words of *filename*,
    pickle it to *sav_file*, and return it.

    Fixes: the input file handle is now closed (with-block) and the log
    messages name this function instead of gen_address_tree.
    """
    print('\n>gen_word_tree start')
    my_tree = trie_tree.Trie()
    with open(filename, 'r') as df:
        lines = df.readlines()
    print(len(lines))
    for sent in lines:
        for word in sent.split('/'):
            my_tree.insert(word)
    utils.save_var(my_tree, sav_file)
    print('\n>my word tree save ok')
    return my_tree
def gen_std_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_TREE,delimeter='/'):
    """Build a trie whose entries are whole address-part lists (one line of
    *filename* split on *delimeter*), pickle it to *sav_file*, return it.

    Fix: the input file handle is now closed via a with-block.
    """
    print('\n>gen_std_tree start')
    my_tree = trie_tree.Trie()
    with open(filename, 'r') as df:
        lines = df.readlines()
    for sent in lines:
        my_tree.insert(sent.split(delimeter))
    utils.save_var(my_tree, sav_file)
    print('\n>my std tree save ok')
    return my_tree
def remove_nan(item):
    """Return *item* without entries containing the substring 'nan'.

    Note this is a substring test, so e.g. 'banana' is also dropped.
    """
    return [node for node in item if 'nan' not in node]
def gen_std_tree_from_dataframe(data_src, sav_file=myconfig.MY_TREE):
    # Build the standard-address trie from dataframe-like rows.
    """Insert every NaN-stripped row of *data_src* into a trie, pickle the
    trie to *sav_file*, and return myconfig.SUCCESS.

    Fix: removed leftover debugging (`print(clritem)` and `pdb.set_trace()`)
    -- the breakpoint froze every production call on the first row.
    """
    print('\n>gen_std_tree_from_dataframe start')
    my_tree = trie_tree.Trie()
    for item in data_src:
        clritem = remove_nan(item)
        my_tree.part_insert(my_tree.root, clritem)
    utils.save_var(my_tree, sav_file)
    print('\n>gen_std_tree_from_dataframe ready and save finish')
    return myconfig.SUCCESS
def gen_address_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_TREE):
    """Build a trie of whole address lines from *filename*, pickle it to
    *sav_file*, and return it.

    Fix: the input file handle is now closed via a with-block.
    """
    print('\n>gen_address_tree start')
    my_tree = trie_tree.Trie()
    with open(filename, 'r') as df:
        for sent in df:
            my_tree.insert(sent)
    utils.save_var(my_tree, sav_file)
    print('\n>my address tree save ok')
    return my_tree
def gen_zhengzhou_tree(dirname=myconfig.ZZ_STD_ADD,sav_file=myconfig.zhengzhou_std_word,sav_file_2=myconfig.zhengzhou_std_tree):
    """Build the Zhengzhou standard-address tries.

    Walks every file under *dirname*; for each CSV-ish line, takes column 1,
    normalizes it with utils.pre_trans, logs "normalized<TAB>raw" pairs to
    ./addr_match.txt, and inserts the normalized form into the sentence trie
    (*sav_file_2*) and the raw form into the word trie (*sav_file*).

    Fixes: every input file handle is now closed (the original leaked one
    open handle per file); removed the unused `sum_lines` local.
    """
    print('\n>gen_zhengzhou_tree start')
    my_tree = trie_tree.Trie()
    my_word = trie_tree.Trie()
    cnt = 0
    with open("./addr_match.txt", 'w+') as addr_kv_rec:
        for _, _, fs in os.walk(dirname):
            for f in fs:
                pth = os.path.join(dirname, str(f))
                with open(pth, 'r') as src:
                    lines = src.readlines()
                np.random.shuffle(lines)
                for line in lines:
                    if not ',' in line:
                        continue
                    _line = line.split(',')[1]
                    line = utils.pre_trans(_line)
                    addr_kv_rec.write('%s\t%s\n'%(str(line),str(_line)))
                    cnt += 1
                    if cnt % 10000 == 1:  # progress heartbeat
                        print(cnt)
                    my_tree.insert(line)
                    my_word.insert(_line)
        utils.save_var(my_word, sav_file)
        utils.save_var(my_tree, sav_file_2)
        print('\n>my address tree save ok')
def load_address_tree(sav_file='./my_tree.pkl'):
    """Unpickle and return a previously saved address trie."""
    return utils.read_var(sav_file)
#gen_address_tree()
# NOTE(review): __name__ is never "__time__", so this whole branch is dead.
# Presumably either a deliberate way to disable the demo run or a typo for
# "__main__" -- confirm intent before "fixing".
if __name__ == "__time__":
    pass
    print('')
    gen_address_tree(filename='/home/distdev/src/iba/dmp/gongan/gy_addr_normal/pre_data/yyap_address_tree.csv',sav_file='./my_tree.pkl')
| [
"2364839934@qq.com"
] | 2364839934@qq.com |
bce41ae64991eaa9ef5e290859b7196415b1c8d0 | e5a62b970378386ec1b19ee7adc707c445e19a4f | /problems/p072/p072_solution.py | dba19258f0c7370a6a37dc881615ca2677071f15 | [] | no_license | lawrencium/project_euler | a39976d9b3619c2edae3e3f1b4f0bdd1ec82162d | 96ac5684b449d327e7f63a8c63cdb46660bff731 | refs/heads/master | 2021-01-17T07:46:43.603066 | 2017-09-25T05:56:05 | 2017-09-25T05:56:05 | 23,099,118 | 0 | 0 | null | 2016-07-22T22:24:47 | 2014-08-19T06:15:47 | Python | UTF-8 | Python | false | false | 1,467 | py | from abc import abstractmethod
from problems.util.prime import sieve_of_eratosthenes, get_factorization
from problems.util.solutiontimer import time_function
# Upper bound for denominators (Project Euler 72 asks for d <= 1,000,000).
d = 1000000
# d = 8
# All primes up to d; the set gives O(1) primality tests.
sieve = sieve_of_eratosthenes(d)
sieve_set = set(sieve)
def main():
    """Count reduced proper fractions n/den for 2 <= den <= d and print it.

    Each denominator contributes phi(den) numerators; the factory picks the
    cheap prime path (p - 1) or the totient computation.  The bare py2-only
    `print x` statement was replaced with the parenthesized form, valid on
    both Python 2 and 3.
    """
    number_reduced_fractions = 0
    factory = DenominatorHandlerFactory()
    for denominator in range(2, d + 1):
        number_reduced_fractions += factory.get(denominator).calculate_number_reduced_numerators()
    print(number_reduced_fractions)
class DenominatorHandler(object):
    """Base strategy: count numerators coprime with a fixed denominator.

    NOTE(review): @abstractmethod has no effect without ABCMeta as the
    metaclass, so instantiating this base is not actually prevented.
    """
    def __init__(self, denominator):
        # Denominator this handler computes the reduced-numerator count for.
        self._denominator = denominator

    @abstractmethod
    def calculate_number_reduced_numerators(self):
        """Return how many n in [1, d) satisfy gcd(n, d) == 1."""
        pass
class PrimeDenominatorHandler(DenominatorHandler):
    """Fast path for prime denominators: phi(p) == p - 1."""
    def calculate_number_reduced_numerators(self):
        return self._denominator - 1
class CompositeDenominatorHandler(DenominatorHandler):
    """Composite denominators fall back to Euler's totient function."""
    def calculate_number_reduced_numerators(self):
        return calculate_totient(self._denominator)
class DenominatorHandlerFactory(object):
    """Pick the prime fast path when the denominator is in the prime sieve."""
    def get(self, denominator):
        # sieve_set is the module-level set of primes up to d.
        return PrimeDenominatorHandler(denominator) if denominator in sieve_set else CompositeDenominatorHandler(
            denominator)
def calculate_totient(n):
    """Euler's totient of *n* via the product formula, in exact integer math.

    The original accumulated `n * prod(1 - 1/p)` in floating point and
    truncated with int(), which can come out one too low when the float
    product lands just below the true integer.  Dividing before multiplying
    keeps every intermediate an exact int (totient stays divisible by each
    successive distinct prime).

    Assumes get_factorization yields the distinct prime factors of n, as the
    original's formula already required.
    """
    prime_factorization = get_factorization(n, sieve)
    totient = n
    for prime in prime_factorization:
        totient = totient // prime * (prime - 1)
    return totient
if __name__ == '__main__':
    time_function(main)  # times and runs the solution when executed directly
| [
"ltc35@cornell.edu"
] | ltc35@cornell.edu |
1da4cd6e7a958faab83c1099e1a7f95e2a6fd87f | c00790236ba746596f205bcb609aee44a369800b | /pi_scale_server.py | 5e1567c8433fdc197e8310ca9ea7b7118f567950 | [
"MIT"
] | permissive | lhenkel/ToledoScalePiServer | ab6e335735bb4fa84bef60e2c36f9c895ee1ea7b | 1fa8f675d971cd92c84206b639b72858d81f0598 | refs/heads/master | 2020-04-10T07:11:32.851377 | 2018-12-07T22:41:14 | 2018-12-07T22:41:14 | 160,874,884 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,433 | py | from serial import *
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import re
import subprocess
import socket
import urllib2
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import glob
import os
PORT_NUMBER = 8675
def find_usb_tty(vendor_id = None, product_id = None) :
    """Scan /sys/bus/usb/devices for USB-serial nodes.

    Returns device paths like ['/dev/ttyUSB0'], optionally filtered by USB
    vendor/product id (None matches anything).  Devices whose sysfs entries
    lack id files are skipped.  (Removed the dead `pass` statements that
    cluttered the original.)
    """
    tty_devs = []
    for dn in glob.glob('/sys/bus/usb/devices/*') :
        try :
            vid = int(open(os.path.join(dn, "idVendor" )).read().strip(), 16)
            pid = int(open(os.path.join(dn, "idProduct")).read().strip(), 16)
            if ((vendor_id is None) or (vid == vendor_id)) and ((product_id is None) or (pid == product_id)) :
                dns = glob.glob(os.path.join(dn, os.path.basename(dn) + "*"))
                for sdn in dns :
                    for fn in glob.glob(os.path.join(sdn, "*")) :
                        if re.search(r"\/ttyUSB[0-9]+$", fn) :
                            tty_devs.append(os.path.join("/dev", os.path.basename(fn)))
        except ( ValueError, TypeError, AttributeError, OSError, IOError ) :
            # Entry without readable id files -- not a USB device dir; skip.
            pass
    return tty_devs
def detect_USB_serial():
    """Return True when `lsusb` lists a device tagged "Serial Port". (Py2)"""
    # NOTE(review): non-raw pattern string; the \s/\d escapes happen to
    # survive, but a raw string would be safer.
    device_re = re.compile("Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
    df = subprocess.check_output("lsusb")
    devices = []
    for i in df.split('\n'):
        if i:
            info = device_re.match(i)
            if info:
                dinfo = info.groupdict()
                # Rewrite bus/device numbers into a /dev/bus/usb path.
                dinfo['device'] = '/dev/bus/usb/%s/%s' % (dinfo.pop('bus'), dinfo.pop('device'))
                devices.append(dinfo)
    #print devices
    for device in devices:
        #print device
        if "Serial Port" in device['tag']:
            print "Got Serial USB"
            return True
    return False
def redetect_settings():
    # Because scales like to randomly get new baud rates, because of course they do
    """Brute-force every port/baud/parity/stopbit/bytesize combination until
    the scale answers a weight query ("W\\r") with a line containing "lb".

    Returns a settings dict on success, False when no port or no working
    combination is found.  (Python 2; each probe opens and closes the port
    with a 0.2 s read timeout.)
    """
    possible_usb_ports = find_usb_tty()
    possible_baud_rates = [9600, 4800, 115200, 57600, 1200, 2400, 19200, 38400]
    possible_bytesizes = [7,5,6,8]
    possible_parity = [PARITY_ODD,'N', PARITY_EVEN]
    possible_stopbits = [2,1]
    if (len(possible_usb_ports) == 0):
        print "No USB/tty detected"
        return False
    else:
        print "Detecting Baud Rate.."
        for usb_port in possible_usb_ports:
            for baud in possible_baud_rates:
                for parity in possible_parity:
                    for stopbit in possible_stopbits:
                        for bytesize in possible_bytesizes:
                            # NOTE(review): baud is printed twice; the port is
                            # never shown -- probably a copy/paste slip.
                            print "Attempting: " + str(baud) + ' - ' + str(parity) + ' - ' + str(baud) + ' - ' + str(bytesize) + ' - ' + str(stopbit)
                            serial = Serial(port = usb_port, baudrate = baud , bytesize = bytesize, parity = parity, stopbits =stopbit, timeout = .2)
                            serial.write("W\r") #S D 6.35 g
                            s = serial.readline()
                            serial.close()
                            if "lb" in s:
                                print "Detected" + str(usb_port) + ' - ' + str(baud) + ' - ' + str(parity) + ' - ' + str(baud) + ' - ' + str(bytesize) + ' - ' + str(stopbit)
                                return {'usb_port':usb_port, 'baud':baud, 'parity':parity, 'bytesize':bytesize, 'stopbits':stopbit}
    print "No Port detected"
    return False
def get_weight(serial):
    """Ask the scale for its weight ("W\\r") and return the first decimal
    number found in the reply, as a string; False on any failure. (Py2)"""
    try:
        serial.write("W\r") #S D 6.35 g
    except SerialException:
        print "Serial Exception.. Nuts"
        return False
    count = 0
    while count < 5:
        try:
            s = serial.readline()
        except SerialException:
            print "Serial Exception.. Nuts"
            return False
        count = count + 1
        #print(s)
        if (len(s.strip()) > 1):
            # Only the first 7 chars are scanned for a "d.d" number.
            floats_arr = re.findall("\d+\.\d+", s[0:7])
            if len(floats_arr) > 0:
                return floats_arr[0]
            else:
                return False
        # NOTE(review): count is incremented twice per iteration, so at most
        # ~3 reads happen despite the `< 5` bound -- possibly unintended.
        count = count + 1
    return False
class MyHandler(BaseHTTPRequestHandler):
    """HTTP handler exposing /getWeight and /debug for the scale. (Py2)

    NOTE(review): do_GET reads the module-global `context`, not
    self.context -- the instance copy made in __init__ is effectively unused.
    """
    def __init__(self, context, *args):
        self.context = context
        BaseHTTPRequestHandler.__init__(self, *args)
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        if self.path.endswith("debug"):
            # Dumps the raw context dict back to the client.
            self.wfile.write( context )
        else:
            if self.path.endswith("getWeight"):
                serial = context['serial_obj']
                got_weight = get_weight(serial)
                if (got_weight == False):
                    # Read failed: re-probe the USB port and scale settings.
                    print "Got Error, troubleshooting.."
                    if detect_USB_serial() == False:
                        print "Did not find Serial on USB"
                    else:
                        settings = redetect_settings()
                        if settings == False:
                            print "Could not detect Baud Rate etc during Call"
                            message = "Could not detect Baud Rate etc,"
                            print message
                            # NOTE(review): neither `alert` nor `my_ip` is
                            # defined anywhere in this file -- this path
                            # raises NameError; confirm the intended helper.
                            alert('lee.henkel@kng.com', 'Problem with PI scale' + my_ip, message)
                        else:
                            print "Got settings (maybe they changed)"
                            # Reopen the port with the rediscovered settings
                            # and retry once.
                            serial = Serial(port = settings['usb_port'], baudrate = settings['baud'] , bytesize = settings['bytesize'], parity = settings['parity'], stopbits = settings['stopbits'], timeout = 1)
                            context['serial_obj'] = serial
                            self.context = context
                            got_weight = get_weight(serial)
                            self.wfile.write( got_weight )
                else:
                    self.wfile.write( got_weight )
            else:
                self.wfile.write( "Hello" )
        return
# Startup: locate the scale's serial port, open it, then serve HTTP
# requests forever on PORT_NUMBER. (Python 2)
print find_usb_tty()
if detect_USB_serial() == False:
    message = "Did not find Serial on USB, Bailing"
    print message
else:
    settings = redetect_settings()
    if settings == False:
        message = "Could not detect Baud Rate etc,"
        print message
    else:
        serial = Serial(port = settings['usb_port'], baudrate = settings['baud'] , bytesize = settings['bytesize'], parity = settings['parity'], stopbits = settings['stopbits'], timeout = 1)
        try:
            #Create a web server and define the handler to manage the
            #incoming request
            context = {
                'serial_obj' : serial
            }
            # Factory closure so each request handler receives the shared
            # context dict as its first argument.
            def handler(*args):
                MyHandler(context, *args)
            server = HTTPServer(('', PORT_NUMBER), handler)
            print 'Started httpserver on port ' , PORT_NUMBER
            #Wait forever for incoming http requests
            server.serve_forever()
        except KeyboardInterrupt:
            print '^C received, shutting down the web server'
            server.socket.close()
"lee.henkel@kng.com"
] | lee.henkel@kng.com |
35c107479c099af9ea0ca70ffefed3c45c7af786 | adb03bb1735a2bc87c4bbb0c6037e835e28563d9 | /scripts/teleop.py | 4d281e1e50dfc697db182a6980ed71ce9ccc3439 | [] | no_license | 2038prob/lab6 | d5a415859dad2cbc2335880366db247bdea4a67c | 4d1fa120d12c208fbcb6698eff8d87a602a85fcc | refs/heads/master | 2023-04-15T18:18:14.828425 | 2021-04-26T17:24:54 | 2021-04-26T17:24:54 | 361,834,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import UInt16
import sys, select, os
if os.name == 'nt':
import msvcrt
else:
import tty, termios
MAX = 180
MIN = 0
msg = """
Control Your Traxxis!
--------------------------------
Controls:
w
a s d
x
w/s : increase/decrease throttle (MIN = 0, MAX = 180)
a/d : increase/decrease steering angle (MIN = 30, MAX = 150)
space key, x : force stop
CTRL-C to quit
"""
def getKey():
if os.name == 'nt':
if sys.version_info[0] >= 3:
return msvcrt.getch().decode()
else:
return msvcrt.getch()
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def constrain(input, low=MIN, high=MAX):
if input < low:
input = low
elif input > high:
input = high
else:
input = input
return input
def pretty(steer, throttle):
print("currently:\tsteering angle %s\t throttle %s " % (steer, throttle))
if __name__ == "__main__":
if os.name != 'nt':
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('traxxis_teleop')
pub1 = rospy.Publisher('servo2', UInt16, queue_size=10)
pub2 = rospy.Publisher('servo1', UInt16, queue_size=10)
count = 0
steer = 90
throttle = 90
while(1):
key = getKey()
if count % 20 ==0:
print(msg)
count += 1
if key != '':
count += 1
if key == 'w':
print("Key Pressed: " + key)
throttle = constrain(throttle+30)
pretty(steer, throttle)
elif key == 's':
print("Key Pressed: " + key)
throttle = constrain(throttle-30)
pretty(steer, throttle)
elif key in ['x', ' ']:
print("Key Pressed: " + key)
throttle = 90
pretty(steer, throttle)
elif key == 'a':
print("Key Pressed: " + key)
steer = constrain(steer-30, 30, 150)
pretty(steer, throttle)
elif key == 'd':
print("Key Pressed: " + key)
steer = constrain(steer+30, 30, 150)
pretty(steer, throttle)
else:
if (key == '\x03'):
break
pub1.publish(steer)
pub2.publish(throttle)
| [
"christopher.m.korpela.mil@mail.mil"
] | christopher.m.korpela.mil@mail.mil |
011b6cd63f0e69535e48f048f9780470226cac42 | 5313fe47b0dd453af3ea51bb707a9882242c37d3 | /VisualPIC/Views/createVTKAnimationWindow.py | 6ff5f3f8f51e19a3a73d3137be7aeecf2dae2e38 | [] | no_license | QJohn2017/VisualPIC | c347d0864907b5261ed737cfa1a4b780ab69fe78 | 5b7e0c10f8b0589ec24f41fc8c631d1c6cd34bfd | refs/heads/master | 2023-03-03T07:16:58.382512 | 2018-06-05T17:14:04 | 2018-06-05T17:14:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,007 | py | # -*- coding: utf-8 -*-
#Copyright 2016-2018 Angel Ferran Pousa, DESY
#
#This file is part of VisualPIC.
#
#VisualPIC is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#VisualPIC is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with VisualPIC. If not, see <http://www.gnu.org/licenses/>.
import os
from PyQt5 import QtCore, QtWidgets
from PIL import Image
import numpy as np
class CreateVTKAnimationWindow(QtWidgets.QDialog):
def __init__(self,parent=None):
super(CreateVTKAnimationWindow, self).__init__(parent)
self.main_window = parent
self.verticalLayout = QtWidgets.QVBoxLayout(self)
self.verticalLayout.setObjectName("verticalLayout")
self.label_4 = QtWidgets.QLabel(self)
self.label_4.setObjectName("label_4")
self.verticalLayout.addWidget(self.label_4)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.firstStep_lineEdit = QtWidgets.QLineEdit(self)
self.firstStep_lineEdit.setMaximumSize(QtCore.QSize(100, 16777215))
self.firstStep_lineEdit.setObjectName("firstStep_lineEdit")
self.horizontalLayout.addWidget(self.firstStep_lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_2 = QtWidgets.QLabel(self)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.lastStep_lineEdit = QtWidgets.QLineEdit(self)
self.lastStep_lineEdit.setMaximumSize(QtCore.QSize(100, 16777215))
self.lastStep_lineEdit.setObjectName("lastStep_lineEdit")
self.horizontalLayout_2.addWidget(self.lastStep_lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_3 = QtWidgets.QLabel(self)
self.label_3.setObjectName("label_3")
self.horizontalLayout_3.addWidget(self.label_3)
self.frequency_lineEdit = QtWidgets.QLineEdit(self)
self.frequency_lineEdit.setMaximumSize(QtCore.QSize(100, 16777215))
self.frequency_lineEdit.setObjectName("frequency_lineEdit")
self.horizontalLayout_3.addWidget(self.frequency_lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.onlySnaps_checkBox = QtWidgets.QCheckBox(self)
self.onlySnaps_checkBox.setObjectName("onlySnaps_checkBox")
self.horizontalLayout_4.addWidget(self.onlySnaps_checkBox)
self.makeVideo_checkBox = QtWidgets.QCheckBox(self)
self.makeVideo_checkBox.setObjectName("makeVideo_checkBox")
self.horizontalLayout_4.addWidget(self.makeVideo_checkBox)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.line = QtWidgets.QFrame(self)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.label_5 = QtWidgets.QLabel(self)
self.label_5.setObjectName("label_5")
self.verticalLayout.addWidget(self.label_5)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.frameTime_radioButton = QtWidgets.QRadioButton(self)
self.frameTime_radioButton.setChecked(True)
self.frameTime_radioButton.setObjectName("frameTime_radioButton")
self.horizontalLayout_5.addWidget(self.frameTime_radioButton)
self.totalTime_radioButton = QtWidgets.QRadioButton(self)
self.totalTime_radioButton.setObjectName("totalTime_radioButton")
self.horizontalLayout_5.addWidget(self.totalTime_radioButton)
self.gifTime_lineEdit = QtWidgets.QLineEdit(self)
self.gifTime_lineEdit.setMaximumSize(QtCore.QSize(100, 16777215))
self.gifTime_lineEdit.setObjectName("gifTime_lineEdit")
self.horizontalLayout_5.addWidget(self.gifTime_lineEdit)
self.label_6 = QtWidgets.QLabel(self)
self.label_6.setObjectName("label_6")
self.horizontalLayout_5.addWidget(self.label_6)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.line2 = QtWidgets.QFrame(self)
self.line2.setFrameShape(QtWidgets.QFrame.HLine)
self.line2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line2.setObjectName("line2")
self.verticalLayout.addWidget(self.line2)
self.label_7 = QtWidgets.QLabel(self)
self.label_7.setObjectName("label_7")
self.verticalLayout.addWidget(self.label_7)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.saveTo_lineEdit = QtWidgets.QLineEdit(self)
self.saveTo_lineEdit.setObjectName("saveTo_lineEdit")
self.horizontalLayout_3.addWidget(self.saveTo_lineEdit)
self.browse_Button = QtWidgets.QPushButton(self)
self.browse_Button.setObjectName("browse_Button")
self.horizontalLayout_3.addWidget(self.browse_Button)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.label_8 = QtWidgets.QLabel(self)
self.label_8.setObjectName("label_8")
self.horizontalLayout_4.addWidget(self.label_8)
self.fileName_lineEdit = QtWidgets.QLineEdit(self)
self.fileName_lineEdit.setObjectName("fileName_lineEdit")
self.horizontalLayout_4.addWidget(self.fileName_lineEdit)
self.create_Button = QtWidgets.QPushButton(self)
self.create_Button.setObjectName("create_Button")
self.verticalLayout.addWidget(self.create_Button)
QtCore.QMetaObject.connectSlotsByName(self)
# define non-UI properties
self.input_filter = InputFilter(parent)
self.has_already_run = False
# call initial methods
self.setup_ui()
self.register_ui_events()
def setup_ui(self):
self.setWindowTitle("Create Snapshots and Animation")
self.label_4.setText("Snapshots:")
self.label.setText("First time step:")
self.label_2.setText("Last time step:")
self.label_3.setText("Step size:")
self.firstStep_lineEdit.setText(str(self.main_window.time_steps[0]))
self.lastStep_lineEdit.setText(str(self.main_window.time_steps[-1]))
self.frequency_lineEdit.setText("1")
self.onlySnaps_checkBox.setText("Create snapshots.")
self.makeVideo_checkBox.setText("Create video.")
self.label_5.setText("Framerate:")
self.frameTime_radioButton.setText("Time between frames")
self.totalTime_radioButton.setText("Total time")
self.gifTime_lineEdit.setText("0.1")
self.label_6.setText("[s]")
self.label_7.setText("Save to:")
self.label_8.setText("File name:")
self.create_Button.setText("Create")
self.browse_Button.setText("Browse")
self.saveTo_lineEdit.setText(
self.main_window.get_data_folder_location() + "/3D_Animation")
self.fileName_lineEdit.setText("movie")
self.onlySnaps_checkBox.setChecked(True)
self.makeVideo_checkBox.setChecked(True)
def register_ui_events(self):
self.firstStep_lineEdit.installEventFilter(self.input_filter)
self.lastStep_lineEdit.installEventFilter(self.input_filter)
self.create_Button.clicked.connect(self.create_button_clicked)
self.browse_Button.clicked.connect(self.open_folder_dialog)
self.makeVideo_checkBox.toggled.connect(
self.make_video_checkbox_status_changed)
def open_folder_dialog(self):
folder_path = str(QtWidgets.QFileDialog.getExistingDirectory(
self, "Save animation to:", self.saveTo_lineEdit.text()))
if folder_path != "":
self.saveTo_lineEdit.setText(folder_path)
def create_button_clicked(self):
self.create_animation()
def make_video_checkbox_status_changed(self):
if self.makeVideo_checkBox.checkState():
self.frameTime_radioButton.setEnabled(True)
self.totalTime_radioButton.setEnabled(True)
self.gifTime_lineEdit.setEnabled(True)
else:
self.frameTime_radioButton.setEnabled(False)
self.totalTime_radioButton.setEnabled(False)
self.gifTime_lineEdit.setEnabled(False)
def create_animation(self):
self.has_already_run = False
simulation_time_steps = self.main_window.time_steps
first_time_step = int(self.firstStep_lineEdit.text())
first_index = np.where(simulation_time_steps == first_time_step)[0][0]
last_time_step = int(self.lastStep_lineEdit.text())
last_index = np.where(simulation_time_steps == last_time_step)[0][0]
freq = int(self.frequency_lineEdit.text())
for i in simulation_time_steps[first_index:last_index+1:freq]:
self.main_window.set_time_step(i)
movie_name = self.fileName_lineEdit.text()
frames_folder_path = (self.saveTo_lineEdit.text() + "/"
+ movie_name + "_frames")
frame_path = (frames_folder_path + "/" + movie_name
+ "_frame_" + str(i).zfill(6))
if not os.path.exists(frames_folder_path):
os.makedirs(frames_folder_path)
self.main_window.save_screenshot(frame_path)
class InputFilter(QtCore.QObject):
def __init__(self, mainWindow):
super(InputFilter,self).__init__()
self.main_window = mainWindow
def eventFilter(self, widget, event):
# FocusOut event
try:
if event.type() == QtCore.QEvent.FocusOut:
# do custom stuff
step = int(widget.text())
time_steps = self.main_window.time_steps
if step not in time_steps:
higher_time_steps = np.where(time_steps > step)[0]
if len(higher_time_steps) == 0:
closest_higher = time_steps[0]
else:
closest_higher = time_steps[
np.where(time_steps > step)[0][0]]
lower_time_steps = np.where(time_steps < step)[0]
if len(lower_time_steps) == 0:
closest_lower = time_steps[-1]
else:
closest_lower = time_steps[
np.where(time_steps < step)[0][-1]]
if abs(step-closest_higher) < abs(step-closest_lower):
widget.setText(str(closest_higher))
else:
widget.setText(str(closest_lower))
except:
pass
# return False so that the widget will also handle the event
# otherwise it won't focus out
return False | [
"angelmail_97@msn.com"
] | angelmail_97@msn.com |
32194e3ebe219fc0fafff1a524d1df176a58bd46 | e6dd07c0f9104051ac850d90045f38b2f4ae5c79 | /advance/apps.py | 888bee437ed3fc702f0f2e32d27113550ff20e64 | [] | no_license | mostaq22/drf-vue | 70fb3846814dd4814b0cd4b4544f1c2a7b4ff11e | 76061ff46831fe70c6b482b20b10377287f806ff | refs/heads/master | 2022-06-08T04:55:28.948466 | 2020-05-06T20:09:55 | 2020-05-06T20:09:55 | 257,530,726 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class AdvanceConfig(AppConfig):
name = 'advance'
| [
"mostaqcse@gmail.com"
] | mostaqcse@gmail.com |
aa82b974a22240b99dced283997bfed6a235f20a | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/models/inline_response20094_site.py | 60809a21527af5e7d917c54707fe326dad72bc22 | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse20094Site(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name'
}
def __init__(self, id=None, name=None): # noqa: E501
"""InlineResponse20094Site - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
@property
def id(self):
"""Gets the id of this InlineResponse20094Site. # noqa: E501
Unique Identifier of the [site](https://support.zoom.us/hc/en-us/articles/360020809672-Managing-Multiple-Sites). # noqa: E501
:return: The id of this InlineResponse20094Site. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InlineResponse20094Site.
Unique Identifier of the [site](https://support.zoom.us/hc/en-us/articles/360020809672-Managing-Multiple-Sites). # noqa: E501
:param id: The id of this InlineResponse20094Site. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this InlineResponse20094Site. # noqa: E501
Name of the site. # noqa: E501
:return: The name of this InlineResponse20094Site. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this InlineResponse20094Site.
Name of the site. # noqa: E501
:param name: The name of this InlineResponse20094Site. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse20094Site, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20094Site):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"github@rootalley.com"
] | github@rootalley.com |
3afdb2cb66231d75fca8ddb8c9b16ecc1bd4b320 | 856460e5bdbb4046b43258e48baaf26b314762be | /client/client_main.py | 743256e53901a2a668faa2810cba9e4d58633e6f | [] | no_license | imwiwiim90/robotZero | 7a6e510eaf007311ce803201f0939808188804fd | d2e6cbbaadc7cdad6c86f1c58a6a1f5c46e9e76b | refs/heads/master | 2020-05-26T15:15:35.855059 | 2017-04-07T22:18:07 | 2017-04-07T22:18:07 | 82,491,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | from controller_alt import PS4Controller
from MessageUDP import MessageUDP
from videoStream import Video
import cv2
import json
import threading
import time
lock = threading.Lock()
#ip_dir = '192.168.0.5'
#ip_dir = '127.0.0.1'
#ip_dir = '186.31.47.239'
ip_dir = '190.24.142.149'
PORT = 8000
ctrl = PS4Controller()
mailer = MessageUDP()
mailer.set_destination(ip_dir,PORT)
video = Video(mailer.socket,ip_dir,PORT)
udpReceiver = video.u
video.start()
while True:
time.sleep(1/30.0)
message = ctrl.getKeys()
#print message
if udpReceiver.in_routine == True:
print message["buttons"]['T']
if message["buttons"]['T'] == False:
continue
mailer.send(json.dumps(message))
img = video.getFrame()
cv2.imshow('stream',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| [
"wilsondaniel95@hotmail.com"
] | wilsondaniel95@hotmail.com |
4ce26279b22d0826b7b738a468f20d2e3d818f58 | 61b6e904a9e3d0bdf9cab361a08bd028b4ab0c77 | /cybo/training/trainer.py | 1759a571b982df55128f63976d309c54b47727e2 | [
"MIT"
] | permissive | bo-ke/cybo | 81c5e94ca7c6c78fa8342b35384d6e7cacc22140 | 612f30b0466b4ed6d04f5c2128b133367b55e576 | refs/heads/master | 2023-06-04T11:46:37.612829 | 2021-06-22T13:22:34 | 2021-06-22T13:22:34 | 321,707,325 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,198 | py | # -*- coding: utf-8 -*-
'''
@author: kebo
@contact: kebo0912@outlook.com
@version: 1.0
@file: trainer.py
@time: 2021/05/12 01:09:57
这一行开始写关于本文件的说明与解释
'''
import tensorflow as tf
from functools import wraps
from cybo.data.dataloader import Dataloader
from cybo.models.model import Model
from cybo.training.utils import evaluate
from cybo.training.tensorboard import TensorBoard, Mode
RUN_EAGER = False
def debug(run_eager: bool = False):
def wrapper(func):
@wraps(func)
@tf.function()
def run_with_tf_function(*args, **kwargs):
return func(*args, **kwargs)
@wraps(func)
def run_without_tf_function(*args, **kwargs):
return func(*args, **kwargs)
if run_eager:
return run_without_tf_function
else:
return run_with_tf_function
return wrapper
class Trainer():
def __init__(self,
model: Model,
training_dataloader: Dataloader,
optimizer: tf.keras.optimizers.Optimizer,
epochs: int,
checkpoint_path: str,
validation_dataloader: Dataloader = None,
patience: int = 5,
max_to_keep: int = 3,
monitor: str = "acc",
use_tensorboard: bool = False,
logs_dir: str = "logs/",
run_eager: bool = False
) -> None:
self.model = model
self.training_dataloader = training_dataloader
self.validation_dataloader = validation_dataloader or training_dataloader
self.optimizer = optimizer
self.epochs = epochs
self.loss_metric = tf.keras.metrics.Mean(name="loss")
self.val_loss_metric = tf.keras.metrics.Mean(name="val_loss")
self.checkpoint_path = checkpoint_path
self.max_to_keep = max_to_keep
self.monitor = monitor
self.patience = patience
self.use_tensorboard = use_tensorboard
if self.use_tensorboard:
self.tensorboard = TensorBoard(logs_dir=logs_dir)
global RUN_EAGER
RUN_EAGER = run_eager
def train(self):
ckpt = tf.train.Checkpoint(
model=self.model, optimizer=self.optimizer, epoch=tf.Variable(1))
ckpt_manager = tf.train.CheckpointManager(
ckpt, self.checkpoint_path, max_to_keep=self.max_to_keep)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
tf.print("restore from latest checkpoint succeed !")
best_acc = 0.0
early_stop_epochs = 0
for epoch in tf.range(ckpt.epoch, self.epochs+1):
tf.print(f"Epoch {epoch}/{self.epochs}:")
# 更新ckpt中epoch值
ckpt.epoch.assign_add(1)
metrics = self.model.get_metrics(reset=True, training=True)
self.loss_metric.reset_states()
bar = tf.keras.utils.Progbar(
len(self.training_dataloader),
unit_name="sample",
stateful_metrics=["loss"] + list(metrics.keys()))
for batch in self.training_dataloader:
self.train_step(batch)
log_values = [("loss", self.loss_metric.result().numpy())]
log_values.extend(
[(k, v) for k, v in self.model.get_metrics(
training=True).items()])
bar.add(self.training_dataloader.batch_size, log_values)
evaluate_metrics = evaluate(
model=self.model, dataloader=self.validation_dataloader)
tf.print("validation result - " +
" - ".join([f"{k}: {v}" for k, v in evaluate_metrics.items()]))
if self.use_tensorboard:
self.tensorboard.write_logs(
Mode.train.value, log_values, epoch)
self.tensorboard.write_logs(
Mode.evaluate.value,
[(k, v) for k, v in evaluate_metrics.items()],
epoch)
if evaluate_metrics.get(self.monitor, 1.0) >= best_acc:
ckpt_save_path = ckpt_manager.save()
tf.print(
f"Saving checkpoint for epoch {epoch} at {ckpt_save_path}")
best_acc = evaluate_metrics.get(self.monitor, 1.0)
early_stop_epochs = 0
else:
tf.print(f"validation {self.monitor} is not improved")
early_stop_epochs += 1
if early_stop_epochs >= self.patience:
tf.print(f"Early stopping with patience {self.patience}")
break
tf.print("Training completed !")
@debug(run_eager=RUN_EAGER)
def train_step(self, batch):
with tf.GradientTape() as tape:
output_dict = self.model(**batch, training=True)
gradients = tape.gradient(
output_dict["loss"],
self.model.trainable_variables)
self.optimizer.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.loss_metric.update_state(output_dict["loss"])
| [
"itachi971009@gmail.com"
] | itachi971009@gmail.com |
1553d5d277a72ef2274a5f58479348835444fb15 | c1e31f49a59beb6089328d09040f6f48d2e12cde | /lib/python2.7/tests/test_plotants.py | 7a7cdc4a579d40018e4ad412b42fcc84faf8eb45 | [
"Python-2.0"
] | permissive | kernsuite-debian/casalite | 3d81761e0d8ae497f97ea242e98d4357618a7591 | b620981f14f4ba5b77f347f649cd2c16d498db04 | refs/heads/master | 2021-06-22T16:22:51.765703 | 2021-02-25T13:28:05 | 2021-02-25T13:28:05 | 80,822,139 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | import os
import string
import sys
import shutil
import unittest
from __main__ import default
from tasks import *
#from taskinit import *
from __casac__ import tableplot
'''
Unit tests for task plotants. It tests the following parameters:
vis: wrong and correct values
figfile: if output is created
'''
tp = tableplot.tableplot()
class plotants_test(unittest.TestCase):
# Input and output names
msfile = 'ic2233_1.ms'
res = None
fig = 'plotantstest.png'
#tp = tableplot.tableplot()
def setUp(self):
self.res = None
default(plotants)
# Switch off the displaying of the GUI
tp.setgui(gui=False)
# It is not necessary to copy it for all tests
if (not os.path.exists(self.msfile)):
datapath = os.environ.get('CASAPATH').split()[0] + '/data/regression/ic2233/'
shutil.copytree(datapath+self.msfile, self.msfile)
def tearDown(self):
if (os.path.exists(self.msfile)):
os.system('rm -rf ' + self.msfile)
os.system('rm -rf ' + self.fig)
# Switch GUI back on
tp.setgui(gui=True)
def test1(self):
'''Test 1: Default parameters'''
self.res = plotants()
self.assertFalse(self.res)
def test2(self):
'''Test 2: Bad input file'''
msfile = 'badfile'
self.res = plotants(vis=msfile)
self.assertFalse(self.res)
def test3(self):
'''Test 3: Good input file and output exists'''
self.res = plotants(vis=self.msfile, figfile=self.fig)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test4(self):
'''Test 4: Label antenna IDs'''
self.res = plotants(vis=self.msfile, figfile=self.fig, antindex=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test5(self):
'''Test 5: Logarithmic antenna positions'''
self.res = plotants(vis=self.msfile, figfile=self.fig, logpos=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test6(self):
'''Test 6: Exclude antenna positions'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
exclude='1,5,19,14,10,13')
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test7(self):
'''Test 7: checkbaselines'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
checkbaselines=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test8(self):
'''Test 8: exclude checkbaselines'''
# antenna (name) 11 is already excluded by checkbaselines
# (warning)
self.res = plotants(vis=self.msfile, figfile=self.fig,
exclude='11', checkbaselines=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test9(self):
'''Test 9: Title'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
title='IC2233')
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test10(self):
'''Test 10: All arguments'''
self.res = plotants(self.msfile, self.fig, True, True, '1,3,5,7,9',
True, "IC2233")
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def suite():
return [plotants_test]
| [
"gijs@pythonic.nl"
] | gijs@pythonic.nl |
69bf66e39c764d3908167da27714165e6daf5213 | 865ca1055efa25c137aa0b3d977b028c0cc19d70 | /forkedapps/voucher/config.py | 8aa11027d9488feb15986136cbace374bd5dc320 | [] | no_license | mnzil/mnzil-test | f76ab8b5924e9810ad8e03d485b721e091da8980 | f7234f9e0306ceb126b774c0615740d4395d1b23 | refs/heads/master | 2021-01-20T19:57:02.588470 | 2016-03-14T16:17:36 | 2016-03-14T16:17:36 | 49,824,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from oscar.apps.voucher import config
class VoucherConfig(config.VoucherConfig):
name = 'forkedapps.voucher'
| [
"root@ip-172-31-28-4.us-west-2.compute.internal"
] | root@ip-172-31-28-4.us-west-2.compute.internal |
e7216dadcd35fa5652fd49a944bc5e806253d1fb | feca738182b7bcac3fe4f70a8eca2b628b2d0809 | /LojaSyst/Models/Equipamento.py | 6c526f70c11adeb03c9d023d668c04fc96d8d6a0 | [] | no_license | ViniciusAugustoRR/LojaSystem | bd82ae2a7fecbddc6df14af7f2c4331a2fe5e550 | 510d53343d7c9b083ff5fafe89b016288335ecd7 | refs/heads/master | 2020-12-09T01:21:47.131777 | 2020-02-13T23:11:53 | 2020-02-13T23:11:53 | 233,149,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py |
'''nserie: str,
equipamentoNome: str,
modelo: str,
marcaid: int,
acessorios: str'''
class EquipamentoMD:
def __init__(self):
self.Id = int
self.Serie_N = str
self.Nome = str
self.Modelo = str
self.Marca_id = int
self.Acessorios = str
| [
"vini.rios.rocha@gmail.com"
] | vini.rios.rocha@gmail.com |
ceacf929311c32f3db1575b140d2548b6ce06f9d | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/asr/models/k2_sequence_models.py | 087e9e41b85dd8673ac6e2ff667bad355c5e747f | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 13,426 | py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.models.rnnt_bpe_models import EncDecRNNTBPEModel
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
from nemo.collections.asr.parts.k2.classes import ASRK2Mixin
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
class EncDecK2SeqModel(EncDecCTCModel, ASRK2Mixin):
"""Encoder decoder models with various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "ctc")
if loss_type != "ctc" and loss_type != "mmi":
raise ValueError(f"Class {self.__class__.__name__} does not support `loss_type`={loss_type}")
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_vocabulary: List[str]):
"""
Changes vocabulary used during CTC decoding process. Use this method when fine-tuning on from pre-trained model.
This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would
use it if you want to use pretrained encoder when fine-tuning on a data in another language, or when you'd need
model to learn capitalization, punctuation and/or special characters.
If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
Returns: None
"""
super().change_vocabulary(new_vocabulary)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
log_probs, encoded_len, greedy_predictions = super().forward(
input_signal=input_signal,
input_signal_length=input_signal_length,
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
)
return self._forward_k2_post_processing(
log_probs=log_probs, encoded_length=encoded_len, greedy_predictions=greedy_predictions
)
class EncDecK2SeqModelBPE(EncDecCTCModelBPE, ASRK2Mixin):
    """Encoder decoder models with Byte Pair Encoding and various lattice losses."""

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        """Validate the k2 loss configuration, build the BPE CTC model and init the k2 modules."""
        loss_type = cfg.graph_module_cfg.get("loss_type", "ctc")
        # Only CTC-topology lattice losses are supported by this class.
        if loss_type not in ("ctc", "mmi"):
            raise ValueError(f"Class {self.__class__.__name__} does not support `loss_type`={loss_type}")
        super().__init__(cfg=cfg, trainer=trainer)
        self._init_k2()

    @classmethod
    def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
        """
        This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
        Returns:
            List of available pre-trained models.
        """
        return None

    def change_vocabulary(self, new_tokenizer_dir: str, new_tokenizer_type: str):
        """
        Swap the tokenizer used during CTC decoding (typically when fine-tuning a
        pre-trained model on new data). Only the decoder is affected; the encoder
        and pre-processing modules are left unchanged. Any existing token LM is
        invalidated and the k2 graph modules are rebuilt.

        Args:
            new_tokenizer_dir: Path to the new tokenizer directory.
            new_tokenizer_type: Either `bpe` (SentencePiece tokenizers) or
                `wpe` (`BertTokenizer`).
        Returns: None
        """
        super().change_vocabulary(new_tokenizer_dir, new_tokenizer_type)
        if self.use_graph_lm:
            # The previous token LM no longer matches the new vocabulary.
            self.token_lm = None
            logging.warning(
                f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
                a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
                or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
            )
        self.update_k2_modules(self.graph_module_cfg)

    @typecheck()
    def forward(
        self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
    ):
        """
        Forward pass: run the base acoustic model and apply k2 post-processing.

        Accepts either raw audio (`input_signal` [B, T] plus lengths) or
        pre-processed features (`processed_signal` [B, D, T] plus lengths).

        Returns:
            A tuple of 3 elements -
            1) The log probabilities tensor of shape [B, T, D].
            2) The lengths of the acoustic sequence after the encoder, of shape [B].
            3) The greedy token predictions of the model of shape [B, T] (via argmax).
        """
        outputs = super().forward(
            input_signal=input_signal,
            input_signal_length=input_signal_length,
            processed_signal=processed_signal,
            processed_signal_length=processed_signal_length,
        )
        log_probs, encoded_len, greedy_predictions = outputs
        return self._forward_k2_post_processing(
            log_probs=log_probs, encoded_length=encoded_len, greedy_predictions=greedy_predictions
        )
class EncDecK2RnntSeqModel(EncDecRNNTModel, ASRK2Mixin):
    """Encoder decoder models with various lattice losses."""

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        """Validate the k2 graph config, build the RNNT model and init the k2 modules."""
        loss_type = cfg.graph_module_cfg.get("loss_type", "rnnt")
        criterion_type = cfg.graph_module_cfg.get("criterion_type", "ml")
        # Only maximum-likelihood RNNT lattice training is supported by this class.
        if loss_type != "rnnt" or criterion_type != "ml":
            raise ValueError(
                f"""Class {self.__class__.__name__} does not support
                `criterion_type`={criterion_type} with `loss_type`={loss_type}"""
            )
        super().__init__(cfg=cfg, trainer=trainer)
        self._init_k2()

    @classmethod
    # FIX: the return annotation was `Optional[PretrainedModelInfo]`; the method is
    # documented to return a *list*, matching the sibling CTC k2 classes.
    def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
        """
        This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
        Returns:
            List of available pre-trained models.
        """
        pass

    def change_vocabulary(self, new_vocabulary: List[str]):
        """
        Changes vocabulary used during CTC decoding process. Use this method when fine-tuning on from pre-trained model.
        This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would
        use it if you want to use pretrained encoder when fine-tuning on a data in another language, or when you'd need
        model to learn capitalization, punctuation and/or special characters.
        If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
        Args:
            new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
            this is target alphabet.
        Returns: None
        """
        super().change_vocabulary(new_vocabulary)
        if self.use_graph_lm:
            # The old token LM no longer matches the new vocabulary; it must be
            # replaced by the caller before the graph modules are usable again.
            self.token_lm = None
            logging.warning(
                f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
                a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
                or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
            )
        self.update_k2_modules(self.graph_module_cfg)
class EncDecK2RnntSeqModelBPE(EncDecRNNTBPEModel, ASRK2Mixin):
    """Encoder decoder models with Byte Pair Encoding and various lattice losses."""

    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        """Validate the k2 graph config, build the BPE RNNT model and init the k2 modules."""
        loss_type = cfg.graph_module_cfg.get("loss_type", "rnnt")
        criterion_type = cfg.graph_module_cfg.get("criterion_type", "ml")
        # Only maximum-likelihood RNNT lattice training is supported by this class.
        if loss_type != "rnnt" or criterion_type != "ml":
            raise ValueError(
                f"""Class {self.__class__.__name__} does not support
                `criterion_type`={criterion_type} with `loss_type`={loss_type}"""
            )
        super().__init__(cfg=cfg, trainer=trainer)
        self._init_k2()

    @classmethod
    # FIX: the return annotation was `Optional[PretrainedModelInfo]`; the method is
    # documented to return a *list*, matching the sibling CTC k2 classes.
    def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
        """
        This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
        Returns:
            List of available pre-trained models.
        """
        pass

    def change_vocabulary(self, new_tokenizer_dir: str, new_tokenizer_type: str):
        """
        Changes vocabulary of the tokenizer used during CTC decoding process.
        Use this method when fine-tuning on from pre-trained model.
        This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would
        use it if you want to use pretrained encoder when fine-tuning on a data in another language, or when you'd need
        model to learn capitalization, punctuation and/or special characters.
        Args:
            new_tokenizer_dir: Path to the new tokenizer directory.
            new_tokenizer_type: Either `bpe` or `wpe`. `bpe` is used for SentencePiece tokenizers,
                whereas `wpe` is used for `BertTokenizer`.
        Returns: None
        """
        super().change_vocabulary(new_tokenizer_dir, new_tokenizer_type)
        if self.use_graph_lm:
            # The old token LM no longer matches the new vocabulary; it must be
            # replaced by the caller before the graph modules are usable again.
            self.token_lm = None
            logging.warning(
                f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
                a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
                or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
            )
        self.update_k2_modules(self.graph_module_cfg)
| [
"noreply@github.com"
] | noreply@github.com |
f821bfab7ae9d9781d32c46492f098dc9b856612 | 91e2f963ec4b13d38c3d5d0258ad0c48d4d674f1 | /wrappers/BlockDataWrapper.py | 84aca9a6dbc6f9535d864b0cfd6f2876d1b654ce | [
"MIT"
] | permissive | WouterGlorieux/BitcoinSpellbook-v0.2 | 6265589afc1197fe52e41b9dc701e4cc5294187f | 93b5480f87f4dc41c2d71093aa98d1fbdd83625c | refs/heads/master | 2021-06-02T06:48:25.607028 | 2016-09-23T20:03:55 | 2016-09-23T20:03:55 | 61,746,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import json
import urllib
import hashlib
import hmac
import base64
import logging
class BlockDataWrapper():
def __init__(self, url):
self.url = url
def utxos(self, addresses=None, provider=''):
    """GET /data/utxos for the given addresses.

    Args:
        addresses: optional iterable of address strings.
        provider: optional data-provider name forwarded to the service.
    Returns:
        The decoded JSON response dict; on any failure a dict of the form
        {'success': 0, 'error': 'Unable to retrieve utxos'}.
    """
    if not addresses:
        addresses = []
    response = {'success': 0}
    parameters = {'provider': provider}
    if addresses:
        # The service expects the addresses as one pipe-separated string.
        # (Replaces the original append-then-strip loop with str.join.)
        parameters['addresses'] = "|".join(addresses)
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/utxos?" + query_string
    try:
        ret = urllib2.urlopen(urllib2.Request(url))
        response = json.loads(ret.read())
    except Exception as ex:
        # Best-effort: log and return an error marker instead of raising.
        logging.warning(str(ex))
        response['error'] = 'Unable to retrieve utxos'
    return response
def balances(self, addresses=None, provider=''):
    """GET /data/balances for the given addresses; returns the decoded JSON
    response dict, or {'success': 0, 'error': ...} on failure."""
    if not addresses:
        addresses = []
    response = {'success': 0}
    parameters = {'provider': provider}
    if addresses:
        # The service expects the addresses as one pipe-separated string.
        str_addresses = ""
        for address in addresses:
            str_addresses += address + "|"
        str_addresses = str_addresses[:-1]  # drop the trailing '|'
        parameters['addresses'] = str_addresses
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/balances?" + query_string
    try:
        ret = urllib2.urlopen(urllib2.Request(url))
        response = json.loads(ret.read())
    except Exception as ex:
        # Best-effort: log and return an error marker instead of raising.
        logging.warning(str(ex))
        response['error'] = 'Unable to retrieve balances'
    return response
def transactions(self, address, provider=''):
    """GET /data/transactions for a single address; returns the decoded JSON
    response dict, or {'success': 0, 'error': ...} on failure."""
    response = {'success': 0}
    parameters = {'address': address,
                  'provider': provider}
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/transactions?" + query_string
    try:
        ret = urllib2.urlopen(urllib2.Request(url))
        response = json.loads(ret.read())
    except Exception as ex:
        logging.warning(str(ex))
        response['error'] = 'Unable to retrieve transactions'
    return response
def block(self, height, provider=''):
    """GET /data/block for the given block height; returns the decoded JSON
    response dict, or {'success': 0, 'error': ...} on failure."""
    response = {'success': 0}
    parameters = {'height': str(height),  # height may be an int; query wants a string
                  'provider': provider}
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/block?" + query_string
    try:
        ret = urllib2.urlopen(urllib2.Request(url))
        response = json.loads(ret.read())
    except Exception as ex:
        logging.warning(str(ex))
        response['error'] = 'Unable to retrieve block'
    return response
def latest_block(self, provider=''):
    """GET /data/latest_block; returns the decoded JSON response dict,
    or {'success': 0, 'error': ...} on failure."""
    response = {'success': 0}
    parameters = {'provider': provider}
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/latest_block?" + query_string
    try:
        ret = urllib2.urlopen(urllib2.Request(url))
        response = json.loads(ret.read())
    except Exception as ex:
        logging.warning(str(ex))
        response['error'] = 'Unable to retrieve latest block'
    return response
def prime_input_address(self, txid, provider=''):
    """GET /data/prime_input_address for a transaction id; returns the decoded
    JSON response dict, or {'success': 0, 'error': ...} on failure."""
    response = {'success': 0}
    parameters = {'txid': txid,
                  'provider': provider}
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/prime_input_address?" + query_string
    try:
        ret = urllib2.urlopen(urllib2.Request(url))
        response = json.loads(ret.read())
    except Exception as ex:
        logging.warning(str(ex))
        response['error'] = 'Unable to retrieve prime input address'
    return response
def save_provider(self, name, priority, provider_type, param="", api_key='', api_secret=''):
    """POST /data/save_provider with an HMAC-SHA512 signed request body.

    Args:
        name: provider name.
        priority: provider priority.
        provider_type: e.g. 'Blocktrail.com' or 'Insight'.
        param: extra parameter, only sent for provider types that need it.
        api_key / api_secret: credentials used to sign the request.
    Returns:
        The decoded JSON response dict; on any failure a dict of the form
        {'success': 0, 'error': 'Unable to save provider'}.
    """
    response = {'success': 0}
    parameters = {'name': name,
                  'priority': priority,
                  'provider_type': provider_type}
    # BUG FIX: the original condition was
    #   `provider_type == 'Blocktrail.com' or 'Insight'`
    # which is always truthy ('Insight' is a non-empty string), so `param`
    # was sent for every provider type. Test membership instead.
    if provider_type in ('Blocktrail.com', 'Insight'):
        parameters['param'] = param
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/save_provider?" + query_string
    postdata = urllib.urlencode(parameters)
    # Signature: HMAC-SHA512 over SHA256(postdata), keyed with the base64-decoded secret.
    message = hashlib.sha256(postdata).digest()
    signature = hmac.new(base64.b64decode(api_secret), message, hashlib.sha512)
    headers = {
        'API_Key': api_key,
        'API_Sign': base64.b64encode(signature.digest())
    }
    try:
        request = urllib2.Request(url=url, data=postdata, headers=headers)
        data = urllib2.urlopen(request).read()
        response = json.loads(data)
    except Exception as ex:
        logging.warning(str(ex))
        response['error'] = 'Unable to save provider'
    return response
def delete_provider(self, name, api_key='', api_secret=''):
    """POST /data/delete_provider for the named provider, signed with
    HMAC-SHA512; returns the decoded JSON response dict, or
    {'success': 0, 'error': ...} on failure."""
    response = {'success': 0}
    parameters = {'name': name}
    query_string = urllib.urlencode(parameters)
    url = self.url + "/data/delete_provider?" + query_string
    postdata = urllib.urlencode(parameters)
    # Signature: HMAC-SHA512 over SHA256(postdata), keyed with the base64-decoded secret.
    message = hashlib.sha256(postdata).digest()
    signature = hmac.new(base64.b64decode(api_secret), message, hashlib.sha512)
    headers = {
        'API_Key': api_key,
        'API_Sign': base64.b64encode(signature.digest())
    }
    try:
        request = urllib2.Request(url=url, data=postdata, headers=headers)
        data = urllib2.urlopen(request).read()
        response = json.loads(data)
    except Exception as ex:
        logging.warning(str(ex))
        response['error'] = 'Unable to delete provider'
    return response
def get_providers(self):
response = {'success': 0}
parameters = {}
query_string = urllib.urlencode(parameters)
url = self.url + "/data/get_providers?" + query_string
try:
request = urllib2.Request(url=url)
data = urllib2.urlopen(request).read()
response = json.loads(data)
except Exception as ex:
logging.warning(str(ex))
response['error'] = 'Unable to get providers'
return response | [
"wouter@valyrian.tech"
] | wouter@valyrian.tech |
8dbe860dcd142c6356ebf8fbb80b5b2200bcd6e4 | 0feb885020bda66d4828e7a5b73caac4ca944303 | /code/test/integration_tests/pages/header_page.py | 21377146a2565c293081bfde0ff97440ab17dd47 | [] | no_license | OlehHappy/smartnest | 0bcfb373ca1dd9770d0cb5f7a983ab07be5db38f | 18757cf5576b7a6880c6aabf7f3da7539a8cf19a | refs/heads/master | 2021-01-12T05:08:02.818394 | 2017-01-02T22:44:33 | 2017-01-02T22:44:33 | 77,866,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | __author__ = 'Juraj'
from selenium.webdriver.common.by import By
from pages.page_base import Page
class Header(Page):
def __init__(self, driver):
self.driver = driver
# page elements locators section
# link pointing to user's name in page right upper corner
# self._user_name_link_locator = (By.CSS_SELECTOR, 'li.dropdown')
self._user_name_link_locator = (By.XPATH, '//*[@id="admin-property"]/header/nav/ul/li/a/span')
self._signout_link_locator = (By.LINK_TEXT, 'Signout')
@property
def user_name_link(self):
return self.driver.find_element(*self._user_name_link_locator)
@property
def signout_link(self):
return self.driver.find_element(*self._signout_link_locator)
| [
"olegtranslator@gmail.com"
] | olegtranslator@gmail.com |
5279c267aee7c9ab4445a98576cfcfaf40093b5f | d75b740cfe43b283fda6ea7370cbea6aaf3deb6c | /collective/constants.py | 0b9bdb55087adaab89664ab1952c5708baca2d04 | [] | no_license | HaySayCheese/OpenCorporaUA | 0911c29bf5d37880b130bfee2c0eb4793ce32c67 | 794254c65a098bcf38b2999704840e2614dc28f4 | refs/heads/master | 2021-01-17T05:19:19.519345 | 2015-09-14T11:58:10 | 2015-09-14T11:58:10 | 42,046,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | #coding=utf-8
class AbstractConstant(object):
    """Container mapping constant names to unique ids.

    `set_ids` installs a name->id mapping after verifying that no id is
    used twice; `count` tracks the number of entries.
    """

    def __init__(self):
        self.ids = {}    # name -> id mapping
        self.count = 0   # number of entries in self.ids

    def set_ids(self, ids_dict):
        """Install *ids_dict* as the mapping; raise ValueError on duplicate ids.

        BUG FIX: the original called `ids_dict.__values()` / `self.ids.__keys()`.
        Inside a class body those names are mangled to `_AbstractConstant__values`
        etc., which dicts do not have, so every call raised AttributeError.
        """
        # Check for duplicate ids (values must be unique).
        if len(set(ids_dict.values())) < len(ids_dict.values()):
            raise ValueError('Duplicate id detected.')
        self.ids = ids_dict
        self.count = len(self.ids)

    # system functions
    def __records(self):
        return self.ids

    def __values(self):
        return self.ids.values()

    def __keys(self):
        return self.ids.keys()
class Enum(object):
    """Python 2-era enum helper: subclasses declare constants as class
    attributes and values()/keys()/iteritems() expose the non-dunder ones.

    NOTE(review): uses `basestring` and dict.iteritems(), so this code is
    Python 2 only.
    """

    @classmethod
    def values(cls):
        # cls.__dict() mangles to cls._Enum__dict(), defined below.
        return cls.__dict().values()

    @classmethod
    def keys(cls):
        return cls.__dict().keys()

    @classmethod
    def iteritems(cls):
        return cls.__dict().iteritems()

    @classmethod
    def __dict(cls):
        # Collect the class's own attributes, skipping any name or string
        # value that starts with '__' (dunder/private entries).
        # NOTE(review): vars(cls) only sees attributes defined directly on
        # `cls`, not inherited ones — presumably intentional; confirm.
        result = {}
        for k, v in vars(cls).iteritems():
            if not isinstance(k, basestring):
                if not isinstance(v, basestring):
                    result[k] = v
                elif not v[:2] == '__':
                    result[k] = v
            else:
                if not k[:2] == '__':
                    if not isinstance(v, basestring):
                        result[k] = v
                    elif not v[:2] == '__':
                        result[k] = v
        return result
"dima.chizhevsky@gmail.com"
] | dima.chizhevsky@gmail.com |
6adb63d5796714f98ca7a02f21d5d0837cf0acc6 | 60cf6e400ec093268cf7b89ed818119e9c59f0ff | /miller_rabin_fermat_lucas_lehmer.py | fcb278c36a1f76adaf30538372be9a24a7e0d693 | [] | no_license | lion137/Python-Algorithms | 7b5e396ebcb113e112907661f88715791c90b0fe | 91502619f03c4be2229dd1f591e07ec1bb150859 | refs/heads/master | 2022-01-28T13:23:12.087976 | 2022-01-06T06:56:22 | 2022-01-06T06:56:22 | 79,455,277 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | # Primality Tests
#
# Copyleft 2018 lion137
#
# 1. Fermat Test
from random import randint
def modularexponenation(base, exponent, modulus):
    """Return (base ** exponent) % modulus via binary square-and-multiply."""
    if modulus == 1:
        return 0
    acc = 1
    b = base % modulus
    e = exponent
    while e > 0:
        if e & 1:
            acc = acc * b % modulus
        e >>= 1
        b = b * b % modulus
    return acc
# Fast Exp
def exp(x, n):
    """Return x ** n for a non-negative integer n (fast binary exponentiation).

    Raises:
        ValueError: if n is negative. (The original recursive version
        recursed forever on negative n until RecursionError.)
    """
    if n < 0:
        raise ValueError("n must be a non-negative integer")
    result = 1
    base = x
    # Iterative square-and-multiply: O(log n) multiplications, no recursion.
    while n > 0:
        if n & 1:
            result *= base
        base *= base
        n >>= 1
    return result
def fermat_test_single(n):
    """Perform a single Fermat primality test on n (n >= 2).

    Picks a random base a in [1, n - 1] and checks a**n == a (mod n).
    Always True for prime n; composites may still pass (pseudoprimes,
    Carmichael numbers), so combine rounds via fermat_extended().
    """
    a = randint(1, n - 1)
    # Built-in three-argument pow() performs modular exponentiation in C,
    # replacing the hand-rolled modularexponenation helper.
    return pow(a, n, n) == a
def fermat_extended(n, cnt):
    """Run cnt independent Fermat tests on n; False as soon as one fails."""
    # all() short-circuits exactly like the original early-return loop.
    return all(fermat_test_single(n) for _ in range(cnt))
# 2. Miler - Rabin
def miller_rabin(n, s):
    """Miller-Rabin probabilistic primality test: True if n is probably prime.

    Runs s independent witness rounds; error probability <= 4**(-s) for
    composite n.

    BUG FIX: the original inner loop never updated x0 after squaring and
    returned "witness" whenever a single squaring of a^u was != 1, so many
    primes with n - 1 divisible by 4 (e.g. 13) were reported composite.
    It also looped forever for n < 2. This version follows the standard
    CLRS WITNESS procedure and uses the built-in 3-arg pow().
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False

    # Write n - 1 = 2**t * u with u odd, t >= 1.
    t, u = 0, n - 1
    while u % 2 == 0:
        t += 1
        u //= 2

    def is_witness(a):
        """True if a proves n composite (Fermat failure or nontrivial sqrt of 1)."""
        x = pow(a, u, n)
        for _ in range(t):
            x_next = x * x % n
            if x_next == 1 and x != 1 and x != n - 1:
                return True  # nontrivial square root of 1 mod n
            x = x_next
        return x != 1  # now x == a**(n-1) mod n; must be 1 if n is prime

    for _ in range(s):
        if is_witness(randint(1, n - 1)):
            return False
    return True
# 3. Lucas - Lehmer Test, check primality only Mersenne Primes:
# https://en.wikipedia.org/wiki/Mersenne_prime
# Procedure Lucas-Lehmer, tests if Mp is prime, p > 2:
def lucas_lehmer(p):
    """Lucas-Lehmer test: True iff the Mersenne number M_p = 2**p - 1 is prime.

    Valid for odd prime p > 2 (M_2 = 3 needs a special case the original
    also lacked). Uses a bit shift for 2**p instead of the recursive exp()
    helper, making the function self-contained and faster.
    """
    m = (1 << p) - 1
    s = 4
    # s_{k+1} = s_k^2 - 2 (mod M_p); M_p is prime iff s_{p-2} == 0.
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0
if __name__ == '__main__':
# print(miller_rabin(2 ** 44497 - 1, 40)) # -> True in... stop after 2008 secs:)
print(lucas_lehmer(44497)) # -> True in 79.08 sec
| [
"lion137@gmail.com"
] | lion137@gmail.com |
f8b68369b0629d62b060a8a9ec9b70d6ad94d184 | 7d53faedef395ca31eb5d056da358ee9dfcc05a1 | /venv/bin/names | cd6580c21a1b060b9ed11b4b24fdc77cb1f6c7ac | [] | no_license | MaciejSwierad/hd_generator | 343622f129e96d805027bddef4ca38c522e5ecb8 | c5e855d08303113410786f9d6f0f67a0abd2800d | refs/heads/master | 2020-09-14T21:08:24.551584 | 2019-11-21T20:09:22 | 2019-11-21T20:09:22 | 223,256,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | #!/home/macias/PycharmProjects/hdGenerator/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'names==0.3.0','console_scripts','names'
__requires__ = 'names==0.3.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('names==0.3.0', 'console_scripts', 'names')()
)
| [
"maciej.swierad@gmail.com"
] | maciej.swierad@gmail.com | |
fbb4658c90d6d28b0bdc3e8198164c1013309d5a | 31e048782f9161894587489ff2a3f0ee1ddb8d7a | /leet_code/Tree/sum_of_left_leaves.py | 20b70e4cee9c2493b72d1f9eb49a7ba4574844e5 | [] | no_license | sankeerth/Practice | f2c81f0273ba5edbb4491f473278c5d2c4cd5e1e | df9791d18e98bd175bcddf47fcd24f06aef46258 | refs/heads/master | 2021-01-11T14:53:32.958725 | 2017-01-27T20:28:21 | 2017-01-27T20:28:21 | 80,243,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | class TreeNode:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def __repr__(self):
return 'TreeNode({})'.format(self.val)
def deserialize(string):
    """Rebuild a binary tree from a LeetCode-style level-order string.

    Accepts '[3,9,20,null,null,15,7]' (or the same with braces); the
    literal '{}' denotes an empty tree. Returns the root TreeNode or None.
    """
    if string == '{}':
        return None
    tokens = string.strip('[]{}').split(',')
    nodes = [None if tok == 'null' else TreeNode(int(tok)) for tok in tokens]
    # Pop children from the reversed list so they come off in level order.
    remaining = nodes[::-1]
    root = remaining.pop()
    for node in nodes:
        if node is not None:
            if remaining:
                node.left = remaining.pop()
            if remaining:
                node.right = remaining.pop()
    return root
def sum_of_left_leaves(root, is_left=False):
    """Add the values of all left leaves under *root* into the module-level
    accumulator ``res`` (caller must set ``res = 0`` before the first call)."""
    global res
    if root is None:
        return
    sum_of_left_leaves(root.left, True)
    # A left leaf: reached as a left child and has no children of its own.
    if is_left and root.left is None and root.right is None:
        res += root.val
    sum_of_left_leaves(root.right, False)
r = deserialize('[3,9,20,null,null,15,7]')
r = deserialize('[6,2,9,1,3,7,10,null,null,null,4,null,null,null,11]')
res = 0
sum_of_left_leaves(r)
print(res)
| [
"sankeerth456@gmail.com"
] | sankeerth456@gmail.com |
105b5bf8cb48f2ba85f5700e00a2b72766734d88 | 303d119180421b6cb7b2fbbf41f6c4959b54b2cd | /MON/osm_mon/plugins/CloudWatch/prod.py | 36766dc2add21579bae64a07f0ea39a19baa0022 | [
"Apache-2.0"
] | permissive | usmanjavaiduj/OSM | 2c508b1fcf89ccc80f7b9054743e9ceeb389f056 | 8a65c37e90cec9e896d52ea41eded8ac3541e063 | refs/heads/master | 2021-08-28T04:46:26.803468 | 2017-12-11T07:40:17 | 2017-12-11T07:40:17 | 106,265,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | import os
import sys
sys.path.append("../../core/message_bus")
from producer import KafkaProducer
from kafka import KafkaConsumer
from kafka.errors import KafkaError
import logging
import json
from jsmin import jsmin
producer = KafkaProducer('create_alarm_request')
producer.create_alarm_request( 'create_alarm_request', '','alarm_request')
#producer.update_alarm_request( 'update_alarm_request', '','alarm_request')
#producer.delete_alarm_request( 'delete_alarm_request', '','alarm_request')
#producer.acknowledge_alarm( 'acknowledge_alarm', '','alarm_request')
#producer.list_alarm_request( 'alarm_list_request', '','alarm_request')
server = {'server': 'localhost:9092', 'topic': 'alarm_request'}
_consumer = KafkaConsumer(bootstrap_servers=server['server'])
_consumer.subscribe(['alarm_response'])
for message in _consumer:
print json.loads(message.value)
#---------------------------------------------------------------------------------
"""
producer = KafkaProducer('read_metric_data_request')
#producer.create_metrics_request( 'create_metric_request', '','metric_request')
#producer.read_metric_data_request( 'read_metric_data_request', '','metric_request')
producer.update_metric_request( 'update_metric_request', '','metric_request')
#producer.delete_metric_request( 'delete_metric_request', '','metric_request')
#producer.list_metric_request( 'list_metric_request', '','metric_request')
# json_path = open(os.path.join("../../core/models/list_metric_req.json"))
# metric_info = json_path.read()
# metric_info = json.loads(metric_info)
# print metric_info
server = {'server': 'localhost:9092', 'topic': 'metric_response'}
_consumer = KafkaConsumer(bootstrap_servers=server['server'])
_consumer.subscribe(['metric_response'])
for message in _consumer:
print json.loads(message.value)
"""
| [
"usmanjavaid9423@gmail.com"
] | usmanjavaid9423@gmail.com |
8f2e3caa6aa3e8037118946cc4681495f25b6c40 | 9e4003f3a5974663a8ef49263fb3bb3028581604 | /hw24/EoS Game/level.py | 00e2747b8b692b3b07f1f0b6a8dff9ddafb127b5 | [] | no_license | RMeiselman/CS112-Spring2012 | 624fc89745eacbf08f7931dcd8041eeda2bb808c | dc55b3c04cb59d3ad26a6836c31c0c6e86781018 | refs/heads/master | 2021-01-18T12:17:01.315712 | 2012-05-06T00:35:43 | 2012-05-06T00:35:43 | 3,266,344 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #!/usr/bin/env python
import os
import pygame
from pygame import Rect, Surface
class Level(object):
    """One game level: the playable bounds plus the player entity.

    NOTE(review): relies on a `Player` class defined elsewhere in the
    project (not visible here) and on pygame's Rect.
    """

    def __init__(self, size):
        # Bounding rectangle of the level, anchored at the top-left origin.
        self.bounds = Rect((0,0), size)

    def restart(self):
        # Re-create the player and drop it at the centre of the level.
        self.player = Player()
        self.player.rect.center = self.bounds.center

    def update(self, dt):
        # Advance the player by dt (presumably the frame time step — confirm units with caller).
        self.player.update(dt)
| [
"Dagerothmeiselman@gmail.com"
] | Dagerothmeiselman@gmail.com |
12bfd823bba8659e67c22af6c2bd0062937a4c5f | 362224f8a23387e8b369b02a6ff8690c200a2bce | /django/django_orm/courses/courses/settings.py | d11c36f0bb97c266c1f7db84060415fcde1a5412 | [] | no_license | Helenyixuanwang/python_stack | ac94c7c532655bf47592a8453738daac10f220ad | 97fbc77e3971b5df1fe3e79652b294facf8d6cee | refs/heads/main | 2023-06-11T02:17:27.277551 | 2021-06-21T17:01:09 | 2021-06-21T17:01:09 | 364,336,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,353 | py | """
Django settings for courses project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_1&w+4p@b%g)to7vg0oi5+wjevbh58q0l1k3ieg9m7!lsjak@e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'courses_app',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',#newly added when watch django extra on platform
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',#newly added May 19,2021
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'courses.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'courses.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
| [
"wangyixuan@msn.com"
] | wangyixuan@msn.com |
c49b770e7e0eb0e6b026077bfc57767e6170ae0f | 9badb76369f2bb081afced41bb97900fb9b29245 | /site-packages/kivy/version.py | e6674be5c3ed45cf8eac5642212118096718451c | [
"MIT"
] | permissive | officekamiya/5yen | 69a8ba0f2edd10d56934b2806fa3644f46847406 | cb0e8a03b6a9decde0b5ab9329871da1ed1ba85f | refs/heads/master | 2020-12-05T20:08:54.191521 | 2020-03-04T01:34:40 | 2020-03-04T01:34:40 | 232,232,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # THIS FILE IS GENERATED FROM KIVY SETUP.PY
__version__ = '1.11.1'
__hash__ = 'Unknown'
__date__ = '20190717'
| [
"hayato@officekamiya.co.jp"
] | hayato@officekamiya.co.jp |
dd65586916f290702c8d0c8f526cdfa0b8907fc7 | 22e3a62bdd5922c2a7ff70216cf9a6b0d30ed3c3 | /TestQARTRole.py | 3103c53120e2fa86773f6c871b99d089362a0f48 | [] | no_license | RobQA/test | 74d7416ac2c5f9ae52e59a04fddd03c1a3615cd1 | be2106c013fb08dbbdf982e348a6f8e7060764a6 | refs/heads/master | 2020-07-13T02:43:25.327197 | 2019-09-01T13:53:04 | 2019-09-01T13:53:04 | 204,971,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,638 | py | from selenium import webdriver
from Logout import Logout
from Roles import Roles
from SearchFillAll import SearchFillAll
from Payroll import Payroll
from TestGroup import TestGroup
from TestCase import TestCase
from Obligation import Obligation
from IncreaseOverpayment import IncreaseOverpayment
from DecreaseOverpayment import DecreaseOverpayment
from SetOff import SetOff
from FreezeUnFreeze import FreezeUnFreeze
from ViewOverpaymentsDebts import ViewOverpaymentsDebts
from Utils import Utils
driver = webdriver.Chrome()
driver.implicitly_wait(1)
Utils.driver = driver
createObligationSteps = [
{'func': Utils.login, 'args': [Roles.CAP_QART]},
{'func': Obligation.create_obligation, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Ob_Cr = TestCase('CREATE-OBLIGATION', createObligationSteps, '')
updateObligationSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.create_obligation, 'args': None},
{'func': Obligation.update_obligation, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Ob_Up = TestCase('UPDATE-OBLIGATION', updateObligationSteps, '')
PayFromOverpaymentSteps = [
#{'func': Utils.login, 'args': [Roles.CAP_QART]},
{'func': Obligation.create_obligation, 'args': None},
{'func': Logout.logout, 'args': None},
{'func': Utils.login, 'args': [Roles.CAP_ALL]},
{'func': Obligation.obligation_allow, 'args': None},
{'func': Logout.logout, 'args': None},
{'func': Utils.login, 'args': [Roles.CAP_QART]},
{'func': Obligation.pay_from_overpayment, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Pa_Ov = TestCase('PAY-FROM-OVERPAYMENT', PayFromOverpaymentSteps, '')
ObligationCancelSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.create_obligation, 'args': None},
{'func': Obligation.obligation_cancel, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Ob_Ca = TestCase('OBLIGATION-CANCEL', ObligationCancelSteps, '')
CreatePayrollForObligationSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.create_obligation, 'args': None},
{'func': Obligation.create_payroll_for_obligation, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Cr_Py_Ob = TestCase('CREATE-PAYROLL-FOR-OBLIGATION', CreatePayrollForObligationSteps, '')
CreatePayrollSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.create_payroll, 'args': None},
#{'func': Utils.close_browser, 'args': None},
]
Cr_Py = TestCase('CREATE-PAYROLL', CreatePayrollSteps, '')
UpdatePayrollSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.create_payroll, 'args': None},
{'func': Payroll.update_payroll, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Up_Py = TestCase('UPDATE_PAYROLL', UpdatePayrollSteps, '')
PayPayrollAndConfirmSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.create_payroll, 'args': None},
{'func': Payroll.pay_payroll, 'args': None},
{'func': Payroll.confirm_pay, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Pa_Py_Co = TestCase('PAY-PAYROLL-AND-CONFIRM', PayPayrollAndConfirmSteps, '')
CancelPayrollSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.create_payroll, 'args': None},
{'func': Payroll.cancel_payroll, 'args': None}
#{'func': Utils.close_browser, 'args': None}
]
Ca_Py = TestCase('CANCEL-PAYROLL', CancelPayrollSteps, '')
IncreaseOverpaymentSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': IncreaseOverpayment.increase_overpayment, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
In_Ov = TestCase('INCREASE-OVERPAYMENT', IncreaseOverpaymentSteps, '')
DecreaseOverpaymentSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': DecreaseOverpayment.decrease_overpayment, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
De_Ov = TestCase('DECREASE-OVERPAYMENT', DecreaseOverpaymentSteps, '')
ViewObligationSteps = [
# {'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.view_obligation, 'args': None},
# {'func': Utils.close_browser, 'args': None}
]
Vw_Ob = TestCase('VIEW-OBLIGATION', ViewObligationSteps, '')
ViewAndPrint99993Steps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.view_and_print_99993, 'args': None}
#{'func': Utils.close_browser, 'args': None}
]
Vw_Pr_93 = TestCase('VIEW-AND-PRINT-99993', ViewAndPrint99993Steps, '')
PrintPayrollSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.print_payroll, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Pr_Py = TestCase('PRINT-PAYROLL', PrintPayrollSteps, '')
SearchFillAllSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': SearchFillAll.search_fill_all, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Se_All = TestCase('SEARCH-FILL-ALL', SearchFillAllSteps, '')
ObligationCreateBarSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.obligation_create_bar, 'args': None},
# {'func': Utils.close_browser, 'args': None}
]
Ob_Cr_Bar = TestCase('OBLIGATION-CREATE-BAR', ObligationCreateBarSteps, '')
PayrollCreateBarSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.payroll_create_bar, 'args': None},
# {'func': Utils.close_browser, 'args': None}
]
Py_Cr_Bar = TestCase('PAYROLL-CREATE-BAR', PayrollCreateBarSteps, '')
SetOffSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': SetOff.set_off, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Set_Off = TestCase('SET-OFF', SetOffSteps, '')
UnFreezeSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': FreezeUnFreeze.un_freeze, 'args': None},
{'func': Utils.close_browser, 'args': None}
]
#TestCase('UN-FREEZE', UnFreezeSteps, '').run()
ViewOverpaymentsDebtsSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': ViewOverpaymentsDebts.view_overpayments_debts, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Vw_Op_Db = TestCase('VIEW-OVERPAYMENTS-DEBTS', ViewOverpaymentsDebtsSteps, '')
HistoryObligationPrint99993Steps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.history_obligation_print_99993, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Hi_Ob_Pr_93 = TestCase('HISTORY-OBLIGATION-PRINT=99993', HistoryObligationPrint99993Steps, '')
HistoryPayrollPrintSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.history_payroll_print, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Hi_Py_Pr = TestCase('HISTORY-PAYROLL-PRINT', HistoryPayrollPrintSteps, '')
View00000PayrollPrintSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.view_00000_payroll_print, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Vw_0_Py_Pr = TestCase('VIEW-ACTUAL-PAYMENT', View00000PayrollPrintSteps, '')
AriaControlsObSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.aria_controls_ob, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Ar_Co_Ob = TestCase('ARIA_CONTROLS_OB', AriaControlsObSteps, '')
AriaControlsPySteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.aria_controls_py, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Ar_Co_Py = TestCase('ARIA_CONTROLS_PY', AriaControlsPySteps, '')
Open_By_NumberSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Obligation.open_by_number, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Op_By_Nm = TestCase('OPEN_BY_NUMBER', Open_By_NumberSteps, '')
PayrollPrintViewActualPaymentViewReallocationsSteps = [
#{'func': Utils.login, 'args': [Roles.ADMIN]},
{'func': Payroll.payroll_print_view_actual_payment_view_reallocations, 'args': None},
#{'func': Utils.close_browser, 'args': None}
]
Py_Pr_Vw_Ac = TestCase('PAYROLL_PRINT_VIEW_ACTUAL_PAYMENTS_VIEW_REALLOCATION', PayrollPrintViewActualPaymentViewReallocationsSteps, '')
TestGroup('TEST_CROUP_QART_ROLL',
[Ob_Cr, Ob_Up, Pa_Ov, Ob_Ca, Cr_Py_Ob, Cr_Py, Up_Py, Pa_Py_Co, Ca_Py, In_Ov, De_Ov, Vw_Ob, Pr_Py, Se_All,
Ob_Cr_Bar, Py_Cr_Bar, Set_Off, Vw_Pr_93, Vw_Op_Db, Hi_Ob_Pr_93, Hi_Py_Pr, Vw_0_Py_Pr, Ar_Co_Py, Ar_Co_Ob,
Py_Pr_Vw_Ac, Op_By_Nm])
| [
"r-akopyannn@mail.ru"
] | r-akopyannn@mail.ru |
4e6731c513f7aa97ea99261cc61ad55cdfee5dcf | d7a03a982239e5bb11b98f05cee0ce79f418cf72 | /schedule_class.py | d07bf08e4355d3e68e013868495a3936dbcabed8 | [] | no_license | furfa/Pulkovo.Hack | 6d6c869ac131faa14a086b4f81f89ecd466dbf86 | 30b088149e182124ae0ab75d665bf7d4fa0f7fb7 | refs/heads/master | 2022-11-30T05:13:47.496860 | 2020-08-16T15:36:52 | 2020-08-16T15:36:52 | 287,518,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py | from entity_classes import *
from entity_getters import *
from datetime import datetime
import pandas as pd
class Schedule_item:
def __init__(self, time:Time, task:Task, teacher:Teacher, audience:Audience):
self.time = time
self.task = task
self.teacher = teacher
self.audience = audience
def __str__(self):
s = "{}({}) проводит занятие {} {}({}) c {} по {} в {} аудитории"
return s.format(
self.teacher.name,
self.teacher.id,
self.task.discipline,
self.task.programm,
self.task.id,
self.time.start,
self.time.end,
self.audience.id
)
class Schedule:
data = list()
def __init__(self, *args, **kwargs):
pass
def append(self, *args ,**kwargs):
self.data.append( Schedule_item(*args, **kwargs) )
def print(self):
self.sort()
print(self.to_dataframe())
def sort(self):
self.data.sort(key=lambda x:x.time.start)
def to_dataframe(self):
to_df = list()
columns = [
"teacher.name",
"teacher.id",
"task.discipline",
"task.programm",
"task.id",
"time.start",
"time.end",
"audience.id"
]
for row in self.data:
to_df.append([
row.teacher.name,
row.teacher.id,
row.task.discipline,
row.task.programm,
row.task.id,
row.time.start,
row.time.end,
row.audience.id
])
return pd.DataFrame(to_df, columns=columns)
def to_excel(self):
self.sort()
df = self.to_dataframe()
path = f"RESULT/{ str(datetime.now()) }.xlsx"
df.to_excel(path)
print("Сохранено в " + path)
if __name__ == "__main__":
s = Schedule()
TIME = get_times()[0]
TASK = get_tasks()[0]
TEACHER = get_teachers()[0]
AUDIENCE = get_audiences()[0]
s.append(TIME, TASK, TEACHER, AUDIENCE)
s.print()
| [
"gleksandr.myashin@gmail.com"
] | gleksandr.myashin@gmail.com |
0b2318f827065c29077d00fa717ad062eba5b34e | 3260490045fd2bd929b3861fe2b4dd35a14cc9fb | /exercicios/Mundo01/ex028.py | 588c922a2dab89987d43e367d451858bb5652dcf | [] | no_license | lucaslamdev/cursoemvideo-exercicios-python3 | a07292bffd8733ce56192f80fb80b20fd6b5bfb9 | 545ca6dc2952019f5d29da71d6293f80d5f99a73 | refs/heads/main | 2023-08-03T14:46:36.619723 | 2021-09-17T00:44:27 | 2021-09-17T00:44:27 | 353,704,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from random import randint
numeroPensando = randint(0,5)
print("O computador pensou um número entre 0 e 5 tente adivinhar!")
numeroDigitado = int(input('Que número você acha q eu pensei? '))
if numeroDigitado == numeroPensando:
print('Parabéns você acertou!')
else:
print(f'Que triste você perdeu, o número era {numeroPensando}.') | [
"50436012+lucaslamdev@users.noreply.github.com"
] | 50436012+lucaslamdev@users.noreply.github.com |
5577c038a68b1019ac4522c163caa04ecaebe6be | c39c0449f8efbc3bf39cebe9a33724dfa237f670 | /ss_dnn/merlin_scripts/src/layers/mdn_layers.py | f8318e1a77b6a548a8a67efd3d43a3778da2f298 | [] | no_license | pbaljeka/world_merlin | 81561f8817e516e90adf4dd60ce2847ffd17127f | 5cf1cdfd75b570dda5e35c74bc2b5e500437786e | refs/heads/master | 2021-01-11T17:45:46.706562 | 2017-01-25T21:28:51 | 2017-01-25T21:28:51 | 79,839,178 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,415 | py | ################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import numpy, time, cPickle, gzip, sys, os, copy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import logging
class MixtureDensityOutputLayer(object):
def __init__(self, rng, input, n_in, n_out, n_component):
self.input = input
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out*n_component))
self.W_mu = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W_mu', borrow=True)
self.W_sigma = theano.shared(value=numpy.asarray(W_value.copy(), dtype=theano.config.floatX), name='W_sigma', borrow=True)
W_mix_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_component))
self.W_mix = theano.shared(value=numpy.asarray(W_mix_value, dtype=theano.config.floatX), name='W_mix', borrow=True)
self.mu = T.dot(self.input, self.W_mu) #assume linear output for mean vectors
self.sigma = T.nnet.softplus(T.dot(self.input, self.W_sigma)) # + 0.0001
#self.sigma = T.exp(T.dot(self.input, self.W_sigma)) # + 0.0001
self.mix = T.nnet.softmax(T.dot(self.input, self.W_mix))
self.delta_W_mu = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_mu')
self.delta_W_sigma = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
dtype=theano.config.floatX), name='delta_W_sigma')
self.delta_W_mix = theano.shared(value = numpy.zeros((n_in, n_component),
dtype=theano.config.floatX), name='delta_W_mix')
self.params = [self.W_mu, self.W_sigma, self.W_mix]
self.delta_params = [self.delta_W_mu, self.delta_W_sigma, self.delta_W_mix]
class LinearLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None):
self.input = input
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out))
W = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class SigmoidLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = T.tanh):
self.input = input
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_value,
name='W', borrow=True)
if b is None:
b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
self.output = T.dot(self.input, self.W) + self.b
self.output = activation(self.output)
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
L = T.sum( (self.output-y)*(self.output-y), axis=1 )
errors = T.mean(L)
return (errors)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class GeneralLayer(object):
def __init__(self, rng, input, n_in, n_out, W = None, b = None, activation = 'linear'):
self.input = input
self.n_in = n_in
self.n_out = n_out
self.logger = logging.getLogger('general_layer')
# randomly initialise the activation weights based on the input size, as advised by the 'tricks of neural network book'
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if activation == 'sigmoid':
self.output = T.nnet.sigmoid(lin_output)
elif activation == 'tanh':
self.output = T.tanh(lin_output)
elif activation == 'linear':
self.output = lin_output
elif activation == 'ReLU': ## rectifier linear unit
self.output = T.maximum(0.0, lin_output)
elif activation == 'ReSU': ## rectifier smooth unit
self.output = numpy.log(1.0 + numpy.exp(lin_output))
else:
self.logger.critical('the input activation function: %s is not supported right now. Please modify layers.py to support' % (activation))
raise
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def errors(self, y):
errors = T.mean(T.sum((self.output-y)**2, axis=1))
return errors
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh, do_maxout = False, pool_size = 1,
do_pnorm = False, pnorm_order = 1):
""" Class for hidden layer """
self.input = input
self.n_in = n_in
self.n_out = n_out
if W is None:
W_values = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
dtype=theano.config.floatX), name='delta_W')
self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
dtype=theano.config.floatX), name='delta_b')
lin_output = T.dot(input, self.W) + self.b
if do_maxout == True:
self.last_start = n_out - pool_size
self.tmp_output = lin_output[:,0:self.last_start+1:pool_size]
for i in range(1, pool_size):
cur = lin_output[:,i:self.last_start+i+1:pool_size]
self.tmp_output = T.maximum(cur, self.tmp_output)
self.output = activation(self.tmp_output)
elif do_pnorm == True:
self.last_start = n_out - pool_size
self.tmp_output = abs(lin_output[:,0:self.last_start+1:pool_size]) ** pnorm_order
for i in range(1, pool_size):
cur = abs(lin_output[:,i:self.last_start+i+1:pool_size]) ** pnorm_order
self.tmp_output = self.tmp_output + cur
self.tmp_output = self.tmp_output ** (1.0 / pnorm_order)
self.output = activation(self.tmp_output)
else:
self.output = (lin_output if activation is None
else activation(lin_output))
# self.output = self.rectifier_linear(lin_output)
# parameters of the model
self.params = [self.W, self.b]
self.delta_params = [self.delta_W, self.delta_b]
def rectifier_linear(self, x):
x = T.maximum(0.0, x)
return x
def rectifier_smooth(self, x):
x = numpy.log(1.0 + numpy.exp(x))
return x
class dA(object):
def __init__(self, numpy_rng, theano_rng = None, input = None,
n_visible= None, n_hidden= None, W = None, bhid = None,
bvis = None, firstlayer = 0, variance = None ):
self.n_visible = n_visible
self.n_hidden = n_hidden
# create a Theano random generator that gives symbolic random values
if not theano_rng :
theano_rng = RandomStreams(numpy_rng.randint(2**30))
if not W:
initial_W = numpy.asarray( numpy_rng.uniform(
low = -4*numpy.sqrt(6./(n_hidden+n_visible)),
high = 4*numpy.sqrt(6./(n_hidden+n_visible)),
size = (n_visible, n_hidden)),
dtype = theano.config.floatX)
W = theano.shared(value = initial_W, name ='W')
if not bvis:
bvis = theano.shared(value = numpy.zeros(n_visible,
dtype = theano.config.floatX))
if not bhid:
bhid = theano.shared(value = numpy.zeros(n_hidden,
dtype = theano.config.floatX), name ='b')
self.W = W
self.b = bhid
self.b_prime = bvis
self.W_prime = self.W.T
self.theano_rng = theano_rng
if input == None :
self.x = T.dmatrix(name = 'input')
else:
self.x = input
self.params = [self.W, self.b, self.b_prime]
# first layer, use Gaussian noise
self.firstlayer = firstlayer
if self.firstlayer == 1 :
if variance == None :
self.var = T.vector(name = 'input')
else :
self.var = variance
else :
self.var = None
def get_corrupted_input(self, input, corruption_level):
if self.firstlayer == 0 :
return self.theano_rng.binomial(
size = input.shape,
n = 1,
p = 1 - corruption_level,
dtype=theano.config.floatX) * input
else :
noise = self.theano_rng.normal( size = input.shape,
dtype = theano.config.floatX)
denoises = noise * self.var * corruption_level
return input+denoises
def get_hidden_values(self, input):
return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
def get_reconstructed_input(self, hidden ):
if self.firstlayer == 1 :
return T.dot(hidden, self.W_prime) + self.b_prime
else :
return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
def get_cost_updates(self, corruption_level, learning_rate):
tilde_x = self.get_corrupted_input(self.x, corruption_level)
y = self.get_hidden_values( tilde_x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1 )
cost = T.mean(L) / 2
gparams = T.grad(cost, self.params)
updates = {}
for param, gparam in zip(self.params, gparams):
updates[param] = param - learning_rate*gparam
return (cost, updates)
def init_params(self, iparams):
updates = {}
for param, iparam in zip(self.params, iparams):
updates[param] = iparam
return updates
def get_test_cost(self, corruption_level):
""" This function computes the cost and the updates for one trainng
step of the dA """
# tilde_x = self.get_corrupted_input(self.x, corruption_level, 0.5)
y = self.get_hidden_values( self.x )
z = self.get_reconstructed_input(y)
L = T.sum ( (self.x-z) * (self.x-z), axis=1)
cost = T.mean(L)
return cost
| [
"pallavibaljekar@gmail.com"
] | pallavibaljekar@gmail.com |
4feb8e94c1009ed1a7bd4e668bd531bd760e00c5 | 24e7e0dfaaeaca8f911b40fcc2937342a0f278fd | /venv/Lib/site-packages/plotly/graph_objs/parcoords/line/_colorbar.py | 257f038abfbb04d2dbc94a73796c799953fc8c52 | [
"MIT"
] | permissive | BimiLevi/Covid19 | 90e234c639192d62bb87364ef96d6a46d8268fa0 | 5f07a9a4609383c02597373d76d6b6485d47936e | refs/heads/master | 2023-08-04T13:13:44.480700 | 2023-08-01T08:36:36 | 2023-08-01T08:36:36 | 288,455,446 | 1 | 0 | MIT | 2021-01-22T19:36:26 | 2020-08-18T12:53:43 | HTML | UTF-8 | Python | false | false | 70,590 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "parcoords.line"
_path_str = "parcoords.line.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# --- showtickprefix ---
@property
def showtickprefix(self):
    """Controls which tick labels carry the prefix.

    If "all", every tick label is displayed with a prefix. If
    "first", only the first tick gets the prefix; if "last", only
    the last tick; if "none", tick prefixes are hidden.

    The 'showtickprefix' property is an enumeration that may be
    specified as one of: ['all', 'first', 'last', 'none'].

    Returns
    -------
    Any
    """
    return self["showtickprefix"]

@showtickprefix.setter
def showtickprefix(self, value):
    self["showtickprefix"] = value
# --- showticksuffix ---
@property
def showticksuffix(self):
    """Same as `showtickprefix` but for tick suffixes.

    The 'showticksuffix' property is an enumeration that may be
    specified as one of: ['all', 'first', 'last', 'none'].

    Returns
    -------
    Any
    """
    return self["showticksuffix"]

@showticksuffix.setter
def showticksuffix(self, value):
    self["showticksuffix"] = value
# --- thickness ---
@property
def thickness(self):
    """Thickness of the color bar, excluding padding, ticks and labels.

    The 'thickness' property is a number and may be specified as
    an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["thickness"]

@thickness.setter
def thickness(self, value):
    self["thickness"] = value
# --- thicknessmode ---
@property
def thicknessmode(self):
    """Units for the color bar's thickness.

    Determines whether this color bar's thickness (the measure in
    the constant color direction) is set in units of plot
    "fraction" or in "pixels". Use `thickness` to set the value.

    The 'thicknessmode' property is an enumeration that may be
    specified as one of: ['fraction', 'pixels'].

    Returns
    -------
    Any
    """
    return self["thicknessmode"]

@thicknessmode.setter
def thicknessmode(self, value):
    self["thicknessmode"] = value
# --- tick0 ---
@property
def tick0(self):
    """Placement of the first tick on this axis; use with `dtick`.

    If the axis `type` is "log", supply the log of the starting
    tick (e.g. for a starting tick of 100, set `tick0` to 2),
    except when `dtick`=*L<f>* (see `dtick`). If the axis `type`
    is "date", it should be a date string, like date data. If the
    axis `type` is "category", it should be a number on the scale
    where each category is assigned a serial number from zero in
    the order it appears.

    The 'tick0' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["tick0"]

@tick0.setter
def tick0(self, value):
    self["tick0"] = value
# --- tickangle ---
@property
def tickangle(self):
    """Angle of the tick labels with respect to the horizontal.

    For example, a `tickangle` of -90 draws the tick labels
    vertically.

    The 'tickangle' property is an angle (in degrees) that may be
    specified as a number between -180 and 180; numeric values
    outside this range are converted to the equivalent value
    (e.g. 270 is converted to -90).

    Returns
    -------
    int|float
    """
    return self["tickangle"]

@tickangle.setter
def tickangle(self, value):
    self["tickangle"] = value
# --- tickcolor ---
@property
def tickcolor(self):
    """Tick color.

    The 'tickcolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'aliceblue', 'tomato', ...)

    Returns
    -------
    str
    """
    return self["tickcolor"]

@tickcolor.setter
def tickcolor(self, value):
    self["tickcolor"] = value
# --- tickfont ---
@property
def tickfont(self):
    """The color bar's tick label font.

    The 'tickfont' property is an instance of Tickfont and may be
    specified as:
      - An instance of
        :class:`plotly.graph_objs.parcoords.line.colorbar.Tickfont`
      - A dict of string/value properties that will be passed to
        the Tickfont constructor (supported keys: ``color``,
        ``family`` — the HTML font family applied by the web
        browser — and ``size``)

    Returns
    -------
    plotly.graph_objs.parcoords.line.colorbar.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, value):
    self["tickfont"] = value
# --- tickformat ---
@property
def tickformat(self):
    """Tick label formatting rule, using d3 formatting mini-languages.

    These are very similar to those in Python. For numbers, see:
    https://github.com/d3/d3-3.x-api-reference/blob/master/Formatting.md#d3_format
    And for dates see:
    https://github.com/d3/d3-time-format#locale_format
    One item is added to d3's date formatter: "%{n}f" for
    fractional seconds with n digits. For example,
    *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f"
    would display "09~15~23.46".

    The 'tickformat' property is a string and must be specified as
    a string or a number that will be converted to a string.

    Returns
    -------
    str
    """
    return self["tickformat"]

@tickformat.setter
def tickformat(self, value):
    self["tickformat"] = value
# --- tickformatstops ---
@property
def tickformatstops(self):
    """Per-zoom-level tick format stops.

    The 'tickformatstops' property is a tuple of instances of
    Tickformatstop that may be specified as:
      - A list or tuple of instances of
        plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
      - A list or tuple of dicts of string/value properties that
        will be passed to the Tickformatstop constructor

    Supported dict keys: ``dtickrange`` (range [*min*, *max*] of
    dtick values describing a zoom level; "min"/"max" may be
    omitted by passing "null"), ``enabled`` (if `false`, the stop
    is ignored even within its `dtickrange`), ``name`` /
    ``templateitemname`` (template item identification), and
    ``value`` (dtickformat for the described zoom level, same
    syntax as "tickformat").

    Returns
    -------
    tuple[plotly.graph_objs.parcoords.line.colorbar.Tickformatstop]
    """
    return self["tickformatstops"]

@tickformatstops.setter
def tickformatstops(self, value):
    self["tickformatstops"] = value
# --- tickformatstopdefaults ---
@property
def tickformatstopdefaults(self):
    """Template defaults for tickformatstops elements.

    When used in a template (as layout.template.data.parcoords.line
    .colorbar.tickformatstopdefaults), sets the default property
    values to use for elements of
    parcoords.line.colorbar.tickformatstops.

    The 'tickformatstopdefaults' property is an instance of
    Tickformatstop that may be specified as:
      - An instance of
        :class:`plotly.graph_objs.parcoords.line.colorbar.Tickformatstop`
      - A dict of string/value properties passed to the
        Tickformatstop constructor

    Returns
    -------
    plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
    """
    return self["tickformatstopdefaults"]

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, value):
    self["tickformatstopdefaults"] = value
# --- ticklen ---
@property
def ticklen(self):
    """Tick length (in px).

    The 'ticklen' property is a number and may be specified as
    an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["ticklen"]

@ticklen.setter
def ticklen(self, value):
    self["ticklen"] = value
# --- tickmode ---
@property
def tickmode(self):
    """Tick mode for this axis.

    If "auto", the number of ticks is set via `nticks`. If
    "linear", tick placement is determined by a starting position
    `tick0` and a tick step `dtick` ("linear" is the default value
    if `tick0` and `dtick` are provided). If "array", tick
    placement is set via `tickvals` and the tick text is
    `ticktext` ("array" is the default value if `tickvals` is
    provided).

    The 'tickmode' property is an enumeration that may be
    specified as one of: ['auto', 'linear', 'array'].

    Returns
    -------
    Any
    """
    return self["tickmode"]

@tickmode.setter
def tickmode(self, value):
    self["tickmode"] = value
# --- tickprefix ---
@property
def tickprefix(self):
    """Tick label prefix.

    The 'tickprefix' property is a string and must be specified as
    a string or a number that will be converted to a string.

    Returns
    -------
    str
    """
    return self["tickprefix"]

@tickprefix.setter
def tickprefix(self, value):
    self["tickprefix"] = value
# --- ticks ---
@property
def ticks(self):
    """Determines whether and where ticks are drawn.

    If "", this axis' ticks are not drawn. If "outside"
    ("inside"), this axis' ticks are drawn outside (inside) the
    axis lines.

    The 'ticks' property is an enumeration that may be specified
    as one of: ['outside', 'inside', ''].

    Returns
    -------
    Any
    """
    return self["ticks"]

@ticks.setter
def ticks(self, value):
    self["ticks"] = value
# --- ticksuffix ---
@property
def ticksuffix(self):
    """Tick label suffix.

    The 'ticksuffix' property is a string and must be specified as
    a string or a number that will be converted to a string.

    Returns
    -------
    str
    """
    return self["ticksuffix"]

@ticksuffix.setter
def ticksuffix(self, value):
    self["ticksuffix"] = value
# --- ticktext ---
@property
def ticktext(self):
    """Text displayed at the ticks positioned via `tickvals`.

    Only has an effect if `tickmode` is set to "array". Used with
    `tickvals`.

    The 'ticktext' property is an array that may be specified as a
    tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["ticktext"]

@ticktext.setter
def ticktext(self, value):
    self["ticktext"] = value
# --- ticktextsrc ---
@property
def ticktextsrc(self):
    """Source reference on Chart Studio Cloud for ticktext.

    The 'ticktextsrc' property must be specified as a string or as
    a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["ticktextsrc"]

@ticktextsrc.setter
def ticktextsrc(self, value):
    self["ticktextsrc"] = value
# --- tickvals ---
@property
def tickvals(self):
    """Values at which ticks on this axis appear.

    Only has an effect if `tickmode` is set to "array". Used with
    `ticktext`.

    The 'tickvals' property is an array that may be specified as a
    tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["tickvals"]

@tickvals.setter
def tickvals(self, value):
    self["tickvals"] = value
# --- tickvalssrc ---
@property
def tickvalssrc(self):
    """Source reference on Chart Studio Cloud for tickvals.

    The 'tickvalssrc' property must be specified as a string or as
    a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["tickvalssrc"]

@tickvalssrc.setter
def tickvalssrc(self, value):
    self["tickvalssrc"] = value
# --- tickwidth ---
@property
def tickwidth(self):
    """Tick width (in px).

    The 'tickwidth' property is a number and may be specified as
    an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["tickwidth"]

@tickwidth.setter
def tickwidth(self, value):
    self["tickwidth"] = value
# --- title ---
@property
def title(self):
    """The color bar's title.

    The 'title' property is an instance of Title that may be
    specified as:
      - An instance of
        :class:`plotly.graph_objs.parcoords.line.colorbar.Title`
      - A dict of string/value properties that will be passed to
        the Title constructor

    Supported dict keys: ``font`` (this color bar's title font;
    formerly set via the now deprecated `titlefont` attribute),
    ``side`` (location of the title with respect to the color bar;
    formerly set via the now deprecated `titleside` attribute) and
    ``text`` (the title text; before `title.text` existed the
    title's contents used to be the `title` attribute itself —
    that behavior is deprecated).

    Returns
    -------
    plotly.graph_objs.parcoords.line.colorbar.Title
    """
    return self["title"]

@title.setter
def title(self, value):
    self["title"] = value
# --- titlefont (deprecated) ---
@property
def titlefont(self):
    """Deprecated: please use parcoords.line.colorbar.title.font instead.

    Sets this color bar's title font. Note that the title's font
    used to be set by the now deprecated `titlefont` attribute.

    The 'font' property is an instance of Font that may be
    specified as:
      - An instance of
        :class:`plotly.graph_objs.parcoords.line.colorbar.title.Font`
      - A dict of string/value properties that will be passed to
        the Font constructor (supported keys: ``color``,
        ``family`` — the HTML font family applied by the web
        browser — and ``size``)

    Returns
    -------
    """
    return self["titlefont"]

@titlefont.setter
def titlefont(self, value):
    self["titlefont"] = value
# --- titleside (deprecated) ---
@property
def titleside(self):
    """Deprecated: please use parcoords.line.colorbar.title.side instead.

    Determines the location of the color bar's title with respect
    to the color bar. Note that the title's location used to be
    set by the now deprecated `titleside` attribute.

    The 'side' property is an enumeration that may be specified as
    one of: ['right', 'top', 'bottom'].

    Returns
    -------
    """
    return self["titleside"]

@titleside.setter
def titleside(self, value):
    self["titleside"] = value
# --- x ---
@property
def x(self):
    """X position of the color bar (in plot fraction).

    The 'x' property is a number and may be specified as an int or
    float in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, value):
    self["x"] = value
# --- xanchor ---
@property
def xanchor(self):
    """Horizontal position anchor of the color bar.

    This anchor binds the `x` position to the "left", "center" or
    "right" of the color bar.

    The 'xanchor' property is an enumeration that may be specified
    as one of: ['left', 'center', 'right'].

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, value):
    self["xanchor"] = value
# --- xpad ---
@property
def xpad(self):
    """Amount of padding (in px) along the x direction.

    The 'xpad' property is a number and may be specified as an int
    or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["xpad"]

@xpad.setter
def xpad(self, value):
    self["xpad"] = value
# --- y ---
@property
def y(self):
    """Y position of the color bar (in plot fraction).

    The 'y' property is a number and may be specified as an int or
    float in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, value):
    self["y"] = value
# --- yanchor ---
@property
def yanchor(self):
    """Vertical position anchor of the color bar.

    This anchor binds the `y` position to the "top", "middle" or
    "bottom" of the color bar.

    The 'yanchor' property is an enumeration that may be specified
    as one of: ['top', 'middle', 'bottom'].

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, value):
    self["yanchor"] = value
# --- ypad ---
@property
def ypad(self):
    """Amount of padding (in px) along the y direction.

    The 'ypad' property is a number and may be specified as an int
    or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["ypad"]

@ypad.setter
def ypad(self, value):
    self["ypad"] = value
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Human-readable description of every property on this object,
    # as one pre-formatted string; it is interpolated into the
    # class docstring / __init__ help text by the plotly codegen
    # machinery. The string literal below must not be edited by
    # hand — it is generated.
    return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
# Maps each deprecated flat property name to the (container, key)
# pair it is forwarded to, e.g. `colorbar.titlefont` is stored as
# `colorbar.title.font`.
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.line.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.line.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"50989568+BimiLevi@users.noreply.github.com"
] | 50989568+BimiLevi@users.noreply.github.com |
6d150af553878700d5df20c1eccef683e5acb322 | c3ffb020314af5894242073c23c7138a9aa6ea6e | /Past/Rest/script.py | 66cc3e50b6f1c8087cc3a27b882438816d74bbb2 | [] | no_license | mohammedjasam/CleverNator | 1fa8a54c8dca281696de1f33c4c62d7ab78725a1 | dd04b975c4caaa201ccdf92df51635213156c920 | refs/heads/master | 2020-05-18T15:13:40.667968 | 2017-11-25T17:44:35 | 2017-11-25T17:44:35 | 84,256,689 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,907 | py | """# Pull the existing Tensorflow Environment
docker run -it gcr.io/tensorflow/tensorflow:latest-devel
# Download the multilabel data from internet to a single folder
# Ex: Place Darth_vader pics folder + Darth_Maul Pics Folder in Star_Wars folder
# Move the multi-label image folder(star_wars) to docker
mv "c:../.../star_wars/" .
# link that folder in the container
docker run -it -v $HOME/data:/data/ gcr.io/tensorflow/tensorflow:latest-devel
docker run -it -v $HOME/dataa:/data/ ci:new
# Go to root
cd ..
# Pull latest tf image
cd tensorflow
git pull
# Train the model using the images
python35 tensorflow/examples/image_retraining/retrain.py \
--bottleneck_dir=/tf_files/bottlenecks \
--how_many_training_steps 500 \
--model_dir=/tf_files/inception \
--output_graph=/tf_files/retrained_graph.pb \
--output_labels=/tf_files/retrained_labels.txt \
--image_dir /tf_files/trainData"""
C:\Users\Stark\Desktop\CleverNator\KerasBuild\
python35 retrain.py --bottleneck_dir=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\bottlenecks --how_many_training_steps 500 --model_dir=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\inception --output_graph=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\retrained_graph.pb --output_labels=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\retrained_labels.txt --image_dir C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\trainData
# go into tf_files and write python file
cat > classifier.py
write code then ctrl + c
$ docker commit f6434fa9498e star_wars_classifier:initial
docsha256:d0484f84fbf56d0271c0e35730c2d6ae1f13fb9a06910966380336864b5f2d30
Stark@LAPTOP-M7QFG7RS MINGW64 ~
$ docker run -it -v $HOME/star_wars:/star_wars/ star_wars_classifier:initial
$ docker commit 4f27d772af7b violent:initial
import tensorflow as tf
import sys

# Classify a single image with a retrained Inception graph.
# Usage: python classifier.py <image_path>
# NOTE(review): uses TF 1.x-style APIs (tf.gfile, tf.Session, tf.GraphDef);
# will not run unmodified under TF 2.x.

# Path of the image to classify, taken from the command line.
# change this as you see fit
image_path = sys.argv[1]

# Read in the image_data (raw JPEG bytes; decoding happens inside the graph
# at the 'DecodeJpeg/contents' node).
image_data = tf.gfile.FastGFile(image_path, 'rb').read()

# Loads label file, strips off carriage return; line order must match the
# class indices produced by the retrained graph.
label_lines = [line.rstrip() for line
               in tf.gfile.GFile("/tf_files/retrained_labels.txt")]

# Unpersists graph from file (deserializes the frozen retrained graph and
# imports it into the default graph with no name prefix).
with tf.gfile.FastGFile("/tf_files/retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    _ = tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    # Feed the image_data as input to the graph and get first prediction
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})
    # Sort to show labels of first prediction in order of confidence
    # (argsort ascending, then reversed -> highest score first).
    top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
    for node_id in top_k:
        human_string = label_lines[node_id]
        score = predictions[0][node_id]
        print('%s (score = %.5f)' % (human_string, score))
| [
"mnqnd@mst.edu"
] | mnqnd@mst.edu |
88e8f0be914f7822411fdb479f9868d5490751f1 | b3ab02d32b7ed543d95f5a4a38d6cc3cbda138d9 | /Python-数据结构-青岛大学/线性表实现/数组(序列类型)/数组.py | ac9ab4ea381d47a5d68dcde000ab8840076aa8ae | [] | no_license | reallyz/Beta | b09d7a53d4cd4a9cfdea984be8c61eade28d5a15 | 0c91b162d0f7367df13930390dd306aca5d20b3d | refs/heads/master | 2021-01-02T17:12:27.478572 | 2020-05-08T04:08:15 | 2020-05-08T04:08:15 | 239,717,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | # 一维
array=[]
# 二维
array=[[],]
import numpy as np
array=np.array() | [
"zhuruilicust@163.com"
] | zhuruilicust@163.com |
f6a28321d328796b38fa66f112d42b2f54807025 | d37a6c5982a4dd768898dfad13b93f3e30ff8497 | /Code/model_compactor.py | 5595e2272d6ca1e541666aa8520cd3841810776f | [
"MIT"
] | permissive | Jinwon-DK/GaitAnalysis | 691a4fc815dbaf2f59b6d0a45834627a40ae3a32 | 6b7be4aae9963b8986519af5bcbff39f32ebf2cd | refs/heads/master | 2022-11-22T13:02:52.675870 | 2020-07-21T13:26:14 | 2020-07-21T13:26:14 | 281,865,622 | 0 | 0 | MIT | 2020-07-23T06:05:54 | 2020-07-23T06:05:53 | null | UTF-8 | Python | false | false | 3,435 | py | import tensorflow as tf
from keras.backend import set_session
from keras import backend as K
import keras
from Code.Model import basic_10_network as BasicNet
from Code.Model import resnet as ResNet
from Code.Model import vgg_network as VGGNet
from Code.Model import base_model as dcnn
from Code.Model import lstm_model as lstm
from Code.Model import cnn_lstm_model as clstm
from Code.Model import bidirectinal_lstm_model as bilstm
from Code.Model import lightGBM_model as lgbm
from Code.Model import cropNet_model as crop
# Catalogue of selectable model names, grouped by family:
#   'dl' -> deep-learning architectures built by the Keras-based modules above
#   'ml' -> classical machine-learning models (LightGBM)
model_info = {
    'dl': ['BasicNet', 'ResNet', 'VGG', 'pVGG', 'base', 'lstm', 'bi-lstm', 'lstm_attention', 'cnn_lstm'],
    'ml': ['lgbm']
}
def model_setting(param, train, test, label_info):
    """Build and return the model selected by ``param.model_name``.

    Parameters
    ----------
    param : experiment parameter object; must expose ``model_name``,
        ``nb_modal`` and (for the sequence models) ``nb_combine``.
    train, test : dict-like datasets whose ``data_<i>`` entries are arrays,
        one per modality.  Only the shapes of ``train`` are read here;
        ``test`` is forwarded untouched to the 'lgbm'/'cropping' builders.
    label_info : sequence where ``label_info[0]`` is the number of classes
        and ``label_info[1]`` the number of people.

    Returns
    -------
    The constructed model object (or, unchanged legacy behaviour, the raw
    model-name string when the name is unknown).

    Raises
    ------
    NotImplementedError
        For 'lstm_attention', which has no builder yet.
    """
    name = param.model_name
    nb_class = label_info[0]
    nb_people = label_info[1]
    nb_modal = param.nb_modal

    def _input_shapes(with_channel):
        # One shape tuple per modality, read from the training arrays.
        # The leading batch dimension is always dropped; the trailing channel
        # dimension is kept only when the builder expects it.
        shapes = []
        for i in range(nb_modal):
            dims = train[f"data_{i}"].shape
            if with_channel:
                _, row, col, ch = dims
                shapes.append((row, col, ch))
            else:
                # Works for both (batch, row, col) and (batch, row, col, ch):
                # only row/col are kept.
                shapes.append((dims[1], dims[2]))
        return shapes

    if name == 'BasicNet':
        model = BasicNet.basic_10_network(shape_list=_input_shapes(False), nb_class=nb_class)
    elif name == 'ResNet':
        model = ResNet.resnet_builder(shape_list=_input_shapes(True), nb_class=nb_class)
    elif name in ('VGG', 'pVGG'):
        # NOTE(review): 'VGG' and 'pVGG' were two byte-identical branches
        # (both trainable=False); merged here.  If 'pVGG' was meant to
        # fine-tune (trainable=True), split the branch again -- TODO confirm.
        model = VGGNet.trained_vgg_builder(shape_list=_input_shapes(True), nb_class=nb_class, trainable=False)
    elif name == 'base':
        model = dcnn.dcnn_network(shape_list=_input_shapes(False), nb_class=nb_class, comb_degree=param.nb_combine)
    elif name == 'lstm':
        model = lstm.lstm_network(shape_list=_input_shapes(False), nb_class=nb_class, comb_degree=param.nb_combine)
    elif name == 'cnn_lstm':
        model = clstm.cnn_lstm_network(shape_list=_input_shapes(False), nb_class=nb_class, comb_degree=param.nb_combine)
    elif name == 'bi-lstm':
        model = bilstm.bilstm_network(shape_list=_input_shapes(False), nb_class=nb_class, comb_degree=param.nb_combine)
    elif name == 'lstm_attention':
        # BUGFIX: this branch was a bare `NotImplemented` expression -- a
        # no-op that fell through and silently returned the model-name
        # string.  Fail loudly instead.
        raise NotImplementedError("model 'lstm_attention' is not implemented")
    elif name == 'lgbm':
        model = lgbm.lgbm_construct(param, dataset=[train, test], label=[nb_class, nb_people])
    elif name == 'cropping':
        model = crop.cropping_network(dataset=[train, test], nb_class=nb_class)
    else:
        # Unchanged legacy behaviour: an unknown name is returned as-is.
        model = name
    return model
| [
"hchan11@naver.com"
] | hchan11@naver.com |
6df3222d955efd0abe5781c7c48aced830dbed13 | 5dcaf0c31a8362d64134d0dcd9131fb8e827307a | /footmark/vpc/router.py | 9ca6687ef880578501b031e2b61357f50519bf50 | [
"Apache-2.0"
] | permissive | lixue323/footmark | 10a94ef97cefdab2264088cda70c937c63b819ec | 30cbb2f4b61546d530d955079ccbb38f22fa3edb | refs/heads/master | 2020-08-11T21:57:07.782124 | 2019-10-15T16:15:17 | 2019-10-15T16:16:08 | 211,007,645 | 0 | 0 | Apache-2.0 | 2019-09-26T05:34:05 | 2019-09-26T05:34:05 | null | UTF-8 | Python | false | false | 1,532 | py | """
Represents an VPC Security Group
"""
from footmark.vpc.vpcobject import TaggedVPCObject
class RouteTable(TaggedVPCObject):
    """Represents a VPC route table.

    Exposes ``id`` as an alias of ``route_table_id`` through the
    ``__getattr__``/``__setattr__`` pair below.
    """

    def __init__(self, connection=None, ):
        super(RouteTable, self).__init__(connection)
        # User tags attached to the route table; populated by callers.
        self.tags = {}

    def __repr__(self):
        return 'RouteTable:%s' % self.id

    def __getattr__(self, name):
        # Only consulted when normal attribute lookup fails: resolve the
        # short alias 'id' to the real 'route_table_id' attribute.
        if name == 'id':
            return self.route_table_id
        raise AttributeError

    def __setattr__(self, name, value):
        # Writing 'id' also writes 'route_table_id' (the assignment below
        # re-enters this method with the real attribute name).  The super()
        # call afterwards still stores the value under 'id' as well.
        if name == 'id':
            self.route_table_id = value
        # super(TaggedVPCObject, self) deliberately skips TaggedVPCObject in
        # the MRO and delegates to its parent's __setattr__ -- presumably to
        # bypass TaggedVPCObject's own attribute handling; confirm against
        # that class before changing.
        super(TaggedVPCObject, self).__setattr__(name, value)
class RouteEntry(TaggedVPCObject):
    """Represents a single route entry of a VPC route table.

    Attribute aliases handled below:
      * ``destination_cidrblock``  <-> ``destination_cidr_block``
      * ``next_hop_id``            <-> ``instance_id``
      * any ``nexthop_*`` name     <-> the matching ``next_hop_*`` attribute
    """

    def __init__(self, connection=None, ):
        super(RouteEntry, self).__init__(connection)
        # User tags attached to the route entry; populated by callers.
        self.tags = {}

    def __repr__(self):
        return 'RouteEntry:%s' % self.destination_cidrblock

    def __getattr__(self, name):
        # Only consulted when normal attribute lookup fails.
        if name == 'destination_cidrblock':
            return self.destination_cidr_block
        if name == 'next_hop_id':
            return self.instance_id
        if name.startswith('nexthop_'):
            # name[7:] keeps the underscore ('nexthop_type' -> '_type'),
            # so this forwards to 'next_hop_type'.
            return getattr(self, 'next_hop' + name[7:])
        raise AttributeError

    def __setattr__(self, name, value):
        # Aliased names are also written under their canonical spelling;
        # the super() call afterwards stores the original name too.
        if name == 'destination_cidrblock':
            self.destination_cidr_block = value
        if name == 'next_hop_id':
            self.instance_id = value
        if name.startswith('nexthop_'):
            setattr(self, 'next_hop' + name[7:], value)
        # super(TaggedVPCObject, self) deliberately skips TaggedVPCObject in
        # the MRO and delegates to its parent's __setattr__ -- presumably to
        # bypass TaggedVPCObject's own attribute handling; confirm against
        # that class before changing.
        super(TaggedVPCObject, self).__setattr__(name, value)
| [
"guimin.hgm@alibaba-inc.com"
] | guimin.hgm@alibaba-inc.com |
0fad5455bca400dc1c8cd50c5e7720d7187ce031 | f5a5599df7be4f05365f3c10138a32064bdf19d9 | /WorldHappinessReport-InsetPlot.py | ed317eb07efac99a4766f64f7fe019a373b11819 | [] | no_license | ErkanCetinyamac/Data-Visualization-with-Plotly | c09bdf90989794dca4319760f9574928e5c829c3 | 445d5cc54f7402f3b98760a90df690d6b918a2ab | refs/heads/master | 2020-05-07T06:04:40.086268 | 2019-04-09T07:41:59 | 2019-04-09T07:41:59 | 180,300,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 18:09:22 2019
@author: Erkan Çetinyamaç
"""
import pandas as pd
# Plotly library usage in Spyder
from plotly.offline import plot
import plotly.graph_objs as go # import graph objects as "go"
data=pd.read_csv(r"C:\Users\eceti\Desktop\phyton\DataAi\Data Visualization\WorldHappinessReport.csv", encoding="cp1252")
data.info()
data.head()
# data preparation
HappinessRank=data.iloc[:,2]
Countries=data.iloc[:,0]
Family=data.iloc[:,6]
WhiskerHigh=data.iloc[:,3]
Freedom=data.iloc[:,8]
# import figure factory
import plotly.figure_factory as ff
# first line plot
trace1 = go.Scatter(
x=HappinessRank,
y=WhiskerHigh,
name = "WhiskerHigh",
marker = dict(color = 'rgba(16, 112, 2, 0.8)'),
)
# second line plot
trace2 = go.Scatter(
x=HappinessRank,
y=Freedom,
xaxis='x2',
yaxis='y2',
name = "Freedom",
marker = dict(color = 'rgba(160, 112, 20, 0.8)'),
)
data = [trace1, trace2]
layout = go.Layout(
xaxis2=dict(
domain=[0.6, 0.95],
anchor='y2',
),
yaxis2=dict(
domain=[0.6, 0.95],
anchor='x2',
),
title = 'Happiness'
)
fig = go.Figure(data=data, layout=layout)
plot(fig) | [
"noreply@github.com"
] | noreply@github.com |
3c68bcd1c8eb969fe6f027f047b84e08c321097f | 0b2acfb420f4f1354c1f0db78a5fb192f7ffa3fb | /ds_in/sampler_base.py | 7ecf9a7af0460ebfa27a6b151291ac232d78ef68 | [
"WTFPL"
] | permissive | wsfreund/decoder-generative-models | 0ef7aed7fb53e5307a1d27738e153cbcf5dc8902 | 927409dda96d5e886156214ecb6960eb2072a9fd | refs/heads/master | 2023-06-04T19:47:49.322157 | 2021-06-25T13:35:09 | 2021-06-25T13:39:47 | 279,616,507 | 0 | 1 | NOASSERTION | 2021-03-25T20:25:44 | 2020-07-14T15:00:39 | Python | UTF-8 | Python | false | false | 11,774 | py | from abc import ABC, abstractmethod
import numpy as np
import tensorflow as tf
import copy
import itertools
from ..misc import *
class SpecificFlowSamplingOpts(object):
def __init__( self, batch_size
, take_n = NotSet, drop_remainder = NotSet
, shuffle = NotSet, shuffle_kwargs = NotSet
, memory_cache = False ):
assert batch_size is not NotSet
self.batch_size = batch_size
self.take_n = take_n
self.drop_remainder = drop_remainder
self.shuffle = shuffle
self.shuffle_kwargs = shuffle_kwargs
self.memory_cache = memory_cache
def set_unset_to_default(self, sampler, df):
if self.take_n is NotSet:
self.take_n = None
if self.drop_remainder is NotSet:
self.drop_remainder = True
if self.shuffle is NotSet:
if self.batch_size is not None:
if self.take_n not in (None, NotSet):
n_examples = self.take_n
else:
n_examples, n_dims = df.shape
self.shuffle = True if self.batch_size < n_examples // 4 else False
else:
self.shuffle = True
if self.shuffle_kwargs is NotSet:
self.shuffle_kwargs = {}
if "reshuffle_each_iteration" not in self.shuffle_kwargs:
self.shuffle_kwargs["reshuffle_each_iteration"] = False
if "buffer_size" not in self.shuffle_kwargs:
if self.batch_size is not None:
self.shuffle_kwargs["buffer_size"] = self.batch_size * 8
# FIXME This is a singleton shared within all instances
# TODO Implement a dict where the key is each instance
class _CacheStorage(CleareableCache):
    """Cache registry backing SamplerBase's cached properties.

    Inherits the project-defined CleareableCache behaviour; the class-level
    list below is shared by every sampler instance (see FIXME above).
    """
    # Functions whose caches can be cleared through CleareableCache.
    cached_functions = []
class SamplerBase(ABC):
    """Abstract base class for tf.data-backed dataset samplers.

    Subclasses implement :meth:`_split_data` (which must define the
    ``raw_train_data``/``raw_val_data``/``raw_test_data`` attributes) and
    :meth:`_make_dataset` (which builds a tf.data pipeline from a raw
    partition and a :class:`SpecificFlowSamplingOpts` instance).

    NOTE: Any modification to sampler object requires removing any previous
    cache_filepath to take effect.
    """

    def __init__(self, raw_data, split_kw=None, **kw):
        """
        There are several sampler functions to be used:
          - training_sampler: Iteration considering the training conditions.
            These conditions are provided by training_sampler_kwargs. The
            arguments are provided to make_dataset.
          - evaluation_sampler: Iteration using evaluation conditions. These
            conditions are provided by eval_sampler_kwargs. The arguments are
            provided to make_dataset.
          - new sampler: Create a new sampler using custom conditions.
        """
        # Avoid the shared mutable-default pitfall (previously split_kw={}).
        split_kw = {} if split_kw is None else split_kw
        # Splitting instructions
        self._val_frac = retrieve_kw(kw, "val_frac", .2)
        self._test_frac = retrieve_kw(kw, "test_frac", .2)
        self._split_data(raw_data, self._val_frac, self._test_frac, **split_kw)
        # Specify specific sampling instructions for each dataset
        SpecificOptsCls = retrieve_kw(kw, "specific_flow_sampling_opt_class", SpecificFlowSamplingOpts)
        training_sampler_kwargs = retrieve_kw(kw, "training_sampler_kwargs", {})
        if "batch_size" not in training_sampler_kwargs:
            training_sampler_kwargs["batch_size"] = 128
        if "drop_remainder" not in training_sampler_kwargs:
            training_sampler_kwargs["drop_remainder"] = True
        self.training_sampler_opts = SpecificOptsCls(**training_sampler_kwargs)
        eval_sampler_kwargs = retrieve_kw(kw, "eval_sampler_kwargs", {})
        if "batch_size" not in eval_sampler_kwargs:
            # Evaluation batches default to 16x the training batch size.
            eval_sampler_kwargs["batch_size"] = self.training_sampler_opts.batch_size * 16
        if "drop_remainder" not in eval_sampler_kwargs:
            eval_sampler_kwargs["drop_remainder"] = False
        if "take_n" not in eval_sampler_kwargs:
            eval_sampler_kwargs["take_n"] = 2**20  # 1,048,576
        self.eval_sampler_opts = SpecificOptsCls(**eval_sampler_kwargs)
        # Other configuration
        self._cache_filepath = retrieve_kw(kw, "cache_filepath", '')
        self._cached_filepath_dict = {}

    def new_sampler_from_train_ds(self, **kw):
        """
        Keywords are passed to make_dataset
        """
        return self._make_dataset(self.raw_train_data, **kw)

    def new_sampler_from_val_ds(self, **kw):
        """
        Keywords are passed to make_dataset
        """
        return self._make_dataset(self.raw_val_data, **kw)

    def new_sampler_from_test_ds(self, **kw):
        """
        Keywords are passed to make_dataset
        """
        return self._make_dataset(self.raw_test_data, **kw)

    @property
    def has_train_ds(self):
        # True once _split_data has produced a training partition.
        return hasattr(self, "raw_train_data")

    @property
    def has_val_ds(self):
        # True once _split_data has produced a validation partition.
        return hasattr(self, "raw_val_data")

    @property
    def has_test_ds(self):
        # True once _split_data has produced a test partition.
        return hasattr(self, "raw_test_data")

    @abstractmethod
    def _make_dataset(self, df, opts, cache_filepath):
        # Build a tf.data pipeline over `df` honouring `opts`; implemented by
        # subclasses.
        pass

    @abstractmethod
    def _split_data(self, raw_data, val_frac, test_frac, **kw):
        # BUGFIX: signature now matches the call in __init__, which passes
        # raw_data and forwards **split_kw; the stub previously declared only
        # (self, val_frac, test_frac).
        pass

    @_CacheStorage.cached_property()
    def training_sampler(self):
        """
        Sampler on the same conditions as those specified for class instance
        """
        cache_filepath = self._cache_filepath
        label = 'train_surrogate'
        if cache_filepath:
            cache_filepath += '_' + label
        ds = self._make_dataset(self.raw_train_data, self.training_sampler_opts, cache_filepath)
        ds.label = label
        ds.opts = self.training_sampler_opts
        return ds

    def evaluation_sampler_from_ds(self, ds):
        # Dispatch by dataset label string.
        if ds == "train":
            return self.evaluation_sampler_from_train_ds()
        elif ds == "val":
            return self.evaluation_sampler_from_val_ds()
        elif ds == "test":
            return self.evaluation_sampler_from_test_ds()
        else:
            raise RuntimeError("Unspecified dataset label %s" % ds)

    @_CacheStorage.cached_property()
    def evaluation_sampler_from_train_ds(self):
        """
        Sampler on the same conditions as those specified for class instance
        """
        cache_filepath = self._cache_filepath
        label = 'train_perf'
        if cache_filepath:
            cache_filepath += '_' + label
        ds = self._make_dataset(self.raw_train_data, self.eval_sampler_opts, cache_filepath)
        ds.label = label
        ds.opts = self.eval_sampler_opts
        return ds

    @_CacheStorage.cached_property()
    def evaluation_sampler_from_val_ds(self):
        """
        Sampler on the same conditions as those specified for class instance
        """
        cache_filepath = self._cache_filepath
        label = 'val_perf'
        if cache_filepath:
            cache_filepath += '_' + label
        ds = self._make_dataset(self.raw_val_data, self.eval_sampler_opts, cache_filepath)
        ds.label = label
        ds.opts = self.eval_sampler_opts
        return ds

    @_CacheStorage.cached_property()
    def evaluation_sampler_from_test_ds(self):
        """
        Sampler on the same conditions as those specified for class instance
        """
        cache_filepath = self._cache_filepath
        label = 'test_perf'
        if cache_filepath:
            cache_filepath += '_' + label
        ds = self._make_dataset(self.raw_test_data, self.eval_sampler_opts, cache_filepath)
        ds.label = label
        ds.opts = self.eval_sampler_opts
        return ds

    def batch_subsample(self, n_samples=1, mode="first_n", ds="val"):
        """
        Sample data using evaluation iteration condition with minibatch. Extra
        minibatch samples are discarded.
        mode can take the following values:
          - first_n: sample the first n values from each batch;
        """
        if ds == "train":
            f_iter = self._batch_sample_cached_train_iter
            fget = type(self)._batch_sample_cached_train_iter.fget
        elif ds == "val":
            f_iter = self._batch_sample_cached_val_iter
            fget = type(self)._batch_sample_cached_val_iter.fget
        elif ds == "test":
            f_iter = self._batch_sample_cached_test_iter
            fget = type(self)._batch_sample_cached_test_iter.fget
        else:
            # BUGFIX: used logging-style args RuntimeError("msg %s.", ds),
            # which never interpolates; format explicitly.
            raise RuntimeError("unknown dataset %s." % ds)
        try:
            samples = next(f_iter)
        except StopIteration:
            # Iterator exhausted: uncache and cache new iterator for the
            # next pass over the dataset.
            fget.cache_clear()
            f_iter = fget.__get__(self, type(self))()
            samples = next(f_iter)
        if mode == "first_n":
            return self.subsample(samples, slice(n_samples))
        else:
            raise ValueError("invalid mode '%s'" % mode)

    def subsample(self, data, data_slice):
        # Recursively apply `data_slice` to every leaf tensor/array in a
        # (possibly nested) dict/list/tuple structure.
        if isinstance(data, dict):
            return {k: self.subsample(v, data_slice) for k, v in data.items()}
        elif isinstance(data, (tuple, list)):
            return [self.subsample(v, data_slice) for v in data]
        else:
            return data[data_slice]

    def select_samples(self, data, sample_mask):
        # Same recursion as subsample(), but indexing with a boolean/index
        # mask instead of a slice.
        if isinstance(data, dict):
            return {k: self.select_samples(v, sample_mask) for k, v in data.items()}
        elif isinstance(data, (tuple, list)):
            return [self.select_samples(v, sample_mask) for v in data]
        else:
            return data[sample_mask]

    def sample(self, n_samples=1, ds="val"):
        """
        Sample data using evaluation iteration conditions, but without using
        minibatch. It will create a new cached sampler, which may require
        creating a new shuffle buffer.
        """
        if ds == "train":
            f_iter = self._single_sample_cached_train_iter
            fget = type(self)._single_sample_cached_train_iter.fget
        elif ds == "val":
            f_iter = self._single_sample_cached_val_iter
            fget = type(self)._single_sample_cached_val_iter.fget
        elif ds == "test":
            f_iter = self._single_sample_cached_test_iter
            fget = type(self)._single_sample_cached_test_iter.fget
        else:
            # BUGFIX: same non-interpolating RuntimeError args as in
            # batch_subsample; format explicitly.
            raise RuntimeError("unknown dataset %s." % ds)
        samples = []
        for _ in range(n_samples):
            try:
                sample = next(f_iter)
            except StopIteration:
                # uncache and cache new iterator
                fget.cache_clear()
                f_iter = fget.__get__(self, type(self))()
                sample = next(f_iter)
            samples.append(sample)
        if n_samples > 1:
            # Stack the individual samples along a new leading axis,
            # mirroring the (dict / sequence / tensor) structure of one
            # sample.
            if isinstance(sample, dict):
                ret_samples = {}
                for k in sample.keys():
                    if isinstance(sample[k], (tuple, list)):
                        ret_samples[k] = [tf.stack([s[k][i] for s in samples]) for i in range(len(sample[k]))]
                    else:
                        ret_samples[k] = tf.stack([s[k] for s in samples])
                samples = ret_samples
            elif isinstance(sample, (tuple, list)):
                samples = [tf.stack([s[i] for s in samples]) for i in range(len(sample))]
            else:
                samples = tf.stack(samples)
        else:
            samples = samples[0]
        return samples

    @_CacheStorage.cached_property()
    def _batch_sample_cached_train_iter(self):
        # Cached iterator over the evaluation-flavoured training sampler.
        return iter(self.evaluation_sampler_from_train_ds)

    @_CacheStorage.cached_property()
    def _batch_sample_cached_val_iter(self):
        # Cached iterator over the evaluation-flavoured validation sampler.
        return iter(self.evaluation_sampler_from_val_ds)

    @_CacheStorage.cached_property()
    def _batch_sample_cached_test_iter(self):
        # Cached iterator over the evaluation-flavoured test sampler.
        return iter(self.evaluation_sampler_from_test_ds)

    @_CacheStorage.cached_property()
    def _single_sample_cached_train_iter(self):
        # Unbatched, shuffled, uncached flow for drawing single examples.
        eval_opts = copy.copy(self.eval_sampler_opts)
        eval_opts.batch_size = None
        eval_opts.take_n = None
        eval_opts.shuffle = True
        eval_opts.shuffle_kwargs = dict(buffer_size=self.eval_sampler_opts.batch_size * 8)
        # NOTE(review): the options class defines 'memory_cache', not
        # 'cache' -- confirm which attribute _make_dataset implementations
        # actually read.
        eval_opts.cache = False
        cache_filepath = ''
        return iter(self._make_dataset(self.raw_train_data, eval_opts, cache_filepath))

    @_CacheStorage.cached_property()
    def _single_sample_cached_val_iter(self):
        # Unbatched, shuffled, uncached flow for drawing single examples.
        eval_opts = copy.copy(self.eval_sampler_opts)
        eval_opts.batch_size = None
        eval_opts.take_n = None
        eval_opts.shuffle = True
        eval_opts.shuffle_kwargs = dict(buffer_size=self.eval_sampler_opts.batch_size * 8)
        # NOTE(review): see _single_sample_cached_train_iter about 'cache'
        # vs 'memory_cache'.
        eval_opts.cache = False
        cache_filepath = ''
        return iter(self._make_dataset(self.raw_val_data, eval_opts, cache_filepath))

    @_CacheStorage.cached_property()
    def _single_sample_cached_test_iter(self):
        # Unbatched, shuffled, uncached flow for drawing single examples.
        eval_opts = copy.copy(self.eval_sampler_opts)
        eval_opts.batch_size = None
        eval_opts.take_n = None
        eval_opts.shuffle = True
        eval_opts.shuffle_kwargs = dict(buffer_size=self.eval_sampler_opts.batch_size * 8)
        # NOTE(review): see _single_sample_cached_train_iter about 'cache'
        # vs 'memory_cache'.
        eval_opts.cache = False
        cache_filepath = ''
        return iter(self._make_dataset(self.raw_test_data, eval_opts, cache_filepath))
| [
"wsfreund@gmail.com"
] | wsfreund@gmail.com |
2f5b97a5828fd10e996f4e639fd1fd2130a65158 | a367b9dc148be03616c10eb76e169561789acd43 | /src/predict.py | 9fc99791e365c7a5c765bbda37efd244fd060225 | [] | no_license | Rabahjamal/Visual-Question-Answering | 9e66d62eeb3da33a6e2bacc40349908f1c983992 | d5d8bdeb5d7fe7ac8518c11d489e229e81a99318 | refs/heads/master | 2020-03-21T03:32:01.288282 | 2018-06-30T13:33:06 | 2018-06-30T13:33:06 | 138,059,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | import numpy as np
import keras
import pandas as pd
from keras.models import Sequential,Model
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Concatenate,Reshape , Input ,LSTM, Dense, Dropout ,concatenate , Flatten ,GlobalMaxPooling1D
from keras.layers.convolutional import Conv2D, MaxPooling2D , Conv1D
from keras.layers.embeddings import Embedding
from keras.layers.core import Activation,Lambda
from keras.models import model_from_json
import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
from src.Coattention import *
from src.Question_Hierarichy import *
import pandas as pd
from tkinter import *
import pandas as pd
from tkinter import messagebox
# Hierarchical co-attention VQA head.
# Q: question token ids (max length 55); V: CNN image features (512 channels x 49 regions).
Q = Input(shape=(55,))
V = Input(shape=(512,49))
# Word-, phrase- and question-level encodings of the question, conditioned on the image.
w_level ,p_level, q_level = Ques_Hierarchy(Q,V)
# Co-attend question and image at each level of the hierarchy.
qw,vw = Co_attention([w_level,V])
# NOTE(review): was Co_attention([w_level,V]) — a copy-paste slip; the
# phrase-level attention should consume the phrase-level encoding.
qp,vp = Co_attention([p_level,V])
qs,vs = Co_attention([q_level,V])
# Word level: h_w = tanh(W(q_w + v_w)).
w_att = keras.layers.Add()([qw,vw])
hw = Dense(512,activation='tanh')(w_att)
hw = Dropout(0.5)(hw)
# Phrase level: h_p = tanh(W[(q_p + v_p); h_w]).
# NOTE(review): the original computed the concatenation and then immediately
# overwrote it with Dense(...)(p_att), silently dropping h_w; feed the concat.
p_att = keras.layers.Add()([qp,vp])
hp = keras.layers.concatenate([p_att,hw],axis=-1)
hp = Dense(512,activation='tanh')(hp)
hp = Dropout(0.5)(hp)
# Question level: h_s = tanh(W[(q_s + v_s); h_p]) — same dead-code fix as above.
s_att = keras.layers.Add()([qs,vs])
hs = keras.layers.concatenate([s_att,hp],axis=-1)
hs = Dense(512,activation='tanh')(hs)
hs = Reshape((512,),input_shape=(1,512))(hs)
hs = Dropout(0.5)(hs)
# Distribution over the 430 candidate answers.
p = Dense(430,activation='softmax')(hs)
Rms = keras.optimizers.RMSprop(lr=0.0004, rho=0.9, epsilon=None, decay=0.00000001)
model = Model(inputs=[Q,V], outputs=p)
| [
"rabah9686@gmail.com"
] | rabah9686@gmail.com |
42be3d1e9cacde54e580d4767efb954f52e2fdd4 | a65e5dc54092a318fc469543c3b96f6699d0c60b | /Personel/Ruthwik/Python/4mar/list_even.py | 287949f2c9dd5179c08725f9c845b5ed20460460 | [] | no_license | shankar7791/MI-10-DevOps | e15bfda460ffd0afce63274f2f430445d04261fe | f0b9e8c5be7b28298eb6d3fb6badf11cd033881d | refs/heads/main | 2023-07-04T15:25:08.673757 | 2021-08-12T09:12:37 | 2021-08-12T09:12:37 | 339,016,230 | 1 | 0 | null | 2021-08-12T09:12:37 | 2021-02-15T08:50:08 | JavaScript | UTF-8 | Python | false | false | 206 | py | #Python program function to print the even numbers from a given list.
# Sample values to scan for even numbers.
n1 = [15, 42, 40, 85, 96, 80, 34, 50, 55, 12, 23, 24]
def even(n):
    """Print every even number in *n* on one line, each followed by a space."""
    evens = (value for value in n if value % 2 == 0)
    for value in evens:
        print(value, end=" ")
even(n1)
| [
"ruthwikraja@gmail.com"
] | ruthwikraja@gmail.com |
64c6e624e5cdc1d68562c50856c653259e6a714f | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/spanner/v1/spanner-v1-py/tests/unit/gapic/spanner_v1/test_spanner.py | 48c0e610faf11dbdc0bd2728018980965350473f | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150,377 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.spanner_v1.services.spanner import SpannerAsyncClient
from google.cloud.spanner_v1.services.spanner import SpannerClient
from google.cloud.spanner_v1.services.spanner import pagers
from google.cloud.spanner_v1.services.spanner import transports
from google.cloud.spanner_v1.services.spanner.transports.base import _API_CORE_VERSION
from google.cloud.spanner_v1.services.spanner.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.spanner_v1.types import commit_response
from google.cloud.spanner_v1.types import keys
from google.cloud.spanner_v1.types import mutation
from google.cloud.spanner_v1.types import result_set
from google.cloud.spanner_v1.types import spanner
from google.cloud.spanner_v1.types import transaction
from google.cloud.spanner_v1.types import type as gs_type
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Version-gated skip markers: each lt/gte pair selects the test variant that
# matches the installed google-auth / google-api-core release.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
    packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
    reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
    packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
    reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
    """Dummy mTLS callback returning a fake (certificate, key) byte pair."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when *client* defaults to localhost.

    Used to patch DEFAULT_ENDPOINT so mTLS endpoint derivation can be tested
    even against an emulator-style (localhost) default.
    """
    default = client.DEFAULT_ENDPOINT
    if "localhost" in default:
        return "foo.googleapis.com"
    return default
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS variant and leaves others alone."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert SpannerClient._get_default_mtls_endpoint(None) is None
    assert SpannerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    # An already-mTLS endpoint passes through unchanged (idempotent).
    assert SpannerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert SpannerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert SpannerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    # Non-Google endpoints are returned untouched.
    assert SpannerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
SpannerClient,
SpannerAsyncClient,
])
def test_spanner_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'spanner.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
SpannerClient,
SpannerAsyncClient,
])
def test_spanner_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'spanner.googleapis.com:443'
def test_spanner_client_get_transport_class():
    """get_transport_class returns a registered transport, and grpc by name."""
    transport = SpannerClient.get_transport_class()
    available_transports = [
        transports.SpannerGrpcTransport,
    ]
    assert transport in available_transports
    transport = SpannerClient.get_transport_class("grpc")
    assert transport == transports.SpannerGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient))
@mock.patch.object(SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient))
def test_spanner_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SpannerClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SpannerClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc", "true"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(SpannerClient, transports.SpannerGrpcTransport, "grpc", "false"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient))
@mock.patch.object(SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_spanner_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_spanner_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_spanner_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_spanner_client_client_options_from_dict():
    """Client options may be given as a plain dict; api_endpoint is forwarded to the transport."""
    with mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = SpannerClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_create_session(transport: str = 'grpc', request_type=spanner.CreateSessionRequest):
    """CreateSession forwards the request to the stub and returns the stub's Session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.Session(
            name='name_value',
        )
        response = client.create_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CreateSessionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.Session)
    assert response.name == 'name_value'
def test_create_session_from_dict():
    """CreateSession also accepts the request as a plain dict."""
    test_create_session(request_type=dict)
def test_create_session_empty_call():
    """Calling create_session with no arguments sends a default CreateSessionRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        client.create_session()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CreateSessionRequest()
@pytest.mark.asyncio
async def test_create_session_async(transport: str = 'grpc_asyncio', request_type=spanner.CreateSessionRequest):
    """Async CreateSession forwards the request and awaits the stub's Session."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session(
            name='name_value',
        ))
        response = await client.create_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CreateSessionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.Session)
    assert response.name == 'name_value'
@pytest.mark.asyncio
async def test_create_session_async_from_dict():
    """Async CreateSession also accepts the request as a plain dict."""
    await test_create_session_async(request_type=dict)
def test_create_session_field_headers():
    """CreateSession sends the database as an x-goog-request-params routing header."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.CreateSessionRequest()
    request.database = 'database/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        call.return_value = spanner.Session()
        client.create_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'database=database/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_create_session_field_headers_async():
    """Async CreateSession sends the database as an x-goog-request-params routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.CreateSessionRequest()
    request.database = 'database/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        await client.create_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'database=database/value',
    ) in kw['metadata']
def test_create_session_flattened():
    """The flattened 'database' argument is packed into the request proto."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.Session()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_session(
            database='database_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].database == 'database_value'
def test_create_session_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_session(
            spanner.CreateSessionRequest(),
            database='database_value',
        )
@pytest.mark.asyncio
async def test_create_session_flattened_async():
    """Async: the flattened 'database' argument is packed into the request proto."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_session),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.Session()
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_session(
            database='database_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].database == 'database_value'
@pytest.mark.asyncio
async def test_create_session_flattened_error_async():
    """Async: passing both a request object and flattened fields raises ValueError."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_session(
            spanner.CreateSessionRequest(),
            database='database_value',
        )
def test_batch_create_sessions(transport: str = 'grpc', request_type=spanner.BatchCreateSessionsRequest):
    """BatchCreateSessions forwards the request and returns the stub's response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.BatchCreateSessionsResponse(
        )
        response = client.batch_create_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BatchCreateSessionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.BatchCreateSessionsResponse)
def test_batch_create_sessions_from_dict():
    """BatchCreateSessions also accepts the request as a plain dict."""
    test_batch_create_sessions(request_type=dict)
def test_batch_create_sessions_empty_call():
    """Calling batch_create_sessions with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        client.batch_create_sessions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BatchCreateSessionsRequest()
@pytest.mark.asyncio
async def test_batch_create_sessions_async(transport: str = 'grpc_asyncio', request_type=spanner.BatchCreateSessionsRequest):
    """Async BatchCreateSessions forwards the request and awaits the stub's response."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.BatchCreateSessionsResponse(
        ))
        response = await client.batch_create_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BatchCreateSessionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.BatchCreateSessionsResponse)
@pytest.mark.asyncio
async def test_batch_create_sessions_async_from_dict():
    """Async BatchCreateSessions also accepts the request as a plain dict."""
    await test_batch_create_sessions_async(request_type=dict)
def test_batch_create_sessions_field_headers():
    """BatchCreateSessions sends the database as an x-goog-request-params routing header."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.BatchCreateSessionsRequest()
    request.database = 'database/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        call.return_value = spanner.BatchCreateSessionsResponse()
        client.batch_create_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'database=database/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_batch_create_sessions_field_headers_async():
    """Async BatchCreateSessions sends the database as an x-goog-request-params routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.BatchCreateSessionsRequest()
    request.database = 'database/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.BatchCreateSessionsResponse())
        await client.batch_create_sessions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'database=database/value',
    ) in kw['metadata']
def test_batch_create_sessions_flattened():
    """Flattened 'database' and 'session_count' arguments are packed into the request proto."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.BatchCreateSessionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.batch_create_sessions(
            database='database_value',
            session_count=1420,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].database == 'database_value'
        assert args[0].session_count == 1420
def test_batch_create_sessions_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.batch_create_sessions(
            spanner.BatchCreateSessionsRequest(),
            database='database_value',
            session_count=1420,
        )
@pytest.mark.asyncio
async def test_batch_create_sessions_flattened_async():
    """Async: flattened 'database' and 'session_count' are packed into the request proto."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.batch_create_sessions),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.BatchCreateSessionsResponse()
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.BatchCreateSessionsResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_create_sessions(
            database='database_value',
            session_count=1420,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].database == 'database_value'
        assert args[0].session_count == 1420
@pytest.mark.asyncio
async def test_batch_create_sessions_flattened_error_async():
    """Async: passing both a request object and flattened fields raises ValueError."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.batch_create_sessions(
            spanner.BatchCreateSessionsRequest(),
            database='database_value',
            session_count=1420,
        )
def test_get_session(transport: str = 'grpc', request_type=spanner.GetSessionRequest):
    """GetSession forwards the request to the stub and returns the stub's Session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_session),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.Session(
            name='name_value',
        )
        response = client.get_session(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.GetSessionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.Session)
    assert response.name == 'name_value'
def test_get_session_from_dict():
    # Re-run the gRPC test with a plain dict request to confirm the client
    # coerces dicts into GetSessionRequest messages.
    test_get_session(request_type=dict)
def test_get_session_empty_call():
    """A bare get_session() with no request and no flattened args still works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Patch the transport callable so no real RPC is attempted.
    with mock.patch.object(
            type(client.transport.get_session), '__call__') as rpc:
        client.get_session()
        rpc.assert_called()
        # The client must synthesize a default request object.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.GetSessionRequest()
@pytest.mark.asyncio
async def test_get_session_async(transport: str = 'grpc_asyncio', request_type=spanner.GetSessionRequest):
    """GetSession via the async client: one stub call, typed Session result."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; proto3 fields are optional and the API
    # layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.get_session), '__call__') as rpc:
        # The async surface expects an awaitable call object.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session(
            name='name_value',
        ))
        response = await client.get_session(request)

        # The stub was invoked with the default request object.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.GetSessionRequest()

    # The awaited result is a typed Session.
    assert isinstance(response, spanner.Session)
    assert response.name == 'name_value'
@pytest.mark.asyncio
async def test_get_session_async_from_dict():
    # Re-run the async test with a plain dict request body.
    await test_get_session_async(request_type=dict)
def test_get_session_field_headers():
    """Routing fields from the request must be propagated as metadata headers."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.GetSessionRequest()
    request.name = 'name/value'

    with mock.patch.object(
            type(client.transport.get_session), '__call__') as rpc:
        rpc.return_value = spanner.Session()
        client.get_session(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_get_session_field_headers_async():
    """Routing headers are attached for async calls as well."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.GetSessionRequest()
    request.name = 'name/value'

    with mock.patch.object(
            type(client.transport.get_session), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        await client.get_session(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_get_session_flattened():
    """Keyword (flattened) arguments are folded into the request message."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
            type(client.transport.get_session), '__call__') as rpc:
        rpc.return_value = spanner.Session()
        # Invoke using only keyword arguments.
        client.get_session(name='name_value')

    # The generated request must carry the flattened value.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0].name == 'name_value'
def test_get_session_flattened_error():
    """Request object plus flattened args is ambiguous and must raise."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials())

    with pytest.raises(ValueError):
        client.get_session(
            spanner.GetSessionRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_get_session_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_session),
            '__call__') as call:
        # Designate an appropriate awaitable return value for the call.
        # (Removed a dead `call.return_value = spanner.Session()` assignment
        # that was immediately overwritten by the line below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_session(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_session_flattened_error_async():
    """Request object plus flattened args must raise (async client)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    with pytest.raises(ValueError):
        await client.get_session(
            spanner.GetSessionRequest(),
            name='name_value',
        )
def test_list_sessions(transport: str = 'grpc', request_type=spanner.ListSessionsRequest):
    """ListSessions over gRPC: one stub call, response wrapped in a pager."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API layer is mocked, so an
    # empty request is sufficient.
    request = request_type()

    with mock.patch.object(
            type(client.transport.list_sessions), '__call__') as rpc:
        rpc.return_value = spanner.ListSessionsResponse(
            next_page_token='next_page_token_value',
        )
        response = client.list_sessions(request)

        # Exactly one invocation with the default request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ListSessionsRequest()

    # List responses come back wrapped in a pager.
    assert isinstance(response, pagers.ListSessionsPager)
    assert response.next_page_token == 'next_page_token_value'
def test_list_sessions_from_dict():
    # Re-run the gRPC test with a plain dict request to confirm the client
    # coerces dicts into ListSessionsRequest messages.
    test_list_sessions(request_type=dict)
def test_list_sessions_empty_call():
    """A bare list_sessions() with no request and no flattened args works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Patch the transport callable so no real RPC is attempted.
    with mock.patch.object(
            type(client.transport.list_sessions), '__call__') as rpc:
        client.list_sessions()
        rpc.assert_called()
        # The client must synthesize a default request object.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ListSessionsRequest()
@pytest.mark.asyncio
async def test_list_sessions_async(transport: str = 'grpc_asyncio', request_type=spanner.ListSessionsRequest):
    """ListSessions via the async client: one stub call, async pager result."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.list_sessions), '__call__') as rpc:
        # The async surface expects an awaitable call object.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ListSessionsResponse(
            next_page_token='next_page_token_value',
        ))
        response = await client.list_sessions(request)

        # The stub was invoked with the default request object.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ListSessionsRequest()

    # List responses come back wrapped in an async pager.
    assert isinstance(response, pagers.ListSessionsAsyncPager)
    assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_sessions_async_from_dict():
    # Re-run the async test with a plain dict request body.
    await test_list_sessions_async(request_type=dict)
def test_list_sessions_field_headers():
    """Routing fields from the request must be propagated as metadata headers."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.ListSessionsRequest()
    request.database = 'database/value'

    with mock.patch.object(
            type(client.transport.list_sessions), '__call__') as rpc:
        rpc.return_value = spanner.ListSessionsResponse()
        client.list_sessions(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'database=database/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_list_sessions_field_headers_async():
    """Routing headers are attached for async calls as well."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.ListSessionsRequest()
    request.database = 'database/value'

    with mock.patch.object(
            type(client.transport.list_sessions), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ListSessionsResponse())
        await client.list_sessions(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'database=database/value',
    ) in kw['metadata']
def test_list_sessions_flattened():
    """Keyword (flattened) arguments are folded into the request message."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
            type(client.transport.list_sessions), '__call__') as rpc:
        rpc.return_value = spanner.ListSessionsResponse()
        # Invoke using only keyword arguments.
        client.list_sessions(database='database_value')

    # The generated request must carry the flattened value.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0].database == 'database_value'
def test_list_sessions_flattened_error():
    """Request object plus flattened args is ambiguous and must raise."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials())

    with pytest.raises(ValueError):
        client.list_sessions(
            spanner.ListSessionsRequest(),
            database='database_value',
        )
@pytest.mark.asyncio
async def test_list_sessions_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_sessions),
            '__call__') as call:
        # Designate an appropriate awaitable return value for the call.
        # (Removed a dead `call.return_value = spanner.ListSessionsResponse()`
        # assignment that was immediately overwritten by the line below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ListSessionsResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_sessions(
            database='database_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].database == 'database_value'
@pytest.mark.asyncio
async def test_list_sessions_flattened_error_async():
    """Request object plus flattened args must raise (async client)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    with pytest.raises(ValueError):
        await client.list_sessions(
            spanner.ListSessionsRequest(),
            database='database_value',
        )
def test_list_sessions_pager():
    # Walk the sync pager across several fake pages: it must yield every
    # Session and forward the routing metadata to each page fetch.
    client = SpannerClient(
        # NOTE(review): the credentials class (not an instance) is passed
        # here, unlike the other tests in this file — it works because the
        # mocked transport never exercises the credentials, but confirm
        # whether this should be AnonymousCredentials().
        credentials=ga_credentials.AnonymousCredentials,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_sessions),
            '__call__') as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 Sessions,
        # followed by RuntimeError to catch any extra page fetch.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                    spanner.Session(),
                ],
                next_page_token='abc',
            ),
            spanner.ListSessionsResponse(
                sessions=[],
                next_page_token='def',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                ],
                next_page_token='ghi',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                ],
            ),
            RuntimeError,
        )
        # Expected metadata: the routing header derived from the (empty)
        # database field of the request.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('database', ''),
            )),
        )
        pager = client.list_sessions(request={})
        # The pager must retain the metadata for subsequent page fetches
        # (checks a private attribute of the generated pager).
        assert pager._metadata == metadata
        # Iterating the pager flattens all pages into individual Sessions.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, spanner.Session)
                for i in results)
def test_list_sessions_pages():
    # Iterate page-by-page (rather than item-by-item) and check that each
    # raw page carries the expected next_page_token.
    client = SpannerClient(
        # NOTE(review): credentials class passed instead of an instance —
        # harmless with a mocked transport, but inconsistent with the
        # surrounding tests.
        credentials=ga_credentials.AnonymousCredentials,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_sessions),
            '__call__') as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against an unexpected extra fetch.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                    spanner.Session(),
                ],
                next_page_token='abc',
            ),
            spanner.ListSessionsResponse(
                sessions=[],
                next_page_token='def',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                ],
                next_page_token='ghi',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                ],
            ),
            RuntimeError,
        )
        # The final page has no token, hence the trailing '' below.
        pages = list(client.list_sessions(request={}).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_sessions_async_pager():
    # Async counterpart of the pager test: async iteration must flatten all
    # pages into individual Sessions.
    client = SpannerAsyncClient(
        # NOTE(review): credentials class passed instead of an instance —
        # harmless with a mocked transport, but inconsistent with the
        # surrounding tests.
        credentials=ga_credentials.AnonymousCredentials,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_sessions),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 Sessions,
        # with RuntimeError guarding against an extra fetch.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                    spanner.Session(),
                ],
                next_page_token='abc',
            ),
            spanner.ListSessionsResponse(
                sessions=[],
                next_page_token='def',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                ],
                next_page_token='ghi',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_sessions(request={},)
        # The pager exposes the first page's token directly.
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All four pages flattened: 6 Sessions in total.
        assert len(responses) == 6
        assert all(isinstance(i, spanner.Session)
                for i in responses)
@pytest.mark.asyncio
async def test_list_sessions_async_pages():
    # Async page-by-page iteration: each raw page must carry the expected
    # next_page_token.
    client = SpannerAsyncClient(
        # NOTE(review): credentials class passed instead of an instance —
        # harmless with a mocked transport, but inconsistent with the
        # surrounding tests.
        credentials=ga_credentials.AnonymousCredentials,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_sessions),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against an unexpected extra fetch.
        call.side_effect = (
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                    spanner.Session(),
                ],
                next_page_token='abc',
            ),
            spanner.ListSessionsResponse(
                sessions=[],
                next_page_token='def',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                ],
                next_page_token='ghi',
            ),
            spanner.ListSessionsResponse(
                sessions=[
                    spanner.Session(),
                    spanner.Session(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_sessions(request={})).pages:
            pages.append(page_)
        # The final page has no token, hence the trailing '' below.
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_delete_session(transport: str = 'grpc', request_type=spanner.DeleteSessionRequest):
    """DeleteSession over gRPC: one stub call, empty (None) response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.delete_session), '__call__') as rpc:
        # DeleteSession has an empty response, surfaced as None.
        rpc.return_value = None
        response = client.delete_session(request)

        # Exactly one invocation with the default request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.DeleteSessionRequest()

    assert response is None
def test_delete_session_from_dict():
    # Re-run the gRPC test with a plain dict request to confirm the client
    # coerces dicts into DeleteSessionRequest messages.
    test_delete_session(request_type=dict)
def test_delete_session_empty_call():
    """A bare delete_session() with no request and no flattened args works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Patch the transport callable so no real RPC is attempted.
    with mock.patch.object(
            type(client.transport.delete_session), '__call__') as rpc:
        client.delete_session()
        rpc.assert_called()
        # The client must synthesize a default request object.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.DeleteSessionRequest()
@pytest.mark.asyncio
async def test_delete_session_async(transport: str = 'grpc_asyncio', request_type=spanner.DeleteSessionRequest):
    """DeleteSession via the async client: one stub call, None result."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.delete_session), '__call__') as rpc:
        # The async surface expects an awaitable call object; the RPC has
        # an empty response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_session(request)

        # The stub was invoked with the default request object.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.DeleteSessionRequest()

    assert response is None
@pytest.mark.asyncio
async def test_delete_session_async_from_dict():
    # Re-run the async test with a plain dict request body.
    await test_delete_session_async(request_type=dict)
def test_delete_session_field_headers():
    """Routing fields from the request must be propagated as metadata headers."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.DeleteSessionRequest()
    request.name = 'name/value'

    with mock.patch.object(
            type(client.transport.delete_session), '__call__') as rpc:
        rpc.return_value = None
        client.delete_session(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_session_field_headers_async():
    """Routing headers are attached for async calls as well."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.DeleteSessionRequest()
    request.name = 'name/value'

    with mock.patch.object(
            type(client.transport.delete_session), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_session(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_delete_session_flattened():
    """Keyword (flattened) arguments are folded into the request message."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
            type(client.transport.delete_session), '__call__') as rpc:
        rpc.return_value = None
        # Invoke using only keyword arguments.
        client.delete_session(name='name_value')

    # The generated request must carry the flattened value.
    assert len(rpc.mock_calls) == 1
    _, args, _ = rpc.mock_calls[0]
    assert args[0].name == 'name_value'
def test_delete_session_flattened_error():
    """Request object plus flattened args is ambiguous and must raise."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials())

    with pytest.raises(ValueError):
        client.delete_session(
            spanner.DeleteSessionRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_delete_session_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_session),
            '__call__') as call:
        # Designate an appropriate awaitable return value for the call.
        # (Removed a dead `call.return_value = None` assignment that was
        # immediately overwritten by the line below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_session(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_session_flattened_error_async():
    """Request object plus flattened args must raise (async client)."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials())

    with pytest.raises(ValueError):
        await client.delete_session(
            spanner.DeleteSessionRequest(),
            name='name_value',
        )
def test_execute_sql(transport: str = 'grpc', request_type=spanner.ExecuteSqlRequest):
    """ExecuteSql over gRPC: one stub call, typed ResultSet response."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.execute_sql), '__call__') as rpc:
        rpc.return_value = result_set.ResultSet()
        response = client.execute_sql(request)

        # Exactly one invocation with the default request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()

    # The client surfaces a properly typed ResultSet.
    assert isinstance(response, result_set.ResultSet)
def test_execute_sql_from_dict():
    # Re-run the gRPC test with a plain dict request to confirm the client
    # coerces dicts into ExecuteSqlRequest messages.
    test_execute_sql(request_type=dict)
def test_execute_sql_empty_call():
    """A bare execute_sql() with no request and no flattened args works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Patch the transport callable so no real RPC is attempted.
    with mock.patch.object(
            type(client.transport.execute_sql), '__call__') as rpc:
        client.execute_sql()
        rpc.assert_called()
        # The client must synthesize a default request object.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
@pytest.mark.asyncio
async def test_execute_sql_async(transport: str = 'grpc_asyncio', request_type=spanner.ExecuteSqlRequest):
    """ExecuteSql via the async client: one stub call, typed ResultSet."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.execute_sql), '__call__') as rpc:
        # The async surface expects an awaitable call object.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet())
        response = await client.execute_sql(request)

        # The stub was invoked with the default request object.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()

    # The awaited result is a typed ResultSet.
    assert isinstance(response, result_set.ResultSet)
@pytest.mark.asyncio
async def test_execute_sql_async_from_dict():
    # Re-run the async test with a plain dict request body.
    await test_execute_sql_async(request_type=dict)
def test_execute_sql_field_headers():
    """Routing fields from the request must be propagated as metadata headers."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.ExecuteSqlRequest()
    request.session = 'session/value'

    with mock.patch.object(
            type(client.transport.execute_sql), '__call__') as rpc:
        rpc.return_value = result_set.ResultSet()
        client.execute_sql(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_execute_sql_field_headers_async():
    """Routing headers are attached for async calls as well."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.ExecuteSqlRequest()
    request.session = 'session/value'

    with mock.patch.object(
            type(client.transport.execute_sql), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet())
        await client.execute_sql(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_execute_streaming_sql(transport: str = 'grpc', request_type=spanner.ExecuteSqlRequest):
    """ExecuteStreamingSql over gRPC: the stream yields PartialResultSets."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.execute_streaming_sql), '__call__') as rpc:
        # A server-streaming RPC is faked with a plain iterator.
        rpc.return_value = iter([result_set.PartialResultSet()])
        response = client.execute_streaming_sql(request)

        # Exactly one invocation with the default request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()

    # Every streamed message is a PartialResultSet.
    for message in response:
        assert isinstance(message, result_set.PartialResultSet)
def test_execute_streaming_sql_from_dict():
    # Re-run the gRPC test with a plain dict request to confirm the client
    # coerces dicts into ExecuteSqlRequest messages.
    test_execute_streaming_sql(request_type=dict)
def test_execute_streaming_sql_empty_call():
    """A bare execute_streaming_sql() with no request still works."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Patch the transport callable so no real RPC is attempted.
    with mock.patch.object(
            type(client.transport.execute_streaming_sql), '__call__') as rpc:
        client.execute_streaming_sql()
        rpc.assert_called()
        # The client must synthesize a default request object.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()
@pytest.mark.asyncio
async def test_execute_streaming_sql_async(transport: str = 'grpc_asyncio', request_type=spanner.ExecuteSqlRequest):
    """ExecuteStreamingSql via the async client: read() yields messages."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # An empty request suffices; the API layer is mocked out.
    request = request_type()

    with mock.patch.object(
            type(client.transport.execute_streaming_sql), '__call__') as rpc:
        # Fake a UnaryStreamCall whose read() produces one message.
        rpc.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        rpc.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
        response = await client.execute_streaming_sql(request)

        # The stub was invoked with the default request object.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == spanner.ExecuteSqlRequest()

    # Reading from the stream yields a PartialResultSet.
    message = await response.read()
    assert isinstance(message, result_set.PartialResultSet)
@pytest.mark.asyncio
async def test_execute_streaming_sql_async_from_dict():
    # Re-run the async test with a plain dict request body.
    await test_execute_streaming_sql_async(request_type=dict)
def test_execute_streaming_sql_field_headers():
    """Routing fields from the request must be propagated as metadata headers."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.ExecuteSqlRequest()
    request.session = 'session/value'

    with mock.patch.object(
            type(client.transport.execute_streaming_sql), '__call__') as rpc:
        # A server-streaming RPC is faked with a plain iterator.
        rpc.return_value = iter([result_set.PartialResultSet()])
        client.execute_streaming_sql(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_execute_streaming_sql_field_headers_async():
    """Routing headers are attached for async streaming calls as well."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The URI-bound field must be non-empty for a header to be generated.
    request = spanner.ExecuteSqlRequest()
    request.session = 'session/value'

    with mock.patch.object(
            type(client.transport.execute_streaming_sql), '__call__') as rpc:
        # Fake a UnaryStreamCall whose read() produces one message.
        rpc.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        rpc.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
        await client.execute_streaming_sql(request)

        # The request object reaches the stub unchanged.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The routing header must be present in the call metadata.
    _, _, kw = rpc.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_execute_batch_dml(transport: str = 'grpc', request_type=spanner.ExecuteBatchDmlRequest):
    """Verify execute_batch_dml forwards the request and returns an ExecuteBatchDmlResponse."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.execute_batch_dml),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.ExecuteBatchDmlResponse(
        )
        response = client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteBatchDmlRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.ExecuteBatchDmlResponse)
def test_execute_batch_dml_from_dict():
    """Run the execute_batch_dml test with a dict-typed request object."""
    test_execute_batch_dml(request_type=dict)
def test_execute_batch_dml_empty_call():
    """Verify calling execute_batch_dml with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.execute_batch_dml),
            '__call__') as call:
        client.execute_batch_dml()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteBatchDmlRequest()
@pytest.mark.asyncio
async def test_execute_batch_dml_async(transport: str = 'grpc_asyncio', request_type=spanner.ExecuteBatchDmlRequest):
    """Async variant: verify execute_batch_dml forwards the request and returns the response."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.execute_batch_dml),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.ExecuteBatchDmlResponse(
        ))
        response = await client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ExecuteBatchDmlRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.ExecuteBatchDmlResponse)
@pytest.mark.asyncio
async def test_execute_batch_dml_async_from_dict():
    """Run the async execute_batch_dml test with a dict-typed request object."""
    await test_execute_batch_dml_async(request_type=dict)
def test_execute_batch_dml_field_headers():
    """Verify execute_batch_dml sends the routing header built from request.session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteBatchDmlRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.execute_batch_dml),
            '__call__') as call:
        call.return_value = spanner.ExecuteBatchDmlResponse()
        client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_execute_batch_dml_field_headers_async():
    """Async variant: verify execute_batch_dml sends the session routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ExecuteBatchDmlRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.execute_batch_dml),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ExecuteBatchDmlResponse())
        await client.execute_batch_dml(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_read(transport: str = 'grpc', request_type=spanner.ReadRequest):
    """Verify read forwards the request and returns a ResultSet."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = result_set.ResultSet(
        )
        response = client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, result_set.ResultSet)
def test_read_from_dict():
    """Run the read test with a dict-typed request object."""
    test_read(request_type=dict)
def test_read_empty_call():
    """Verify calling read with no arguments sends a default ReadRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read),
            '__call__') as call:
        client.read()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
@pytest.mark.asyncio
async def test_read_async(transport: str = 'grpc_asyncio', request_type=spanner.ReadRequest):
    """Async variant: verify read forwards the request and returns a ResultSet."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet(
        ))
        response = await client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, result_set.ResultSet)
@pytest.mark.asyncio
async def test_read_async_from_dict():
    """Run the async read test with a dict-typed request object."""
    await test_read_async(request_type=dict)
def test_read_field_headers():
    """Verify read sends the routing header built from request.session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read),
            '__call__') as call:
        call.return_value = result_set.ResultSet()
        client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_read_field_headers_async():
    """Async variant: verify read sends the session routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet())
        await client.read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_streaming_read(transport: str = 'grpc', request_type=spanner.ReadRequest):
    """Verify streaming_read forwards the request and yields PartialResultSet messages."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.streaming_read),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = iter([result_set.PartialResultSet()])
        response = client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    for message in response:
        assert isinstance(message, result_set.PartialResultSet)
def test_streaming_read_from_dict():
    """Run the streaming_read test with a dict-typed request object."""
    test_streaming_read(request_type=dict)
def test_streaming_read_empty_call():
    """Verify calling streaming_read with no arguments sends a default ReadRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.streaming_read),
            '__call__') as call:
        client.streaming_read()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
@pytest.mark.asyncio
async def test_streaming_read_async(transport: str = 'grpc_asyncio', request_type=spanner.ReadRequest):
    """Async variant: verify streaming_read forwards the request and reads PartialResultSet."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.streaming_read),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
        response = await client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.ReadRequest()
    # Establish that the response is the type that we expect.
    message = await response.read()
    assert isinstance(message, result_set.PartialResultSet)
@pytest.mark.asyncio
async def test_streaming_read_async_from_dict():
    """Run the async streaming_read test with a dict-typed request object."""
    await test_streaming_read_async(request_type=dict)
def test_streaming_read_field_headers():
    """Verify streaming_read sends the routing header built from request.session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.streaming_read),
            '__call__') as call:
        call.return_value = iter([result_set.PartialResultSet()])
        client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_streaming_read_field_headers_async():
    """Async variant: verify streaming_read sends the session routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.ReadRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.streaming_read),
            '__call__') as call:
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        call.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
        await client.streaming_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_begin_transaction(transport: str = 'grpc', request_type=spanner.BeginTransactionRequest):
    """Verify begin_transaction forwards the request and returns a Transaction."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = transaction.Transaction(
            id=b'id_blob',
        )
        response = client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BeginTransactionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, transaction.Transaction)
    assert response.id == b'id_blob'
def test_begin_transaction_from_dict():
    """Run the begin_transaction test with a dict-typed request object."""
    test_begin_transaction(request_type=dict)
def test_begin_transaction_empty_call():
    """Verify calling begin_transaction with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        client.begin_transaction()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BeginTransactionRequest()
@pytest.mark.asyncio
async def test_begin_transaction_async(transport: str = 'grpc_asyncio', request_type=spanner.BeginTransactionRequest):
    """Async variant: verify begin_transaction forwards the request and returns a Transaction."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(transaction.Transaction(
            id=b'id_blob',
        ))
        response = await client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.BeginTransactionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, transaction.Transaction)
    assert response.id == b'id_blob'
@pytest.mark.asyncio
async def test_begin_transaction_async_from_dict():
    """Run the async begin_transaction test with a dict-typed request object."""
    await test_begin_transaction_async(request_type=dict)
def test_begin_transaction_field_headers():
    """Verify begin_transaction sends the routing header built from request.session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.BeginTransactionRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        call.return_value = transaction.Transaction()
        client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_begin_transaction_field_headers_async():
    """Async variant: verify begin_transaction sends the session routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.BeginTransactionRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(transaction.Transaction())
        await client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_begin_transaction_flattened():
    """Verify flattened keyword args to begin_transaction populate the request object."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = transaction.Transaction()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.begin_transaction(
            session='session_value',
            options=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].session == 'session_value'
        assert args[0].options == transaction.TransactionOptions(read_write=None)
def test_begin_transaction_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.begin_transaction(
            spanner.BeginTransactionRequest(),
            session='session_value',
            options=transaction.TransactionOptions(read_write=None),
        )
@pytest.mark.asyncio
async def test_begin_transaction_flattened_async():
    """Async variant: verify flattened keyword args populate the request object.

    Defect fixed: the original assigned a plain ``transaction.Transaction()``
    to ``call.return_value`` and then immediately overwrote it with the
    awaitable ``FakeUnaryUnaryCall`` wrapper; the dead first assignment has
    been removed.
    """
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.begin_transaction),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(transaction.Transaction())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.begin_transaction(
            session='session_value',
            options=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].session == 'session_value'
        assert args[0].options == transaction.TransactionOptions(read_write=None)
@pytest.mark.asyncio
async def test_begin_transaction_flattened_error_async():
    """Async variant: mixing a request object with flattened args raises ValueError."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.begin_transaction(
            spanner.BeginTransactionRequest(),
            session='session_value',
            options=transaction.TransactionOptions(read_write=None),
        )
def test_commit(transport: str = 'grpc', request_type=spanner.CommitRequest):
    """Verify commit forwards the request and returns a CommitResponse."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = commit_response.CommitResponse(
        )
        response = client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CommitRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, commit_response.CommitResponse)
def test_commit_from_dict():
    """Run the commit test with a dict-typed request object."""
    test_commit(request_type=dict)
def test_commit_empty_call():
    """Verify calling commit with no arguments sends a default CommitRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        client.commit()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CommitRequest()
@pytest.mark.asyncio
async def test_commit_async(transport: str = 'grpc_asyncio', request_type=spanner.CommitRequest):
    """Async variant: verify commit forwards the request and returns a CommitResponse."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(commit_response.CommitResponse(
        ))
        response = await client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.CommitRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, commit_response.CommitResponse)
@pytest.mark.asyncio
async def test_commit_async_from_dict():
    """Run the async commit test with a dict-typed request object."""
    await test_commit_async(request_type=dict)
def test_commit_field_headers():
    """Verify commit sends the routing header built from request.session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.CommitRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        call.return_value = commit_response.CommitResponse()
        client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_commit_field_headers_async():
    """Async variant: verify commit sends the session routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.CommitRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(commit_response.CommitResponse())
        await client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_commit_flattened():
    """Verify flattened keyword args to commit populate the request object."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = commit_response.CommitResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.commit(
            session='session_value',
            transaction_id=b'transaction_id_blob',
            mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].session == 'session_value'
        assert args[0].mutations == [mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))]
        assert args[0].single_use_transaction == transaction.TransactionOptions(read_write=None)
def test_commit_flattened_error():
    """Verify mixing a request object with flattened commit args raises ValueError."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.commit(
            spanner.CommitRequest(),
            session='session_value',
            transaction_id=b'transaction_id_blob',
            mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
@pytest.mark.asyncio
async def test_commit_flattened_async():
    """Async variant: verify flattened keyword args populate the commit request.

    Defect fixed: the original assigned a plain ``CommitResponse()`` to
    ``call.return_value`` and then immediately overwrote it with the awaitable
    ``FakeUnaryUnaryCall`` wrapper; the dead first assignment has been removed.
    """
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.commit),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(commit_response.CommitResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.commit(
            session='session_value',
            transaction_id=b'transaction_id_blob',
            mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].session == 'session_value'
        assert args[0].mutations == [mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))]
        assert args[0].single_use_transaction == transaction.TransactionOptions(read_write=None)
@pytest.mark.asyncio
async def test_commit_flattened_error_async():
    """Async variant: mixing a request object with flattened commit args raises ValueError."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.commit(
            spanner.CommitRequest(),
            session='session_value',
            transaction_id=b'transaction_id_blob',
            mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
            single_use_transaction=transaction.TransactionOptions(read_write=None),
        )
def test_rollback(transport: str = 'grpc', request_type=spanner.RollbackRequest):
    """Verify rollback forwards the request and returns None."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.rollback),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.RollbackRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_rollback_from_dict():
    """Run the rollback test with a dict-typed request object."""
    test_rollback(request_type=dict)
def test_rollback_empty_call():
    """Verify calling rollback with no arguments sends a default RollbackRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.rollback),
            '__call__') as call:
        client.rollback()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.RollbackRequest()
@pytest.mark.asyncio
async def test_rollback_async(transport: str = 'grpc_asyncio', request_type=spanner.RollbackRequest):
    """Async variant: verify rollback forwards the request and returns None."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.rollback),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.RollbackRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_rollback_async_from_dict():
    """Run the async rollback test with a dict-typed request object."""
    await test_rollback_async(request_type=dict)
def test_rollback_field_headers():
    """Verify rollback sends the routing header built from request.session."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.RollbackRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.rollback),
            '__call__') as call:
        call.return_value = None
        client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_rollback_field_headers_async():
    """Async variant: verify rollback sends the session routing header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.RollbackRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.rollback),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_rollback_flattened():
    """Verify the flattened-argument overload of rollback() builds the request proto."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials())
    # Fake the underlying gRPC stub call.
    with mock.patch.object(type(client.transport.rollback), '__call__') as rpc:
        rpc.return_value = None
        # Invoke the method with a truthy value for each flattened field,
        # using keyword arguments only.
        client.rollback(
            session='session_value',
            transaction_id=b'transaction_id_blob',
        )
        # The flattened values must have been copied into the request proto.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.session == 'session_value'
        assert sent_request.transaction_id == b'transaction_id_blob'
def test_rollback_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing the two calling conventions is ambiguous and rejected up front.
    with pytest.raises(ValueError):
        client.rollback(
            spanner.RollbackRequest(),
            session='session_value',
            transaction_id=b'transaction_id_blob',
        )
@pytest.mark.asyncio
async def test_rollback_flattened_async():
    """Async variant: the flattened-argument overload of rollback() builds the request proto."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.rollback),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (A redundant `call.return_value = None` that was immediately
        # overwritten has been removed as dead code.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.rollback(
            session='session_value',
            transaction_id=b'transaction_id_blob',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].session == 'session_value'
        assert args[0].transaction_id == b'transaction_id_blob'
@pytest.mark.asyncio
async def test_rollback_flattened_error_async():
    """Async variant: a request object plus flattened fields must raise ValueError."""
    client = SpannerAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing the two calling conventions is ambiguous and rejected up front.
    with pytest.raises(ValueError):
        await client.rollback(
            spanner.RollbackRequest(),
            session='session_value',
            transaction_id=b'transaction_id_blob',
        )
def test_partition_query(transport: str = 'grpc', request_type=spanner.PartitionQueryRequest):
    """PartitionQuery over gRPC returns a PartitionResponse for an empty request."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional as far as the runtime is concerned, and
    # the API itself is mocked out, so an empty request suffices.
    request = request_type()
    # Fake the underlying gRPC stub call.
    with mock.patch.object(type(client.transport.partition_query), '__call__') as rpc:
        rpc.return_value = spanner.PartitionResponse()
        response = client.partition_query(request)
        # Exactly one stub invocation, carrying an empty PartitionQueryRequest.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == spanner.PartitionQueryRequest()
    # The surfaced response has the expected wrapper type.
    assert isinstance(response, spanner.PartitionResponse)
def test_partition_query_from_dict():
    """Exercise partition_query with a dict-typed request."""
    test_partition_query(request_type=dict)
def test_partition_query_empty_call():
    """partition_query() with no arguments at all still sends an empty request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Fake the underlying gRPC stub call.
    with mock.patch.object(type(client.transport.partition_query), '__call__') as rpc:
        client.partition_query()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == spanner.PartitionQueryRequest()
@pytest.mark.asyncio
async def test_partition_query_async(transport: str = 'grpc_asyncio', request_type=spanner.PartitionQueryRequest):
    """PartitionQuery over async gRPC returns a PartitionResponse for an empty request."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional as far as the runtime is concerned, and
    # the API itself is mocked out, so an empty request suffices.
    request = request_type()
    # Fake the underlying gRPC stub call.
    with mock.patch.object(type(client.transport.partition_query), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            spanner.PartitionResponse())
        response = await client.partition_query(request)
        # A stub invocation carrying an empty PartitionQueryRequest.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == spanner.PartitionQueryRequest()
    # The surfaced response has the expected wrapper type.
    assert isinstance(response, spanner.PartitionResponse)
@pytest.mark.asyncio
async def test_partition_query_async_from_dict():
    """Exercise the async partition_query path with a dict-typed request."""
    await test_partition_query_async(request_type=dict)
def test_partition_query_field_headers():
    """PartitionQuery propagates the session as an x-goog-request-params header."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.PartitionQueryRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_query),
            '__call__') as call:
        call.return_value = spanner.PartitionResponse()
        client.partition_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_partition_query_field_headers_async():
    """Async variant: PartitionQuery sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.PartitionQueryRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_query),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse())
        await client.partition_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_partition_read(transport: str = 'grpc', request_type=spanner.PartitionReadRequest):
    """PartitionRead over gRPC returns a PartitionResponse for an empty request."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_read),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = spanner.PartitionResponse(
        )
        response = client.partition_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.PartitionReadRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.PartitionResponse)
def test_partition_read_from_dict():
    """Exercise partition_read with a dict-typed request."""
    test_partition_read(request_type=dict)
def test_partition_read_empty_call():
    """partition_read() with no arguments at all still issues an empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_read),
            '__call__') as call:
        client.partition_read()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.PartitionReadRequest()
@pytest.mark.asyncio
async def test_partition_read_async(transport: str = 'grpc_asyncio', request_type=spanner.PartitionReadRequest):
    """PartitionRead over async gRPC returns a PartitionResponse for an empty request."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_read),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse(
        ))
        response = await client.partition_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == spanner.PartitionReadRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, spanner.PartitionResponse)
@pytest.mark.asyncio
async def test_partition_read_async_from_dict():
    """Exercise the async partition_read path with a dict-typed request."""
    await test_partition_read_async(request_type=dict)
def test_partition_read_field_headers():
    """PartitionRead propagates the session as an x-goog-request-params header."""
    client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.PartitionReadRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_read),
            '__call__') as call:
        call.return_value = spanner.PartitionResponse()
        client.partition_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_partition_read_field_headers_async():
    """Async variant: PartitionRead sends the session as an x-goog-request-params header."""
    client = SpannerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = spanner.PartitionReadRequest()
    request.session = 'session/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.partition_read),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse())
        await client.partition_read(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'session=session/value',
    ) in kw['metadata']
def test_credentials_transport_error():
    """Mutually-exclusive client options combined with a transport instance raise ValueError.

    The unused ``client =`` bindings (flake8 F841 — the assignment never even
    executes because the constructor raises) have been dropped.
    """
    # It is an error to provide credentials and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        SpannerClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        SpannerClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        SpannerClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A client constructed with an explicit transport must adopt that exact instance."""
    custom_transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = SpannerClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both the sync and the asyncio gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.SpannerGrpcTransport,
        transports.SpannerGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The lazily-created channel must be truthy (i.e. actually built).
        assert transport.grpc_channel
@pytest.mark.parametrize("transport_class", [
    transports.SpannerGrpcTransport,
    transports.SpannerGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    """Transports fall back to application-default credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport specified, the client defaults to the sync gRPC transport."""
    client = SpannerClient(credentials=ga_credentials.AnonymousCredentials())
    assert isinstance(client.transport, transports.SpannerGrpcTransport)
def test_spanner_base_transport_error():
    """Supplying both credentials and credentials_file must raise DuplicateCredentialArgs.

    The unused ``transport =`` binding (flake8 F841) has been dropped; the
    constructor raises before the assignment could ever complete.
    """
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.SpannerTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )
def test_spanner_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.SpannerTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'create_session',
        'batch_create_sessions',
        'get_session',
        'list_sessions',
        'delete_session',
        'execute_sql',
        'execute_streaming_sql',
        'execute_batch_dml',
        'read',
        'streaming_read',
        'begin_transaction',
        'commit',
        'rollback',
        'partition_query',
        'partition_read',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_spanner_base_transport_with_credentials_file():
    """A credentials file is loaded with the Spanner default_scopes (google-auth >= 1.25)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SpannerTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/spanner.data',
            ),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_spanner_base_transport_with_credentials_file_old_google_auth():
    """A credentials file is loaded with scopes= directly (google-auth < 1.25 has no default_scopes)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SpannerTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json", scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/spanner.data',
            ),
            quota_project_id="octopus",
        )
def test_spanner_base_transport_with_adc():
    """With neither credentials nor a file, the base transport falls back to ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SpannerTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_spanner_auth_adc():
    """Client construction without credentials consults ADC with default_scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SpannerClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/spanner.data',
            ),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_spanner_auth_adc_old_google_auth():
    """Client construction without credentials consults ADC with scopes= (google-auth < 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SpannerClient()
        adc.assert_called_once_with(
            scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/spanner.data',),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SpannerGrpcTransport,
        transports.SpannerGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_spanner_transport_auth_adc(transport_class):
    """Concrete transports consult ADC, forwarding user scopes plus the Spanner default_scopes."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/spanner.data',),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SpannerGrpcTransport,
        transports.SpannerGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_spanner_transport_auth_adc_old_google_auth(transport_class):
    """Concrete transports consult ADC with scopes= on google-auth < 1.25."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/spanner.data',
        ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SpannerGrpcTransport, grpc_helpers),
        (transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
@requires_api_core_gte_1_26_0
def test_spanner_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation forwards host, credentials, scopes and gRPC options (api-core >= 1.26)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"]
        )
        create_channel.assert_called_with(
            "spanner.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/spanner.data',
            ),
            scopes=["1", "2"],
            default_host="spanner.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SpannerGrpcTransport, grpc_helpers),
        (transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
@requires_api_core_lt_1_26_0
def test_spanner_transport_create_channel_old_api_core(transport_class, grpc_helpers):
    """Channel creation on api-core < 1.26 passes the default scopes via scopes=."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus")
        create_channel.assert_called_with(
            "spanner.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/spanner.data',
            ),
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SpannerGrpcTransport, grpc_helpers),
        (transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
@requires_api_core_lt_1_26_0
def test_spanner_transport_create_channel_user_scopes(transport_class, grpc_helpers):
    """User-supplied scopes replace the defaults when creating the channel (api-core < 1.26)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "spanner.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            scopes=["1", "2"],
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport])
def test_spanner_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """mTLS setup prefers an explicit ssl_channel_credentials, else client_cert_source_for_mtls."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/spanner.data',
            ),
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key
            )
def test_spanner_host_no_port():
    """An api_endpoint without an explicit port is given the default :443."""
    opts = client_options.ClientOptions(api_endpoint='spanner.googleapis.com')
    spanner_client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert spanner_client.transport._host == 'spanner.googleapis.com:443'
def test_spanner_host_with_port():
    """An api_endpoint with an explicit port keeps that port verbatim."""
    opts = client_options.ClientOptions(api_endpoint='spanner.googleapis.com:8000')
    spanner_client = SpannerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert spanner_client.transport._host == 'spanner.googleapis.com:8000'
def test_spanner_grpc_transport_channel():
    """A transport constructed around an existing channel adopts it verbatim."""
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SpannerGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): comparisons to None use `is`, not `==`.
    assert transport._ssl_channel_credentials is None
def test_spanner_grpc_asyncio_transport_channel():
    """An asyncio transport constructed around an existing channel adopts it verbatim."""
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SpannerGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): comparisons to None use `is`, not `==`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport])
def test_spanner_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated api_mtls_endpoint/client_cert_source args still build an mTLS channel (with a warning)."""
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert/key pair comes from client_cert_source_callback.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/cloud-platform',
                    'https://www.googleapis.com/auth/spanner.data',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport])
def test_spanner_transport_channel_mtls_with_adc(
    transport_class
):
    """With client_cert_source=None, the mTLS channel credentials come from ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/cloud-platform',
                    'https://www.googleapis.com/auth/spanner.data',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_database_path():
    """database_path() renders the canonical database resource name."""
    proj, inst, db = "squid", "clam", "whelk"
    want = f"projects/{proj}/instances/{inst}/databases/{db}"
    assert SpannerClient.database_path(proj, inst, db) == want
def test_parse_database_path():
    """parse_database_path() is the inverse of database_path()."""
    parts = {
        "project": "octopus",
        "instance": "oyster",
        "database": "nudibranch",
    }
    rendered = SpannerClient.database_path(**parts)
    assert SpannerClient.parse_database_path(rendered) == parts
def test_session_path():
    """session_path() renders the canonical session resource name."""
    proj, inst, db, sess = "cuttlefish", "mussel", "winkle", "nautilus"
    want = f"projects/{proj}/instances/{inst}/databases/{db}/sessions/{sess}"
    assert SpannerClient.session_path(proj, inst, db, sess) == want
def test_parse_session_path():
    """parse_session_path() is the inverse of session_path()."""
    parts = {
        "project": "scallop",
        "instance": "abalone",
        "database": "squid",
        "session": "clam",
    }
    rendered = SpannerClient.session_path(**parts)
    assert SpannerClient.parse_session_path(rendered) == parts
def test_common_billing_account_path():
    """common_billing_account_path() renders billingAccounts/{id}."""
    acct = "whelk"
    assert SpannerClient.common_billing_account_path(acct) == f"billingAccounts/{acct}"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() is the inverse of common_billing_account_path()."""
    parts = {"billing_account": "octopus"}
    rendered = SpannerClient.common_billing_account_path(**parts)
    assert SpannerClient.parse_common_billing_account_path(rendered) == parts
def test_common_folder_path():
    """common_folder_path() renders folders/{folder}."""
    folder = "oyster"
    assert SpannerClient.common_folder_path(folder) == f"folders/{folder}"
def test_parse_common_folder_path():
    """parse_common_folder_path() is the inverse of common_folder_path()."""
    parts = {"folder": "nudibranch"}
    rendered = SpannerClient.common_folder_path(**parts)
    assert SpannerClient.parse_common_folder_path(rendered) == parts
def test_common_organization_path():
    """common_organization_path() renders organizations/{organization}."""
    org = "cuttlefish"
    assert SpannerClient.common_organization_path(org) == f"organizations/{org}"
def test_parse_common_organization_path():
    """parse_common_organization_path() is the inverse of common_organization_path()."""
    parts = {"organization": "mussel"}
    rendered = SpannerClient.common_organization_path(**parts)
    assert SpannerClient.parse_common_organization_path(rendered) == parts
def test_common_project_path():
    """common_project_path() renders projects/{project}."""
    project = "winkle"
    assert SpannerClient.common_project_path(project) == f"projects/{project}"
def test_parse_common_project_path():
    """parse_common_project_path() is the inverse of common_project_path()."""
    parts = {"project": "nautilus"}
    rendered = SpannerClient.common_project_path(**parts)
    assert SpannerClient.parse_common_project_path(rendered) == parts
def test_common_location_path():
    """common_location_path() renders projects/{project}/locations/{location}."""
    project, location = "scallop", "abalone"
    want = f"projects/{project}/locations/{location}"
    assert SpannerClient.common_location_path(project, location) == want
def test_parse_common_location_path():
    # Round-trip: a constructed path must parse back into its parts.
    parts = {"project": "squid", "location": "clam"}
    path = SpannerClient.common_location_path(**parts)
    assert SpannerClient.parse_common_location_path(path) == parts
def test_client_withDEFAULT_CLIENT_INFO():
    """A custom client_info must reach _prep_wrapped_messages on both
    construction paths: the client constructor and a directly-built
    transport."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Path 1: the client constructor forwards client_info to the transport.
    with mock.patch.object(transports.SpannerTransport, '_prep_wrapped_messages') as prep:
        SpannerClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Path 2: instantiating the transport class directly.
    with mock.patch.object(transports.SpannerTransport, '_prep_wrapped_messages') as prep:
        transport_cls = SpannerClient.get_transport_class()
        transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
3bed628c0eefc76fd507d162f5644a7d9a624739 | 29ca59f02597d1241817233def8fb433823f959c | /nth_node_from_end_of_linked_list.py | a3e494a301f2ff054cb0fe01baf9c84e07f3ed5c | [] | no_license | rupalisinha23/problem-solving | eebb5690cf226f49d8d3fd79fb7d1382d7d716a3 | 85e4eb2b5d762c99b34683dac0cb4ac42c1683dc | refs/heads/master | 2020-05-18T03:16:28.982448 | 2020-01-31T12:23:58 | 2020-01-31T12:23:58 | 184,141,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | class Node:
def __init__(self,val):
self.val = val
self.next = None
class Solution:
def remove_nth_node(self, head,n):
dummy = Node(0)
dummy.next = head
first = dummy
second = dummy
for i in range(n):
first = first.next
while(first.next) is not None:
first = first.next
second = second.next
else:
second.next = second.nex.next
return dummy.next
sol = Solution()
| [
"rupalisinha23@gmail.com"
] | rupalisinha23@gmail.com |
9cd5a7daa2bcddd413ea13e5c88b3ae9de3aece4 | 6b3870bb321d930c34c6f65f19091169425f4dee | /scripts/scil_print_header.py | aabdeebfb6da8f04cf27aa6d65dae5373a64ada6 | [
"MIT"
] | permissive | fred-laberge/scilpy | a15b3c525be892ca99b1dad25e35a3667ac116db | b0055521a87eb803876b8122eddc77608f520e89 | refs/heads/master | 2021-03-13T05:12:25.237411 | 2020-03-11T17:44:20 | 2020-03-11T17:44:20 | 246,642,805 | 0 | 0 | NOASSERTION | 2020-03-11T17:55:22 | 2020-03-11T17:55:22 | null | UTF-8 | Python | false | false | 1,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Print the raw header from the provided file or only the specified keys.
Support trk, nii and mgz files.
"""
import argparse
import pprint
import nibabel as nib
from scilpy.io.utils import assert_inputs_exist
from scilpy.utils.filenames import split_name_with_nii
def _build_args_parser():
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('in_file',
help='Input file (trk, nii and mgz).')
p.add_argument('--keys', nargs='+',
help='Print only the specified keys.')
return p
def main():
parser = _build_args_parser()
args = parser.parse_args()
assert_inputs_exist(parser, args.in_file)
_, in_extension = split_name_with_nii(args.in_file)
if in_extension in ['.tck', '.trk']:
header = nib.streamlines.load(args.in_file, lazy_load=True).header
elif in_extension in ['.nii', '.nii.gz', '.mgz']:
header = dict(nib.load(args.in_file).header)
else:
parser.error('{} is not a supported extension.'.format(in_extension))
if args.keys:
for key in args.keys:
if key not in header:
parser.error('Key {} is not in the header of {}.'.format(key,
args.in_file))
print(" '{}': {}".format(key, header[key]))
else:
pp = pprint.PrettyPrinter(indent=1)
pp.pprint(header)
if __name__ == "__main__":
main()
| [
"francois.m.rheault@usherbrooke"
] | francois.m.rheault@usherbrooke |
e07a47abfbe7d722114af4192739a875854c3f68 | aac55184b20defb682c781fd4bf7aae2a402d439 | /RedditAnalyzer.py | ce5d93ec2a226b043796109732da5f9b94701bbd | [] | no_license | bianchima/reddit_corona_scraper | 632824ed151cbf38d25621660352d681489850d3 | 6442c12e8b495c450b49ff760e6d25e0411ce34c | refs/heads/master | 2022-06-19T05:35:45.724944 | 2020-05-11T06:25:04 | 2020-05-11T06:25:04 | 262,848,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | import json
import operator
from datetime import datetime as dt
# Load from File
with open("data.json", "r") as fp:
data = json.load(fp)
words = {}
authors = {}
dates = {}
for key in data:
# Words in Post
title = data[key]["title"]
title_words = title.split()
for word in title_words:
if word.lower() not in words:
words[word.lower()] = 0
words[word.lower()] += 1
# Authors of Post
author = data[key]["author"]
if author not in authors:
authors[author] = 0
authors[author] += 1
# Date of Post
date_and_time = dt.strptime(data[key]["timestamp"], "%Y-%m-%d %H:%M:%S")
date = date_and_time.date()
date_string = dt.strftime(date, "%Y-%m-%d")
if date_string not in dates:
dates[date_string] = 0
dates[date_string] += 1
# Sort Words and Authors by value
sorted_words = sorted(words.items(), key=operator.itemgetter(1), reverse=True)
sorted_authors = sorted(authors.items(), key=operator.itemgetter(1), reverse=True)
# Save to Files
with open("data_words.json", "w") as fp:
json.dump(sorted_words, fp)
with open("data_authors.json", "w") as fp:
json.dump(sorted_authors, fp)
with open("data_dates.json", "w") as fp:
json.dump(dates, fp)
| [
"Nomentum@10.145.253.205"
] | Nomentum@10.145.253.205 |
2e83051cab98c6e966a89981f641b396e0630240 | efde9197a0a0ea1e11113e79bce87c3ded80573e | /hackerRank/cyclic binary string.py | 00f79a18ec9e5abbffb5f41d0b5d339823f58e5e | [] | no_license | dkarthicks27/ML_Database | bb370366e7b4d2ad06d992778c02815304a30f2d | 9f3856b1ac2aead5df4e3ef05e1800b1152f777e | refs/heads/master | 2021-10-27T10:45:47.258344 | 2021-10-24T10:42:55 | 2021-10-24T10:42:55 | 238,627,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | """Algorithm: 1011010
So it is actually left rotation
So what we can actually do is that we can probably shift each time convert to decimal and see if there exist a integer log for this number but I don’t know if this method is feasible
Take the input string save it to a variable original
1. Convert it to decimal and then check if there exist a positive log to the base 2 for this number
2. If it exist, store it as current value and also check if its greater than previous value if its replace it as the new value
3. Now check left shift the string and check if it is different from the original, if its different repeat the process else exist.
"""
from math import log2
from copy import deepcopy
def leftShift(string):
    """Rotate the string one position: the last character moves to the front."""
    return string[-1] + string[:-1]
def maximumPower(string):
    """Return the largest integer k such that some rotation of the binary
    string equals 2**k; 0 if no rotation is an exact power of two.

    The original duplicated the entire rotation loop in both branches of an
    if/else (the branches were identical apart from seeding the first check)
    and emitted verbose debug prints. This version checks every rotation
    once -- len(string) single-step rotations cover the full cycle,
    revisiting duplicate rotations harmlessly -- and drops the prints
    (return value is unchanged).
    """
    best = 0
    rotation = string
    for _ in range(len(string)):
        exponent = log2(int(rotation, 2))
        if exponent.is_integer():
            best = max(best, int(exponent))
        # Same single-step rotation leftShift() performs, inlined.
        rotation = rotation[-1] + rotation[:-1]
    return best
| [
"dkarthicks27@gmail.com"
] | dkarthicks27@gmail.com |
fdeba04ec009c89a271ec8cdbf952115c6138d06 | e7580d924666db728ea02611819f6cc0e5077ec3 | /accuracy.py | 66a275b7a12436fae14b5ccc913ca13a3d8a6858 | [] | no_license | lawauditswe/Pokemon-Battle | ee48c3625c2a2816d16c9cf321096a05280c3669 | 54585b466f39f9b986d745e4ea08e0934a67826e | refs/heads/main | 2023-07-07T22:16:40.707571 | 2021-08-25T20:27:34 | 2021-08-25T20:27:34 | 349,981,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # Accuracy checks
from random import randint
import random
def accuracy(move):
miss = random.randint(1,100)
if miss > move.accuracy:
return 0
return 1 | [
"noreply@github.com"
] | noreply@github.com |
276cdbfd909177b2431c7eae9a27253766a28570 | 654b2ece53bddd8c059cfe3e0e07d02c7dcfb6dd | /parladata/migrations/0023_alter_personmembership_role.py | 6fe9585d7c9949adb45d0d6aaab321d54cbf5884 | [] | no_license | danesjenovdan/parladata | a5bfa85b63f773a60ff393e6377192006da1eb67 | b2a4a5d622b9286f902edea817f4128dfad806fe | refs/heads/dev | 2023-08-16T20:26:26.851218 | 2023-07-07T15:34:48 | 2023-07-07T15:34:48 | 113,332,736 | 1 | 0 | null | 2023-07-11T08:39:57 | 2017-12-06T15:24:26 | Python | UTF-8 | Python | false | false | 483 | py | # Generated by Django 3.2 on 2021-05-12 19:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parladata', '0022_alter_law_passed'),
]
operations = [
migrations.AlterField(
model_name='personmembership',
name='role',
field=models.TextField(default='member', help_text='The role that the person fulfills in the organization', verbose_name='role'),
),
]
| [
"filipdobranic@gmail.com"
] | filipdobranic@gmail.com |
68d23317f6f0a24c9995428930f87cb7863815ca | 53c309591a91b014c30421e8af4dfe20eb532a3b | /testsite/settings.py | 4b7f1fec52669ed0e7ff8616bb73a147db004d8e | [] | no_license | codecampNick/TestSite | af4e2dea0077bfc1f6cc9084dcef2125bfa0bdfa | 1221c644ed5d4be3ad366dbf3eedda079e30edf6 | refs/heads/master | 2020-06-03T06:50:33.674154 | 2019-06-20T02:14:26 | 2019-06-20T02:14:26 | 191,269,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,228 | py | """
Django settings for testsite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): os.environ.get returns None when TS_KEY is unset, which makes
# Django fail at startup -- ensure the variable is exported everywhere.
SECRET_KEY = os.environ.get('TS_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): '*' accepts any Host header; prefer listing real hostnames
# for production deployments.
ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'testsite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directories searched before app templates.
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
            os.path.join(BASE_DIR, 'pageone/templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'testsite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    'pageone/static',
]
| [
"nick.jp.ross@gmail.com"
] | nick.jp.ross@gmail.com |
cd2488e6fb072ad9466b9e8656c5c2ce6f99929e | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script1161.py | d205b142ad1e49664fe6d443dda932a4ef1e8727 | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,556 | py |
# coding: utf-8
# # A Laconic Approach - EDA (Version 1)
# In this notebook, I will be doing exploratory data analysis for given data for text normalization in laconic fashion. The task given to us in this competition is to convert written text into spoken forms, for example - 6ft will be converted into 6 feets, or $1.13 will convert into one dollar and thirteen cents. I will work first by taking a glimpse of dataset shared and then go towards features extraction or rule based approach or selecting a RNN for this task. ** Lastly I will explain my analysis in laconic fashion, less words and more information**
#
# - PS - My Second public kernel, do upvote if you find this analysis useful.
#
#
# ## Loading packages
# In[ ]:
import pandas as pd #pandas for using dataframe and reading csv
import numpy as np #numpy for vector operations and basic maths
import re #for processing regular expressions
import datetime #for datetime operations
import calendar #for calendar for datetime operations
import time #to get the system time
import scipy #for other dependancies
from sklearn.cluster import KMeans # for doing K-means clustering
import math #for basic maths operations
import seaborn as sns#for making plots
import matplotlib.pyplot as plt # for plotting
import os # for operating system commands
import plotly.plotly as py # for Ploting
import plotly.graph_objs as go # for ploting
import plotly # for ploting
plotly.offline.init_notebook_mode() # for using plotly in offline mode
# ## Importing input data
# **Train** - The dataset provided has following fields
# 1. Setence_id - it signifies the id of sentence
# 2. Token id - it signifies the word's id inside that particular sentence
# 3. class - TBU
# 4. before/ after - they shows how the token is getting changed after
#
# **Test** - It won't have the field after
# In[ ]:
s = time.time()
train_df = pd.read_csv("../input/en_train.csv")
test_df = pd.read_csv("../input/en_test.csv")
end = time.time()
print("time taken by above cell is {}.".format(end -s))
train_df.head()
# In[ ]:
train_seq = train_df.copy() # storing an original copy for later use
# ## Sanity check -
# Let's check three things -
# 1. Number of rows in training and test dataets provided in this competition
# 2. Number of sentence in training and test dataets provided in this competition
# 3. Number of Nulls in training and test data and column wise nulls distribution
# In[ ]:
start = time.time()
print("Total number of rows in given training data is {}.".format(train_df.shape[0]))
print("Total number of sentence in given training data is {}".format(len(set(train_df.sentence_id))))
print("Total number of Nulls in given training data is \n{}.".format(train_df.isnull().sum()))
print("Total number of rows in given test data is {}.".format(test_df.shape[0]))
print("Total number of sentence in given test data is {}".format(len(set(test_df.sentence_id))))
print("Total number of Nulls in given test data is \n{}.".format(test_df.isnull().sum()))
end = time.time()
print("Time taken by above cell is {}.".format(end - start))
# ## Lets explore given variables in training data
# **1. Sentence_id and Token_id ** - Let's plot a hoistogram and check the number of words in a given sentence and their frequency
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
start = time.time()
sns.set(style="white", palette="muted", color_codes=True)
f, axes = plt.subplots(1, 1, figsize=(11, 7), sharex=True)
sns.despine(left=True)
temp_tr = pd.DataFrame(train_df.groupby('sentence_id')['token_id'].count())
sns.distplot(temp_tr['token_id'], axlabel = 'Number of words in a sentence', label = 'Number of words in a sentence', color="r")
plt.setp(axes, yticks=[])
plt.tight_layout()
end = time.time()
print("Min and Max of word per sentence is {} and {}.".format(temp_tr.token_id.min(),temp_tr.token_id.max()))
del temp_tr
print("Time taken by above cell is {}.".format((end-start)))
plt.show()
# **Findings**
# - From the above plot, it is clear that the most of the sentence has less than or equal to 30 tokenper sentence and very few sentence s have more than 30 token per sentence.
# - Minimum words per sentence is 2
# - Maximum words per sentence is 256
# **2. Class** - Lets make box plots of classes and check the distributions of class variable
# In[ ]:
start = time.time()
temp_tr = pd.DataFrame(train_df.groupby('class')['token_id'].count())
temp_tr = temp_tr.reset_index()
X = list(temp_tr['class'])
Y = list(temp_tr['token_id'])
data = [go.Bar(
x=X,
y=Y
)]
del temp_tr
plotly.offline.iplot(data, filename='basic-bar')
end = time.time()
print("Total number of different classes in training data is {}.".format(len(X)))
print("Time taken by above cell is {}.".format((end-start)))
# ** Findings - **
# - We can see that most frequent classes are plain, punct, letters, verbatim, date and cardinal ( total 6)
# - Rest 10 classes are occuring very less frequently
# - ** Class vaiable isn't present in test data => We have to assign class variable to test (you got it, right ? - cool ;) )**
# **3. Lets see change before/ after with class** -
# - Lets create a flag variable for token and check if before after is same or not
# - Summarize over class varaible and see the effect of class type on normalization
# In[ ]:
# Lets first assign a variable change as 0 and if there is any change we will modify this change varaible to 1
start = time.time()
def isChange(row):
    """Return 1 when normalization altered the token (before != after), else 0."""
    return 0 if row['before'] == row['after'] else 1
train_df['change'] = 0
train_df['change'] = train_df.apply(lambda row: isChange(row), axis = 1)
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
train_df.head()
# In[ ]:
start = time.time()
temp_chn = train_df.loc[train_df['change']==1]
temp_nchn = train_df.loc[train_df['change']==0]
temp_tr1 = pd.DataFrame(temp_chn.groupby('class')['token_id'].count())
temp_tr1 = temp_tr1.reset_index()
X1 = list(temp_tr1['class'])
Y1 = list(temp_tr1['token_id'])
temp_tr2 = pd.DataFrame(temp_nchn.groupby('class')['token_id'].count())
temp_tr2 = temp_tr2.reset_index()
X2 = list(temp_tr2['class'])
Y2 = list(temp_tr2['token_id'])
trace1 = go.Bar(
x=X1,
y=Y1,
name='Change'
)
trace2 = go.Bar(
x=X2,
y=Y2,
name='NO Change'
)
data = [trace1, trace2]
layout = go.Layout(
barmode='group'
)
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename='grouped-bar')
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
# ** Findings ** -
# - Most of the things that are getting changed are in all the difference classes but in plain and punct, and few in verbatim
# - Implies most of the data that is given to us in this competition is not changing and is redundent
# - **Cardinal is changing => cardinal is getting spoken in english, 24 becomes twenty-four**
# - ** Date is changing => date 2Jan or 2/01/2001 is spoken as second January two thousand one**
# - ** Same is the case with letters, time, telephone**
# **4. Class vs changes of token in sentence** - Lets plot changes in sentence grouped by over class and see the distribution using swarmplots in seaborn packages
# - first plot is when the token_id change is considered as it is
# - second plot, limit on y is set, for better visulization of data
# In[ ]:
start = time.time()
temp_tr = pd.DataFrame(train_df.groupby(['class', 'sentence_id', 'change'])['token_id'].count())
temp_tr.reset_index(inplace = True)
sns.set(style="ticks")
sns.set_context("poster")
sns.boxplot(x="class", y="token_id", hue="change", data=temp_tr, palette="PRGn")
plt.ylim(0, 150)
sns.despine(offset=10, trim=True)
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
# In[ ]:
start = time.time()
temp_tr = pd.DataFrame(train_df.groupby(['class', 'sentence_id', 'change'])['token_id'].count())
temp_tr.reset_index(inplace = True)
sns.set(style="ticks")
sns.set_context("poster")
sns.boxplot(x="class", y="token_id", hue="change", data=temp_tr, palette="PRGn")
plt.ylim(0, 15)
sns.despine(offset=10, trim=True)
end = time.time()
print(temp_tr['class'].unique())
print("Time taken by above cell is {}.".format((end-start)))
# In[ ]:
start = time.time()
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
temp_tr1 = pd.DataFrame(temp_chn.groupby('sentence_id')['token_id'].count())
temp_tr2 = pd.DataFrame(temp_nchn.groupby('sentence_id')['token_id'].count())
sns.distplot(temp_tr1['token_id'], ax=ax[0], color='blue', label='With Change')
sns.distplot(temp_tr2['token_id'], ax=ax[1], color='green', label='Without Change')
ax[0].legend(loc=0)
ax[1].legend(loc=0)
plt.show()
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
# **Findings ** -
# - From the above plot, it is clear that the distribution of sentences having change is somewhat similar to complete data
# - Distribution of data for which there is no change is completely different than dist of complete data
# In[ ]:
print("Fraction of token in complete data that are being changed are {}.".format(temp_tr1.shape[0]*100/train_df.shape[0]))
# **Findings ** -
# - Fraction of data that is being changed is around ~4%, and anyway plain class data is redundent, **be careful of the class**
# In[ ]:
# lets check overlap between train and test
train_list = train_df['before'].tolist()
test_list = test_df['before'].tolist()
s1 = set(train_list)
s2 = set(test_list)
common = s1.intersection(s2)
print("Common tokens between train and test is {}".format(len(common)/len(s2)))
# In[ ]:
def Assign(test, train):
    """Left-join the training before->after mapping onto the test tokens.

    Tokens never seen in training keep their 'before' form unchanged.

    Cleanup vs. the original: the unused ``token_dict`` build and the debug
    prints are removed, and ``train`` is no longer mutated in place
    (the original sorted/deduplicated the caller's frame with inplace=True).
    """
    # Keep the first 'after' per token after sorting by 'before', matching
    # the original inplace sort + drop_duplicates(keep='first').
    mapping = (train.sort_values('before', ascending=True)
                    .drop_duplicates(subset='before', keep='first')
                    [['before', 'after']])
    merged = pd.merge(test, mapping, how='left', on='before')
    # Unmapped tokens (NaN after the join) pass through untouched.
    merged['after'] = merged['after'].fillna(merged['before'])
    return merged
start = time.time()
sub = Assign(test_df, train_df)
end = time.time()
sub.head(5)
#sub1.shape[0]
# In[ ]:
def submission(row):
    """Compose the '<sentence_id>_<token_id>' submission id for a row."""
    return '{}_{}'.format(row['sentence_id'], row['token_id'])
sub['id'] = sub.apply(lambda row: submission(row), axis =1)
sub[['id', 'after']].to_csv("mahesh_common_token.csv", index = False)
# ## Data preprocessing for Seq2Seq Modeling using RNN
# My plan is now is to make a RNN for seq2seq modelling, As there can be contextual information and to capture that you must have the idea of context which can only be there is you are seeing sequences and not the words. Now for sequence to sequence modelling the first task is to convert the output sequence to correct output format.
# In[ ]:
# I am defining the functions and will work on it later when I get time
print(train_seq.head(2))
def words_to_sequence(train_sub):
    """Collapse token rows into one row per sentence, joining the 'before'
    and 'after' tokens with spaces.

    Rebuilt without ``DataFrame.append`` (removed in pandas 2.0) and
    without growing a frame inside the loop (which was quadratic): rows
    are collected in a list and converted once at the end.
    """
    rows = []
    for sid in train_sub.sentence_id.unique():
        sentence = train_sub.loc[train_sub['sentence_id'] == sid]
        rows.append({
            'sentence_id': sid,
            'before': ' '.join(str(w) for w in sentence.before),
            'after': ' '.join(str(w) for w in sentence.after),
        })
    return pd.DataFrame(rows, columns=['sentence_id', 'before', 'after'])
train_sub_seq = words_to_sequence(train_seq.loc[train_seq.sentence_id < 25].copy())
train_sub_seq.head(10)
# In[ ]:
def seq_to_words(seq_df):
    """Convert a sentence-level frame back to one-token-per-row form.

    NOTE(review): unfinished stub -- ``words_df`` is never defined, so any
    call raises NameError (the author's trailing comment says it will be
    finished later).
    """
    return(words_df)
# Will finish this function later..
# # To be continued ....
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
86c6bda0603c609d70afe20bb9b6a60d1be9327b | 7dfc850b1e82ba852c1b6f81153f499466faa426 | /database_imports/dataimport_symbols_Single.py | 1313e169132fd7a17ec7ba0543934ba76b18e179 | [] | no_license | ollieboard/CodeA | e06053989e55da7b468f0f567eb614995ffb0393 | 497e51a6d7f555ec9fb709f22fc04c57139f233a | refs/heads/master | 2020-12-14T16:03:53.764505 | 2017-06-12T16:45:16 | 2017-06-12T16:45:16 | 52,689,792 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # Python script for filling database from csv file
import sys, os, csv
# Full path and name to csv file
csv_filepathname="FSE-datasets-codes.csv"
# Full path to your django project directory
your_djangoproject_home="/home/oliver/Repositories/CodeA/codea/"
sys.path.append(your_djangoproject_home)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "codea.settings")
# This is so my local_settings.py gets loaded.
#os.chdir(proj_path)
# This is so models get loaded.
from django.core.wsgi import get_wsgi_application
stockplot = get_wsgi_application()
from stockplot.models import Stock
stock = Stock()
stock.name = 'Random'
stock.symbol = 'RAN.DOM'
stock.stockExchange = 'Frankfurt'
stock.source = 'Random'
stock.sourceName = 'Random'
stock.sourceSymbol = 'RAN.DOM'
print("Saving " + stock.name)
stock.save()
print("Done.")
| [
"ofeindt@hotmail.com"
] | ofeindt@hotmail.com |
721e3853f3a1799fc3552d4f8d2e08bdd76aa6cc | 54004129b8d5933aeed28e71aabf34daf2fb061a | /venv/untitled/hp/models.py | 4e468419d44d8983dc3c2615cd795f975c6d5aff | [] | no_license | eva990417744/zsxywwhp | 1e6c9a4e1831891630e9ea22b49a3c4eb3e18603 | 5a2d46ef68b9028fa30cd1f5a66b175b84cf39fc | refs/heads/master | 2022-12-07T14:06:20.942616 | 2016-11-02T18:08:05 | 2016-11-02T18:08:05 | 69,695,877 | 0 | 1 | null | 2022-11-18T03:41:59 | 2016-09-30T19:29:31 | Python | UTF-8 | Python | false | false | 4,860 | py | from django.db import models
class Excle(models.Model):
    # Uploaded spreadsheet record. The class name keeps the original
    # "Excle" misspelling of "Excel" for schema/migration compatibility.
    name = models.CharField(max_length=10)
    excle = models.FileField(upload_to='%Y/%m/%d/')  # stored under date-based folders
    add_time = models.DateField(auto_now=True)
class HP(models.Model):
    """A member's "hit points" record: blood starts at 50 and is adjusted
    by Buff.save() (+5) and DeBuff.save() (-10/-20/-30 by severity)."""
    name = models.CharField(default='', max_length=50, verbose_name='hp_name')
    blood = models.IntegerField(default=50)
    add_time = models.DateField(auto_now_add=True)
    change_time = models.DateField(auto_now=True)
    grade_choices = (('one', 'one'),
                     ('two', 'two'),
                     ('three', 'three'))
    grade = models.CharField(choices=grade_choices, default='one', max_length=100)

    def debuff(self, name_text):
        """Return the DeBuff records filed under ``name_text``."""
        q = DeBuff.objects.filter(name="%s" % name_text)
        return q

    def buff(self, name_text):
        """Return the Buff records filed under ``name_text``.

        Bug fix: this previously queried DeBuff (copy-paste from
        ``debuff``), so positive records were never returned.
        """
        q = Buff.objects.filter(name="%s" % name_text)
        return q

    def __str__(self):
        return self.name
class DeBuff(models.Model):
    """A negative record: saving one deducts blood from the HP row whose
    name matches (-10 Level1, -20 Level2, -30 Level3 reasons)."""
    key = models.ForeignKey(HP)
    name = models.CharField(max_length=50, default='', verbose_name='DeBuff_name')
    # Severity lookup tuples used by save().
    # Bug fix: Level1 previously listed '值班没带工作证', but the choice
    # value actually stored by DeBuff_Reason is '没带工作证', so that
    # reason never triggered a deduction. Both spellings are kept so any
    # historic rows still match.
    Level1 = ('没带工作证', '值班没带工作证', '遗漏报修', '工单超时', '私活影响值班', '不签到', '值班联系不上', 'Other1')
    Level2 = ('不接电话', '工单被投诉', '不回短信', '旷工', '不能单刷',
              '对女生言行不当', '私自以网维名义发布消息', '查到路由不反映', '攻击网络',
              '使用路由器影响正常上网',
              '态度消极', '泄露资料', '被教职员工投诉', 'Other2')
    Level3 = ('认知不清', '泄露私人号码', '借出工作证', '丢失工作证',
              '宣传路由', '出售IP', '分裂', 'Other3')
    # Grouped choices: (group label, ((stored value, display label), ...)).
    DeBuff_Choices = (
        ('Level1', (
            ('没带工作证', '没带工作证'),
            ('遗漏报修', '遗漏报修'),
            ('工单超时', '工单超时'),
            ('私活影响值班', '私活影响值班'),
            ('不签到', '不签到'),
            ('值班联系不上', '值班联系不上'),
            ('Other1', 'Other'),
        )),
        ('Level2', (
            ('不接电话', '不接电话'),
            ('工单被投诉', '工单被投诉'),
            ('不回短信', '不回短信'),
            ('旷工', '旷工'),
            ('不能单刷', '不能单刷'),
            ('对女生言行不当', '对女生言行不当'),
            ('私自以网维名义发布消息', '私自以网维名义发布消息'),
            ('查到路由不反映', '查到路由不反映'),
            ('攻击网络', '攻击网络'),
            ('使用路由器影响正常上网', '使用路由器影响正常上网'),
            ('态度消极', '态度消极'),
            ('泄露资料', '泄露资料'),
            ('被教职员工投诉', '被教职员工投诉'),
            ('Other2', 'Other2'),
        )),
        ('Level3', (
            ('认知不清', '认知不清'),
            ('泄露私人号码', '泄露私人号码'),
            ('借出工作证', '借出工作证'),
            ('丢失工作证', '丢失工作证'),
            ('宣传路由', '宣传路由'),
            ('出售IP', '出售IP'),
            ('分裂', '分裂'),
            ('Other3', 'Other3'),
        )),
    )
    DeBuff_Reason = models.CharField(choices=DeBuff_Choices, max_length=100)
    DeBuff_Text = models.CharField(max_length=100, default='none', blank=True)
    add_time = models.DateField(auto_now_add=True, null=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # NOTE(review): blood is deducted on *every* save, including edits
        # of an existing DeBuff -- confirm repeat-deduction is intended.
        super(DeBuff, self).save(*args, **kwargs)
        q = HP.objects.get(name=self.name)
        if self.DeBuff_Reason in self.Level1:
            q.blood -= 10
            q.save()
        elif self.DeBuff_Reason in self.Level2:
            q.blood -= 20
            q.save()
        elif self.DeBuff_Reason in self.Level3:
            q.blood -= 30
            q.save()
class Buff(models.Model):
    # A positive record: saving one awards +5 blood to the HP row whose
    # name matches this record's name (see save()).
    key = models.ForeignKey(HP)
    name = models.CharField(default='', max_length=50, verbose_name='Buff_name')
    Buff_Choices = (
        ('工作积极', '工作积极'),
        ('表现良好', '表现良好'),
        ('加班', '加班'),
        ('态度积极', '态度积极'),
        ('贡献想法', '贡献想法')
    )
    Buff_Reason = models.CharField(choices=Buff_Choices, max_length=100)
    Buff_Text = models.CharField(max_length=100, default='none', blank=True)
    add_time = models.DateField(auto_now_add=True, null=True)
    # NOTE(review): Level appears unused in this file -- verify before removing.
    Level = ('工作积极', '表现良好', '加班', '态度积极', '贡献想法',)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # NOTE(review): blood is incremented on *every* save, including
        # edits of an existing Buff -- confirm double-award is intended.
        super(Buff, self).save(*args, **kwargs)
        q = HP.objects.get(name=self.name)
        q.blood += 5
        q.save()
| [
"990417744@qq.com"
] | 990417744@qq.com |
96cdbb081e5c25bb5168b73c58283eac5eec2ae4 | 642dde87cfbfd67178a02ec9536ea627526058f7 | /bpfe/vectorizer/BPFEVectorizer.py | 6154201b05b7fb37d9654bd825ab3f764089a267 | [] | no_license | JesseBuesking/dd-bpfe | 3f463b5ee474740c5a47b5cd17a0e2609862fc0d | 9521205ec82baf661076ff41b58178d9e04723dc | refs/heads/master | 2020-12-25T18:16:30.177324 | 2015-01-10T05:45:01 | 2015-01-10T05:45:01 | 26,792,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,747 | py |
from collections import Counter
import re
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction import DictVectorizer
from bpfe.entities import Data
from bpfe.feature_engineering import all_ngrams_list
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
sbs = SnowballStemmer('english')
english_stopwords = stopwords.words('english')
class BPFEVectorizer(BaseEstimator, TransformerMixin):
    def __init__(self, ngrams=1, use_stemmer=False, use_grades=False,
                 use_titles=False, filter_stopwords=True, binarize=False,
                 min_df=None, max_df=None):
        """Bag-of-ngrams vectorizer over Data text fields.

        :param ngrams: max n-gram length extracted from each text field
        :param use_stemmer: apply the Snowball stemmer to tokens
        :param use_grades: add indicator features for data.grades
        :param use_titles: add indicator features for data.title
        :param filter_stopwords: trim English stopwords from n-gram edges
        :param binarize: emit 0/1 features instead of counts
        :param min_df: drop features seen in fewer than this many documents
        :param max_df: drop features seen in more than this many documents
        """
        self.dv = None  # lazily-fitted sklearn DictVectorizer
        self.ngrams = ngrams
        self.use_stemmer = use_stemmer
        self.use_grades = use_grades
        self.use_titles = use_titles
        self.filter_stopwords = filter_stopwords
        self.binarize = binarize
        self.min_df = min_df
        self.max_df = max_df
        self.counts = Counter()  # document frequency per feature key
    # noinspection PyUnusedLocal
    def fit(self, X, y=None):
        """No-op: all state is built lazily in fit_transform. Returns self."""
        return self
    def transform(self, X, y=None, **fit_params):
        # Delegates to fit_transform; the DictVectorizer is fitted only on
        # the first call (guarded by `self.dv is None`), so later calls
        # reuse the learned vocabulary. NOTE(review): self.counts still
        # accumulates on every call -- confirm that is intended for pure
        # transforms.
        return self.fit_transform(X, y, **fit_params)
def fit_transform(self, X, y=None, **fit_params):
tmp = []
for data in X:
feats = self.features(data)
tmp.append(feats)
for d in tmp:
for k in d.keys():
self.counts[k] += 1
if self.min_df is not None:
for i in range(len(tmp)):
d = tmp[i]
for k in d.keys():
if self.counts[k] < self.min_df:
del d[k]
if self.max_df is not None:
for i in range(len(tmp)):
d = tmp[i]
for k in d.keys():
if self.counts[k] > self.max_df:
del d[k]
if self.dv is None:
self.dv = DictVectorizer().fit(tmp)
ft = self.dv.transform(tmp)
return ft
def features(self, data):
feats = self.word_features(data)
if self.use_grades:
feats.update(self.grade_features(data))
if self.use_titles:
feats.update(self.title_features(data))
return feats
def grade_features(self, data):
d = dict()
for idx, grade in enumerate(data.grades):
if grade:
d['grade - {}'.format(idx)] = 1
return d
def title_features(self, data):
d = dict()
for idx, title in enumerate(data.title):
if title:
d['title - {}'.format(idx)] = 1
return d
def word_features(self, data):
d = dict()
for attr in Data.text_attributes:
value = data.cleaned[attr + '-mapped']
# value = getattr(data, attr)
b_o_w = []
for i in self.bow(value):
if self.use_stemmer:
i = sbs.stem(i)
b_o_w.append(i)
ng = all_ngrams_list(b_o_w, self.ngrams)
if self.filter_stopwords:
# trim the stopwords from either end
# e.g. salaries and wages -> salaries and wages
# salaries and -> salaries
new_ng = []
for i in ng:
while len(i) > 0 and i[0] in english_stopwords:
i = i[1:]
while len(i) > 0 and i[-1] in english_stopwords:
i = i[:-1]
if len(i) > 0:
new_ng.append(tuple(i))
# this helps a lot apparently
for i in range(len(new_ng)):
pre = []
suf = []
for j in new_ng[i]:
pre.append(j[:3])
suf.append(j[-3:])
new_ng += [tuple(pre)]
new_ng += [tuple(suf)]
# only keep the distinct occurrences
ng = list(set(new_ng))
else:
ng = [tuple(i) for i in ng]
if self.binarize:
for i in ng:
d[i] = 1
d[('|' + attr + '|',) + i] = 1
else:
for i in ng:
d[i] = d.get(i, 0) + 1
d[('|' + attr + '|',) + i] = d.get(i, 0) + 1
return d
def bow(self, string):
# return util.sentence_splitter(string)
for word in re.findall(
r'GRADE=k\|k|GRADE=k\|\d+|GRADE=\d+\|\d+|TITLE=\d+|\w+|\d+',
string
):
if 'GRADE=' in word or 'TITLE=' in word:
continue
yield word
| [
"jessebuesking@gmail.com"
] | jessebuesking@gmail.com |
a450a16b1200362256b6935d2e9e58dab3f43813 | ffa967abfb47af7a370f02a09dd0df14318c20a3 | /Modelling/Regression/Gradient Boosting.py | f6a1efbb99eaab0c297a6706364083c64894061b | [
"MIT"
] | permissive | fraunhofer-iais/IAIS-Python-Snippets | f564d4ba4f9ee633fbda1bb3aa00ed8970fac55c | a3ee610d6270cda2c891688851696c34831ffa2b | refs/heads/master | 2023-01-08T19:59:05.055937 | 2020-11-11T10:00:46 | 2020-11-11T10:00:46 | 293,539,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
# Hyper-parameter grid explored for the gradient-boosting regressor.
parameters = {'n_estimators':[2, 5, 10, 50, 100],
              'max_depth':[3, 4, 5],
              'learning_rate':[0.001, 0.01, 0.1]}
model = GradientBoostingRegressor()
# Exhaustive grid search with 3-fold cross-validation.
regs = GridSearchCV(model, parameters, cv=3)
# NOTE(review): X_train / y_train must be defined before this snippet runs --
# they are not created here; confirm the surrounding notebook/script provides them.
regs.fit(X_train, y_train)
# Best estimator, refit on the full training data by GridSearchCV.
reg = regs.best_estimator_
"hammam.abdelwahab@iais.fraunhofer.de"
] | hammam.abdelwahab@iais.fraunhofer.de |
d2ef8d920e4103f095845cc5539c6c544a985b29 | 10ea8247d4493496f940c565881d572bebd9acd5 | /MyParser.py | e8c2879f4a028a4d2cef5eb2e40b4a1f255a60ca | [] | no_license | arizalsaputro/simple-parser-tba | 6fbcd80d1a17c6c90b04c534098284d61eca7257 | 3040c155897e90a8af274d6d697a9709bce99e39 | refs/heads/master | 2021-08-23T08:53:14.154547 | 2017-12-04T11:27:22 | 2017-12-04T11:27:22 | 112,942,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py | #Tugas Tahap 1
import re

operator = ['+', '-', 'x', ':']

# A multi-character token is a valid number iff it matches: optional sign,
# digits, then any number of ",digits" (decimal comma) or "E[sign]digits"
# (exponent) groups -- exactly the grammar the original recursive
# implementation encoded as a hand-written state machine.
_NUMBER = re.compile(r'[+-]?\d+(?:,\d+|E[+-]?\d+)*')


def parse(inputan, index, output, hitungBuka, error):
    """Scan ``inputan`` from ``index`` and append one token class per
    space-separated token to ``output``.

    Token classes (each followed by a trailing space): ``num`` for numbers,
    ``opr`` for the operators + - x :, ``kurbuka``/``kurtutup`` for the
    parentheses, and ``error`` for anything else.  Scanning stops at the
    first error.  ``hitungBuka`` tracks the parenthesis balance (currently
    unused by callers).

    The signature is kept for backward compatibility with the original
    recursive version.  Rewritten iteratively because the original
    (a) hit RecursionError on long inputs, and (b) used identity
    comparisons (``is`` / ``is not``) on strings and on ints --
    ``count + 1 is not len(inputan)`` silently breaks for inputs longer
    than 256 characters due to CPython's small-int caching.
    """
    n = len(inputan)
    while not error and index < n:
        ch = inputan[index]
        if ch == " ":
            index += 1
            continue
        # Find the end of the current (space-delimited) token.
        end = index + 1
        while end < n and inputan[end] != " ":
            end += 1
        token = inputan[index:end]
        if len(token) > 1:
            # Multi-character tokens must be well-formed numbers.
            if _NUMBER.fullmatch(token):
                output += 'num '
            else:
                output += 'error '
                error = True
            index = end
        else:
            # Single-character token: operator, parenthesis, digit or error.
            if ch in operator:
                output += 'opr '
            elif ch == '(':
                output += 'kurbuka '
                hitungBuka += 1
            elif ch == ')':
                output += 'kurtutup '
                hitungBuka -= 1
            elif ch.isdigit():
                output += 'num '
            else:
                output += 'error '
                error = True
            index += 1
    return output
def doParse(inputan):
    """Convenience wrapper: tokenize ``inputan`` from a clean initial state."""
    initial_output = ""
    return parse(inputan, 0, initial_output, 0, False)
"muharizals@students.telkomuniversity.ac.id"
] | muharizals@students.telkomuniversity.ac.id |
ec342f342906642d2e195b79e2e610331c52b5c2 | 73bc168768a5f8bb139b53bd618ecf19cf61ac66 | /courses/courses/spiders/udacity.py | 5ba4add5845f5846ff109726c7ba8043505c928f | [] | no_license | rodrigo-ghiraldeli/udemy_python-com-scrapy | 9cd905457992b3804cebb79286dbd68f204e8cea | 6342a5da6afebdbe09c188918991efb0109587fc | refs/heads/master | 2022-12-03T18:29:40.887923 | 2020-08-19T00:42:27 | 2020-08-19T00:42:27 | 288,595,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | import scrapy
class UdacitySpider(scrapy.Spider):
    """Crawl the Udacity course catalogue and scrape each course's detail page."""

    name = 'udacity'
    start_urls = ['https://www.udacity.com/courses/all/']

    def parse_detail(self, response):
        """Yield title, headline, hero image and instructor cards for one course."""
        instructors = [
            {
                'name': card.xpath('.//h5/text()').extract_first(),
                'image': card.xpath('.//img/@src').extract_first(),
            }
            for card in response.xpath('//div[contains(@class, "card instructor")]')
        ]
        yield {
            'title': response.xpath('//h1/text()').extract_first(),
            'headline': response.xpath('//div[contains(@class, "legible")]/text()').extract_first(),
            'image': response.xpath('//div/div/section/div/div[2]/section[1]/div[1]').extract_first(),
            'instructors': instructors,
        }

    def parse(self, response):
        """Follow every course card on the catalogue page into parse_detail."""
        cards = response.xpath('//*[@id="__next"]/div/div/div[2]/div[2]/div/div[2]/main/div[2]/ul/li/article')
        for card in cards:
            anchor = card.xpath('.//a')
            href = anchor.xpath('./@href').extract_first()
            yield scrapy.Request(
                url=f'https://www.udacity.com{href}',
                callback=self.parse_detail,
            )
| [
"rodrigo.ghiraldeli@gmail.com"
] | rodrigo.ghiraldeli@gmail.com |
eb1e71aacc3892c3756d3e6efab1d5dbebcb4e7a | 4331279865c4b1262179068ba5ac85d8d75123b6 | /final/home/views/insurance.py | ae8d65e45714f7c174c4d5d0273a66627fdbf017 | [] | no_license | manankshastri/NYmed-Scripts | fb4633c19dadfdf982f127454a5dd643ba0f8a8b | 993af47223ca7cb38a2e9af88a2fc99baa7f3d88 | refs/heads/master | 2020-04-07T11:29:01.839909 | 2019-05-16T22:39:50 | 2019-05-16T22:39:50 | 158,328,115 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import (CreateView, ListView, DeleteView, DetailView, UpdateView)
from django.contrib.messages.views import SuccessMessageMixin
from ..decorators import insurance_required
from ..forms import InsuranceSignUpForm
from ..models import Insurance, User, Prescription, Patient
class InsuranceSignUpView(CreateView):
    """Sign-up view that creates an insurance user account and logs it in."""

    model = User
    form_class = InsuranceSignUpForm
    template_name = 'registration/signup_form.html'

    def get_context_data(self, **kwargs):
        # Tag the template context so the shared signup form can adapt.
        kwargs.update(user_type='insurance')
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        # Persist the new user, start a session for it, then go to the list.
        new_user = form.save()
        login(self.request, new_user)
        return redirect('insurance:insurance_list')
@login_required
@insurance_required
def InsuranceDetailView(request, pk):
    """Render the insurance detail page listing every patient.

    NOTE(review): ``pk`` is accepted (URLconf requirement) but never used --
    confirm whether the patient list should be filtered by it.
    """
    context = {'pat_all': Patient.objects.all()}
    return render(request, 'home/insurance/insurance_detail.html', context=context)
@method_decorator([login_required, insurance_required], name='dispatch')
class InsuranceListView(ListView):
    """List all Insurance records; requires a logged-in insurance user."""
    model = Insurance
    template_name = 'home/insurance/insurance_list.html'
template_name = 'home/insurance/insurance_list.html'
@login_required
@insurance_required
def InsurancePatientBillsView(request, pk):
    """Render every prescription (bill) for insurance review.

    NOTE(review): ``pk`` is unused and the queryset is unfiltered -- confirm
    whether bills should be restricted to one patient.
    """
    context = {'pat_all': Prescription.objects.all()}
    return render(request, 'home/insurance/insurance_patient.html', context=context)
@method_decorator([login_required, insurance_required], name='dispatch')
class InsuranceBillDetailView(DetailView):
    """Detail view for a single Prescription (bill); insurance users only."""
    model = Prescription
    template_name = 'home/insurance/insurance_bills.html'
| [
"manank.shastri@gmail.com"
] | manank.shastri@gmail.com |
230e44d52e7267776c7ad0dd6590c9ae3760fd4c | 87da2a20260a8ba81aef92c517a51b46dfc0689c | /luhyacloud/walrus/urls.py | 4745abff359740bdc0d79f4a219f0166d20384cb | [
"CC-BY-4.0"
] | permissive | cloudfirst/eduCloud | 244e84ce2242f33854ed9fe197414fc9ef3d8313 | ac45bcbcdb9dae60e6c29056a75776fee43c361d | refs/heads/master | 2021-12-01T17:02:56.260817 | 2020-03-17T13:01:21 | 2020-03-17T13:01:21 | 22,567,057 | 0 | 1 | null | 2017-10-10T12:24:20 | 2014-08-03T04:18:24 | HTML | UTF-8 | Python | false | false | 210 | py | from django.conf.urls import patterns, url
from walrus import views
urlpatterns = patterns('',
#url(r'^main/top/$', views.admin_top_view, name='main_top_view'),
# API 1.0
# Web Page
) | [
"4167023@qq.com"
] | 4167023@qq.com |
1083aa60275a7e289d9f75c45560aab1ae17ed3d | 5700c42a07f271b94ca2a647fbdb388b9c249469 | /pages/basket_page.py | c53c003ee205bfaa80f1083a4058196ee3df5629 | [] | no_license | aurika-pixel/stepik---auto-tests-course | 175503d2989d6ce7d8ab6cb9c8b3a356437401dc | cfc75e66eb7d1e39dfd8cff65c6d058f983dbf16 | refs/heads/master | 2021-08-10T13:20:20.089086 | 2020-04-13T17:35:49 | 2020-04-13T17:35:49 | 253,438,666 | 0 | 0 | null | 2021-06-02T01:26:33 | 2020-04-06T08:28:26 | Python | UTF-8 | Python | false | false | 425 | py | from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
def should_be_basket_is_empty_message(self):
assert self.is_element_present(*BasketPageLocators.BASKET_IS_EMPTY_MESSAGE), "message not exists!"
def should_not_be_product_in_basket_message(self):
assert self.is_not_element_present(*BasketPageLocators.PRODUCTS_IN_BASKET_MESSAGE), "message exists!"
| [
"aurikatu@mail.ru"
] | aurikatu@mail.ru |
91015339d5385a43b17af97fd72bce9f241c9932 | 41a9fcc8df963ffb3c58b196d0d5c8053cb08509 | /RegEx.py | 9959375b6ca3da6b3426232b556cfb6528d7e5b3 | [] | no_license | oun1982/gitrepo | 89c267b61e2dd555d049a8296b4949a7f1639621 | 73e4b5798f8e0ed805a7ee49d1e54cef2b7b51c1 | refs/heads/master | 2021-05-09T04:00:33.667791 | 2019-02-24T04:52:56 | 2019-02-24T04:52:56 | 119,258,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | __author__ = 'oun1982'
import re
# Demonstrate re.search: "cat" occurs in the sample sentence.
found = re.search("cat", "A cat is eat fish")
if found:
    print("Have a Cat")
else:
    print("Haven't Cat")
# Raw string literal: backslashes are printed verbatim.
print(r"\\\\!@_!")
| [
"oun1982@gmail.com"
] | oun1982@gmail.com |
e98ddaa42ba0de08f5058b25bd20b30991dc607c | fc87b90aebfef2fa618241bbf3a95fbe0e5c2f18 | /manufactureview/pt/apps.py | 94de0ae640b39ad003b7948167b403b9d6adac09 | [] | no_license | ofiryak12/ManufacturerRoom | ce31570247ae6848e4cf6f97cdeedf3a2fee5217 | d942defec030d9ae3415028423f3e071e7f0561a | refs/heads/master | 2023-07-15T05:00:28.661909 | 2021-08-30T06:28:08 | 2021-08-30T06:28:08 | 400,302,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | from django.apps import AppConfig
class PtConfig(AppConfig):
    """Django application configuration for the ``pt`` app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'pt'
| [
"ofiryak12@gmail.com"
] | ofiryak12@gmail.com |
5299c187799270f860c642046f4536a3f2a2a679 | 6ca758d6f2edad22e0029c283e65d5826f065461 | /nodes/freeplay_sandbox_supervisor/launcher.py | 3bd255c2c46dbfc5209122efa988b7f3270ec63f | [] | no_license | aaramirezd/web-supervisor | 2f98d1f2c060bb92275df008fb7a8ea35d0c617c | 1beed6cd86db6dc90dfc1c0b64012c1fd3941246 | refs/heads/master | 2020-04-11T04:35:17.567292 | 2017-06-28T14:40:06 | 2017-06-28T14:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,872 | py | import time
import sys
import subprocess
import psutil
import rospy
import roslaunch
import roslaunch.xmlloader
from roslaunch.core import RLException
from roslaunch.config import load_config_default
from rospkg.common import ResourceNotFound
class Launcher:
    """Wraps a single ROS launch file: argument discovery, start, liveness
    detection and shutdown of the corresponding ``roslaunch`` process.

    An instance may *attach* to a roslaunch process started elsewhere
    (e.g. on the command line) -- see isrunning().
    """

    def __init__(self, package, launchfile):
        """Parse ``launchfile`` (inside ROS package ``package``) to discover
        its declared <arg> defaults and documentation."""
        self.package = package
        self.launchfile = launchfile
        # e.g. "/path/foo_bar.launch" -> name "foo_bar", prettyname "foo bar"
        self.name = launchfile.split("/")[-1].split(".launch")[0]
        self.prettyname = self.name.replace("_", " ")
        self.reachable=True
        self.desc = ""
        loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)
        try:
            # Loading has the side effect of populating loader.root_context;
            # the returned config itself is not used.
            # NOTE(review): parse failures are swallowed silently, leaving
            # self.args empty -- presumably intended best-effort; confirm.
            config = load_config_default([self.launchfile],
                                         None,
                                         loader=loader,
                                         verbose=False,
                                         assign_machines=False)
        except ResourceNotFound as e:
            pass
        except RLException as e:
            pass
        # contains default values + documentation
        self.args = {}
        for arg, v in loader.root_context.resolve_dict.get('arg_doc', {}).items():
            doc, value = v
            ################
            # manual type checking (!!) -- ast.literal_eval fails badly on strings like '/dev/video1'
            if value:
                try:
                    value = int(value)
                except ValueError:
                    try:
                        value = float(value)
                    except ValueError:
                        if value.lower() == "false": value = False
                        elif value.lower() == "true": value = True
            ################
            # "default_value" flags that the value still equals the launch
            # file's default (only overridden args are passed on the cmdline).
            self.args[arg] = {"doc":doc,
                              "value": value,
                              "type": type(value).__name__,
                              "default_value": True
                              }
        self.has_args = bool(self.args)
        self.readytolaunch = self.checkargs()
        self.pid = None
        # Attach to an already-running roslaunch of this file, if any.
        self.isrunning()
        if(self.pid):
            rospy.logwarn("Launch file <%s> is already running. Fine, I'm attaching myself to it." % self.name)

    def checkargs(self):
        """Return True when every declared argument has a (non-None) value,
        i.e. the launch file can be started.
        """
        for k,v in self.args.items():
            if v["value"] == None:
                return False
        return True

    def setarg(self, arg, value):
        """Override one launch argument (marks it as manually modified)."""
        rospy.loginfo("Setting arg <%s> to <%s> for %s" % (arg, str(value), self.prettyname))
        if self.args[arg]["type"] == "bool":
            # special case for checkboxes: checked == 'on'; unchecked == 'off'
            self.args[arg]["value"] = True if (value is True or value.lower() == "true") else False
        else:
            self.args[arg]["value"] = value
        self.args[arg]["default_value"] = False # manually modified value!
        rospy.loginfo(str(self.args[arg]))
        self.readytolaunch = self.checkargs()

    def make_rl_cmd(self):
        """Build the roslaunch command line, passing only overridden args."""
        argcmd = [a + ":=" + str(v["value"]) for a,v in self.args.items() if not v["default_value"]]
        return ["roslaunch", self.package, self.name + ".launch"] + argcmd

    def start(self, stdout=sys.stdout, stderr=sys.stderr, env=None):
        """Spawn the roslaunch subprocess (no-op if already running or not
        ready); records its PID in self.pid."""
        if self.isrunning():
            rospy.logwarn("Launch file <%s> is already running. PID: %d" % (self.name, self.pid))
            return
        if self.reachable and self.readytolaunch:
            cmd = self.make_rl_cmd()
            rospy.loginfo("****************************")
            rospy.loginfo("Executing:")
            rospy.loginfo(" ".join(cmd))
            rospy.loginfo("****************************")
            self.pid = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, env=env).pid

    def isrunning(self):
        """Returns true if this launch file is running, False otherwise
        Set or update self.pid accordingly.
        Attention! The process might have been started by someone else! (like on the command-line)
        """
        proc = None
        if self.pid:
            try:
                proc = psutil.Process(self.pid)
                if proc.status() != psutil.STATUS_ZOMBIE:
                    return True
                else:
                    # cleanup zombie processes
                    self.shutdown(force=True)
            except psutil.NoSuchProcess:
                self.pid = None
        # Scan the whole process table for a roslaunch of this exact
        # package + launch file (attaches to externally started instances).
        roslaunch_processes = []
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=['pid', 'name','cmdline'])
                if pinfo["name"] == "roslaunch" and len(pinfo["cmdline"]) > 3:
                    roslaunch_processes.append(pinfo)
            except psutil.NoSuchProcess:
                # Process vanished between listing and inspection; ignore.
                pass
        for p in roslaunch_processes:
            # cmdline layout assumed: [python, roslaunch, <package>, <file>, ...]
            if p["cmdline"][2] == self.package \
               and p["cmdline"][3] == self.name + ".launch":
                self.pid = p["pid"]
        return True if self.pid is not None else False

    def shutdown(self, force=False):
        """Properly terminate the roslaunch process, starting with all the children, and
        then the main process.
        Kill them if necessary.
        TODO: how does that work with nodes marked as 'respawn'?
        """
        if force or self.isrunning():
            # NOTE(review): the parent gets a first terminate() *before* its
            # children are enumerated below -- confirm this ordering is
            # intentional (roslaunch handles SIGTERM by shutting down nodes).
            psutil.Process(self.pid).terminate()
            proc = psutil.Process(self.pid)
            children = psutil.Process(self.pid).children()
            for p in children:
                p.terminate()
            # Give children 15 s to exit gracefully, then kill stragglers.
            gone, still_alive = psutil.wait_procs(children, timeout=15)
            for p in still_alive:
                p.kill()
            proc.terminate()
            gone, still_alive = psutil.wait_procs([proc], timeout=15)
            for p in still_alive:
                p.kill()
"severin.lemaignan@plymouth.ac.uk"
] | severin.lemaignan@plymouth.ac.uk |
c882342fe074152a5d6c323af163893ccd169ee5 | 33dafb9bedaaa7a78a857ef6c6e77d4559c444cc | /mysite/mysite/settings.py | d45d021079ef5ad58b63c766001ea7f38fb9de2f | [] | no_license | geraldojosesanto/my-first-blog | 09e44a2fe3ef2c93b6266a6573ab2647380560f3 | 77159e58d4795ef89c15579ae1ffbd871b8c1189 | refs/heads/master | 2022-11-21T19:40:06.986886 | 2020-07-22T20:25:38 | 2020-07-22T20:25:38 | 281,736,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,089 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.14.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load from an environment variable before any production deployment.
SECRET_KEY = 'g+5cjyl_=q1x#1e=(2l^mo847sz0=e#%7!1on2#f8i66pbao($'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list hostnames in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: file-based SQLite next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"geraldojosesanto@hotmail.com"
] | geraldojosesanto@hotmail.com |
0e7c3b828694480c0b910383edc2dc5f6294ab81 | a728a685fa841388da0d27c8d596ce2178a60ad0 | /app/core/tests/test_models.py | 55151200fda78a0faad3fa52acf148c40fe6a526 | [
"MIT"
] | permissive | aikinyi/recipe-app-api | bd3c037acf650a09cdae35497c8e62b4988ad454 | 419ab18f715f66d044af125680ce3417f7af61f4 | refs/heads/main | 2023-03-19T13:46:50.341555 | 2021-03-20T22:39:36 | 2021-03-20T22:39:36 | 321,140,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
# Helper functions
def sample_user(email='test@gmail.com', password='test123456'):
return get_user_model().objects.create_user(email, password)
class ModelTest(TestCase):
"""
Creating Model TDD
"""
def test_create_user(self):
"""
Creating test user TDD function
"""
email = 'aikinyiltd@gmail.com'
password = '123456'
user = get_user_model().objects.create_user(
email=email,
password=password,
)
# Asserting the password and email
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_normalize_email(self):
"""
TDD for normalizing email
"""
email = 'aikinyiltd@GMAIL.COM'
user = get_user_model().objects.create_user(
email, 'aikinyiltd',
)
# Assertion on email normalization
self.assertEqual(user.email, email.lower())
def test_validate_user_email(self):
"""
Validating user email
"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'email address here')
def test_create_superuser(self):
"""
Creaating superuser
"""
user = get_user_model().objects.create_superuser(
'aikinyiltd@gmail.com',
'123abdcd'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""
Creating TDD for testing tag MODEL
"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Abdul'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""
TDD for testing creation of new ingredient
"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
| [
"learntoprogress@yahoo.com"
] | learntoprogress@yahoo.com |
bed170e3a61e169e68a386884050efbff4067342 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/labels_20200908183820.py | 429c534d5ee867e57cc47bc486a667f5c91d2405 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | def labels(S):
if len(S) == 0:
return 0
output_arr = []
last_indices = []
for i in range(len(S)):
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
c18989b9fc9e25bf40b4ac083d12d27f4d5d3a0e | 96b2009e5a3bcaa4a0a6bb699015e5d2d62e3ccc | /卷积神经网络+keras/__init__.py | c9f26105c126796a86360adc6512fe8d3d8fda08 | [] | no_license | zlszhonglongshen/NLP | 612d9f73ca017d387c48b8b3ebae24510dad6732 | 8f373f737b309c7441b516c0d408e43aebacff61 | refs/heads/master | 2022-07-24T20:18:12.465840 | 2019-11-27T09:06:57 | 2019-11-27T09:06:57 | 125,830,945 | 0 | 0 | null | 2022-07-15T20:19:20 | 2018-03-19T09:11:40 | Jupyter Notebook | UTF-8 | Python | false | false | 7,572 | py | #coding:utf-8
import os
import numpy as np
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense,Input,Flatten
from keras.layers import Conv1D,MaxPooling1D,Embedding
from keras.models import Model
from keras.optimizers import *
from keras.models import Sequential
from keras.layers import merge
import sys
BASE_DIR = 'E:/NLP/卷积神经网络+keras' #这里指定当前目录
GLOVE_DIR = BASE_DIR + '/glove.6B/' # 根据实际目录名更改
TEXT_DATA_DIR = BASE_DIR + '/news20/20_newsgroup/' # 根据实际目录名更改
MAX_SEQUENCE_LENGTH = 1000 #每个文本的最长选取程度,较短的文本可以设短一些
MAX_NB_WORDS = 20000 #整体词库字典中,词的多少,可以略微调大或者调小
EMBEDDING_DIM = 50 #词向量的维度,可以根据实际情况使用
VALIDATION_SPLIT = 0.4 #这里用作是测试集的比例,单词本身的意思是验证集
#first build index mapping words in the embedding set
#to their embedding vector 这段话是指简历一个词到向量之间的索引比如 peking 对应的词向量可能是(0.1,0,32,...0.35,0.5)等等。
print('Indexing word vectors...')
embedding_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.50d.txt'),encoding="utf-8") # 读入50维的词向量文件,可以改成100维或者其他
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:],dtype='float32')
embedding_index[word] = coefs
f.close()
print('Found %s word vectors.'%len(embedding_index))
#second prepare text samples and their labels
print('Processing text dateset') #下面主要是读入训练集和测试集
texts = [] #存储训练样本的list
labels_index = {} #词到词编号的字典,比如peking对应100
labels = [] #存储训练样本,类别编号的文本,比如文章A属于第一类文本
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR,name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path,fname)
if sys.version_info<(3,):
f = open(fpath)
else:
f = open(fpath,encoding='latin-1')
texts.append(f.read())
f.close()
labels.append(label_id)
print('Found %s texts.'%len(texts)) #输出训练样本的数量
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
# finally, vectorize the text samples into a 2D integer tensor,下面这段代码主要是将文本转换成文本序列,比如 文本'我爱中华' 转化为[‘我爱’,'中华'],然后再将其转化为[101,231],最后将这些编号展开成词向量,这样每个文本就是一个2维矩阵,这块可以参加本文‘<span style="font-size:18px;">二.卷积神经网络与词向量的结合’这一章节的讲述</span>
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.'%len(word_index))
data = pad_sequences(sequences,maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set,下面这段代码,主要是将数据集分为,训练集和测试集(英文原意是验证集,但是我略有改动代码)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples] # 训练集
y_train = labels[:-nb_validation_samples]# 训练集的标签
x_val = data[-nb_validation_samples:] # 测试集,英文原意是验证集
y_val = labels[-nb_validation_samples:] # 测试集的标签
print('Preparing embedding matrix.')
# prepare embedding matrix 这部分主要是创建一个词向量矩阵,使每个词都有其对应的词向量相对应
nb_words = min(MAX_NB_WORDS,len(word_index))
embedding_matrix = np.zeros((nb_words+1,EMBEDDING_DIM))
for word,i in word_index.items():
if i>MAX_NB_WORDS:
continue
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
#words not found in embedding index will be all_zeros
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# 神经网路的第一层,词向量层,本文使用了预训练glove词向量,可以把trainable那里设为False
embedding_layer = Embedding(nb_words+1,EMBEDDING_DIM,input_length=MAX_SEQUENCE_LENGTH,weights=[embedding_matrix],trainable=True)
#train a 1D convert with global maxpoolinnb_words
# left model 第一块神经网络,卷积窗口是5*50(50是词向量维度)
model_left = Sequential()
# model.add(Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32'))
model_left.add(embedding_layer)
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(5))
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(5))
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(35))
model_left.add(Flatten())
# right model <span style="font-family:Arial, Helvetica, sans-serif;">第二块神经网络,卷积窗口是4*50</span>
model_right = Sequential()
model_right.add(embedding_layer)
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(4))
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(4))
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(28))
model_right.add(Flatten())
# third model <span style="font-family:Arial, Helvetica, sans-serif;">第三块神经网络,卷积窗口是6*50</span>
model_3 = Sequential()
model_3.add(embedding_layer)
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(3))
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(3))
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(30))
model_3.add(Flatten())
merged = merge([model_left, model_right, model_3],mode='concat') # 将三种不同卷积窗口的卷积层组合 连接在一起,当然也可以只是用三个model中的一个,一样可以得到不错的效果,只是本文采用论文中的结构设计
model = Sequential()
model.add(merged) # add merge
model.add(Dense(128, activation='tanh')) # 全连接层
model.add(Dense(len(labels_index), activation='softmax')) # softmax,输出文本属于20种类别中每个类别的概率
# 优化器我这里用了adadelta,也可以使用其他方法
model.compile(loss='categorical_crossentropy',
optimizer='Adadelta',
metrics=['accuracy'])
# =下面开始训练,nb_epoch是迭代次数,可以高一些,训练效果会更好,但是训练会变慢
model.fit(x_train, y_train, nb_epoch=3)
score = model.evaluate(x_train, y_train, verbose=0) # 评估模型在训练集中的效果,准确率约99%
print('train score:', score[0])
print('train accuracy:', score[1])
score = model.evaluate(x_val, y_val, verbose=0) # 评估模型在测试集中的效果,准确率约为97%,迭代次数多了,会进一步提升
print('Test score:', score[0])
print('Test accuracy:', score[1])
| [
"593956670@qq.com"
] | 593956670@qq.com |
da0854259a59bf045f2fa9cfc0c521134bcc7d32 | e39e0a8676c8a5b24b37d949e3fe6c05e48ab817 | /client/file/messages.py | 1c042ac5c8bb9181ef4ddd30e10fa027b224218d | [] | no_license | Joselsneto/ECOTorrent | a3966c0d58a3880524977458ebc35ac297e67be7 | 50d1c076a2adcb0ce8476d58795e50b11c52775e | refs/heads/master | 2022-10-20T14:15:23.820944 | 2020-07-01T00:33:25 | 2020-07-01T00:33:25 | 265,071,710 | 0 | 0 | null | 2020-06-30T22:31:23 | 2020-05-18T21:54:48 | Python | UTF-8 | Python | false | false | 373 | py | class Messages:
    def __init__(self):
        # Stateless helper class: nothing to initialise.
        pass
def handshake(info_hash, peer_id):
hs = "146EcoTorrent0000000{}{}".format(info_hash, peer_id)
return hs
    def verifyHandshake(hs_message):
        # TODO: not implemented -- presumably should validate the fixed
        # "146EcoTorrent0000000" header of an incoming handshake; confirm
        # the intended contract before implementing.
        pass
def getInfoHash(hs_message):
return hs_message[20:84]
def getMessageType(message):
return message[0:4]
    def getNPiece(message):
        # Payload after the 4-char type field -- presumably the piece number;
        # confirm against the peer protocol.
        return message[4:]
"joseitamonte@hotmail.com"
] | joseitamonte@hotmail.com |
7ef7011e89b8d22c40c5908a8f87b85e6bf1c799 | cc1c69962b0bcbb3bd01523f5366a75c36933b58 | /condiconales_bucles/Condicionales_ejercicio.py | 04adef891aa9129980c55c50f5a57925d1662ef7 | [] | no_license | JuanLuisRepositorio/Bloque-1 | 6acd7ef3e740da3c010ead47593b9e0e07bcec30 | c32b025dc8a2eb6a6f31e98cdec8f25a38d2a268 | refs/heads/master | 2020-08-23T04:29:49.133595 | 2020-01-10T08:49:34 | 2020-01-10T08:49:34 | 216,543,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | #cambia los textos para que se muestre el mensaje de acceder al contenido solo si es mayor de edad o es un menor que esta con sus padres
edad = int(input("¿Cuántos años tiene? "))
if edad < 18:
padres = input("¿Estan tus padres contigo(S/N)? ")
if padres == "S":
print("No puedes acceder al contenido")
else:
print("Puedes acceder al contenido")
else:
print("No puedes acceder al contenido")
| [
"noreply@github.com"
] | noreply@github.com |
83af6f83dc179242e5d702547f4e5a827285eb53 | 7ba9ec427aac637aa25788971114b4520909b4da | /test_scripts/continuous_transmissionv3.py | 2df4b37c1d7efada2b9ffc941ba4dbcea319f19b | [] | no_license | matthew-william-lock/intelligent-spectrum-sharing | 95160dc5de3dfdf08285b55c4a162696972b4471 | 34c2174f8de0ddb24d3f7ac886625c7bbd7e1533 | refs/heads/master | 2022-12-26T06:12:31.116608 | 2020-10-13T11:53:28 | 2020-10-13T11:53:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,090 | py | import select
import socket
import time
import _thread
import threading
import getopt
import queue
import random
from time import sleep
import signal, os
import sys
sys.path.append('../../')
sys.path.append('../../communicator/python/')
from communicator.python.Communicator import Message
from communicator.python.LayerCommunicator import LayerCommunicator
import communicator.python.interf_pb2 as interf
# ********************************* Definitions *********************************
# Number of bits (TB) that can be transmitted in one suframe, i.e., 1 ms. These number are for a bandwidth of 5 MHz, i.e., 25 RBs.
NUM_BYTES_PER_SUBFRAME_VS_MCS_1MHz4 = [19,26,32,41,51,63,75,89,101,117,117,129,149,169,193,217,225,225,241,269,293,325,349,373,405,437,453,469,549]
NUM_BYTES_PER_SUBFRAME_VS_MCS_3MHz = [49,65,81,109,133,165,193,225,261,293,293,333,373,421,485,533,573,573,621,669,749,807,871,935,999,1063,1143,1191,1383]
NUM_BYTES_PER_SUBFRAME_VS_MCS_5MHz = [85,113,137,177,225,277,325,389,437,501,501,549,621,717,807,903,967,967,999,1143,1239,1335,1431,1572,1692,1764,1908,1980,2292]
NUM_BYTES_PER_SUBFRAME_VS_MCS_10MHz = [173,225,277,357,453,549,645,775,871,999,999,1095,1239,1431,1620,1764,1908,1908,2052,2292,2481,2673,2865,3182,3422,3542,3822,3963,4587]
NUM_BYTES_PER_SUBFRAME_VS_MCS_15MHz = [261,341,421,549,669,839,967,1143,1335,1479,1479,1620,1908,2124,2385,2673,2865,2865,3062,3422,3662,4107,4395,4736,5072,5477,5669,5861,6882]
NUM_BYTES_PER_SUBFRAME_VS_MCS_20MHz = [349,453,573,717,903,1095,1287,1527,1764,1980,1980,2196,2481,2865,3182,3542,3822,3822,4107,4587,4904,5477,5861,6378,6882,7167,7708,7972,9422]
# PHY Results.
PHY_UNKNOWN = 0
PHY_SUCCESS = 100
PHY_ERROR = 101
PHY_TIMEOUT = 102
# Physical Layer States.
PHY_UNKNOWN_ST = 0
PHY_RX_ST = 1
PHY_TX_ST = 2
# BW Indexes.
BW_UNKNOWN = 0 # unknown
BW_IDX_OneDotFour = 1 # 1.4 MHz
BW_IDX_Three = 2 # 3 MHz
BW_IDX_Five = 3 # 5 MHz
BW_IDX_Ten = 4 # 10 MHz
BW_IDX_Fifteen = 5 # 15 MHz
BW_IDX_Twenty = 6 # 20 MHz
# ************ Functions ************
# Global flag flipped by the SIGINT handler so worker loops can shut down cleanly.
tx_exit_flag = False

def handler(signum, frame):
    """Signal handler: request a graceful exit (installed for SIGINT in __main__)."""
    global tx_exit_flag
    tx_exit_flag = True

def getExitFlag():
    """Return True once a shutdown has been requested via the signal handler."""
    global tx_exit_flag
    return tx_exit_flag
# Lock + flag signalling that a TX statistics report has arrived from the PHY.
tx_stat_flag_lock = threading.Lock()
is_tx_stat_received = False

def get_is_stat_received():
    """Thread-safely read the 'TX stats received' flag."""
    tx_stat_flag_lock.acquire()
    global is_tx_stat_received
    flag = is_tx_stat_received
    tx_stat_flag_lock.release()
    return flag

def set_is_stat_received(flag):
    """Thread-safely write the 'TX stats received' flag."""
    tx_stat_flag_lock.acquire()
    global is_tx_stat_received
    is_tx_stat_received = flag
    tx_stat_flag_lock.release()
# Lock protecting the sequence number shared between the main loop and the
# TX-statistics thread.
txStatthreadLock = threading.Lock()
tx_sequence_number = 0

def set_seq_number(seq_num):
    """Thread-safely store the sequence number of the control message in flight."""
    # Get lock to synchronize threads
    txStatthreadLock.acquire()
    global tx_sequence_number
    tx_sequence_number = seq_num
    # Free lock to release next thread
    txStatthreadLock.release()

def get_seq_number():
    """Thread-safely read the sequence number currently expected in TX stats."""
    # Get lock to synchronize threads
    txStatthreadLock.acquire()
    global tx_sequence_number
    seq_num = tx_sequence_number
    # Free lock to release next thread
    txStatthreadLock.release()
    return seq_num
def help():
    """Print command-line usage (fixes 'pyhton3' typo and documents all options)."""
    print("Usage: python3 continuous_transmissionv3.py [options]")
    print("  -h, --help          show this help message and exit")
    print("  -d, --debug         enable debug output")
    print("  -p, --profile       enable TX profiling")
    print("  -s, --single        run source and sink on a single host")
    print("  -t, --txstats       wait for PHY TX statistics after each send")
    print("  -g, --gain <dB>     TX gain in dB (default: 30)")
    print("  -m, --mcs <idx>     MCS index 0-28 (default: 0)")
    print("  -e, --esperar <ms>  delay between transmissions in ms (default: 0)")
def printPhyTxStat(internal):
    """Pretty-print the TX PHY statistics carried by an Internal message."""
    stat = internal.sendr.phy_stat
    print("************************ TX PHY Stats Packet ************************")
    print("seq_number: ", internal.transaction_index)
    print("status: ", internal.sendr.result)
    print("host_timestamp: ", stat.host_timestamp)
    print("fpga_timestamp: ", stat.fpga_timestamp)
    print("frame: ", stat.frame)
    print("slot: ", stat.slot)
    print("ch: ", stat.ch)
    print("mcs: ", stat.mcs)
    print("num_cb_total: ", stat.num_cb_total)
    print("num_cb_err: ", stat.num_cb_err)
    print("power: ", stat.tx_stat.power)
    print("*********************************************************************\n")
def printBasicControl(basic_control, seq_number, tx_data):
    """Pretty-print a basic control command and, when it is a TX command, its payload."""
    print("******************** Basic Control CMD Transmitted ******************")
    print("trx_flag: ", basic_control.trx_flag)
    print("seq_number: ", seq_number)
    print("bw_index: ", basic_control.bw_index)
    print("ch: ", basic_control.ch)
    print("slot: ", basic_control.slot)
    print("mcs: ", basic_control.mcs)
    print("gain: ", basic_control.gain)
    print("length: ", basic_control.length)
    if basic_control.trx_flag == 1:
        # Payload is only meaningful for TX commands.
        print("data: ", [int(d) for d in tx_data])
    print("*********************************************************************\n")
def inputOptions(argv):
    """Parse command-line options.

    Returns a tuple (debug, profiling, single, wait_for_tx_stats,
    tx_gain_cmd_line, mcs_cmd_line, delay_after_tx).

    Bug fix: long options that take a value ("gain", "mcs", "esperar") must be
    declared with a trailing '=' or getopt rejects e.g. '--gain=40'.
    """
    debug = False              # by default debug is disabled.
    profiling = False          # by default profiling is disabled.
    single = False             # by default we use two computers to run the tests.
    wait_for_tx_stats = False
    tx_gain_cmd_line = 30      # by default 30 dB is applied.
    mcs_cmd_line = 0           # by default mcs is 0.
    delay_after_tx = 0.0       # by default there is no delay between transmissions.
    try:
        opts, args = getopt.getopt(argv, "hdpstg:m:e:",
                                   ["help", "debug", "profile", "single", "txstats",
                                    "gain=", "mcs=", "esperar="])
    except getopt.GetoptError:
        help()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            help()
            sys.exit()
        elif opt in ("-d", "--debug"):
            debug = True
        elif opt in ("-p", "--profile"):
            profiling = True
        elif opt in ("-s", "--single"):
            single = True
        elif opt in ("-t", "--txstats"):
            wait_for_tx_stats = True
        elif opt in ("-g", "--gain"):
            tx_gain_cmd_line = int(arg)
            print("New tx gain is:", tx_gain_cmd_line)
        elif opt in ("-m", "--mcs"):
            mcs_cmd_line = int(arg)
            print("New mcs is:", mcs_cmd_line)
        elif opt in ("-e", "--esperar"):
            delay_after_tx = float(arg)
            print("Sleep between tx:", delay_after_tx)
    return debug, profiling, single, wait_for_tx_stats, tx_gain_cmd_line, mcs_cmd_line, delay_after_tx
def sendTxCtrlToPhy(lc, chan, bandwidth, mcs_idx, gain_value, slots, sequence_number, module, data, debug):
    """Build a basic-control TX command carrying `data` and send it to the PHY layer.

    The payload must be exactly `slots` transport blocks for the selected
    bandwidth/MCS pair; the process exits with -1 on a length mismatch.
    """
    # Basic Control
    trx_flag = PHY_TX_ST  # TRX mode: PHY_TX_ST (2) selects transmission.
    send_to = 0
    seq_number = sequence_number  # Sequence number.
    bw_idx = bandwidth  # Bandwidth index (see BW_IDX_* constants).
    mcs = mcs_idx  # MCS varies from 0 to 28; the receiver detects it automatically.
    channel = chan  # Channel number.
    slot = 0  # Slot number (not used now, for future use)
    frame = 0  # Frame number.
    gain = gain_value  # Gain value; -1 means AGC mode.
    num_slots = slots  # Number of data slots to transmit.
    # Create an Internal message for TX procedure.
    internal = interf.Internal()
    # Add sequence number to internal message.
    internal.transaction_index = seq_number
    # Set basic control with proper values.
    internal.send.basic_ctrl.trx_flag = trx_flag
    internal.send.basic_ctrl.send_to = send_to
    internal.send.basic_ctrl.bw_index = bw_idx
    internal.send.basic_ctrl.ch = channel
    internal.send.basic_ctrl.frame = frame
    internal.send.basic_ctrl.slot = slot
    internal.send.basic_ctrl.mcs = mcs
    internal.send.basic_ctrl.gain = gain
    internal.send.basic_ctrl.length = num_slots*getTransportBlockSize(bw_idx, mcs)
    internal.send.app_data.data = data
    # Check size of transmited data.
    if(len(internal.send.app_data.data) != internal.send.basic_ctrl.length):
        print("Length of data is diffrent of field length.")
        sys.exit(-1)
    # Print the basic control structure sent to PHY.
    if(debug == True): printBasicControl(internal.send.basic_ctrl, internal.transaction_index, internal.send.app_data.data)
    # Send basic control to PHY.
    if(debug == True): print("Sending basic control to PHY.")
    lc.send(Message(module, interf.MODULE_PHY, internal))
def generateRandomData(num_slots, bw_idx, mcs):
    """Return num_slots transport blocks of uniformly random payload bytes."""
    total = num_slots * getTransportBlockSize(bw_idx, mcs)
    return bytes(random.randint(0, 255) for _ in range(total))
def generateData(num_slots, seq_number, bw_idx, mcs):
    """Return num_slots transport blocks in which every byte equals seq_number."""
    total = num_slots * getTransportBlockSize(bw_idx, mcs)
    return bytes([seq_number]) * total
def generateData2(num_slots, bw_idx, mcs):
    """Return num_slots transport blocks filled with the repeating byte pattern 0..255."""
    block = bytes(i % 256 for i in range(getTransportBlockSize(bw_idx, mcs)))
    return block * num_slots
def getTransportBlockSize(index, mcs):
    """Look up the transport block size in bytes for a bandwidth index and MCS.

    Returns -1 when the bandwidth index is unknown.
    """
    tables = {
        BW_IDX_OneDotFour: NUM_BYTES_PER_SUBFRAME_VS_MCS_1MHz4,
        BW_IDX_Three: NUM_BYTES_PER_SUBFRAME_VS_MCS_3MHz,
        BW_IDX_Five: NUM_BYTES_PER_SUBFRAME_VS_MCS_5MHz,
        BW_IDX_Ten: NUM_BYTES_PER_SUBFRAME_VS_MCS_10MHz,
        BW_IDX_Fifteen: NUM_BYTES_PER_SUBFRAME_VS_MCS_15MHz,
        BW_IDX_Twenty: NUM_BYTES_PER_SUBFRAME_VS_MCS_20MHz,
    }
    table = tables.get(index)
    return table[mcs] if table is not None else -1
def receiveTxStatisticsFromPhyThread(lc, tx_stat_queue, debug):
    """Worker thread: forward matching PHY TX statistics into tx_stat_queue.

    Only reports whose transaction_index matches the sequence number currently
    in flight are queued; a mismatch aborts the whole process with os._exit.
    """
    if(debug == True): print("Starting thread to wait for PHY TX Statistics from PHY.......")
    while(getExitFlag() == False):
        # Check if QUEUE is empty.
        if(lc.get_low_queue().empty() == False):
            try:
                #Try to get next message without waiting.
                msg = lc.get_low_queue().get_nowait()
                internal = msg.message
                if(internal.sendr.result > 0 and internal.sendr.result == PHY_SUCCESS):
                    if(internal.transaction_index < 100 and get_seq_number() == internal.transaction_index):
                        tx_stat_queue.put(internal)
                        set_is_stat_received(True)
                    else:
                        print("Result: ", internal.sendr.result)
                        print("Sequence number out of sequence, expecting:", get_seq_number(), "and received: ", internal.transaction_index, "exiting...............")
                        os._exit(-1)
            except queue.Empty:
                print("QUEUE is empty.");
        # Check is exit flag is set.
        if(getExitFlag() == True):
            print("Exit receiving function.")
            os._exit(0)
if __name__ == '__main__':
# Parse any input option.
debug, profiling, single, wait_for_tx_stats, tx_gain_cmd_line, mcs_cmd_line, delay_after_tx = inputOptions(sys.argv[1:])
# Set the signal handler.
signal.signal(signal.SIGINT, handler)
# decides if single or two host PCs.
if(single == False):
source_module = interf.MODULE_MAC # Make believe it is the MAC layer sending controls to PHY.
else:
source_module = interf.MODULE_DEBUG2
print("Create CommManager object.")
lc = LayerCommunicator(source_module, [interf.MODULE_PHY])
# Create a QUEUE to store data.
# Create a single input and a single output queue for all threads.
tx_stat_queue = queue.Queue()
channel = 0
bandwidth = BW_IDX_Five
mcs = mcs_cmd_line
gain = tx_gain_cmd_line # Good value for b200: 30 # for x310 use 200
num_slots = 1
seq_number = 0
num_of_tx = -1
data = generateRandomData(num_slots, bandwidth, mcs)
packet_counter = 0
if(wait_for_tx_stats == True):
# Start TX statistics thread.
# Wait for PHY TX Statistics from PHY.
try:
thread_id = _thread.start_new_thread( receiveTxStatisticsFromPhyThread, (lc, tx_stat_queue, debug, ) )
except:
print("Error: unable to start thread")
sys.exit(-1)
# Give the thread some time.
time.sleep(2)
while(True):
packet_counter = packet_counter + 1
# Send sequence number to TX stats thread.
set_seq_number(seq_number)
# Timestamp time of transmission.
if(profiling == True):
start = time.time()
# Generate data according to sequence number.
data = generateData(num_slots, seq_number+1, bandwidth, mcs)
# Send TX control information to PHY.
sendTxCtrlToPhy(lc, channel, bandwidth, mcs, gain, num_slots, seq_number, source_module, data, debug)
if(wait_for_tx_stats == True):
# Wait until TX statistics is received from PHY.
while(get_is_stat_received() == False and getExitFlag() == False):
pass
if(profiling == True):
# Timestamp time of reception.
end = time.time()
# Print time difference
print("TX Elapsed time:",end-start,"seconds")
# Remove TX stats from QUEUE so that it gets empty.
internal = tx_stat_queue.get()
if(debug == True): printPhyTxStat(internal)
# Set flag back to false.
set_is_stat_received(False)
if(packet_counter%300 == 0):
print("Packets sent:",packet_counter)
# Increment sequence number.
seq_number = (seq_number + 1)%100
# Wait sometime before transmitting again.
if(delay_after_tx > 0.0):
time.sleep(delay_after_tx/1000.0)
if(getExitFlag() == True or packet_counter == num_of_tx):
break
| [
"zz4fap@gmail.com"
] | zz4fap@gmail.com |
47de7604ce9f3dbba2dceae866ce17732272dc3e | b8c91ddeffe49d5aa2dcfd82d4cb5c2fb63f1c38 | /SC001/Homework/Assignment1/CheckerboardKarel.py | b6bce685148dfe4815e11a49b4552b3613b53763 | [] | no_license | astrochialinko/StanCode | 121ac5fb52f931c2148ca19c4a87b9f288dcdb38 | ec3a81ba94a4600ab20ebe8e31049bf3a372bae8 | refs/heads/master | 2023-05-27T22:36:21.979603 | 2021-06-13T14:26:31 | 2021-06-13T14:26:31 | 334,227,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | from karel.stanfordkarel import *
"""
File: CheckerboardKarel.py
Name: Chia-Lin Ko
----------------------------
This program instructs Karel to draw a checkerboard using
beepers, as described in Assignment 1.
The program works for all of the sample worlds provided in
the starter folder.
"""
def main():
"""
purpose:
This program instructs Karel to draw a checkerboard using
beepers, as described in Assignment 1.
pre-condition:
Karel is at (1,1), facing East
post-condition:
Karel is at (1,N), facing North
(or at (1,1), facing West, if only one avenue. But facing North for 1x1).
N is the total number of Street (last Street).
"""
fill_one_line_with_spatial_arrangement()
# facing East
back_to_left()
# facing North
while front_is_clear():
if not on_beeper():
move()
turn_right()
# facing East
else:
move()
if right_is_clear():
turn_right()
move()
# facing East
else:
move()
# facing North (for the case of one avenue only)
fill_one_line_with_spatial_arrangement()
# facing East (or North, if only one avenue)
back_to_left()
# facing North (or West, if only one avenue)
def fill_one_line_with_spatial_arrangement():
"""
purpose:
Karel will fill up a line with spatial arrangement
as described in Assignment 1.
pre-condition:
Karel is at the beginning of the line, facing East (or North, if only one avenue).
post-condition:
Karel is at the end of the line, facing East (or North, if only one avenue) .
"""
# facing East or North (if only one avenue)
put_beeper()
while front_is_clear():
if front_is_clear():
move()
if front_is_clear():
move()
put_beeper()
def back_to_left():
"""
pre-condition:
Karel is at the end of the line, facing East (or North, if only one avenue)
post-condition:
Karel is at the beginning of the line, facing North (or West, if only one avenue)
"""
turn_around()
# facing West (or South, if only one avenue)
while front_is_clear():
move()
turn_right()
# facing North (or East, if only one avenue)
def turn_right():
    """
    Turn Karel 90 degrees to the right by turning left three times.
    """
    turn_left()
    turn_left()
    turn_left()
def turn_around():
    """
    Turn Karel 180 degrees by turning left twice.
    """
    turn_left()
    turn_left()
# DO NOT EDIT CODE BELOW THIS LINE #
if __name__ == '__main__':
execute_karel_task(main)
| [
"chialinko@Chia-Linde-MacBook-Pro.local"
] | chialinko@Chia-Linde-MacBook-Pro.local |
c1315a888e3fa400a7a29daa027a4b8545ff0168 | f3aecb7313ad69241782bd95d2a492299ee554cb | /i3status/py3status/chargestatus.py | 940f90e2e4c47ac252a6d10ff8824134d15115ca | [] | no_license | Kalior/dotfiles | 24357f70eb301503b1cfe4f194a9ecd5853e4eed | ad3a3797ed13c7009ffd39f9bf635aeff3a0f6a2 | refs/heads/master | 2022-04-28T15:54:44.673206 | 2022-04-01T18:40:33 | 2022-04-01T18:40:33 | 51,951,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,731 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Display the battery level.
Configuration parameters:
- color_* : None means - get it from i3status config
- format : text with "text" mode. percentage with % replaces {}
- hide_when_full : hide any information when battery is fully charged
- mode : for primitive-one-char bar, or "text" for text percentage ouput
Requires:
- the 'acpi' command line
@author shadowprince, AdamBSteele
@license Eclipse Public License
"""
from __future__ import division # python2 compatibility
from time import time
import math
import subprocess
BLOCKS = ["", "", "", "", "", "", "", "", ""]
CHARGING_CHARACTER = ""
EMPTY_BLOCK_CHARGING = '|'
EMPTY_BLOCK_DISCHARGING = '⍀'
FULL_BLOCK = ''
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 30
color_bad = None
color_charging = "#C6572F"
color_degraded = None
color_good = "#52C62F"
format = "Battery: {}"
hide_when_full = False
mode = "bar"
notification = True
def battery_level(self, i3s_output_list, i3s_config):
response = {}
# Example acpi raw output: "Battery 0: Discharging, 43%, 00:59:20 remaining"
acpi_raw = subprocess.check_output(["acpi"], stderr=subprocess.STDOUT)
acpi_unicode = acpi_raw.decode("UTF-8")
# Example list: ['Battery', '0:', 'Discharging', '43%', '00:59:20', 'remaining']
acpi_list = acpi_unicode.split(' ')
charging = True if acpi_list[2][:8] == "Charging" else False
percent_charged = int(acpi_list[3][:-2])
self.time_remaining = ' ' + acpi_list[4] if len(acpi_list) > 4 else ""
battery_full = False
if self.mode == "bar":
if charging:
full_text = CHARGING_CHARACTER
#+ ' ' + str(percent_charged) + "%" + str(self.time_remaining)
else:
full_text = BLOCKS[int(math.ceil(percent_charged/100*(len(BLOCKS) - 1)))]
#+ ' ' + str(percent_charged) + "%" + str(self.time_remaining)
elif self.mode == "ascii_bar":
full_part = FULL_BLOCK * int(percent_charged/10)
if charging:
empty_part = EMPTY_BLOCK_CHARGING * (10 - int(percent_charged/10))
else:
empty_part = EMPTY_BLOCK_DISCHARGING * (10 - int(percent_charged/10))
full_text = full_part + empty_part
else:
full_text = self.format.format(str(percent_charged) + "%")
response["full_text"] = full_text
if percent_charged < 10:
response["color"] = (
self.color_bad
if self.color_bad
else i3s_config['color_bad']
)
if percent_charged < 30:
response["color"] = (
self.color_degraded
if self.color_degraded
else "#FF0000"
)
else:
response["color"] = (
self.color_good
if self.color_good
else i3s_config['color_good']
)
if percent_charged < 7 and not charging and self.notification:
subprocess.call(['twmnc',
'-t', 'Warning Low Battery', '-c', str(percent_charged) + '%',
'--bg',"#C02510",
'-d', '3000',
'-i', '~/.config/twmn/crit.png'],
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'))
if battery_full:
response["color"] = (
self.color_good
if self.color_good
else i3s_config['color_good']
)
response["full_text"] = "" if self.hide_when_full else BLOCKS[-1]
elif charging:
response["color"] = self.color_charging
#response["cached_until"] = time() + self.cache_timeout
return response
    def on_click(self, i3s_output_list, i3s_config, event):
        """
        Display a notification with the remaining charge time.
        """
        # time_remaining is populated by battery_level(); may be "" before the
        # first poll or when acpi reported no remaining-time field.
        if self.notification and self.time_remaining:
            subprocess.call(
                ['notify-send', '{}'.format(self.time_remaining), '-t', '4000'],
                stdout=open('/dev/null', 'w'),
                stderr=open('/dev/null', 'w')
            )
if __name__ == "__main__":
from time import sleep
x = Py3status()
config = {
'color_good': '#00FF00',
'color_bad': '#FF0000',
}
while True:
print(x.battery_level([], config))
sleep(1) | [
"kalioragus@gmail.com"
] | kalioragus@gmail.com |
cd604accecbe1e3a174eb64d58aa50cb702a0acf | 26771494974942f4ab18d2cd8247506c344e1d14 | /895-maximumFrequencyStack.py | 4a40053b7ed6952b9019de75037801c0192ff639 | [] | no_license | wangyunpengbio/LeetCode | 9f4c6076e067c5e847d662679483f737d40e8ca5 | cec1fd11fe43177abb2d4236782c0f116e6e8bce | refs/heads/master | 2020-04-29T22:28:25.899420 | 2020-04-03T07:37:26 | 2020-04-03T07:37:26 | 176,448,957 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | class FreqStack:
# 超时
def __init__(self):
from collections import defaultdict
self.stack = []
self.dic = defaultdict(int)
self.maxFrequency = 0
def push(self, x: int) -> None:
self.stack.append(x)
self.dic[x] = self.dic[x] + 1
self.calculateMaxFrequency()
def pop(self) -> int:
# print(self.stack,self.dic,self.maxFrequency)
for i in range(len(self.stack)-1,-1,-1):
# print(self.stack[i])
if self.dic[self.stack[i]] == self.maxFrequency:
self.dic[self.stack[i]] = self.dic[self.stack[i]] - 1
item = self.stack.pop(i)
break
self.calculateMaxFrequency()
return item
def calculateMaxFrequency(self):
self.maxFrequency = 0
for key,value in self.dic.items():
if value > self.maxFrequency:
self.maxFrequency = value
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop() | [
"wangyunpeng_bio@qq.com"
] | wangyunpeng_bio@qq.com |
5fb7177c8751886429fb169f1f3e13e7e8f37aa0 | 6c9017f9a7ff020e5a5931ae0746becf13df2080 | /worker/parent/testdata/printloop.py | 326c71015207eeaa70c8fb49b8917322697b7172 | [
"MIT"
] | permissive | FDlucifer/gocrack | 995bac075aa36452cce5417df6e93ba600b665b2 | e3b20e43e39d5fc0fbf3a1c652462559ebbec2cf | refs/heads/master | 2021-04-10T05:47:41.610567 | 2019-08-01T14:47:20 | 2019-08-01T14:47:20 | 248,914,857 | 1 | 1 | MIT | 2020-03-21T05:44:10 | 2020-03-21T05:44:09 | null | UTF-8 | Python | false | false | 494 | py | import argparse
import time
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prints numbers')
parser.add_argument('--max', type=int, default=5, help='max number to print')
parser.add_argument('--rc', type=int, default=0, help='return code to exit as')
args = parser.parse_args()
i = 0
while i < args.max:
sys.stdout.write('I: %d\n' % i)
sys.stdout.flush()
i += 1
time.sleep(2)
sys.exit(args.rc)
| [
"chris.schmitt@mandiant.com"
] | chris.schmitt@mandiant.com |
44ef95a9011192ce453982424085607440c05159 | b7bbbd60eb783208a4e7c8159d833526170b5a3c | /examples/log.py | f9812667113f1d6fb9e44c3ca0d5684a12ecbb60 | [
"ISC"
] | permissive | ericfaurot/py-app | a9a2923001b164968e8929f836d1333e5b74d0b3 | b0f25adf8b644073ef7074d24514ec35448a532e | refs/heads/master | 2018-10-12T07:41:40.367991 | 2018-09-19T16:52:39 | 2018-09-19T16:52:39 | 125,370,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import time
import getopt
import sys
import app.log
procname = None
foreground = False
logfile = None
opts, args = getopt.getopt(sys.argv[1:], "-df:p:")
for opt, arg in opts:
if opt == '-d':
foreground = True
elif opt == '-f':
logfile = arg
elif opt == '-p':
procname = arg
app.log.init(procname = procname,
foreground = foreground,
logfile = logfile)
app.log.info("starting")
for i in range(10):
app.log.info("%d...", i)
time.sleep(1)
app.log.info("done")
| [
"eric@faurot.net"
] | eric@faurot.net |
a7a58e508b74821e3264af3384076b5e256be0cd | c58bc356bce1d1c2f0b16357bdd4a193930e29b3 | /ddan/ddcn.py | 755ea2661e8328e5d2b0591e9c44d52f39e9c2f3 | [
"MIT"
] | permissive | spandandey21/ddan | f54067e2b92a52bc6250aac8c51aa358f9e9d5ba | 842fd4dc2d0be0d841863d98df4fc131deff4787 | refs/heads/master | 2022-02-19T14:23:00.050886 | 2019-09-09T16:08:18 | 2019-09-09T16:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,975 | py | import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from tensorflow.python.framework import ops
from keras import backend as K
from keras.layers import Dense, Dropout, Activation, GaussianNoise
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU, ELU, LeakyReLU
from utils import shuffle_aligned_list, batch_gen, val_batch_gen
from mmd import maximum_mean_discrepancy
class DDCNModel(object):
def __init__(self, nfeatures=50, arch=[8, 'act'], mmd_layer_idx=[1],
batch_size=16, supervised=False, confusion=0.0, confusion_incr=1e-3, confusion_max=1,
val_data=None, validate_every=1,
activations='relu', epochs=1000, optimizer=None, noise=0.0, droprate=0.0, verbose=True):
self.batch_size = batch_size
self.epochs = epochs
self.validate_every = validate_every
self.supervised = supervised
self.verbose = verbose
if val_data is None:
self.validate_every = 0
else:
self.Xval = val_data[0]
self.yval = val_data[1]
self._build_model(nfeatures, arch, supervised, confusion, confusion_incr,
confusion_max, activations, noise, droprate, mmd_layer_idx, optimizer)
self.sess = tf.Session()
K.set_session(self.sess)
self.sess.run(tf.global_variables_initializer())
def _build_model(self, nfeatures, architecture, supervised, confusion, confusion_incr, confusion_max,
activations, noise, droprate, mmd_layer_idx, optimizer):
self.inp_a = tf.placeholder(tf.float32, shape=(None, nfeatures))
self.inp_b = tf.placeholder(tf.float32, shape=(None, nfeatures))
self.labels_a = tf.placeholder(tf.float32, shape=(None, 1))
nlayers = len(architecture)
layers_a = [self.inp_a]
layers_b = [self.inp_b]
for i, nunits in enumerate(architecture):
print nunits,
if i in mmd_layer_idx: print '(MMD)'
else: print
if isinstance(nunits, int):
shared_layer = Dense(nunits, activation='linear')
elif nunits == 'noise':
shared_layer = GaussianNoise(noise)
elif nunits == 'bn':
shared_layer = BatchNormalization()
elif nunits == 'drop':
shared_layer = Dropout(droprate)
elif nunits == 'act':
if activations == 'prelu':
shared_layer = PReLU()
elif activations == 'elu':
shared_layer = ELU()
elif activations == 'leakyrelu':
shared_layer = LeakyReLU()
else:
shared_layer = Activation(activations)
layers_a += [shared_layer(layers_a[-1])]
layers_b += [shared_layer(layers_b[-1])]
y_logits = Dense(1, activation='linear', name='a_output')(layers_a[-1])
self.y_clf = Activation('sigmoid')(y_logits)
# Sum the losses from both branches...
self.xe_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels_a, logits=y_logits))
self.mmd_losses = []
for idx in mmd_layer_idx:
self.mmd_losses += [maximum_mean_discrepancy(layers_a[idx], layers_b[idx])]
self.domain_loss = tf.reduce_sum(self.mmd_losses)
self.confusion = tf.Variable(float(confusion), trainable=False, dtype=tf.float32)
conf_incr = tf.cond(self.confusion < confusion_max, lambda: float(confusion_incr), lambda: 0.)
self.increment_confusion = tf.assign(self.confusion, self.confusion + conf_incr)
self.total_loss = tf.add(self.confusion*self.domain_loss, self.xe_loss)
if supervised:
self.labels_b = tf.placeholder(tf.float32, shape=(None, 1))
b_logits = Dense(1, activation='linear', name='b_output')(layers_b[-1])
self.bloss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels_b, logits=b_logits))
self.total_loss = tf.add(self.total_loss, self.bloss)
if optimizer is None:
self.train_step = tf.train.MomentumOptimizer(1e-3, 0.9)
else:
self.train_step = optimizer
self.train_step = self.train_step.minimize(self.total_loss)
def predict_proba(self, X, batch_size=None):
if batch_size is None: batch_size = self.batch_size
yprobs = np.zeros((X.shape[0]), dtype=float)
idx = np.arange(X.shape[0])
vbatch = val_batch_gen([idx, X], batch_size)
for i, (thisidx, thisX) in enumerate(vbatch):
yprobs[thisidx] = self.sess.run(self.y_clf,
feed_dict={self.inp_a: thisX, K.learning_phase(): 0}).flatten()
return yprobs
def evaluate(self, X, y, batch_size=None):
yprobs = self.predict_proba(X, batch_size)
return log_loss(y, yprobs)
def fit(self, Xs, ys, Xt, yt=None, Xval=None, yval=None,
epochs=None, batch_size=None, verbose=None):
if epochs is None: epochs = self.epochs
if batch_size is None: batch_size = self.batch_size
if Xval is None:
Xval = self.Xval
yval = self.yval
if verbose is None: verbose = self.verbose
S_batches = batch_gen([Xs, ys], batch_size=batch_size)
if yt is None: yt = np.ones(Xt.shape[0])
T_batches = batch_gen([Xt, yt], batch_size=batch_size)
self.history = {'source_loss': [], 'target_loss': [], 'val_loss': [], 'domain_loss': []}
for i in range(epochs):
Xsource, ysource = S_batches.next()
Xtarget, ytarget = T_batches.next()
feed_dict = {self.inp_a: Xsource, self.inp_b: Xtarget,
self.labels_a: ysource.reshape(-1, 1), K.learning_phase(): 1}
if self.supervised:
feed_dict[self.labels_b] = ytarget.reshape(-1, 1)
# train
_, _, confusion, xeloss, dloss, tloss = self.sess.run([
self.train_step,
self.increment_confusion,
self.confusion,
self.xe_loss,
self.domain_loss,
self.total_loss],
feed_dict=feed_dict)
if self.validate_every > 0 and i % self.validate_every == 0:
if i == 0:
print 'Epoch confusion dloss sloss tloss vloss'
self.history['source_loss'] += [self.evaluate(Xs, ys)]
self.history['target_loss'] += [self.evaluate(Xt, yt)]
self.history['val_loss'] += [self.evaluate(Xval, yval)]
self.history['domain_loss'] += [dloss]
print '{:04d} {:.2f} {:.4f} {:.4f} {:.5f} {:.5f} {:.5f} '.format(i, confusion, dloss, tloss,
self.history['source_loss'][-1], self.history['target_loss'][-1], self.history['val_loss'][-1])
if __name__ == '__main__':
from sklearn.datasets import make_blobs
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
batch_size = 200
Xs, ys = make_blobs(300, centers=[[0, 0], [0, 1]], cluster_std=0.2)
Xt, yt = make_blobs(300, centers=[[1, -1], [1, 0]], cluster_std=0.2)
Xall = np.vstack([Xs, Xt])
yall = np.hstack( [ys, yt])
plt.scatter(Xall[:, 0], Xall[:, 1], c=yall)
plt.savefig('blobs.png')
plt.close()
print 'MMD:', compute_mmd_on_samples(Xs, Xt)
| [
"erlend.davidson@gmail.com"
] | erlend.davidson@gmail.com |
a16fd8e50b9c997067a44669d605721cbf30a699 | c82b0584f91a7a130718273ecf72039e2d5f9ab1 | /polyaxon_deploy/schemas/security_context.py | a6ce5946b5aed47c96e476bc8c5a116f43003948 | [
"MIT"
] | permissive | todokku/polyaxon-deploy | 7af770dac9fb9797b86e3bf6b5f1da477a751ba0 | 77828e028670c43cc74704a4d7b9ec2e661e10a4 | refs/heads/master | 2021-02-15T16:02:13.468664 | 2020-03-04T09:37:06 | 2020-03-04T09:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import ValidationError, fields, validates_schema
from polyaxon_deploy.schemas.base import BaseConfig, BaseSchema
def validate_security_context(user, group):
    """Ensure `user` and `group` are provided together (both set or both None).

    Uses explicit `is None` checks so that uid/gid 0 (root) counts as
    provided; the original truthiness test (`any`/`all` on ints) rejected 0.
    """
    if (user is None) != (group is None):
        raise ValidationError(
            "Security context requires both `user` and `group` or none.")
class SecurityContextSchema(BaseSchema):
enabled = fields.Bool(allow_none=True)
user = fields.Int(allow_none=True)
group = fields.Int(allow_none=True)
@staticmethod
def schema_config():
return SecurityContextConfig
@validates_schema
def validate_security_context(self, data):
validate_security_context(data.get('user'), data.get('group'))
class SecurityContextConfig(BaseConfig):
SCHEMA = SecurityContextSchema
REDUCED_ATTRIBUTES = ['enabled', 'user', 'group']
def __init__(self, enabled=None, user=None, group=None):
validate_security_context(user, group)
self.enabled = enabled
self.user = user
self.group = group
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
2829631b6c5dbf23e71b80e88443771bec8ef460 | 9bed8867761870372e955cd54b4d33b6cd59cd6c | /ch9/grid.py | dbe670c428cbd6a6af293fcd4682b7961bec3361 | [
"MIT"
] | permissive | fxyyy123/book_codes | 15b3e266e8b8edf1c85a9fff7292ead0c54cfde6 | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | refs/heads/master | 2023-06-02T04:44:58.525053 | 2020-05-18T05:24:57 | 2020-05-18T05:24:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | from tkinter import * #导入tkinter模块
window = Tk()  # create the main application window
window.title('Grid Example')  # set the window title

colors = ['red', 'green', 'light blue', 'yellow']

# Row 0: external padding (padx/pady) grows with the column index.
# (Plain loops replace the original list comprehensions, which were used only
# for their side effects and collected lists of None from .grid().)
for j in range(4):
    Label(window, font="Arial 18", text=' grid(%d,%d) ' % (0, j),
          bg=colors[j]).grid(row=0, column=j, padx=j * 5, pady=j * 5)

# Row 1: default grid() settings for comparison.
for j in range(4):
    Label(window, font="Arial 12", text='grid(%d,%d)' % (1, j),
          bg=colors[j]).grid(row=1, column=j)

# Row 2: sticky anchors each label to a different cell edge.
flags = [N, S, W, E]
for j in range(4):
    Label(window, font="Arial 12", text='grid(%d,%d)' % (2, j),
          bg=colors[j]).grid(row=2, column=j, sticky=flags[j])

# Row 3: internal padding (ipadx/ipady) grows with the column index.
for j in range(4):
    Label(window, font="Arial 12", text='grid(%d,%d)' % (3, j),
          bg=colors[j]).grid(row=3, column=j, ipadx=j * 5, ipady=j * 5)

# Enter the Tk event loop.
window.mainloop()
"python_programming@163.com"
] | python_programming@163.com |
2285f53714843af0c12d3e5bbffe3461fd1b8067 | eefc395d096b5849d66e4e3a5083a3fbd50719b6 | /MEG_experiment_script.py | e3246113a5c24d7e8801500281b2b8e248d99f49 | [] | no_license | gvilidaite/meg_contrast_discrimination | 0e522b83fa654ab89505cac2b342f2d138a9d214 | b58bbea861e4d7f403de19e1af200f819b0dd179 | refs/heads/master | 2020-03-22T16:44:59.347677 | 2018-07-09T22:27:46 | 2018-07-09T22:27:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,163 | py | # MEG contrast discrimination script
# Last edited 25/10/16
# INFO:
# The script is set up so you can quit at any time by pressing ESCAPE and then waiting for the next trial
# HOWEVER, when restarting the experiment, will need to change the number of blocks in the GUI (defaults is 4)
# Presents four identical blocks of randomised (beforehand) trials
# ISI and ITI are randomised
# A condition file is saved first so that if the experiment is quit and restarted, the same conditions can be used in the next blocks
# A data file for each block is saved
# And also a trigger file is saved for the whole session
# If the experiment is restarted, the new trigger file is saved with 'newsession' in the name of the file
# to do:
# - parallel port stuff
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import locale_setup, visual, core, data, event, logging, gui, monitors
# sound,
import parallel #NB not psychopy's parallel module -- FOR TRIGGERS AND RESPONSES
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
import random
import csv
import time as trackTime
#print monitors.calibTools.monitorFolder
## -- NB hard code monitor settings
monitors.calibTools.monitorFolder = '/groups/Projects/P1314/MEG/monitors/'
##
#print monitors.calibTools.monitorFolder
# GUI for inputting subject data:
subjectDetails = {}
subjectDetails['subjectNumber'] = ''
subjectDetails['gender'] = ''
subjectDetails['age'] = ''
subjectDetails['blocks'] ='4'
dlg = gui.DlgFromDict(subjectDetails, title='SubjectInfo', order=['subjectNumber','gender','age', 'blocks'])
if not dlg.OK:
print('User cancelled the experiment')
core.quit()
# SETTINGS *****************************
dopracticetrials = True
blocks = int(subjectDetails['blocks'])
reps = 160 # 320 of each contrast condition in total (160 per interval configuration)
totalBlockTrials = int(reps*6/blocks) # 240 (with grand total of 960)
MonFrameRate = 60.0 # give monitor refresh rate
MonSizeHor = 53.0 # centimeters horizontally
MonSizeVer = 41.0 # centimeters vertically
MonPixels = [1024.0, 768.0]
MonDist = 105.0 # participant distance from monitor
# stimulus specific settings
gratingSF = 10.0 # cycles in the stimulus (right now 10 cycles in a 10deg stimulus)
gratingSize = 10.0 # visual angle degrees
# calculating pixels per visual angle, ect
tempRes1 = (MonPixels[0])
tempRes2 = (MonPixels[1])
print tempRes1
print MonSizeHor
print MonDist
PixVisAng = 35#for MEG, at viewdist = 105cm
#PixVisAng2 = 35#for MEG, at viewdist = 105cm
# checking the calculated pixels per visual angle are consistent horizontally and vertically
#if PixVisAng != PixVisAng2:
# print "%%%%%%%%%%%% WARNING!!! %%%%%%%%%%%%"
# print " Pixel size is inconsistent "
# print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
# core.quit()
# Checking if we already have a condition file (this will happen if we unexpectedly quit the experiment previously)
conditionfile = './Results/S%s_conditions.npy' % (subjectDetails['subjectNumber'])
if os.path.isfile(conditionfile):
a = np.load(conditionfile)
contrast_g1 = a[0]
contrast_g2 = a[1]
correctResps = a[2]
indexing = a[3]
else:
# creating conditions
contrast_g1 = [0.5, 0.54, 0.66, 0.5, 0.5, 0.5]
contrast_g2 = [0.5, 0.5, 0.5, 0.5, 0.54, 0.66]
correctResps = ["1", "1", "1", "2", "2", "2"]
condCodes_g1 = [11, 21, 31, 11, 11, 11]
condCodes_g2 = [11, 11, 11, 11, 21, 31]
nooftiles = int(totalBlockTrials/len(correctResps))
contrast_g1 = np.tile(contrast_g1,nooftiles)
contrast_g2 = np.tile(contrast_g2,nooftiles)
correctResps = np.tile(correctResps,nooftiles)
condCodes_g1 = np.tile(condCodes_g1,nooftiles)
condCodes_g2 = np.tile(condCodes_g2,nooftiles)
indexing = np.random.permutation(range(int(totalBlockTrials)))
# saving the conditions for later use in case of problem during experiment:
conditionInfo = [] # making a list of four arrays
conditionInfo.append(contrast_g1)
conditionInfo.append(contrast_g2)
conditionInfo.append(correctResps)
conditionInfo.append(indexing)
np.save(conditionfile, conditionInfo)
# making a window
win = visual.Window(
size=MonPixels,
units="pix",
fullscr=True,
allowGUI=False,
monitor='MEG_Screen'
)
print (PixVisAng)
print (gratingSize)
print (gratingSF)
print gratingSize/(PixVisAng*gratingSF)
# making the gratings
grating1 = visual.GratingStim(
win=win,
size=[gratingSize*PixVisAng, gratingSize*PixVisAng],
mask="raisedCos",
maskParams = {'fringeWidth':0.2},
units="pixels",
ori=90,
sf = gratingSize/(PixVisAng*gratingSF) # 1/((PixVisAng*gratingSF)/gratingSize)
)
grating2 = visual.GratingStim(
win=win,
size=[gratingSize*PixVisAng, gratingSize*PixVisAng],
mask="raisedCos",
maskParams= {'fringeWidth':0.2},
units="pixels",
ori=90,
sf = gratingSize/(PixVisAng*gratingSF)
)
# Stimulus and interval length:
ISI = int(0.5*MonFrameRate)
ITI = 1.1
stimLength = round(0.1 * MonFrameRate) # 100ms times refresh rate equals the number of frames to present stimulus for
stimLength = int(stimLength)
# make/show the welcome screen + practice trials here
posint = int(tempRes2/8) # an eighth of the number of vertical pixels; used for positioning text on screen
welcometext = visual.TextStim(
win=win,
color=[1, 1, 1],
pos = [0, posint*2.5],
)
### --- set up trigger port
# AG code -- function to send a trigger
parport = parallel.Parallel('/dev/parport0') # address for parallel port on many machines
#MUST set the read/write mode in linux, 0=read 1=write
parport.setDataDir(1)
#set the parallel port data pins (2-9) to zero before we start
parport.setData(0)
### ---- set up reponse port
responsePort = parallel.Parallel('/dev/parport1') # right hand lumitouch response box
#MUST set the read/write mode in linux, 0=read 1=write
responsePort.setDataDir(0)
def getLumitouchResponseMEG():
while int(responsePort.getData()) == 0:
trackTime.sleep(0.001)
quitExp = event.getKeys(keyList=['escape'])
if quitExp:
print('User has exited the experiment')
win.close()
core.quit()
responseReceived = str(int(responsePort.getData())) # code currently expects a string
return responseReceived
def sendTriggerMEG(triggerValue):
#this should be called immediately after win.flip for any key stimulus
# takes an integer argument for the trigger value to be sent
parport.setData(int(triggerValue))
### --------------
# practice trials:
if dopracticetrials == True:
welcometext.text='Welcome to our MEG experiment!'
welcometext.draw()
welcometext.text='During the experiment you will see two gratings appear one after the other in the centre of the screen. You will need to choose the grating that is HIGHER in contrast.'
welcometext.pos= [0, posint]
welcometext.draw()
welcometext.text='Please wait until you have seen both gratings and then press LEFT button for the FIRST grating or RIGHT button for the SECOND grating.'
welcometext.pos= [0, 0]
welcometext.draw()
welcometext.text='Before the experiment begins you will have 5 practice trials with feedback (correct/incorrect). Note that you will not have this feedback during the actual experiment.'
welcometext.pos= [0, -(posint)]
welcometext.draw()
welcometext.text="We'll start some practice trials for you shortly (Enter)"
welcometext.pos= [0, -(posint*2)]
welcometext.draw()
win.flip()
event.waitKeys()
welcometext.text="Starting practice trials .... (Enter)"
welcometext.pos= [0,0]
welcometext.draw()
win.flip()
core.wait(3.0)
win.flip()
core.wait(1.5)
practiceContrast1 = [0.72, 0.5, 0.64, 0.58, 0.5]
practiceContrast2 = [0.5, 0.68, 0.5, 0.5, 0.66]
practiceCorr = ['1', '2', '1', '1', '2']
for practice in range(5):
# setting up grating contrast# to do trigger levels:
grating1.contrast = practiceContrast1[practice]
grating2.contrast = practiceContrast2[practice]
correctResp = practiceCorr[practice]
# showing stimuli:
for run1 in range(stimLength):
grating1.draw()
win.flip()
for run2 in range(ISI):
win.flip()
for run3 in range(stimLength):
grating2.draw()
win.flip() # makes second grating appear
win.flip()
clock = core.Clock() # make a clock
##practkeypress = event.waitKeys(keyList=['1','2'],timeStamped=clock) # waiting for key to be pressed
##practkeytime = practkeypress[0][1]
##practkeytime = round(practkeytime, 3)
##practrespKey = practkeypress[0][0]
practrespKey = getLumitouchResponseMEG()
practkeytime = clock.getTime()
# make sure it waits the correct amount of time before starting a new trial:
if practkeytime < ITI:
timewait = ITI - practkeytime
core.wait(timewait)
# correct or incorrect?
if practrespKey == correctResp:
practtext = 'Correct!'
practcol = [-1, 0, -1]
else:
practtext = 'Incorrect'
practcol = [0, -1, -1]
win.flip()
welcometext.text = practtext
welcometext.color = practcol
welcometext.pos = [0, 0]
welcometext.draw()
win.flip()
core.wait(1)
win.flip()
core.wait(1)
# end of practice screen
welcometext.text = 'End of practice. The experiment is wil start soon. Remember:'
welcometext.color = [1, 1, 1]
welcometext.pos = [0, posint]
welcometext.draw()
welcometext.text = '- There will be no feedback and so the trials will come in quick succession'
welcometext.pos = [0, 0]
welcometext.draw()
welcometext.text = '- Press LEFT for FIRST grating'
welcometext.pos = [0, -(posint/2)]
welcometext.draw()
welcometext.text = '- Press RIGHT for SECOND grating'
welcometext.pos = [0, -(posint)]
welcometext.draw()
welcometext.text = '**** We will start the experiment for you soon ... (Enter)****'
welcometext.pos = [0, -(posint*3)]
welcometext.draw()
win.flip()
event.waitKeys()
win.flip()
core.wait(1.5)
win.flip()
welcometext.pos = [0,0]
welcometext.text = '**** Get ready to start ****'
welcometext.draw()
win.flip()
core.wait(2.5)
win.flip()
core.wait(2.5)
if (blocks == 4):
triggerfile = 'Results/S%s_MEGtriggers.csv' % (subjectDetails['subjectNumber'])
elif (blocks < 4):
triggerfile = 'Results/S%s_MEGtriggers_newsession.csv' % (subjectDetails['subjectNumber'])
# making the trigger file:
t = open(triggerfile, 'w')
# Writing column headings to the trigger file
t.write('Subject number, %s\n\n\n' % (subjectDetails['subjectNumber']))
t.write('Trigger code, Timing\n')
# start of block loop
for bl in range(blocks):
print bl
if (blocks == 4):
# making a normal new data file for saving conditions and responses; also a separate file for triggers
datafile = 'Results/S%s_contrastMEG_%s.csv' % (subjectDetails['subjectNumber'], str(bl))
elif (blocks < 4):
# correcting block numbers if we are running less than 4 blocks:
blockcorrect = 4-blocks
datafile = 'Results/S%s_contrastMEG_%s.csv' % (subjectDetails['subjectNumber'], str((bl+blockcorrect)))
else:
print 'Max number of blocks exceded'
core.quit()
print datafile
# Writing column headings to the data file
d = open(datafile, 'w')
d.write('Subject number, %s\n' % (subjectDetails['subjectNumber']))
d.write('Gender, %s\n' % (subjectDetails['gender']))
d.write('Age, %s\n\n\n' % (subjectDetails['age']))
d.write('Trial,Grating1,Grating2,CorrResp,Resp,IsCorr,RT\n')
#-----------------send start of block trigger here-----------------
expclock = core.Clock() # make a clock
trigger = 1
sendTriggerMEG(trigger)
time = expclock.getTime()
t.write('%d, %3.6f\n' % (trigger, time))
# reset trigger after
win.flip()
trigger = 0
sendTriggerMEG(trigger)
#------------------------------------------------------------------
# trial loop
for trial in range(totalBlockTrials): # range(totalBlockTrials*blocks) # minus the placeholder so that we run fewer trials when we have already done some
# length of presentations:
ISI = round((0.6 + random.randrange(0, 1000)/5000) * MonFrameRate) # picks a random length (0.6 - 0.8s) with millisecond precision
ITI = round((1 + random.randrange(0, 1000)/5000)) # picks a random length (1 - 1.2s) with millisecond precision
# ITI is in seconds and not frames because easier to wait for keys
ISI = int(ISI)
# setting up grating contrast levels:
g1 = contrast_g1[indexing[trial]]
g2 = contrast_g2[indexing[trial]]
grating1.contrast = g1
grating2.contrast = g2
correctResp = correctResps[indexing[trial]]
# showing stimuli:
#-----------------send first interval trigger here-----------------
trigger = condCodes_g1[indexing[trial]]
#------------------------------------------------------------------
curCount=0
for run1 in range(stimLength):
grating1.draw()
win.flip()
if curCount == 0:
time = expclock.getTime()
t.write('%d, %3.6f\n' % (trigger, time))
sendTriggerMEG(trigger) #trigger code
curCount=1
trigger = 0
curCount=0
for run2 in range(ISI):
win.flip()
if curCount == 0:
sendTriggerMEG(trigger) #trigger code
curCount=1
#-----------------send second interval trigger here-----------------
trigger = condCodes_g2[indexing[trial]]
#------------------------------------------------------------------
curCount=0
for run3 in range(stimLength):
grating2.draw()
win.flip() # makes second grating appear
if curCount == 0:
time = expclock.getTime()
t.write('%d, %3.6f\n' % (trigger, time))
sendTriggerMEG(trigger) #trigger code
curCount=1
win.flip()
#reset trigger to zero
trigger = 0
sendTriggerMEG(trigger) #trigger code
clock = core.Clock() # make a clock
##keypress = event.waitKeys(keyList=['1','2'],timeStamped=clock) # waiting for key to be pressed
##keytime = keypress[0][1]
##keytime = round(keytime, 6)
##respKey = keypress[0][0]
respKey = getLumitouchResponseMEG()
keytime = clock.getTime()
# correct or incorrect?
if respKey == correctResp:
correct = 1
trigger = 101 # correct
else:
correct = 0
trigger = 201 # incorrect
### log event as a trigger
##sendTriggerMEG(trigger) #trigger code
time = expclock.getTime()
t.write('%d, %3.6f\n' % (trigger, time))
# make sure it waits the correct amount of time before starting a new trial:
if keytime < ITI:
timewait = ITI - keytime
core.wait(timewait)
# if we click escape, quit the experiment:
quitExp = event.getKeys(keyList=['escape'])
if quitExp:
print('User has exited the experiment')
win.close()
core.quit()
# saving the data into our csv file
d.write('%d, %.2f, %.2f, %s, %s, %d, %1.3f\n' % ((trial+1), g1, g2, correctResp, respKey, correct, keytime))
# if all the trials in the block are done, do this:
if (trial == (totalBlockTrials-1)):
# if all the blocks are done too, quit:
if (bl == (blocks-1)):
text = visual.TextStim(win, text='End of experiment\n\nPLEASE KEEP STILL UNTIL WE TELL YOU TO MOVE.\n\n Thank you. (Enter to end)')
d.close()
t.close()
text.draw()
win.flip()
event.waitKeys()
win.close()
else:
# if not, move on to next block:
text = visual.TextStim(win, text='End of block. Click to start a new block once you have rested')
d.close()
text.draw()
win.flip()
##event.waitKeys()
respKey = getLumitouchResponseMEG()
win.flip()
| [
"vilidaite@gmail.com"
] | vilidaite@gmail.com |
bda797b01eb94234034539602752492f10e15b46 | f1acc7a7c1ec6ea6545816c65655ab664ce85197 | /website/apps/clothing/migrations/0004_auto.py | 60721c07996441f32a702517cdf851ab1432c8b9 | [
"MIT"
] | permissive | saeschdivara/myclothing | be3727e148ade6844a23bc98461bdaab7f510921 | aef10e165c7e767028e8cf8a2ac7d01013231a9e | refs/heads/master | 2020-04-05T22:49:41.969650 | 2014-05-16T06:02:05 | 2014-05-16T06:02:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field clothing_time on 'Clothing'
m2m_table_name = db.shorten_name(u'clothing_clothing_clothing_time')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('clothing', models.ForeignKey(orm['clothing.clothing'], null=False)),
('clothingtime', models.ForeignKey(orm['clothing.clothingtime'], null=False))
))
db.create_unique(m2m_table_name, ['clothing_id', 'clothingtime_id'])
def backwards(self, orm):
# Removing M2M table for field clothing_time on 'Clothing'
db.delete_table(db.shorten_name(u'clothing_clothing_clothing_time'))
models = {
'clothing.clothing': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Clothing'},
'clothing_time': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'clothes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['clothing.ClothingTime']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'clothing.clothingtime': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ClothingTime'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
}
}
complete_apps = ['clothing'] | [
"sascha.haeusler@netzbarkeit.ch"
] | sascha.haeusler@netzbarkeit.ch |
4c374d623b41f4b08ccaf0d7c3dc45adefcbee20 | 233928d206e13e068cf8cb5ff7888c9a2d84ad61 | /BOJ/BOJ_2920_음계.py | d99e9a4bb4060c1a3c802597873370a6c6437450 | [] | no_license | Jinwoongma/Algorithm | 7f6daa2d3c2c361059c09fb4fe287b1cce4863e2 | 78803f4572f1416451a9f4f31f53b7d653f74d4a | refs/heads/master | 2022-10-07T22:53:20.333329 | 2020-06-07T13:27:47 | 2020-06-07T13:27:47 | 237,114,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | data = list(map(int, input().split()))
if data == list(range(1, 9)):
print('ascending')
elif data == list(range(8, 0, -1)):
print('descending')
else:
print('mixed') | [
"jinwoongma@gmail.com"
] | jinwoongma@gmail.com |
3eb1a57dc99966d684dbbc8a94e98154248c5de1 | 092bb75936bbccf339a44fd13ce7040b8f2c305a | /gui.py | bbb847d0a409e822d87f1e363f35e6a3c23a357f | [] | no_license | bishop254/NetMon254 | c4794c2382aaa7311fee7d91d9217ebdf887f952 | 7f255fc6a58927ffb56e9bb74e512c42aa7d3d25 | refs/heads/main | 2023-08-18T17:52:44.691888 | 2021-09-29T19:43:40 | 2021-09-29T19:43:40 | 408,805,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | #! /usr/bin/env python3
import random
from ipaddress import IPv4Network
from typing import List
from scapy.all import ICMP, IP, sr1, TCP
# Define IP range to scan
network = "192.168.43.0/24"
# Define TCP port range
port_range = [22,23,80,443,449]
# make list of addresses out of network, set live host counter
addresses = IPv4Network(network)
live_count = 0
def port_scan(host: str, ports: List[int]):
# Send SYN with random Src Port for each Dst port
for dst_port in ports:
src_port = random.randint(1025, 65534)
resp = sr1(
IP(dst=host)/TCP(sport=src_port,dport=dst_port,flags="S"),timeout=1,
verbose=0,
)
if resp is None:
print(f"{host}:{dst_port} is filtered (silently dropped).")
elif(resp.haslayer(TCP)):
if(resp.getlayer(TCP).flags == 0x12):
send_rst = sr(
IP(dst=host)/TCP(sport=src_port,dport=dst_port,flags='R'),
timeout=1,
verbose=0,
)
print(f"{host}:{dst_port} is open.")
elif (resp.getlayer(TCP).flags == 0x14):
print(f"{host}:{dst_port} is closed.")
elif(resp.haslayer(ICMP)):
if(
int(resp.getlayer(ICMP).type) == 3 and
int(resp.getlayer(ICMP).code) in (1, 2, 3, 9, 10, 13)
):
print(f"{host}:{dst_port} is filtered (silently dropped).")
# Send ICMP ping request, wait for answer
for host in addresses:
if (host in (addresses.network_address, addresses.broadcast_address)):
# Skip network and broadcast addresses
continue
resp = sr1(IP(dst=str(host))/ICMP(), timeout=2, verbose=0)
if resp is None:
print(f"{host} is down or not responding.")
elif (
int(resp.getlayer(ICMP).type)==3 and
int(resp.getlayer(ICMP).code) in [1,2,3,9,10,13]
):
print(f"{host} is blocking ICMP.")
else:
port_scan(str(host), port_range)
live_count += 1
print(f"{live_count}/{addresses.num_addresses} hosts are online.") | [
"noreply@github.com"
] | noreply@github.com |
93981d5496bc781858932e1163011673c668b86b | 67769384e54979a058a939d067c813d6ba84e213 | /alphaml/datasets/cls_dataset/sector.py | 31e7198fd548de25beb9398b18c050c77c974921 | [
"BSD-3-Clause"
] | permissive | dingdian110/alpha-ml | f53e1c847c9d327691e77dcb3edab8ca51520d50 | d6a7a8a8a3452a7e3362bf0ef32b9ac5fe215fde | refs/heads/master | 2020-09-14T17:08:02.229245 | 2019-11-21T11:32:56 | 2019-11-21T11:32:56 | 223,195,139 | 1 | 0 | BSD-3-Clause | 2019-11-21T14:39:39 | 2019-11-21T14:39:38 | null | UTF-8 | Python | false | false | 700 | py | import pandas as pd
def load_sector():
L = []
file_path = 'data/xgb_dataset/sector/sector.txt'
with open(file_path, 'r') as f:
for line in f.readlines():
items = line.strip().split('\n')[0].split(' ')
d ={}
d['label'] = int(items[0]) - 1
del items[0]
for item in items:
key, value = item.split(':')
d[key] = float(value)
L.append(d)
df = pd.DataFrame(L)
y = df['label'].values
del df['label']
df.fillna(0,inplace=True)
X = df.values
return X, y
if __name__ == '__main__':
X, y = load_sector()
print(X)
print(set(y)) | [
"32727236+salty-fish-97@users.noreply.github.com"
] | 32727236+salty-fish-97@users.noreply.github.com |
45ae2421cf59a4a43dd3dd496139b32a58f76e45 | 6d1f401129b8a3891be699e81ac145a1f47c77f2 | /marine_ferrary/google-python-exercises/basic/list1.py | a3d3924556dc24a4ac5b80a500ec683cbeb8ecc9 | [
"Apache-2.0"
] | permissive | olivier-loison/INFMDI721 | 1364ddf382cd4e21adff293b50dad5718ac971f0 | 12ec2acff229e4ec1053607ef29e8ece871fe9fa | refs/heads/master | 2021-07-05T18:54:46.370068 | 2017-09-21T22:22:58 | 2017-09-21T22:22:58 | 104,406,207 | 0 | 0 | null | 2017-09-21T22:49:34 | 2017-09-21T22:49:34 | null | UTF-8 | Python | false | false | 3,260 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
return len([w for w in words if len (w) >=2 and w[0]==w[-1:] ])
#count =0
#for w in words :
# if len (w) >=2 and w[0]==w[-1:] :
# count +=1
# +++your code here+++
#return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
# +++your code here+++
l1 = [w for w in words if w.startswith("x")]
l2 = [w for w in words if not w.startswith("x")]
return sorted(l1)+sorted(l2)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
# +++your code here+++
return sorted(tuples, key = lambda tuple:tuple[1], reverse =False)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print ('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print ('match_ends')
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print()
print ('front_x')
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print()
print ('sort_last')
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
| [
"marine@dhcpwifi-23-39.enst.fr"
] | marine@dhcpwifi-23-39.enst.fr |
59ccae351bb923ec178b9c7535c647e37c13a56c | 25f8871f4cd871d0513a24486201fdd0cc2ceb91 | /user/signals.py | 86aeb47245c8ae4208755b975e128d80117a9e91 | [] | no_license | CaptainVee/Rest | c58b094f3e2481a88851b34daf2870f72f319f0b | b3427b131ea91096cf778dfebd8133c2cf34c5ce | refs/heads/master | 2023-03-15T00:06:39.817192 | 2021-03-29T00:28:27 | 2021-03-29T00:28:27 | 347,763,138 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **Kwargs):
if created:
Token.objects.create(user=instance)
Profile.objects.create (user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **Kwargs):
instance.profile.save()
| [
"captainvee3@gmail.com"
] | captainvee3@gmail.com |
7728b2672b63c0955fa31c15489a21301c6c50f8 | 291d0103d1012e02d0882b029ea3e5129a9c2ef7 | /src/hai/urls.py | f546a93c4f0a51847b96d409e391d9ad8243b41d | [] | no_license | lcmencia/hai | 318431b23c6391f728946bd434c2ca3a72fc3c13 | 1c7c0e2bd6bbf0e371bc4e09483cb68a187751e1 | refs/heads/master | 2021-01-18T18:12:01.407068 | 2017-08-18T17:21:32 | 2017-08-18T17:21:32 | 100,516,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | """hai URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', include('blog.urls',
namespace='blog',
app_name='blog')),
]
| [
"lcmencia2@gmail.com"
] | lcmencia2@gmail.com |
1c73574065a164f757249a8c0128113bc3fccdae | 837a430acf6deaf94784bcaec496bbfac5affff7 | /venv/bin/pip3 | acec7bf0fb4b56136916c07617d638c89df28d3e | [] | no_license | meghamanohar/Pythontests | d8a474050b150c2d80690ce71e615306163b50c3 | 93153019e8a35c567d9954b6ba9edda2c3132e92 | refs/heads/master | 2022-06-29T16:34:56.397756 | 2020-05-14T02:27:38 | 2020-05-14T02:27:38 | 263,792,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | #!/Users/krishnayadappanavar/PycharmProjects/FirstProject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"megha.manohar@gmail.com"
] | megha.manohar@gmail.com | |
22e1df1c15e86bdfdede5035f91968705c0d6f0b | b9a5336cb031d74b80395a5f6482661330516fb7 | /reviews/migrations/0001_initial.py | d9211574aeae7e61c1eeaf6a36084bc56655bb00 | [] | no_license | OMDraz/BookrApp | 124863c0eeb7d381ac6e7aa117b953d3f7448bd5 | 0b202ddc8bb9635f503f4725ae1a2e8fdecf5091 | refs/heads/master | 2023-08-07T03:01:26.932390 | 2021-09-23T05:18:25 | 2021-09-23T05:18:25 | 394,129,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # Generated by Django 3.2.3 on 2021-08-09 02:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The name of the publisher', max_length=50)),
('website', models.URLField(help_text="The publisher's website")),
('email', models.EmailField(help_text="The publisher's email address.", max_length=254)),
],
),
]
| [
"omardraz94@gmail.com"
] | omardraz94@gmail.com |
9ef94e2e4d69efad94f09beea5a420f9acda3202 | c1654d09c1eccf17d105d31c62bbf4106feb89d8 | /resolution-mylar.py | 4d6222a94a7d894fdaa9fbff4e10052cca671b70 | [] | no_license | piti118/crystal-length-study-for-mu2e | 142be2f059299c9902706b50d375fda01e651ead | a0287d2676fef33c15298caf432b0d5b38443bd1 | refs/heads/master | 2016-09-11T09:12:07.118526 | 2012-05-14T05:26:27 | 2012-05-14T05:26:27 | 3,666,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,825 | py | # -*- coding: utf-8 -*-
# <nbformat>3</nbformat>
# <codecell>
from root_numpy import *
from dist_fit import *
from cithep import *
from h5py import *
sample='mylar'
# <codecell>
class Hitmap:
    """Accumulates deposited energy on a (numrow x numcol) grid of crystals.

    Hits are addressed by centered coordinates (l, k), with (0, 0) the
    central crystal of the array; lk2ij() converts them to array indices.
    """
    def __init__(self,numrow=21,numcol=21):
        # Energy deposited per crystal (units follow the input hits --
        # presumably MeV; confirm against the producing simulation).
        self.hmap = np.zeros([numrow,numcol])
        self.numrow = numrow
        self.numcol = numcol
    def acc(self,l,k,E):
        """Add energy E to the crystal at centered coordinates (l, k)."""
        i,j = self.lk2ij(l,k)
        self.hmap[i,j] += E
    def lk2ij(self, l,k):
        """Map centered coordinates (l, k) to array indices (i, j).

        i indexes rows, j indexes columns.  The original code offset i by
        numcol/2 and j by numrow/2; for the (always-used) square default the
        two are identical, but each offset is now paired with the dimension
        it actually indexes so a non-square map cannot go out of bounds.
        Floor division keeps the indices integral on Python 2 and 3 alike.
        """
        return l + self.numrow // 2, k + self.numcol // 2
    def sumE(self,cutoff=None):
        """Total deposited energy; with `cutoff`, count only crystals above it.

        A single np.sum suffices: boolean masking already yields a 1-D array,
        and summing a 2-D array fully reduces it in one call.
        """
        if cutoff is not None:
            return np.sum(self.hmap[self.hmap > cutoff])
        return np.sum(self.hmap)
# <codecell>
hitmap = root2array('%s.root'%sample,'hitmap')
einfo = root2array('%s.root'%sample,'eventinfo')
# <codecell>
# <codecell>
# Group the per-crystal hit rows into one total energy per (run, event).
# Rows for the same event are assumed to be contiguous in `hitmap`.
laster = tuple()
thishit = None
# One output row per event: beam angle (degrees) and summed energy.
result = np.array([],dtype=[('angle',np.double),('E',np.double)])
for hit in hitmap:
    runno = hit['runno']
    eventno = hit['eventno']
    # A new (run, event) pair means the previous event is complete: flush it.
    if (runno,eventno) != laster and laster != tuple():
        result.resize(len(result)+1)
        # The run number encodes the angle in 5-degree steps (angle = runno*5).
        result[-1]['angle'] = laster[0]*5.
        result[-1]['E'] = thishit.sumE()
        thishit=None
    laster = (runno,eventno)
    if thishit is None:
        thishit = Hitmap()
    thishit.acc(hit['l'],hit['k'],hit['E'])
if thishit is not None:
    result.resize(len(result)+1)
    result[-1]['angle'] = laster[0]*5.
    result[-1]['E'] = thishit.sumE()
    thishit=None #take care of the last one
# <codecell>
f = File('%s.hdf5'%sample,'w')
f.create_dataset('result',data=result)
f.close()
# <codecell>
f = File('%s.hdf5'%sample,'r')
tmp = f['result']
result = np.array(tmp)
f.close()
# <codecell>
def my_gau(x,g_mu,g_sigma):
    """Thin wrapper around the shared gaussian PDF.

    The distinctly named parameters (g_mu, g_sigma) keep the Gaussian's
    arguments separate from the crystalball parameters when the two PDFs
    are convolved, since the fitter identifies parameters by name.
    """
    value = gaussian(x, g_mu, g_sigma)
    return value
# <codecell>
def smear(E, photons_per_mev=1000.):
    """Apply photo-statistics smearing to an array of energies.

    Each entry is fluctuated with a Gaussian whose width follows Poisson
    statistics of the collected light: sigma = sqrt(yield * E) / yield.

    E : array of deposited energies (MeV) -- not modified in place.
    photons_per_mev : light yield used for the resolution model; the
        default of 1000 photons/MeV reproduces the original hard-coded
        behaviour exactly.
    Returns a new array of smeared energies.
    """
    w = sqrt(photons_per_mev*E)   # photon-count fluctuation per entry
    ret = randn(len(E))           # one unit-normal draw per energy
    ret *= w/photons_per_mev      # scale fluctuation back to energy units
    ret += E
    return ret
def doOneFit(E,range=(95.,110.),mean=104.,sigma=1.,n=20.,alpha=0.5,N=80000,
    limit_N=(1000,100000),limit_n=(0.1,100.), limit_mean=(90,106), limit_sigma=(0.3,5.),limit_alpha=(0.,5.)):
    """Fit the energy distribution E with a Crystal Ball convolved with a
    Gaussian and return (fit, minuit, fwhm_high, fwhm_low, pdf).

    Starting values and parameter limits are passed through to the binned
    Poisson likelihood fit; on failure the fit is retried up to 5 times with
    randomised starting mean/alpha.
    NOTE(review): shadows the builtin `range` with the fit-window tuple --
    kept for interface compatibility.
    """
    #eg = Add2Pdf(my_gau,Normalize(crystalball,range))
    #describe(eg)
    #eg = Normalize(crystalball,range)
    # PDF: normalized Crystal Ball convolved with my_gau over [-2, 2].
    eg = Convolve(Normalize(crystalball,range),my_gau,(-2,2),nbins=40)
    #eeg = eg
    # Extend adds the yield parameter N to the PDF.
    eeg = Extend(eg)
    print describe(eeg)
    #fit, m = fit_uml(eg,sm,mean=104.5,sigma=1.,n=20.,alpha=0.5, limit_n=(0.1,50.), limit_mean=(90,106), limit_sigma=(0.3,5.),limit_alpha=(0.,2.))
    #try_uml(eg,sm,mean=104.,sigma=1.,n=50.,alpha=0.5)
    fit,m = None,None
    good = False
    itry = 0
    first = True
    # Retry loop: on fit failure, jitter the starting mean and alpha and
    # try again, at most 5 times.
    while not good and itry<5:
        try:
            if not first:
                mean = 104.5+randn(1)*2.
                alpha=0.5+randn(1)*0.2
            first =False
            fit,m = fit_binpoisson(eeg,E,maxcalls=2000000,bins=100,
                mean=mean,sigma=sigma,n=n,alpha=alpha,N=N,g_mu=0.,g_sigma=0.3,
                limit_N=limit_N,limit_n=limit_n, limit_mean=limit_mean, limit_sigma=limit_sigma,limit_alpha=limit_alpha,
                limit_g_mu=(-1,1),limit_g_sigma=(0.001,0.5),
                quiet=False,throw=False)
            good = True
        except Exception as e:
            print e
            #raise e
            itry+=1
    # NOTE(review): if all 5 attempts fail, fit is still None and
    # fit.draw(m) below raises -- confirm this is acceptable.
    fit.draw(m)
    # Full-width-at-half-maximum edges of the fitted PDF, highlighted on the plot.
    l,h = fwhm_f(eeg,range,m.args)
    print m.values
    vertical_highlight(l,h)
    return fit,m,h,l,eeg
# <codecell>
angles = np.linspace(0,90,19)[:-1]
myresult = {}
# <codecell>
arg = {
0 :{'range':(96.,105.5)},
1 :{'range':(96.,105.5)},
2 :{'range':(96.,105.5)},
3 :{'range':(96.,105.5)},
4 :{'range':(96.,105.5)},
5 :{'range':(96.,105.5)},
6 :{'range':(96.,105.5)},
7 :{'range':(96.,105.5)},
8 :{'range':(96.,105.5)},
9 :{'range':(96.,105.5)},
10:{'range':(96.,105.5)},
11:{'range':(96.,105.5)},
12:{'range':(90.,105.5)},
13:{'range':(90.,105.5)},
14:{'range':(90.,105.5)},
15:{'range':(90.,105.5)},
16:{'range':(80.,105.5)},
17:{'range':(80.,105.5)},
}
for i,angle in enumerate(angles):
if i < 14: continue
myE = result['E'][(result['angle']>(angle-0.1)) & (result['angle']<(angle+0.1))]
figure()
myE = smear(myE)
emin,emax = 101.,105.5
if i in arg:
emin,emax = arg[i]['range']
myE = myE[(myE>emin) & (myE<emax)]
myresult[i] = doOneFit(myE,range=(emin,emax),N=len(myE))
title(str(angle)+' '+str(i))
# <codecell>
#make and save the plot
def make_nice_plot(r):
    """Draw a 3x3 grid of fit results for the angle indices in r.

    Reads the global `myresult` dict ((fit, minuit, fwhm_high, fwhm_low, pdf)
    per angle index) and the global `sample` name; each subplot shows the fit,
    the FWHM band, and the FWHM/2.35 resolution.
    """
    fig,axs = subplots(3,3,figsize=(20,12))
    for i in r:
        # Map the (possibly offset) angle index onto the 3x3 grid.
        ii = i%9
        # NOTE(review): relies on Python 2 integer division (`ii/3`); under
        # Python 3 this yields a float index and would fail.
        row = ii/3
        col = ii%3
        fit = myresult[i][0]
        m = myresult[i][1]
        fh,fl = myresult[i][2],myresult[i][3]
        # Gaussian-equivalent resolution from the full width at half maximum.
        fwhm_res = (fh-fl)/2.35
        ax=axs[row,col]
        sca(ax)
        fit.draw(m)
        vertical_highlight(fl,fh)
        # Angle index i corresponds to i*5 degrees.
        title('%s %d deg'%(sample,5*i))
        text(0.5,0.2,r'fwhm/2.35=%3.2f'%(fwhm_res),transform = ax.transAxes)
make_nice_plot(range(9))
savefig('%s_1.pdf'%sample,bbox_inches='tight')
make_nice_plot(range(9,18))
savefig('%s_2.pdf'%sample,bbox_inches='tight')
# <codecell>
fwhm = np.zeros(18)
for i in range(18): fwhm[i]=(myresult[i][2]-myresult[i][3])/2.35
np.save('fwhm_%s.npy'%sample,fwhm)
x = np.array(range(18))*5.
plot(x,fwhm,'xb')
# <codecell>
hist(result['E'],bins=100,range=(100,110),histtype='step');,
# <codecell>
a = numpy.array([],dtype=[('a',np.double)])
a
a.resize(len(a)+1)
a.resize(len(a)+1)
a
# <codecell>
gdf = df.groupby(['runno','eventno'])
# <codecell>
for k,v in gdf:
h = Hitmap(10,10)
for i in xrange(len(v)):
h.acc(v.l[i],v.k[i],v.E[i])
print h.hmap
print h.sumE()
break
# <codecell>
h = Hitmap(10,10)
# <codecell>
for x in hmap:
# <codecell>
| [
"piti118@gmail.com"
] | piti118@gmail.com |
015f28cff9057185f32b9aa80589b0f4ae92b00a | b1a7fce60e8935592d07323222212d132eedb407 | /Raspi/Confirm.py | a4d5142e76c993a17e454a2068f3e4dc046cbad7 | [] | no_license | Namlitruong/Capstone-ModularRobot | d0922030a8ee0af7a06667ea5f333b19e1bbb070 | e23b07b260a7bfef9a0ef07bb74816cf64cc6a56 | refs/heads/master | 2022-12-17T23:07:07.952625 | 2020-08-17T00:41:11 | 2020-08-17T00:41:11 | 273,672,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,766 | py | import CANbus
import can
import csv
#############################--INTERRUPT--######################################
import time
import os, signal
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def interrupt_handler(channel):
    """GPIO edge callback: force-kill every running 'python3 Confirm.py'.

    Lists matching PIDs via ps/grep/awk and issues `sudo kill -9` for each.
    NOTE(review): the grep match also catches the grep process itself and the
    PID strings carry trailing newlines; harmless to kill, but confirm.
    """
    parent = os.getppid()
    print(parent)
    pids = os.popen("ps aux | grep 'python3 Confirm.py' | awk '{print $2}'").readlines()
    print ("Length: ", len(pids))
    for pid in pids:
        print (pid)
        os.system ('sudo kill -9 '+ pid)
    print("####################################")
GPIO.add_event_detect(13, GPIO.RISING,
callback=interrupt_handler,
bouncetime=500)
###################################################################################
actuatorID = []
sensorID = []
def wriToFile (aID, sID):
    """Persist the active actuator and sensor ID lists to config.csv.

    Writes two semicolon-delimited rows: actuator IDs first, sensor IDs second.
    The file is truncated on every call.
    """
    with open('config.csv', 'w') as out:
        csv.writer(out, delimiter=";").writerows([aID, sID])
def classifier (msg):
    """Classify an incoming CAN frame by its arbitration ID.

    Returns (subID, mType):
      (0x1A0, 'A') for an actuator-module announcement,
      (0x1F0, 'S') for a sensor-module announcement,
      (0, 0)       for any other frame.
    """
    arb = msg.arbitration_id
    if arb == 0x1A0:
        print ("Module detected !!!")
        return 0x1A0, 'A'
    if arb == 0x1F0:
        #print ("Sensor module detected !!!")
        return 0x1F0, 'S'
    return 0, 0
def searchValidID (IDlist, tempModule):
    """Assign the next free CAN ID to a newly announced module.

    Probes up to 15 candidate IDs (base ID + 1 .. base ID + 15), stopping at
    the first one not already used by a module in IDlist. The module (with its
    new ID) is appended to IDlist and the assigned ID is returned.
    """
    for _ in range(15):
        tempModule.ID += 1
        # Candidate is free when no registered module already holds it.
        if all(existing.ID != tempModule.ID for existing in IDlist):
            break
    IDlist.append(tempModule)
    print ("Assign new ID: ", hex(tempModule.ID))
    return tempModule.ID
def verifyID (IDlist):
    """Ping every registered module over the CAN bus and return only the
    ones that still answer.

    For each module: send a ping to (module ID - 0x100) and wait up to 0.1 s
    for a reply, repeating until either a reply arrives (module kept) or the
    module's cumulative timeout counter reaches 5 (module dropped).
    """
    activeList = []
    for i in range (len(IDlist)):
        while (True):
            # Ping the module on its command ID (data ID minus 0x100).
            CANbus.send((IDlist[i].ID - 0x100), [0x00])
            msg = CANbus.receiveNonBlocking(0.1)
            # NOTE(review): this check runs after the send, so one extra ping
            # is emitted once the counter has already hit 5 -- confirm intended.
            if (IDlist[i].timeout == 5):
                break
            if (msg == None):
                IDlist[i].timeout = IDlist[i].timeout + 1
            else:
                # Module replied: keep it and move on to the next one.
                activeList.append (IDlist[i])
                break
    return activeList
def printAvailableID (msg, module):
    """Print a header followed by each module's ID and list index;
    return the list of IDs in the same order."""
    print (msg)
    IDlist = []
    for idx, mod in enumerate(module):
        print (mod.ID, " ", idx)
        IDlist.append(mod.ID)
    return IDlist
if __name__ == "__main__":
while (True):
while (True):
print ("Waiting for connecting modules")
msg = CANbus.receive()
tempID, mType = classifier (msg)
if (msg.arbitration_id == tempID):
break
tempModule = CANbus.module(msg.arbitration_id)
if (mType == 'A'):
tempID = searchValidID (actuatorID, tempModule)
CANbus.send (0x0A0, [(tempID - 0x1A0)])
elif (mType == 'S'):
tempID = searchValidID (sensorID, tempModule)
CANbus.send (0x0F0, [(tempID - 0x1F0)])
#CANbus.send (0x0A0, [(tempID - 0x1A0)])
print ("Sending Confirmation", tempID - 0x100)
while (True):
msg = CANbus.receive()
if (msg.arbitration_id == tempID):
break
print ("Confirmation Complete")
#Verify modules
print ("Verifying existing modules")
actuatorID = verifyID (actuatorID)
sensorID = verifyID (sensorID)
aID = printAvailableID ("Available Module: ", actuatorID)
#sID = printAvailableID ("Available Sensor: ", sensorID)
sID = printAvailableID (" ", sensorID)
wriToFile (aID, sID) | [
"pi@raspberrypi"
] | pi@raspberrypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.