# --- python/square root.py | repo: SULING4EVER/learngit | license: Apache-2.0 ---
x = input("Enter a number of which you want to know the square root.")
x = int(x)
g = x / 2  # initial guess
# Newton's (Babylonian) method: refine g until g*g is within tolerance of x
while (g*g - x) * (g*g - x) > 0.00000000001:
    g = (g + x/g) / 2
    print(g)  # each successive approximation
print(g)
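# A minimal reusable sketch of the same Babylonian iteration (our own addition,
# not part of the original file; the name newton_sqrt is hypothetical):
def newton_sqrt(n, tol=1e-11):
    """Approximate the square root of n (n > 0) by Newton's method."""
    g = n / 2.0
    while abs(g * g - n) > tol:
        g = (g + n / g) / 2.0
    return g
# e.g. newton_sqrt(2) returns roughly 1.41421356237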
# --- chapter 5/sampleCode22.py | repo: DTAIEB/Thoughtful-Data-Science | license: Apache-2.0 ---
import pixiedust
my_logger = pixiedust.getLogger(__name__)
# --- iaso/migrations/0052_fix_period_before_after.py | repo: ekhalilbsq/iaso | license: MIT ---
# Generated by Django 2.1.11 on 2020-06-04 09:19
from django.db import migrations, models
# --- vc/manager/generation_result.py | repo: very-meanly/vc | license: MIT ---
from vc.manager.base import Manager
from vc.model.generation_result import GenerationResult
# --- src/aceinna/bootstrap/__init__.py | repo: lihaiyong827/python-openimu | license: Apache-2.0 ---
import sys
import os
import traceback
from .default import Default
from .cli import CommandLine
from .loader import Loader
# --- test/tutorial/scripts/api/logout_api.py | repo: GPelayo/dcp-cli | license: MIT ---
from hca.dss import DSSClient
dss = DSSClient()
dss.logout()
# --- home/stock_model.py | repo: 85599/nse-django | license: MIT ---
import os
import pandas as pd
from sklearn import linear_model
from nsetools import Nse
import pathlib
import joblib
nse = Nse()
def nse_data(stock_name):
'''input stock_name : str
    output : list = [open, dayHigh, dayLow]'''
data = nse.get_quote(stock_name)
current = [data['open'],data['dayHigh'],data['dayLow']]
return current
def model_check(stock_name):
    '''checking if model exists or not;
    input stock_name : str
    return True or False'''
    # look in the same saved_model directory that any_stock() below writes to
    # (the original pointed at "\\nse_data\\saved_model\\" here, a different folder)
    model_path = pathlib.Path(os.getcwd() + "\\home\\nse_data\\saved_model\\" + stock_name + '.pkl')
    return model_path.exists()
def any_stock(stock_name):
'''function to predict any stock values
stock_name == str; today_value= list,[open,high,low]
'''
try:
if model_check(stock_name) == False:
data_path = os.getcwd()+"\\home\\nse_data\\HISTORICAL_DATA\\"
df = pd.read_csv(data_path + stock_name + '_data.csv')
df.fillna(df.mean(),inplace=True)
X = df.iloc[:,[1,2,3]]
y = df.iloc[:,[4]]
reg = linear_model.LinearRegression()
reg.fit(X,y)
y_today = reg.predict([nse_data(stock_name)])
model_path_one = os.getcwd()+"\\home\\nse_data\\saved_model\\"
joblib_file = model_path_one + stock_name+ ".pkl"
joblib.dump(reg, joblib_file)
print('model creation')
return y_today[0][0]
else:
print('model loading')
model_path_one = os.getcwd()+"\\home\\nse_data\\saved_model\\"
joblib_file = model_path_one + stock_name+ ".pkl"
model = joblib.load(joblib_file)
y_today = model.predict([nse_data(stock_name)])
            return y_today[0][0]
    except Exception as e:
        return "internal error: {}".format(e)
# try:
# print(any_stock('SBIN'))
# except IndexError:
# print('index error')
# except FileNotFoundError:
# print("no file")
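# Minimal sketch of the fit -> joblib.dump -> joblib.load pattern used above
# (our own addition; "demo.pkl" and the toy numbers are hypothetical):
# from sklearn import linear_model
# import joblib
# reg = linear_model.LinearRegression().fit([[1, 2, 0], [2, 3, 1]], [1.5, 2.5])
# joblib.dump(reg, "demo.pkl")
# print(joblib.load("demo.pkl").predict([[1.5, 2.5, 0.5]]))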
# --- examples/rough_translated1/osgthreadedterrain.py | repo: JaneliaSciComp/osgpyplusplus | license: BSD-3-Clause ---
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgthreadedterrain"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import OpenThreads
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgTerrain
from osgpypp import osgText
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgthreadedterrain.cpp'
# OpenSceneGraph example, osgterrain.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <OpenThreads/Block>
#include <osg/Group>
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Texture2D>
#include <osg/PositionAttitudeTransform>
#include <osg/MatrixTransform>
#include <osg/CoordinateSystemNode>
#include <osg/ClusterCullingCallback>
#include <osg/ArgumentParser>
#include <osgDB/FileUtils>
#include <osgDB/fstream>
#include <osgDB/ReadFile>
#include <osgUtil/IncrementalCompileOperation>
#include <osgText/FadeText>
#include <osgViewer/Viewer>
#include <osgViewer/ViewerEventHandlers>
#include <osgGA/TrackballManipulator>
#include <osgGA/FlightManipulator>
#include <osgGA/DriveManipulator>
#include <osgGA/KeySwitchMatrixManipulator>
#include <osgGA/StateSetManipulator>
#include <osgGA/AnimationPathManipulator>
#include <osgGA/TerrainManipulator>
#include <osgTerrain/TerrainTile>
#include <osgTerrain/GeometryTechnique>
#include <osgTerrain/Layer>
#include <iostream>
typedef std.vector< osg.GraphicsThread > GraphicsThreads
ReleaseBlockOnCompileCompleted(osg.RefBlockCount* block):
_block(block)
def compileCompleted(compileSet):
if _block.valid() : _block.completed()
# tell IncrementalCompileOperation that it's now safe to remove the compileSet
osg.notify(osg.NOTICE), "compileCompleted(", compileSet, ")"
return True
_block = osg.RefBlockCount()
class LoadAndCompileOperation (osg.Operation) :
LoadAndCompileOperation( str filename, osgUtil.IncrementalCompileOperation* ico , osg.RefBlockCount* block):
Operation("Load and compile Operation", False),
_filename(filename),
_incrementalCompileOperation(ico),
_block(block)
virtual void operator () (osg.Object* object)
# osg.notify(osg.NOTICE), "LoadAndCompileOperation ", _filename
_loadedModel = osgDB.readNodeFile(_filename)
if _loadedModel.valid() and _incrementalCompileOperation.valid() :
compileSet = osgUtil.IncrementalCompileOperation.CompileSet(_loadedModel)
compileSet._compileCompletedCallback = ReleaseBlockOnCompileCompleted(_block)
_incrementalCompileOperation.add(compileSet)
else:
if _block.valid() : _block.completed()
# osg.notify(osg.NOTICE), "done LoadAndCompileOperation ", _filename
_filename = str()
_loadedModel = osg.Node()
_incrementalCompileOperation = osgUtil.IncrementalCompileOperation()
_block = osg.RefBlockCount()
class MasterOperation (osg.Operation) :
typedef std.set<str> Files
typedef std.map<str, osg.Node > FilenameNodeMap
typedef std.vector< osg.Node > Nodes
MasterOperation( str filename, osgUtil.IncrementalCompileOperation* ico):
Operation("Master reading operation",True),
_filename(filename),
_incrementalCompileOperation(ico)
#* Set the OperationQueue that the MasterOperation can use to place tasks like file loading on for other processes to handle.
virtual void operator () (osg.Object* callingObject)
# decided which method to call according to whole has called me.
viewer = dynamic_cast<osgViewer.Viewer*>(callingObject)
if viewer : update(viewer.getSceneData())
load = else()
#if 0
if not newFiles.empty() or not removedFiles.empty() :
osg.notify(osg.NOTICE), "void operator () files.size()=", files.size()
#endif
# first load the files.
nodesToAdd = FilenameNodeMap()
if not newFiles.empty() :
typedef std.vector< osg.GraphicsThread > GraphicsThreads
threads = GraphicsThreads()
for(unsigned int i=0 i<= osg.GraphicsContext.getMaxContextID() ++i)
gc = osg.GraphicsContext.getCompileContext(i)
gt = gc.getGraphicsThread() if (gc) else 0
if gt : threads.push_back(gt)
if _operationQueue.valid() :
# osg.notify(osg.NOTICE), "Using OperationQueue"
_endOfLoadBlock = osg.RefBlockCount(newFiles.size())
_endOfLoadBlock.reset()
typedef std.list< LoadAndCompileOperation > LoadAndCompileList
loadAndCompileList = LoadAndCompileList()
for(Files.iterator nitr = newFiles.begin()
not = newFiles.end()
++nitr)
# osg.notify(osg.NOTICE), "Adding LoadAndCompileOperation ", *nitr
loadAndCompile = LoadAndCompileOperation( *nitr, _incrementalCompileOperation, _endOfLoadBlock )
loadAndCompileList.push_back(loadAndCompile)
_operationQueue.add( loadAndCompile )
#if 1
operation = osg.Operation()
while operation=_operationQueue.getNextOperation() :.valid() :
# osg.notify(osg.NOTICE), "Local running of operation"
(*operation)(0)
#endif
# osg.notify(osg.NOTICE), "Waiting for completion of LoadAndCompile operations"
_endOfLoadBlock.block()
# osg.notify(osg.NOTICE), "done ... Waiting for completion of LoadAndCompile operations"
for(LoadAndCompileList.iterator litr = loadAndCompileList.begin()
not = loadAndCompileList.end()
++litr)
if *litr :._loadedModel.valid() :
nodesToAdd[(*litr)._filename] = (*litr)._loadedModel
else:
_endOfLoadBlock = osg.RefBlockCount(newFiles.size())
_endOfLoadBlock.reset()
for(Files.iterator nitr = newFiles.begin()
not = newFiles.end()
++nitr)
loadedModel = osgDB.readNodeFile(*nitr)
if loadedModel :
nodesToAdd[*nitr] = loadedModel
if _incrementalCompileOperation.valid() :
compileSet = osgUtil.IncrementalCompileOperation.CompileSet(loadedModel)
compileSet._compileCompletedCallback = ReleaseBlockOnCompileCompleted(_endOfLoadBlock)
_incrementalCompileOperation.add(compileSet)
else:
_endOfLoadBlock.completed()
else:
_endOfLoadBlock.completed()
_endOfLoadBlock.block()
requiresBlock = False
# pass the locally peppared data to MasterOperations shared data
# so that updated thread can merge these changes with the main scene
# graph. This merge is carried out via the update(..) method.
if not removedFiles.empty() or not nodesToAdd.empty() :
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
_nodesToRemove.swap(removedFiles)
_nodesToAdd.swap(nodesToAdd)
requiresBlock = True
# now block so we don't try to load anything till the data has been merged
# otherwise _existingFilenameNodeMap will get out of sync.
if requiresBlock :
_updatesMergedBlock.block()
else:
OpenThreads.Thread.YieldCurrentThread()
# merge the changes with the main scene graph.
if not _nodesToAdd.empty() :
for(FilenameNodeMap.iterator itr = _nodesToAdd.begin()
not = _nodesToAdd.end()
++itr)
osg.notify(osg.NOTICE), " update():inserting ", itr.first
group.addChild(itr.second)
_existingFilenameNodeMap[itr.first] = itr.second
_nodesToAdd.clear()
_updatesMergedBlock.release()
# add release implementation so that any thread cancellation can
# work even when blocks and barriers are used.
_filename = str()
_mutex = OpenThreads.Mutex()
_existingFilenameNodeMap = FilenameNodeMap()
_nodesToRemove = Files()
_nodesToAdd = FilenameNodeMap()
_updatesMergedBlock = OpenThreads.Block()
_incrementalCompileOperation = osgUtil.IncrementalCompileOperation()
_endOfCompilebarrier = osg.BarrierOperation()
_endOfLoadBlock = osg.RefBlockCount()
_operationQueue = osg.OperationQueue()
class FilterHandler (osgGA.GUIEventHandler) :
FilterHandler(osgTerrain.GeometryTechnique* gt):
_gt(gt)
def handle(ea, aa):
if not _gt : return False
switch(ea.getEventType())
case(osgGA.GUIEventAdapter.KEYDOWN):
if ea.getKey() == ord("g") :
osg.notify(osg.NOTICE), "Gaussian"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.GAUSSIAN)
return True
elif ea.getKey() == ord("s") :
osg.notify(osg.NOTICE), "Smooth"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.SMOOTH)
return True
elif ea.getKey() == ord("S") :
osg.notify(osg.NOTICE), "Sharpen"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.SHARPEN)
return True
elif ea.getKey() == ord("+") :
_gt.setFilterWidth(_gt.getFilterWidth()*1.1)
osg.notify(osg.NOTICE), "Filter width = ", _gt.getFilterWidth()
return True
elif ea.getKey() == ord("-") :
_gt.setFilterWidth(_gt.getFilterWidth()/1.1)
osg.notify(osg.NOTICE), "Filter width = ", _gt.getFilterWidth()
return True
elif ea.getKey() == ord(">") :
_gt.setFilterBias(_gt.getFilterBias()+0.1)
osg.notify(osg.NOTICE), "Filter bias = ", _gt.getFilterBias()
return True
elif ea.getKey() == ord("<") :
_gt.setFilterBias(_gt.getFilterBias()-0.1)
osg.notify(osg.NOTICE), "Filter bias = ", _gt.getFilterBias()
return True
break
default:
break
return False
_gt = osg.observer_ptr<osgTerrain.GeometryTechnique>()
class LayerHandler (osgGA.GUIEventHandler) :
LayerHandler(osgTerrain.Layer* layer):
_layer(layer)
def handle(ea, aa):
if not _layer : return False
scale = 1.2
switch(ea.getEventType())
case(osgGA.GUIEventAdapter.KEYDOWN):
if ea.getKey() == ord("q") :
_layer.transform(0.0, scale)
return True
elif ea.getKey() == ord("a") :
_layer.transform(0.0, 1.0/scale)
return True
break
default:
break
return False
_layer = osg.observer_ptr<osgTerrain.Layer>()
def main(argv):
arguments = osg.ArgumentParser(argv)
# construct the viewer.
viewer = osgViewer.Viewer(arguments)
# set up the camera manipulators.
keyswitchManipulator = osgGA.KeySwitchMatrixManipulator()
keyswitchManipulator.addMatrixManipulator( ord("1"), "Trackball", osgGA.TrackballManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("2"), "Flight", osgGA.FlightManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("3"), "Drive", osgGA.DriveManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("4"), "Terrain", osgGA.TerrainManipulator() )
pathfile = str()
keyForAnimationPath = ord("5")
while arguments.read("-p",pathfile) :
apm = osgGA.AnimationPathManipulator(pathfile)
if apm or not apm.valid() :
num = keyswitchManipulator.getNumMatrixManipulators()
keyswitchManipulator.addMatrixManipulator( keyForAnimationPath, "Path", apm )
keyswitchManipulator.selectMatrixManipulator(num)
++keyForAnimationPath
viewer.setCameraManipulator( keyswitchManipulator )
# add the state manipulator
viewer.addEventHandler( osgGA.StateSetManipulator(viewer.getCamera().getOrCreateStateSet()) )
# add the stats handler
    viewer.addEventHandler(osgViewer.StatsHandler())
# add the record camera path handler
    viewer.addEventHandler(osgViewer.RecordCameraPathHandler())
# attach an IncrementaCompileOperation to allow the master loading
# to be handled with an incremental compile to avoid frame drops when large objects are added.
viewer.setIncrementalCompileOperation(osgUtil.IncrementalCompileOperation())
x = 0.0
y = 0.0
w = 1.0
h = 1.0
numLoadThreads = 1
while arguments.read("--load-threads",numLoadThreads) :
masterOperation = MasterOperation()
masterFilename = str()
while arguments.read("-m",masterFilename) :
masterOperation = MasterOperation(masterFilename, viewer.getIncrementalCompileOperation())
terrainTile = osgTerrain.TerrainTile()
locator = osgTerrain.Locator()
validDataOperator = osgTerrain.NoDataValue(0.0)
lastAppliedLayer = osgTerrain.Layer()
locator.setCoordinateSystemType(osgTerrain.Locator.GEOCENTRIC)
locator.setTransformAsExtents(-osg.PI, -osg.PI*0.5, osg.PI, osg.PI*0.5)
layerNum = 0
filterName = str()
filter = osg.Texture.LINEAR
float minValue, maxValue
scale = 1.0
offset = 0.0
pos = 1
while pos<arguments.argc() :
filename = str()
if arguments.read(pos, "--layer",layerNum) :
osg.notify(osg.NOTICE), "Set layer number to ", layerNum
elif arguments.read(pos, "-b") :
terrainTile.setTreatBoundariesToValidDataAsDefaultValue(True)
elif arguments.read(pos, "-e",x,y,w,h) :
# define the extents.
locator.setCoordinateSystemType(osgTerrain.Locator.GEOCENTRIC)
locator.setTransformAsExtents(x,y,x+w,y+h)
elif arguments.read(pos, "--transform",offset, scale) or arguments.read(pos, "-t",offset, scale) :
# define the extents.
elif arguments.read(pos, "--cartesian",x,y,w,h) :
# define the extents.
locator.setCoordinateSystemType(osgTerrain.Locator.PROJECTED)
locator.setTransformAsExtents(x,y,x+w,y+h)
elif arguments.read(pos, "--hf",filename) :
osg.notify(osg.NOTICE), "--hf ", filename
hf = osgDB.readHeightFieldFile(filename)
if hf.valid() :
hfl = osgTerrain.HeightFieldLayer()
hfl.setHeightField(hf)
hfl.setLocator(locator)
hfl.setValidDataOperator(validDataOperator)
hfl.setMagFilter(filter)
if offset not =0.0 or scale not =1.0 :
hfl.transform(offset,scale)
terrainTile.setElevationLayer(hfl)
lastAppliedLayer = hfl
osg.notify(osg.NOTICE), "created osgTerrain.HeightFieldLayer"
else:
osg.notify(osg.NOTICE), "failed to create osgTerrain.HeightFieldLayer"
scale = 1.0
offset = 0.0
elif arguments.read(pos, "-d",filename) or arguments.read(pos, "--elevation-image",filename) :
osg.notify(osg.NOTICE), "--elevation-image ", filename
image = osgDB.readImageFile(filename)
if image.valid() :
imageLayer = osgTerrain.ImageLayer()
imageLayer.setImage(image)
imageLayer.setLocator(locator)
imageLayer.setValidDataOperator(validDataOperator)
imageLayer.setMagFilter(filter)
if offset not =0.0 or scale not =1.0 :
imageLayer.transform(offset,scale)
terrainTile.setElevationLayer(imageLayer)
lastAppliedLayer = imageLayer
osg.notify(osg.NOTICE), "created Elevation osgTerrain.ImageLayer"
else:
osg.notify(osg.NOTICE), "failed to create osgTerrain.ImageLayer"
scale = 1.0
offset = 0.0
elif arguments.read(pos, "-c",filename) or arguments.read(pos, "--image",filename) :
osg.notify(osg.NOTICE), "--image ", filename, " x=", x, " y=", y, " w=", w, " h=", h
image = osgDB.readImageFile(filename)
if image.valid() :
imageLayer = osgTerrain.ImageLayer()
imageLayer.setImage(image)
imageLayer.setLocator(locator)
imageLayer.setValidDataOperator(validDataOperator)
imageLayer.setMagFilter(filter)
if offset not =0.0 or scale not =1.0 :
imageLayer.transform(offset,scale)
terrainTile.setColorLayer(layerNum, imageLayer)
lastAppliedLayer = imageLayer
osg.notify(osg.NOTICE), "created Color osgTerrain.ImageLayer"
else:
osg.notify(osg.NOTICE), "failed to create osgTerrain.ImageLayer"
scale = 1.0
offset = 0.0
elif arguments.read(pos, "--filter",filterName) :
if filterName=="NEAREST" :
osg.notify(osg.NOTICE), "--filter ", filterName
filter = osg.Texture.NEAREST
elif filterName=="LINEAR" :
filter = osg.Texture.LINEAR
osg.notify(osg.NOTICE), "--filter ", filterName
else:
osg.notify(osg.NOTICE), "--filter ", filterName, " unrecognized filter name, please use LINEAER or NEAREST."
if terrainTile.getColorLayer(layerNum) :
terrainTile.getColorLayer(layerNum).setMagFilter(filter)
elif arguments.read(pos, "--tf",minValue, maxValue) :
tf = osg.TransferFunction1D()
numCells = 6
delta = (maxValue-minValue)/float(numCells-1)
v = minValue
tf.allocate(6)
tf.setColor(v, osg.Vec4(1.0,1.0,1.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(1.0,0.0,1.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(1.0,0.0,0.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(1.0,1.0,0.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(0.0,1.0,1.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(0.0,1.0,0.0,1.0))
osg.notify(osg.NOTICE), "--tf ", minValue, " ", maxValue
terrainTile.setColorLayer(layerNum, osgTerrain.ContourLayer(tf))
else:
++pos
scene = osg.Group()
if terrainTile.valid() and (terrainTile.getElevationLayer() or terrainTile.getColorLayer(0)) :
osg.notify(osg.NOTICE), "Terrain created"
scene.addChild(terrainTile)
geometryTechnique = osgTerrain.GeometryTechnique()
terrainTile.setTerrainTechnique(geometryTechnique)
viewer.addEventHandler(FilterHandler(geometryTechnique))
viewer.addEventHandler(LayerHandler(lastAppliedLayer))
if masterOperation.valid() :
osg.notify(osg.NOTICE), "Master operation created"
masterOperation.open(scene)
if scene.getNumChildren()==0 :
osg.notify(osg.NOTICE), "No model created, please specify terrain or master file on command line."
return 0
viewer.setSceneData(scene)
# start operation thread if a master file has been used.
masterOperationThread = osg.OperationThread()
typedef std.list< osg.OperationThread > OperationThreadList
generalThreadList = OperationThreadList()
if masterOperation.valid() :
masterOperationThread = osg.OperationThread()
masterOperationThread.startThread()
masterOperationThread.add(masterOperation)
# if numLoadThreads>0 :
operationQueue = osg.OperationQueue()
masterOperation.setOperationQueue(operationQueue)
for(unsigned int i=0 i<numLoadThreads ++i)
thread = osg.OperationThread()
thread.setOperationQueue(operationQueue)
thread.startThread()
generalThreadList.push_back(thread)
viewer.addUpdateOperation(masterOperation)
viewer.setThreadingModel(osgViewer.Viewer.SingleThreaded)
# enable the use of compile contexts and associated threads.
# osg.DisplaySettings.instance().setCompileContextsHint(True)
# realize the graphics windows.
viewer.realize()
# run the viewers main loop
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
# --- deepy/data/audio/tau2019.py | repo: popura/deepy-pytorch | license: MIT ---
import sys
import os
import os.path
import random
from pathlib import Path
import torch
import torchaudio
from .audiodataset import AUDIO_EXTENSIONS, default_loader
from ..dataset import PureDatasetFolder, has_file_allowed_extension
# --- rrl.py | repo: siekmanj/apex | license: MIT ---
import os
import torch
import hashlib
from collections import OrderedDict
from util.env import env_factory, eval_policy
from util.logo import print_logo
if __name__ == "__main__":
import sys, argparse, time, os
parser = argparse.ArgumentParser()
parser.add_argument("--nolog", action='store_true')
print_logo(subtitle="Recurrent Reinforcement Learning for Robotics.")
if len(sys.argv) < 2:
print("Usage: python apex.py [algorithm name]", sys.argv)
elif sys.argv[1] == 'ars':
"""
Utility for running Augmented Random Search.
"""
from algos.ars import run_experiment
sys.argv.remove(sys.argv[1])
parser.add_argument("--workers", type=int, default=4)
parser.add_argument("--hidden_size", default=32, type=int) # neurons in hidden layer
parser.add_argument("--timesteps", "-t", default=1e8, type=float) # timesteps to run experiment ofr
parser.add_argument("--load_model", "-l", default=None, type=str) # load a model from a saved file.
parser.add_argument('--std', "-sd", default=0.0075, type=float) # the standard deviation of the parameter noise vectors
parser.add_argument("--deltas", "-d", default=64, type=int) # number of parameter noise vectors to use
parser.add_argument("--lr", "-lr", default=0.01, type=float) # the learning rate used to update policy
parser.add_argument("--reward_shift", "-rs", default=1, type=float) # the reward shift (to counter Gym's alive_bonus)
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--algo", "-a", default='v1', type=str) # whether to use ars v1 or v2
parser.add_argument("--normalize" '-n', action='store_true') # normalize states online
parser.add_argument("--recurrent", "-r", action='store_true') # whether to use a recurrent policy
parser.add_argument("--logdir", default="./logs/ars/", type=str)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
parser.add_argument("--average_every", default=10, type=int)
parser.add_argument("--save_model", "-m", default=None, type=str) # where to save the trained model to
parser.add_argument("--redis", default=None)
args = parser.parse_args()
run_experiment(args)
elif sys.argv[1] == 'ddpg':
sys.argv.remove(sys.argv[1])
"""
Utility for running Recurrent/Deep Deterministic Policy Gradients.
"""
from algos.off_policy import run_experiment
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps in replay buffer
parser.add_argument("--start_timesteps", default=1e4, type=int) # number of timesteps to generate random actions for
parser.add_argument("--load_actor", default=None, type=str) # load an actor from a .pt file
parser.add_argument("--load_critic", default=None, type=str) # load a critic from a .pt file
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--expl_noise', default=0.2, type=float) # random noise used for exploration
parser.add_argument('--tau', default=0.01, type=float) # update factor for target networks
parser.add_argument("--a_lr", "-alr", default=1e-5, type=float) # adam learning rate for critic
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--center_reward", "-r", action='store_true') # normalize rewards to a normal distribution
parser.add_argument("--normc_init", default=True, type=bool) # using col norm to init weights
parser.add_argument("--normalize" '-n', action='store_true') # normalize states online
parser.add_argument("--batch_size", default=64, type=int) # batch size for policy update
parser.add_argument("--updates", default=1, type=int) # (if recurrent) number of times to update policy per episode
parser.add_argument("--eval_every", default=100, type=int) # how often to evaluate the trained policy
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--logdir", default="./logs/ddpg/", type=str)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
args = parser.parse_args()
args.algo = 'ddpg'
run_experiment(args)
elif sys.argv[1] == 'td3':
sys.argv.remove(sys.argv[1])
"""
Utility for running Twin-Delayed Deep Deterministic policy gradients.
"""
from algos.off_policy import run_experiment
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps in replay buffer
parser.add_argument("--start_timesteps", default=1e4, type=float) # number of timesteps to generate random actions for
parser.add_argument("--load_actor", default=None, type=str) # load an actor from a .pt file
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--expl_noise', default=0.1, type=float) # random noise used for exploration
parser.add_argument('--max_action', default=1.0, type=float) #
parser.add_argument('--policy_noise', default=0.2, type=float) #
parser.add_argument('--noise_clip', default=0.5, type=float) #
parser.add_argument('--tau', default=0.005, type=float) # update factor for target networks
parser.add_argument("--a_lr", "-alr", default=3e-4, type=float) # adam learning rate for critic
parser.add_argument("--c_lr", "-clr", default=3e-4, type=float) # adam learning rate for actor
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--center_reward", "-r", action='store_true') # normalize rewards to a normal distribution
parser.add_argument("--batch_size", default=256, type=int) # batch size for policy update
parser.add_argument("--updates", default=1, type=int) # (if recurrent) number of times to update policy per episode
parser.add_argument("--update_freq", default=1, type=int) # how many episodes to skip before updating
parser.add_argument("--eval_every", default=100, type=int) # how often to evaluate the trained policy
parser.add_argument("--save_actor", default=None, type=str)
#parser.add_argument("--save_critics", default=None, type=str)
parser.add_argument("--logdir", default="./logs/td3/", type=str)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
args = parser.parse_args()
args.algo = 'td3'
run_experiment(args)
elif sys.argv[1] == 'ppo':
sys.argv.remove(sys.argv[1])
"""
Utility for running Proximal Policy Optimization.
"""
from algos.ppo import run_experiment
parser.add_argument("--seed", default=0, type=int) # number of timesteps to run experiment for
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps to run experiment for
parser.add_argument("--env_name", default='Cassie-v0', type=str)
parser.add_argument("--traj_len", "-tl", default=400, type=int) # max trajectory length for environment
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--num_steps", default=5000, type=int)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--std', default=0.13, type=float) # the fixed exploration std
parser.add_argument("--a_lr", "-alr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for critic
parser.add_argument("--eps", "-ep", default=1e-5, type=float) # adam eps
parser.add_argument("--kl", default=0.02, type=float) # kl abort threshold
parser.add_argument("--entropy_coeff", default=0.0, type=float)
parser.add_argument("--grad_clip", default=0.05, type=float)
parser.add_argument("--batch_size", default=64, type=int) # batch size for policy update
parser.add_argument("--epochs", default=3, type=int) # number of updates per iter
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--logdir", default="./logs/ppo/", type=str)
parser.add_argument("--workers", default=4, type=int)
parser.add_argument("--redis", default=None, type=str)
args = parser.parse_args()
run_experiment(args)
elif sys.argv[1] == 'sac':
sys.argv.remove(sys.argv[1])
"""
Utility for running Soft Actor-Critic.
"""
from algos.off_policy import run_experiment
parser.add_argument("--seed", default=0, type=int) # number of timesteps to run experiment for
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps to run experiment for
parser.add_argument("--env_name", default='Cassie-v0', type=str)
parser.add_argument("--traj_len", "-tl", default=400, type=int) # max trajectory length for environment
parser.add_argument("--start_timesteps", default=10000, type=int)
parser.add_argument("--eval_every", default=100, type=int)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--tau', default=1e-2, type=float)
parser.add_argument("--a_lr", "-alr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for critic
parser.add_argument("--alpha", default=None, type=float) # adam learning rate for critic
parser.add_argument("--grad_clip", default=0.05, type=float)
parser.add_argument("--batch_size", default=128, type=int) # batch size for policy update
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--logdir", default="./logs/sac/", type=str)
args = parser.parse_args()
args.algo = 'sac'
run_experiment(args)
elif sys.argv[1] == 'eval':
sys.argv.remove(sys.argv[1])
parser.add_argument("--policy", default="./trained_models/ddpg/ddpg_actor.pt", type=str)
parser.add_argument("--env_name", default=None, type=str)
parser.add_argument("--traj_len", default=400, type=int)
args = parser.parse_args()
policy = torch.load(args.policy)
eval_policy(policy, min_timesteps=100000, env_name=args.env_name, max_traj_len=args.traj_len)
elif sys.argv[1] == 'cassie':
sys.argv.remove(sys.argv[1])
from cassie.udp import run_udp
parser.add_argument("--policy", default='logs/ppo/Cassie-nodelta-stateest-clockbased/bcbc77-seed0/actor.pt', type=str)
args = parser.parse_args()
run_udp(args)
else:
print("Invalid option '{}'".format(sys.argv[1]))
# --- swaping 2.py | repo: aash-gates/aash-python-babysteps | license: Unlicense ---
'''
practice question from chapter 1, Module 5 of IBM Digital Nation Courses
by Aashik J Krishnan/Aash Gates
'''
x = 10
y = "ten"
#step 1
x,y = y,x
#printing on next line
print(x)
print(y)
# end of the program
# --- Lab_5_Eigen_Decomposition/eigen_images.py | repo: NahianHasan/ECE63700-Digital_Image_Processing | license: BSD-3-Clause ---
import read_data as RD
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
X = RD.read_data()
print('X = ',X.shape)
X_mean = np.reshape(np.sum(X,1)/X.shape[1],[ X.shape[0],1])
X = X-X_mean
print('X_centerred = ',X.shape)
[U,S,V] = np.linalg.svd(X, full_matrices=False)
print('U = ',U.shape)
print('S = ',S.shape)
print('V = ',V.shape)
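# Note (our addition): with X column-centered, the columns of U are the
# eigenvectors of X @ X.T with eigenvalues S**2, so each U[:, i] is an
# "eigenimage"; a quick sanity check under that assumption:
#   np.allclose((X @ X.T) @ U[:, 0], (S[0]**2) * U[:, 0])   # -> True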
N = 12  # number of eigen images
Eig_im = U[:,0:N]
plt.figure(figsize=(10,10))
for i in range(0,N):
plt.subplot(int(np.sqrt(N)),int(np.ceil(N/int(np.sqrt(N)))),i+1)
im = np.reshape(Eig_im[:,i],[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('Eigen Image = '+str(i+1))
plt.savefig('Eigen_Images.png')
plt.savefig('Eigen_Images.tif')
Y = np.matmul(np.transpose(U),X)
print('Y = ',Y.shape)
plt.figure(figsize=(10,10))
Np = 10  # number of projection coefficients to plot
Ni = 4   # number of images
images = ['a','b','c','d']
for i in range(0,Ni):
plt.plot(np.arange(1,Np+1),Y[0:Np,i],label='Image = '+images[i])
plt.xlabel('Eigenvectors',fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.ylabel('Magnitude of the projection coefficient',fontsize=20)
plt.yticks(weight = 'bold',fontsize=15)
plt.legend(fontsize=20)
plt.savefig('Projection_Coefficients.png')
plt.savefig('Projection_Coefficients.tif')
#Image synthesis
ind = 0#index of the image to synthesize
m = [1, 5, 10, 15, 20, 30]
plt.figure(figsize=(10,15))
for i in range(0,len(m)):
X_hat = np.reshape(np.matmul(U[:,0:m[i]],Y[0:m[i],ind]),[X.shape[0],1])
print(X_hat.shape)
print(X_mean.shape)
X_hat += X_mean
plt.subplot(3,2,i+1)
im = np.reshape(X_hat,[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('m = '+str(m[i]),fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.yticks(weight = 'bold',fontsize=15)
#img_out = Image.fromarray(im.astype(np.uint8))
#img_out.save('Im_reconstruction_'+str(m[i])+'.tif')
plt.savefig('Im_reconstruction.png')
plt.savefig('Im_reconstruction.tif')
# --- activatable_model/models.py | repo: wesleykendall/django-activatable-model | license: MIT ---
from django.db import models
from manager_utils import ManagerUtilsQuerySet, ManagerUtilsManager
from activatable_model.signals import model_activations_changed
# --- my_lambdata/my_mod.py | repo: DevvinK/lambdata-devvink-dspt6 | license: MIT ---
# my_lambdata/my_mod.py
# my_lambdata.my_mod
import pandas as pd
def date_divider(df,date_col):
'''
df: the whole dataframe adding new day, month, year to
date_col: the name of the column the date is stored in
'''
converted_df = df.copy()
converted_df["Year"] = pd.DatetimeIndex(converted_df[date_col]).year
converted_df["Month"] = pd.DatetimeIndex(converted_df[date_col]).month
converted_df["Day"] = pd.DatetimeIndex(converted_df[date_col]).day
return converted_df
if __name__ == "__main__":
    # runnable demo of date_divider (the original __main__ called an undefined
    # enlarge() helper); the sample dates below are arbitrary
    demo = pd.DataFrame({"created": ["2020-01-15", "2019-07-04"]})
    print(date_divider(demo, "created"))
# --- visibility.py | repo: DanielAndreasen/ObservationTools | license: MIT ---
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import numpy as np
import datetime as dt
from dateutil import tz
import pickle
from random import choice
from PyAstronomy import pyasl
from astropy.coordinates import SkyCoord
from astropy.coordinates import name_resolve
import ephem
import argparse
import calendar
try:
from tqdm import tqdm
except ImportError:
tqdm = lambda x: x
import io
import matplotlib.pyplot as plt
import matplotlib
replace_figure = True
try:
from PySide.QtGui import QApplication, QImage
except ImportError:
try:
from PyQt4.QtGui import QApplication, QImage
except ImportError:
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
except ImportError:
replace_figure = False
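# Reconstructed helper (its definition was lost in extraction): assuming the Qt
# imports above, monkey-patch plt.figure so that pressing ctrl+c copies the
# current figure to the clipboard via an in-memory image buffer.
def add_clipboard_to_figures():
    oldfig = plt.figure

    def newfig(*args, **kwargs):
        fig = oldfig(*args, **kwargs)

        def clipboard_handler(event):
            if event.key == 'ctrl+c':
                # savefig() applies the default background/DPI settings that a
                # raw canvas grab would miss
                buf = io.BytesIO()
                fig.savefig(buf)
                QApplication.clipboard().setImage(QImage.fromData(buf.getvalue()))
                buf.close()

        fig.canvas.mpl_connect('key_press_event', clipboard_handler)
        return fig

    plt.figure = newfig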
if replace_figure: add_clipboard_to_figures()
def decdeg2dms(dd):
""" Convert decimal degrees to deg,min,sec """
is_positive = dd >= 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
degrees = degrees if is_positive else -degrees
return (degrees,minutes,seconds)
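# e.g. decdeg2dms(-24.6275) -> (-24.0, 37.0, 39.0)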
ESO_periods = {
104 : [ (2019, 10, 1), (2020, 3, 31)],
103 : [ (2019, 4, 1), (2019, 9, 30)],
102 : [ (2018, 10, 1), (2019, 3, 31)],
101 : [ (2018, 4, 1), (2018, 9, 30)],
100 : [ (2017, 10, 1), (2018, 3, 31)],
99 : [ (2017, 4, 1), (2017, 9, 30)],
98 : [ (2016, 10, 1), (2017, 3, 31)],
97 : [ (2016, 4, 1), (2016, 9, 30)],
96 : [ (2015, 10, 1), (2016, 3, 31)],
95 : [ (2015, 4, 1), (2015, 9, 30)],
94 : [ (2014, 10, 1), (2015, 3, 31)],
93 : [ (2014, 4, 1), (2014, 9, 30)],
92 : [ (2013, 10, 1), (2014, 3, 31)],
}
def get_ESO_period(period):
""" Return the JD of start and end of ESO period """
assert isinstance(period, str) or isinstance(period, int)
P = int(period)
getjd = lambda y,m,d: pyasl.jdcnv(dt.datetime(y, m, d))
jd_start, jd_end = [getjd(*d) for d in ESO_periods[P]]
return jd_start, jd_end
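# e.g. get_ESO_period(103) returns the JDs of 2019-04-01 and 2019-09-30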
def StarObsPlot(year=None, targets=None, observatory=None, period=None,
hover=False, sunless_hours=None, remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
year: int
The year for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
period: string, optional
ESO period for which to calculate the visibility. Overrides `year`.
hover: boolean, optional
If True, color visibility lines when mouse over.
sunless_hours: float, optional
If not None, plot sunless hours above this airmass
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
rcParams['xtick.major.pad'] = 12
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
# set the observatory
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
fig = plt.figure(figsize=(15,10))
fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
# watermak
if not remove_watermark:
fig.text(0.99, 0.99, 'Created with\ngithub.com/iastro-pt/ObservationTools',
fontsize=10, color='gray',
ha='right', va='top', alpha=0.5)
# plotting sunless hours?
shmode = False
if sunless_hours is not None:
shmode = True
# limit in airmass (assumed plane-parallel atm)
shairmass = sunless_hours
# correspoing limit in altitude
from scipy.optimize import bisect
shalt = 90 - bisect(lambda alt: pyasl.airmassPP(alt) - shairmass, 0, 89)
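    # e.g. shairmass = 2.0 gives shalt = 30 degrees, since sec(60 deg) = 2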
if shmode:
fig.subplots_adjust(hspace=0.35)
ax = host_subplot(211)
axsh = host_subplot(212)
plt.text(0.5, 0.47,
"- sunless hours above airmass {:.1f} - \n".format(shairmass),
transform=fig.transFigure, ha='center', va='bottom', fontsize=12)
plt.text(0.5, 0.465,
"the thick line above the curves represents the total sunless hours "\
"for each day of the year",
transform=fig.transFigure, ha='center', va='bottom', fontsize=10)
else:
ax = host_subplot(111)
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
if period is not None:
jd_start, jd_end = get_ESO_period(period)
else:
jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))
jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))
jdbinsize = 1 # every day
each_day = np.arange(jd_start, jd_end, jdbinsize)
jds = []
## calculate the mid-dark times
sun = ephem.Sun()
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer();
s.date = date_formatted;
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
jds.append(ephem.julian_date(s.next_antitransit(sun)))
jds = np.array(jds)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec, \
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
ax.plot( jdsub, altaz[0], '-', color='k')
# label for each target
plabel = "[{0:2d}] {1!s}".format(n+1, target['name'])
# number of target at the top of the curve
ind_label = np.argmax(altaz[0])
# or at the bottom if the top is too close to the corners
# if jdsub[ind_label] < 5 or jdsub[ind_label] > jdsub.max()-5:
# ind_label = np.argmin(altaz[0])
ax.text( jdsub[ind_label], altaz[0][ind_label], str(n+1), color="b", fontsize=14, \
fontproperties=font1, va="bottom", ha="center")
if n+1 == 29:
# too many?
ax.text(1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=10, fontproperties=font0, color="r")
else:
ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
if shmode:
sunless_hours = []
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer();
s.date = date_formatted;
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
# hours from sunrise to sunset
td = pyasl.daycnv(ephem.julian_date(s.next_setting(sun)), mode='dt') \
- pyasl.daycnv(ephem.julian_date(s.next_rising(sun)), mode='dt')
sunless_hours.append(24 - td.total_seconds() / 3600)
days = each_day - np.floor(each_day[0])
axsh.plot(days, sunless_hours, '-', color='k', lw=2)
axsh.set(ylim=(0, 15), yticks=range(1,15), ylabel='Useful hours',
yticklabels=[r'${}^{{\rm h}}$'.format(n) for n in range(1,15)])
ax.text(1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
if period is None:
months = range(1, 13)
ndays = [0] + [calendar.monthrange(date, m)[1] for m in months]
ax.set_xlim([0, 366])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
ax.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 366])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
axsh.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
if int(period) % 2 == 0:
# even ESO period, Oct -> Mar
months = [10, 11, 12, 1, 2, 3]
ndays = [0] + [calendar.monthrange(date, m)[1] for m in months]
ax.set_xlim([0, 181])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
ax.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 181])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
axsh.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
# odd ESO period, Apr -> Sep
months = range(4, 10)
ndays = [0] + [calendar.monthrange(date, m)[1] for m in months]
ax.set_xlim([0, 182])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
ax.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 182])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
axsh.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if axrange[1]-axrange[0] <= 1.0:
jdhours = np.arange(0,3,1.0/24.)
utchours = (np.arange(0,72,dtype=int)+12)%24
else:
jdhours = np.arange(0,3,1.0/12.)
utchours = (np.arange(0,72, 2, dtype=int)+12)%24
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twin()
# Set upper x ticks
ax2.set_xticks(np.cumsum(ndays))
ax2.set_xlabel("Day")
# plane-parallel airmass
airmass_ang = np.arange(10, 81, 5)
geo_airmass = pyasl.airmass.airmassPP(airmass_ang)[::-1]
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat)#, rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=32)
ax2.tick_params(axis="y", pad=6, labelsize=8)
plt.text(1.02,-0.04, "Plane-parallel", transform=ax.transAxes, ha='left', \
va='top', fontsize=10, rotation=90)
ax22 = ax.twin()
ax22.set_xticklabels([])
ax22.set_frame_on(True)
ax22.patch.set_visible(False)
ax22.yaxis.set_ticks_position('right')
ax22.yaxis.set_label_position('right')
ax22.spines['right'].set_position(('outward', 30))
ax22.spines['right'].set_color('k')
ax22.spines['right'].set_visible(True)
airmass2 = list(map(lambda ang: pyasl.airmass.airmassSpherical(90. - ang, obs['altitude']), airmass_ang))
ax22.set_yticks(airmass_ang)
airmassformat = []
for t in range(len(airmass2)):
airmassformat.append(" {0:2.2f}".format(airmass2[t]))
ax22.set_yticklabels(airmassformat, rotation=90)
ax22.tick_params(axis="y", pad=8, labelsize=8)
plt.text(1.05,-0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top', \
fontsize=10, rotation=90)
ax.set_ylim([0, 91])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size):
ytickformat.append(str(int(yticks[t]))+r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=16)
ax.set_ylabel("Altitude", fontsize=18)
yticksminor = ax.get_yticks(minor=True)
ymind = np.where( yticksminor % 15. != 0. )[0]
yticksminor = yticksminor[ymind]
ax.set_yticks(yticksminor, minor=True)
m_ytickformat = []
for t in range(yticksminor.size):
m_ytickformat.append(str(int(yticksminor[t]))+r"$^\circ$")
ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 91])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
if period is not None:
plt.text(0.5, 0.95,
"Visibility over P{0!s}\n - altitudes at mid-dark time -".format(period),
transform=fig.transFigure, ha='center', va='bottom', fontsize=12)
else:
plt.text(0.5, 0.95,
"Visibility over {0!s}\n - altitudes at mid-dark time -".format(date),
transform=fig.transFigure, ha='center', va='bottom', fontsize=12)
obsco = "Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:4f} m".format(obs['longitude'], obs['latitude'], obs['altitude'])
plt.text(0.01,0.97, obsco, transform=fig.transFigure, ha='left', va='center', fontsize=10)
plt.text(0.01,0.95, obs['name'], transform=fig.transFigure, ha='left', va='center', fontsize=10)
# interactive!
if hover:
main_axis = fig.axes[0]
all_lines = set(main_axis.get_lines())
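    # Reconstructed callback (its definition was lost in extraction): color the
    # hovered visibility curve red and reset all the others to black.
    def on_plot_hover(event):
      for line in main_axis.get_lines():
        if line.contains(event)[0]:
          line.set_color('red')
          for other_line in all_lines - {line}:
            other_line.set_color('black')
          fig.canvas.draw_idle()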
fig.canvas.mpl_connect('motion_notify_event', on_plot_hover)
return fig
def VisibilityPlot(date=None, targets=None, observatory=None, plotLegend=True,
showMoonDist=True, print2file=False, remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
date: datetime
The date for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
    The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
plotLegend: boolean, optional
If True (default), show a legend.
showMoonDist : boolean, optional
If True (default), the Moon distance will be shown.
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
rcParams['xtick.major.pad'] = 12
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
# observer = ephem.Observer()
# observer.pressure = 0
# observer.horizon = '-0:34'
# observer.lat, observer.lon = obs['latitude'], obs['longitude']
# observer.date = date
# print(observer.date)
# print(observer.previous_rising(ephem.Sun()))
# print(observer.next_setting(ephem.Sun()))
# print(observer.previous_rising(ephem.Moon()))
# print(observer.next_setting(ephem.Moon()))
# observer.horizon = '-6'
# noon = observer.next_transit(ephem.Sun())
# print(noon)
# print(observer.previous_rising(ephem.Sun(), start=noon, use_center=True))
# print()
fig = plt.figure(figsize=(15,10))
fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
# watermak
if not remove_watermark:
fig.text(0.99, 0.99, 'Created with\ngithub.com/iastro-pt/ObservationTools',
fontsize=10, color='gray',
ha='right', va='top', alpha=0.5)
ax = host_subplot(111)
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
# JD array
jdbinsize = 1.0/24./20.
# jds = np.arange(allData[n]["Obs jd"][0], allData[n]["Obs jd"][2], jdbinsize)
jd = pyasl.jdcnv(date)
jd_start = pyasl.jdcnv(date)-0.5
jd_end = pyasl.jdcnv(date)+0.5
jds = np.arange(jd_start, jd_end, jdbinsize)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones(jds.size)*target_ra, np.ones(jds.size)*target_dec, \
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# Get alt/az of Sun
sun_position = pyasl.sunpos(jd)
sun_ra, sun_dec = sun_position[1], sun_position[2]
sunpos_altaz = pyasl.eq2hor(jds, np.ones(jds.size)*sun_ra, np.ones(jds.size)*sun_dec, \
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# Define plot label
plabel = "[{0:2d}] {1!s}".format(n+1, target['name'])
# Find periods of: day, twilight, and night
day = np.where( sunpos_altaz[0] >= 0. )[0]
twi = np.where( np.logical_and(sunpos_altaz[0] > -18., sunpos_altaz[0] < 0.) )[0]
night = np.where( sunpos_altaz[0] <= -18. )[0]
if (len(day) == 0) and (len(twi) == 0) and (len(night) == 0):
print()
print("VisibilityPlot - no points to draw")
print()
mpos = pyasl.moonpos(jds)
# mpha = pyasl.moonphase(jds)
# mpos_altaz = pyasl.eq2hor(jds, mpos[0], mpos[1],
# lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# moonind = np.where( mpos_altaz[0] > 0. )[0]
if showMoonDist:
mdist = pyasl.getAngDist(mpos[0], mpos[1], np.ones(jds.size)*target_ra, \
np.ones(jds.size)*target_dec)
bindist = int((2.0/24.)/jdbinsize)
firstbin = np.random.randint(0,bindist)
for mp in range(0, int(len(jds)/bindist)):
bind = firstbin+mp*bindist
if altaz[0][bind]-1. < 5.: continue
ax.text(jdsub[bind], altaz[0][bind]-1., str(int(mdist[bind]))+r"$^\circ$", ha="center", va="top", \
fontsize=8, stretch='ultra-condensed', fontproperties=font0, alpha=1.)
if len(twi) > 1:
# There are points in twilight
linebreak = np.where( (jdsub[twi][1:]-jdsub[twi][:-1]) > 2.0*jdbinsize)[0]
if len(linebreak) > 0:
plotrjd = np.insert(jdsub[twi], linebreak+1, np.nan)
plotdat = np.insert(altaz[0][twi], linebreak+1, np.nan)
ax.plot( plotrjd, plotdat, "-", color='#BEBEBE', linewidth=1.5)
else:
ax.plot( jdsub[twi], altaz[0][twi], "-", color='#BEBEBE', linewidth=1.5)
ax.plot( jdsub[night], altaz[0][night], '.k', label=plabel)
ax.plot( jdsub[day], altaz[0][day], '.', color='#FDB813')
altmax = np.argmax(altaz[0])
ax.text( jdsub[altmax], altaz[0][altmax], str(n+1), color="b", fontsize=14, \
fontproperties=font1, va="bottom", ha="center")
if n+1 == 29:
ax.text( 1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=10, fontproperties=font0, color="r")
else:
ax.text( 1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
ax.text( 1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
ax.set_xlabel("UT [hours]")
if axrange[1]-axrange[0] <= 1.0:
jdhours = np.arange(0,3,1.0/24.)
utchours = (np.arange(0,72,dtype=int)+12)%24
else:
jdhours = np.arange(0,3,1.0/12.)
utchours = (np.arange(0,72, 2, dtype=int)+12)%24
ax.set_xticks(jdhours)
ax.set_xlim(axrange)
ax.set_xticklabels(utchours, fontsize=18)
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twin()
# Set upper x ticks
ax2.set_xticks(jdhours)
ax2.set_xticklabels(utchours, fontsize=18)
ax2.set_xlabel("UT [hours]")
# Horizon angle for airmass
airmass_ang = np.arange(5.,90.,5.)
geo_airmass = pyasl.airmass.airmassPP(90.-airmass_ang)
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat, rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=32)
ax2.tick_params(axis="y", pad=10, labelsize=10)
plt.text(1.015,-0.04, "Plane-parallel", transform=ax.transAxes, ha='left', \
va='top', fontsize=10, rotation=90)
ax22 = ax.twin()
ax22.set_xticklabels([])
ax22.set_frame_on(True)
ax22.patch.set_visible(False)
ax22.yaxis.set_ticks_position('right')
ax22.yaxis.set_label_position('right')
ax22.spines['right'].set_position(('outward', 25))
ax22.spines['right'].set_color('k')
ax22.spines['right'].set_visible(True)
airmass2 = list(map(lambda ang: pyasl.airmass.airmassSpherical(90. - ang, obs['altitude']), airmass_ang))
ax22.set_yticks(airmass_ang)
airmassformat = []
for t in airmass2:
airmassformat.append("{0:2.2f}".format(t))
ax22.set_yticklabels(airmassformat, rotation=90)
ax22.tick_params(axis="y", pad=10, labelsize=10)
plt.text(1.045,-0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top', \
fontsize=10, rotation=90)
ax3 = ax.twiny()
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
ax3.xaxis.set_ticks_position('bottom')
ax3.xaxis.set_label_position('bottom')
ax3.spines['bottom'].set_position(('outward', 50))
ax3.spines['bottom'].set_color('k')
ax3.spines['bottom'].set_visible(True)
ltime, ldiff = pyasl.localtime.localTime(utchours, np.repeat(obs['longitude'], len(utchours)))
jdltime = jdhours - ldiff/24.
ax3.set_xticks(jdltime)
ax3.set_xticklabels(utchours)
ax3.set_xlim([axrange[0],axrange[1]])
ax3.set_xlabel("Local time [hours]")
ax.set_ylim([0, 91])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size): ytickformat.append(str(int(yticks[t]))+r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=16)
ax.set_ylabel("Altitude", fontsize=18)
yticksminor = ax.get_yticks(minor=True)
ymind = np.where( yticksminor % 15. != 0. )[0]
yticksminor = yticksminor[ymind]
ax.set_yticks(yticksminor, minor=True)
m_ytickformat = []
for t in range(yticksminor.size): m_ytickformat.append(str(int(yticksminor[t]))+r"$^\circ$")
ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 91])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
plt.text(0.5,0.95,"Visibility on {0!s}".format(date.date()), \
transform=fig.transFigure, ha='center', va='bottom', fontsize=20)
if plotLegend:
line1 = matplotlib.lines.Line2D((0,0),(1,1), color='#FDB813', linestyle="-", linewidth=2)
line2 = matplotlib.lines.Line2D((0,0),(1,1), color='#BEBEBE', linestyle="-", linewidth=2)
line3 = matplotlib.lines.Line2D((0,0),(1,1), color='k', linestyle="-", linewidth=2)
lgd2 = plt.legend((line1,line2,line3),("day","twilight","night",), \
bbox_to_anchor=(0.88, 0.13), loc='best', borderaxespad=0.,prop={'size':12}, fancybox=True)
lgd2.get_frame().set_alpha(.5)
obsco = "Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:4f} m".format(obs['longitude'], obs['latitude'], obs['altitude'])
plt.text(0.01,0.97, obsco, transform=fig.transFigure, ha='left', va='center', fontsize=10)
plt.text(0.01,0.95, obs['name'], transform=fig.transFigure, ha='left', va='center', fontsize=10)
return fig
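# Example usage (sketch; the target and observatory names below are
# illustrative -- pick any code from pyasl.listObservatories().keys()):
#
#   import datetime as dt
#   from astropy.coordinates import SkyCoord
#   targets = [{'name': 'HD 189733', 'coord': SkyCoord.from_name('HD 189733')}]
#   fig = VisibilityPlot(date=dt.datetime(2018, 5, 22), targets=targets,
#                        observatory='eso')
#   fig.savefig('visibility.png')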
if __name__ == '__main__':
args = _parser()
target_names = args.targets[0].split(',')
## Get coordinates for all the targets
targets = []
# flush keyword was not backported to Python < 3.3
if sys.version_info[:2] < (3, 3):
print('Sending queries to CDS...', end=' '); sys.stdout.flush()
else:
print('Sending queries to CDS...', end=' ', flush=True)
for target_name in tqdm(target_names):
try:
targets.append({'name': target_name,
'coord': CacheSkyCoord.from_name(target_name)})
except name_resolve.NameResolveError as e:
print('Could not find target: {0!s}'.format(target_name))
## Just print coordinates in STARALT format and exit
if args.c:
print('Coordinates for {0!s}\n'.format(args.targets[0]))
for target in targets:
## name hh mm ss dd mm ss
out = '{0!s}'.format(target['name'])
ra = target['coord'].ra.hms
out += ' {0:02d} {1:02d} {2:5.3f}'.format(int(ra.h), int(ra.m), ra.s)
dec = target['coord'].dec.dms
out += ' {0:02d} {1:02d} {2:5.3f}'.format(int(dec.d), int(dec.m), dec.s)
print(out)
sys.exit(0)
## Actually calculate the visibility curves
print('Calculating visibility for {0!s}'.format(args.targets[0]))
P = args.period
if args.period is not None:
if args.mode != 'starobs':
print('Specifying ESO period is only possible in "starobs" mode')
sys.exit(1)
P = args.period[0]
P = P.replace('P','') # if user gave --period P100, for example
if args.date == 'today':
if args.mode == 'staralt':
today = dt.datetime.now() # now() gives the current time which we don't want
date = dt.datetime(today.year, today.month, today.day, tzinfo=tz.tzutc())
elif args.mode == 'starobs':
date = dt.datetime.now().year
else:
if args.mode == 'staralt':
if "-" not in args.date:
raise ValueError("Date needs to be provided as YYYY-MM-DD for staralt mode.")
ymd = [int(i) for i in args.date.split('-')]
date = dt.datetime(*ymd)
elif args.mode == 'starobs':
if "-" in args.date:
date = int(args.date.split('-')[0])
else:
date = int(args.date)
## Find observatory
if args.loc is None:
available_sites = pyasl.listObservatories(show=False)
if args.site not in available_sites.keys():
print('"{0!s}" is not a valid observatory code. Try one of the following:\n'.format(args.site))
maxCodeLen = max(map(len, available_sites.keys()))
print(("{0:"+str(maxCodeLen)+"s} ").format("Code") + "Observatory name")
print("-" * (21+maxCodeLen))
for k in sorted(available_sites.keys(), key=lambda s: s.lower()):
print(("{0:"+str(maxCodeLen)+"s} --- ").format(k) + available_sites[k]["name"])
sys.exit(1)
site = args.site
else:
loc = list(map(float, args.loc.split(',')))
site = {'altitude':loc[0], 'latitude': loc[1], 'longitude':loc[2], 'tz':loc[3], 'name':'unknown'}
if args.mode == 'staralt':
fig = VisibilityPlot(date=date, targets=targets, observatory=site,
remove_watermark=args.remove_watermark)
elif args.mode == 'starobs':
if args.A is not None:
am = args.A[0]
else:
am = None
fig = StarObsPlot(year=date, targets=targets, observatory=site,
period=P, hover=args.hover, sunless_hours=am,
remove_watermark=args.remove_watermark)
if args.save is not None:
print('Saving the figure to {}'.format(args.save[0]))
fig.savefig(args.save[0])
else:
plt.show()
| 37.259124 | 127 | 0.631795 |
4820cb952aabf646bc6ba2c9f17988cb0a784a1d | 2,410 | py | Python | brainrender/Utils/parsers/rat.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | [
"MIT"
] | null | null | null | brainrender/Utils/parsers/rat.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | [
"MIT"
] | null | null | null | brainrender/Utils/parsers/rat.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | [
"MIT"
] | null | null | null | import sys
sys.path.append('./')
import os
import pandas as pd
from vtkplotter import load
from brainrender import DEFAULT_STRUCTURE_COLOR
def get_rat_regions_metadata(metadata_fld):
"""
:param metadata_fld:
"""
return pd.read_pickle(os.path.join(metadata_fld, "rat_structures.pkl"))
def get_rat_mesh_from_region(region, paths, use_original_color=False, **kwargs):
"""
:param region:
:param paths:
:param use_original_color: (Default value = False)
:param **kwargs:
"""
if not isinstance(region, (tuple, list)):
region = [region]
check = False
else: check = True
metadata = get_rat_regions_metadata(paths.metadata)
meshes = []
for reg in region:
if isinstance(reg, int):
entry = metadata.loc[metadata.Id == reg]
elif isinstance(reg, str):
entry = metadata.loc[metadata['Name'] == reg]
else:
raise ValueError("Unrecognized value for region while trying to get mesh for rat: {}".format(reg))
try:
meshname = os.path.join(paths.rat_meshes, "label_{}.stl".format(entry.Id.values[0]))
if not os.path.isfile(meshname):
raise FileExistsError(meshname)
if use_original_color:
c = entry["rgb"].values[0]
if isinstance(c, str):
c = c.replace("[", "")
c = c.replace("]", "")
cols = c.split(",")
color = [int(c) for c in cols]
else:
color = c
else:
if "color" in list(kwargs.keys()):
color = kwargs.pop("color", DEFAULT_STRUCTURE_COLOR)
elif "c" in list(kwargs.keys()):
color = kwargs.pop("c", DEFAULT_STRUCTURE_COLOR)
if "color" in list(kwargs.keys()): del kwargs["color"]
elif "c" in list(kwargs.keys()): del kwargs["c"]
mesh = load(meshname, c=color, **kwargs)
mesh = mesh.smoothLaplacian().subdivide(2)
meshes.append(mesh)
except Exception:
print("Could not load rat region: {}".format(entry["Name"].values[0]))
return None
if not check:
return meshes[0]
else:
return meshes
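# Example usage (sketch; pass a numeric Id or a Name present in
# rat_structures.pkl, plus the brainrender paths object used above):
#
#   mesh = get_rat_mesh_from_region(100, paths, use_original_color=True)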
if __name__ == "__main__":
pass
# fix_data()  # NOTE: fix_data() is not defined in this module
| 29.036145 | 110 | 0.546473 |
482206f17e299eb89f694f10375879badc6e9f3d | 8,678 | py | Python | components/isceobj/Alos2burstProc/runFrameMosaic.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-08-18T13:00:39.000Z | 2020-08-18T13:00:39.000Z | components/isceobj/Alos2burstProc/runFrameMosaic.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | components/isceobj/Alos2burstProc/runFrameMosaic.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
# Author: Cunren Liang
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import logging
import isceobj
from isceobj.Alos2Proc.runFrameMosaic import frameMosaic
from isceobj.Alos2Proc.runFrameMosaic import frameMosaicParameters
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
logger = logging.getLogger('isce.alos2burstinsar.runFrameMosaic')
def runFrameMosaic(self):
'''mosaic frames
'''
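# Two cases below: with a single frame, the per-frame mosaic products are
# linked/copied and the track parameters are inherited from that frame; with
# several frames, the products are mosaicked using either matching or
# geometrical frame offsets.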
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
referenceTrack = self._insar.loadTrack(reference=True)
secondaryTrack = self._insar.loadTrack(reference=False)
mosaicDir = 'insar'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
numberOfFrames = len(referenceTrack.frames)
if numberOfFrames == 1:
import shutil
frameDir = os.path.join('f1_{}/mosaic'.format(self._insar.referenceFrames[0]))
if not os.path.isfile(self._insar.interferogram):
os.symlink(os.path.join('../', frameDir, self._insar.interferogram), self._insar.interferogram)
#shutil.copy2() can overwrite
shutil.copy2(os.path.join('../', frameDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
shutil.copy2(os.path.join('../', frameDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
if not os.path.isfile(self._insar.amplitude):
os.symlink(os.path.join('../', frameDir, self._insar.amplitude), self._insar.amplitude)
shutil.copy2(os.path.join('../', frameDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
shutil.copy2(os.path.join('../', frameDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
# os.rename(os.path.join('../', frameDir, self._insar.interferogram), self._insar.interferogram)
# os.rename(os.path.join('../', frameDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
# os.rename(os.path.join('../', frameDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
# os.rename(os.path.join('../', frameDir, self._insar.amplitude), self._insar.amplitude)
# os.rename(os.path.join('../', frameDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
# os.rename(os.path.join('../', frameDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
#update track parameters
#########################################################
#mosaic size
referenceTrack.numberOfSamples = referenceTrack.frames[0].numberOfSamples
referenceTrack.numberOfLines = referenceTrack.frames[0].numberOfLines
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
referenceTrack.startingRange = referenceTrack.frames[0].startingRange
referenceTrack.rangeSamplingRate = referenceTrack.frames[0].rangeSamplingRate
referenceTrack.rangePixelSize = referenceTrack.frames[0].rangePixelSize
#azimuth parameters
referenceTrack.sensingStart = referenceTrack.frames[0].sensingStart
referenceTrack.prf = referenceTrack.frames[0].prf
referenceTrack.azimuthPixelSize = referenceTrack.frames[0].azimuthPixelSize
referenceTrack.azimuthLineInterval = referenceTrack.frames[0].azimuthLineInterval
#update track parameters, secondary
#########################################################
#mosaic size
secondaryTrack.numberOfSamples = secondaryTrack.frames[0].numberOfSamples
secondaryTrack.numberOfLines = secondaryTrack.frames[0].numberOfLines
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
secondaryTrack.startingRange = secondaryTrack.frames[0].startingRange
secondaryTrack.rangeSamplingRate = secondaryTrack.frames[0].rangeSamplingRate
secondaryTrack.rangePixelSize = secondaryTrack.frames[0].rangePixelSize
#azimuth parameters
secondaryTrack.sensingStart = secondaryTrack.frames[0].sensingStart
secondaryTrack.prf = secondaryTrack.frames[0].prf
secondaryTrack.azimuthPixelSize = secondaryTrack.frames[0].azimuthPixelSize
secondaryTrack.azimuthLineInterval = secondaryTrack.frames[0].azimuthLineInterval
else:
#choose offsets
if self.frameOffsetMatching:
rangeOffsets = self._insar.frameRangeOffsetMatchingReference
azimuthOffsets = self._insar.frameAzimuthOffsetMatchingReference
else:
rangeOffsets = self._insar.frameRangeOffsetGeometricalReference
azimuthOffsets = self._insar.frameAzimuthOffsetGeometricalReference
#list of input files
inputInterferograms = []
inputAmplitudes = []
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
inputInterferograms.append(os.path.join('../', frameDir, 'mosaic', self._insar.interferogram))
inputAmplitudes.append(os.path.join('../', frameDir, 'mosaic', self._insar.amplitude))
#note that track parameters are updated after mosaicking
#mosaic amplitudes
frameMosaic(referenceTrack, inputAmplitudes, self._insar.amplitude,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1,
updateTrack=False, phaseCompensation=False, resamplingMethod=0)
#mosaic interferograms
frameMosaic(referenceTrack, inputInterferograms, self._insar.interferogram,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1,
updateTrack=True, phaseCompensation=True, resamplingMethod=1)
create_xml(self._insar.amplitude, referenceTrack.numberOfSamples, referenceTrack.numberOfLines, 'amp')
create_xml(self._insar.interferogram, referenceTrack.numberOfSamples, referenceTrack.numberOfLines, 'int')
#update secondary parameters here
#do not match for secondary, always use geometrical
rangeOffsets = self._insar.frameRangeOffsetGeometricalSecondary
azimuthOffsets = self._insar.frameAzimuthOffsetGeometricalSecondary
frameMosaicParameters(secondaryTrack, rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1)
os.chdir('../')
#save parameter file
self._insar.saveProduct(referenceTrack, self._insar.referenceTrackParameter)
self._insar.saveProduct(secondaryTrack, self._insar.secondaryTrackParameter)
#mosaic spectral diversity inteferograms
mosaicDir = 'sd'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
numberOfFrames = len(referenceTrack.frames)
if numberOfFrames == 1:
import shutil
frameDir = os.path.join('f1_{}/mosaic'.format(self._insar.referenceFrames[0]))
for sdFile in self._insar.interferogramSd:
if not os.path.isfile(sdFile):
os.symlink(os.path.join('../', frameDir, sdFile), sdFile)
shutil.copy2(os.path.join('../', frameDir, sdFile+'.vrt'), sdFile+'.vrt')
shutil.copy2(os.path.join('../', frameDir, sdFile+'.xml'), sdFile+'.xml')
else:
#choose offsets
if self.frameOffsetMatching:
rangeOffsets = self._insar.frameRangeOffsetMatchingReference
azimuthOffsets = self._insar.frameAzimuthOffsetMatchingReference
else:
rangeOffsets = self._insar.frameRangeOffsetGeometricalReference
azimuthOffsets = self._insar.frameAzimuthOffsetGeometricalReference
#list of input files
inputSd = [[], [], []]
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
for k, sdFile in enumerate(self._insar.interferogramSd):
inputSd[k].append(os.path.join('../', frameDir, 'mosaic', sdFile))
#mosaic spectral diversity interferograms
for inputSdList, outputSdFile in zip(inputSd, self._insar.interferogramSd):
frameMosaic(referenceTrack, inputSdList, outputSdFile,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1,
updateTrack=False, phaseCompensation=True, resamplingMethod=1)
for sdFile in self._insar.interferogramSd:
create_xml(sdFile, referenceTrack.numberOfSamples, referenceTrack.numberOfLines, 'int')
os.chdir('../')
catalog.printToLog(logger, "runFrameMosaic")
self._insar.procDoc.addAllFromCatalog(catalog)
| 50.748538 | 139 | 0.698779 |
48223ac36e2493351b3ff6303137a45f254fb804 | 820 | py | Python | acronym.py | steffenschroeder/python-playground | 3c94a7c92a26d41e69118e8245e8ac9db7cf5ed2 | [
"MIT"
] | null | null | null | acronym.py | steffenschroeder/python-playground | 3c94a7c92a26d41e69118e8245e8ac9db7cf5ed2 | [
"MIT"
] | null | null | null | acronym.py | steffenschroeder/python-playground | 3c94a7c92a26d41e69118e8245e8ac9db7cf5ed2 | [
"MIT"
] | null | null | null | import unittest
if __name__ == '__main__':
unittest.main() | 27.333333 | 87 | 0.687805 |
4822b67c5088178025d58774742a32a17ce91c77 | 834 | py | Python | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
| 33.36 | 83 | 0.57554 |
4823bb4588dd2055c421f82a83ae6da7fc0c5d90 | 7,915 | py | Python | ssd_data/augmentations/geometrics.py | star-baba/res50_sa_ssd | c7c0f218307b50e7ac1fd8945868df01f1743467 | [
"MIT"
] | 1 | 2020-06-16T12:53:25.000Z | 2020-06-16T12:53:25.000Z | ssd_data/augmentations/geometrics.py | star-baba/res50_sa_ssd | c7c0f218307b50e7ac1fd8945868df01f1743467 | [
"MIT"
] | 3 | 2020-06-06T02:00:34.000Z | 2020-06-23T16:38:25.000Z | ssd_data/augmentations/geometrics.py | star-baba/res50_sa_ssd | c7c0f218307b50e7ac1fd8945868df01f1743467 | [
"MIT"
] | 1 | 2021-04-27T06:40:21.000Z | 2021-04-27T06:40:21.000Z | from numpy import random
import numpy as np
import logging
from ._utils import decision
from ssd.core.boxes.utils import iou_numpy, centroids2corners_numpy, corners2centroids_numpy
from .base import Compose
| 36.307339 | 115 | 0.586734 |
48246329c18e90c00165cc92ef48bb7d9a328558 | 5,200 | py | Python | tests/unit_tests/prepare_email/test_mail_segmenting.py | farthur/melusine | 121fbb17da221b12186a275d5843b466ce65d954 | [
"Apache-2.0"
] | null | null | null | tests/unit_tests/prepare_email/test_mail_segmenting.py | farthur/melusine | 121fbb17da221b12186a275d5843b466ce65d954 | [
"Apache-2.0"
] | null | null | null | tests/unit_tests/prepare_email/test_mail_segmenting.py | farthur/melusine | 121fbb17da221b12186a275d5843b466ce65d954 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from melusine.prepare_email.mail_segmenting import structure_email, tag_signature
structured_historic = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 10:20, \
<gestionsocietaire@mutuelle.fr> a écrit\xa0:",
},
]
output = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journe", "tags": "GREETINGS"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 10:20",
"from": " <gestionsocietaire@mutuelle.fr> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers \
PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
structured_historic_signature = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée\nJean Dupont",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 10:20, \
<gestionsocietaire@mutuelle.fr> a écrit\xa0:",
},
]
output_signature = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journe", "tags": "GREETINGS"},
{"part": "Jean Dupont", "tags": "SIGNATURE"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 10:20",
"from": " <gestionsocietaire@mutuelle.fr> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relev d'identit postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers PDF ncessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
| 35.616438 | 93 | 0.497308 |
48253cfafc062954525ca06fd5152e38d4e70a79 | 295 | py | Python | Modulos/Modulo2/app3.py | Trallyan/Curso_Udemy | 2a5c0becd14352f2cc2bf2362d1e5494edd6198d | [
"MIT"
] | null | null | null | Modulos/Modulo2/app3.py | Trallyan/Curso_Udemy | 2a5c0becd14352f2cc2bf2362d1e5494edd6198d | [
"MIT"
] | null | null | null | Modulos/Modulo2/app3.py | Trallyan/Curso_Udemy | 2a5c0becd14352f2cc2bf2362d1e5494edd6198d | [
"MIT"
] | null | null | null | idade = 18
carteiramotorista = True
print (idade >= 18 and carteiramotorista == True)
print ("Pode Dirigir")
velocidade = 90
radar = 100
radarfuncionando = False
print (velocidade > radar and radarfuncionando == True)
print ("No foi multado")
velocidade1 = 101
print (velocidade1 >= radar) | 18.4375 | 55 | 0.735593 |
482633d918d23f0a510e111cc0ad6f51458a51a4 | 1,233 | py | Python | examples/basic/merge_instance.py | talashilkarraj/spydrnet-physical | d13bcbb0feef7d5c93aa60af4a916f837128a5ad | [
"BSD-3-Clause"
] | 3 | 2021-11-05T18:25:21.000Z | 2022-03-02T22:03:02.000Z | examples/basic/merge_instance.py | talashilkarraj/spydrnet-physical | d13bcbb0feef7d5c93aa60af4a916f837128a5ad | [
"BSD-3-Clause"
] | null | null | null | examples/basic/merge_instance.py | talashilkarraj/spydrnet-physical | d13bcbb0feef7d5c93aa60af4a916f837128a5ad | [
"BSD-3-Clause"
] | 2 | 2022-01-10T14:27:59.000Z | 2022-03-13T08:21:33.000Z | """
===================================
Merging two instances in the design
===================================
This example demonstrates how to merge two instances in the design to create a
new merged definition.
.. hdl-diagram:: ../../../examples/basic/_initial_design_merge.v
:type: netlistsvg
:align: center
:module: top
**Output1** Merged design Instance
.. hdl-diagram:: ../../../examples/basic/_merged_design.v
:type: netlistsvg
:align: center
:module: top
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
import logging
logger = logging.getLogger('spydrnet_logs')
sdn.enable_file_logging(LOG_LEVEL='INFO')
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
sdn.compose(netlist, '_initial_design_merge.v', skip_constraints=True)
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
top = netlist.top_instance.reference
inst1 = next(top.get_instances("inst_1_0"))
inst2 = next(top.get_instances("inst_1_1"))
top.merge_instance([inst1, inst2],
new_definition_name="merged_module",
new_instance_name="merged_module_instance_0")
top.create_unconn_wires()
sdn.compose(netlist, '_merged_design.v', skip_constraints=True)
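# Optional sanity check (sketch; assumes get_definitions resolves the new
# definition by name):
#   print(next(netlist.get_definitions("merged_module")).name)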
| 26.804348 | 80 | 0.703163 |
482697dcf4d097846528ae15ee8dbca33b6e86d7 | 525 | py | Python | splunge.py | neilebliss/reddit_bot | 74be4b57ddbdf9fe0d9876207388ee2778b4a50d | [
"Unlicense"
] | null | null | null | splunge.py | neilebliss/reddit_bot | 74be4b57ddbdf9fe0d9876207388ee2778b4a50d | [
"Unlicense"
] | null | null | null | splunge.py | neilebliss/reddit_bot | 74be4b57ddbdf9fe0d9876207388ee2778b4a50d | [
"Unlicense"
] | null | null | null | import praw
import re
import os
reddit = praw.Reddit('Splunge Bot v1', client_id=os.environ['REDDIT_CLIENT_ID'], client_secret=os.environ['REDDIT_CLIENT_SECRET'], password=os.environ['REDDIT_PASSWORD'], username=os.environ['REDDIT_USERNAME'])
subreddit = reddit.subreddit('tubasaur')
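# The trigger below is case-insensitive, so "splunge", "Splunge", "SPLUNGE",
# or the word embedded anywhere in a comment body will all match.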
for submission in subreddit.new(limit=5):
for top_level_comment in submission.comments:
if re.search('splunge', top_level_comment.body, re.IGNORECASE):
top_level_comment.reply("Well, yeah, splunge for me too!")
print("Splunged.")
| 40.384615 | 210 | 0.775238 |
48285ffa4d4045b7cf655571731a70ba6854e4b3 | 19,519 | py | Python | cogv3/admin/managecommands.py | XFazze/discordbot | 6b4201a6d6ff1bed5f65de4b4d30738b4d51e223 | [
"MIT"
] | 2 | 2021-07-29T02:39:36.000Z | 2021-07-29T02:39:38.000Z | cogv3/admin/managecommands.py | XFazze/discordbot | 6b4201a6d6ff1bed5f65de4b4d30738b4d51e223 | [
"MIT"
] | 2 | 2021-08-16T08:31:24.000Z | 2021-09-20T16:34:58.000Z | cogv3/admin/managecommands.py | XFazze/discordbot | 6b4201a6d6ff1bed5f65de4b4d30738b4d51e223 | [
"MIT"
] | null | null | null | import discord
from discord import embeds
from discord.ext import commands
from discord.ext.commands.core import command
from pymongo import MongoClient, collation
from discord_components import Button, Select, SelectOption, ComponentsBot
from discord.utils import get
| 42.06681 | 230 | 0.56217 |
48289ef712fad809681babbffb67acddcce6b08d | 13,910 | py | Python | edb/pgsql/compiler/context.py | OhBonsai/edgedb | 786c853090b90f3005cb65014194d0dbd45d6fcc | [
"Apache-2.0"
] | 2 | 2019-01-21T05:43:52.000Z | 2019-05-24T02:53:14.000Z | edb/pgsql/compiler/context.py | ciusji/edgedb | 1c68c02430a92464839f03f43c4e5ad6f7ede4e0 | [
"Apache-2.0"
] | null | null | null | edb/pgsql/compiler/context.py | ciusji/edgedb | 1c68c02430a92464839f03f43c4e5ad6f7ede4e0 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""IR compiler context."""
from __future__ import annotations
from typing import *
import collections
import contextlib
import itertools
import enum
import uuid
from edb.common import compiler
from edb.pgsql import ast as pgast
from . import aliases
if TYPE_CHECKING:
from edb.ir import ast as irast
NO_STMT = pgast.SelectStmt()
# XXX: this context hack is necessary until pathctx is converted
# to use context levels instead of using env directly.
| 33.680387 | 76 | 0.649029 |
4828cf4e5987d77ba633b24834a9ab7cbcc2c32c | 1,111 | py | Python | tests/unit/test_resources_log.py | CarlosAMolina/logs-analyzer | b381d0c7ae4c5a8b6911d2b7019baa74208192c6 | [
"MIT"
] | null | null | null | tests/unit/test_resources_log.py | CarlosAMolina/logs-analyzer | b381d0c7ae4c5a8b6911d2b7019baa74208192c6 | [
"MIT"
] | null | null | null | tests/unit/test_resources_log.py | CarlosAMolina/logs-analyzer | b381d0c7ae4c5a8b6911d2b7019baa74208192c6 | [
"MIT"
] | null | null | null | import unittest
import mock
from src.api.resources import log
from tests import LOGS_PATH
if __name__ == "__main__":
unittest.main()
| 29.236842 | 80 | 0.636364 |
482a0469f8aaa784c2bee17a9875456c7d03fc8d | 4,378 | py | Python | src/rqt_py_trees/message_loader_thread.py | alexfneves/rqt_py_trees | 87237c3dcf25db419ad783ec29b9a40fcfa7b75c | [
"BSD-3-Clause"
] | 4 | 2021-04-19T04:04:06.000Z | 2022-02-08T10:13:37.000Z | src/rqt_py_trees/message_loader_thread.py | alexfneves/rqt_py_trees | 87237c3dcf25db419ad783ec29b9a40fcfa7b75c | [
"BSD-3-Clause"
] | 7 | 2016-11-25T04:53:29.000Z | 2018-10-07T21:49:10.000Z | src/rqt_py_trees/message_loader_thread.py | alexfneves/rqt_py_trees | 87237c3dcf25db419ad783ec29b9a40fcfa7b75c | [
"BSD-3-Clause"
] | 3 | 2021-01-08T10:47:21.000Z | 2021-07-26T15:18:39.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import threading
| 38.403509 | 159 | 0.680448 |
482b6bc1045b43ed5348ce5b37a40561a89cb30b | 2,796 | py | Python | run_ddpg.py | huangwl18/geometry-dex | 0c629316258ef560b360c6103d83d5cb828e3ccd | [
"MIT"
] | 29 | 2021-11-11T23:05:02.000Z | 2022-03-10T06:05:23.000Z | run_ddpg.py | huangwl18/geometry-dex | 0c629316258ef560b360c6103d83d5cb828e3ccd | [
"MIT"
] | 2 | 2021-12-13T16:18:14.000Z | 2022-03-09T14:04:37.000Z | run_ddpg.py | huangwl18/geometry-dex | 0c629316258ef560b360c6103d83d5cb828e3ccd | [
"MIT"
] | 2 | 2021-11-18T06:00:30.000Z | 2021-12-17T03:04:52.000Z | from rl_modules.utils import *
import torch
import random
from rl_modules.ddpg_agent import ddpg_agent
from arguments_ddpg import get_args
import os
import numpy as np
import dex_envs
import wandb
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
train the agent
"""
if __name__ == "__main__":
# env setting ========================================================================
# do not enable wandb output
os.environ["WANDB_SILENT"] = "true"
launch(init_callback=init_callback, log_callback=log_callback)
| 31.41573 | 90 | 0.658798 |
482da58116dfb913fbea2c87dc9df1955becba11 | 3,528 | py | Python | code/a_train_generalist.py | seba-1511/specialists | 9888e639707142db80aafe6ae7bf25f572d34505 | [
"Apache-2.0"
] | 1 | 2016-05-31T07:54:31.000Z | 2016-05-31T07:54:31.000Z | code/a_train_generalist.py | seba-1511/specialists | 9888e639707142db80aafe6ae7bf25f572d34505 | [
"Apache-2.0"
] | null | null | null | code/a_train_generalist.py | seba-1511/specialists | 9888e639707142db80aafe6ae7bf25f572d34505 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is an experiment that will train a specified generalist network.
"""
import random
import numpy as np
from neon.backends import gen_backend
from neon.data import DataIterator, load_cifar10
from neon.transforms.cost import Misclassification
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
from neon.util.persist import save_obj
from keras.datasets import cifar100
from cifar_net import get_custom_vgg
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
DATASET_NAME = 'cifar100'
EXPERIMENT_DIR = 'experiments/' + DATASET_NAME + '/'
VALIDATION = True
if __name__ == '__main__':
# hyperparameters
batch_size = 64
num_epochs = args.epochs
num_epochs = 74 if num_epochs == 10 else num_epochs
rng_seed = 1234
np.random.seed(rng_seed)
random.seed(rng_seed)
# setup backend
be = gen_backend(
backend=args.backend,
batch_size=batch_size,
rng_seed=rng_seed,
device_id=args.device_id,
default_dtype=args.datatype,
)
(X_train, y_train), (X_test, y_test), nout = load_data(DATASET_NAME)
if VALIDATION:
(X_train, y_train), (X_valid, y_valid) = split_train_set(X_train, y_train)
model, opt, cost = get_custom_vgg(nout=nout)
train_set = DataIterator(X_train, y_train, nclass=nout, lshape=(3, 32, 32))
test_set = DataIterator(X_test, y_test, nclass=nout, lshape=(3, 32, 32))
callbacks = Callbacks(model, train_set, args, eval_set=test_set)
if VALIDATION:
valid_set = DataIterator(X_valid, y_valid, nclass=nout, lshape=(3, 32, 32))
callbacks = Callbacks(model, train_set, args, eval_set=valid_set)
model.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print 'Validation: ', VALIDATION
print 'Train misclassification error: ', model.eval(train_set, metric=Misclassification())
if VALIDATION:
print 'Valid misclassification error: ', model.eval(valid_set, metric=Misclassification())
print 'Test misclassification error: ', model.eval(test_set, metric=Misclassification())
if args.save_path is not None:
save_obj(model.serialize(), EXPERIMENT_DIR + args.save_path)
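# Example invocation (sketch; the flags shown are the ones consumed above and
# are all provided by neon's NeonArgparser):
#   python a_train_generalist.py --backend gpu --epochs 74 --save_path generalist.prm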
| 33.283019 | 98 | 0.67602 |
482db97435aafc9eda667eac490cfcdd4c5b28e6 | 1,393 | py | Python | lilies/terminal/ansicodes.py | mrz1988/lilies | 9525770fabab7e142ebedc40ab5d0c8027aa90ba | [
"MIT"
] | null | null | null | lilies/terminal/ansicodes.py | mrz1988/lilies | 9525770fabab7e142ebedc40ab5d0c8027aa90ba | [
"MIT"
] | 51 | 2019-06-18T16:35:56.000Z | 2021-02-23T00:32:23.000Z | lilies/terminal/ansicodes.py | mrz1988/lilies | 9525770fabab7e142ebedc40ab5d0c8027aa90ba | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
# Leading control character
CSI = "\033["
FULLRESET = 0
BOLD = BRIGHT = 1
DIM = 2
ITALIC = 3
UNDERLINE = 4
BLINK = 5
# Unsupported
################
# RAPIDBLINK = 6
# REVERSE = 7
# CONCEAL = 8
STRIKE = 9
# Unsupported
################
# PRIMARY_FONT = 10
# ALTFONT1 = 11
# ALTFONT2 = 12
# ALTFONT3 = 13
# ALTFONT4 = 14
# ALTFONT5 = 15
# ALTFONT6 = 16
# ALTFONT7 = 17
# ALTFONT8 = 18
# ALTFONT9 = 19
# FRAKTUR = 20
# DOUBLEUNDERLINE = 21
NOBOLDDIM = 22
NOITALIC = 23
NOUNDERLINE = 24
NOBLINK = 25
# Unsupported
################
# 26 is missing?
# NOREVERSE = 27
# NOCONCEAL = 28
NOSTRIKE = 29
# COLORS!
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
LIGHTGRAY = 37
NOCOLOR = 39
# 16-color extended,
# Only kind of supported
DARKGRAY = 90
BRIGHTRED = 91
BRIGHTGREEN = 92
BRIGHTYELLOW = 93
BRIGHTBLUE = 94
BRIGHTMAGENTA = 95
BRIGHTCYAN = 96
WHITE = 97
ATTR_ON_CODES = {
"bold": BOLD,
"dim": DIM,
"italic": ITALIC,
"underline": UNDERLINE,
"blink": BLINK,
}
ATTR_OFF_CODES = {
"bold": NOBOLDDIM,
"dim": NOBOLDDIM,
"italic": NOITALIC,
"underline": NOUNDERLINE,
"blink": NOBLINK,
}
| 13.656863 | 52 | 0.61809 |
482df5bdcc006ad0be823ed8da879646f9d15872 | 2,220 | py | Python | preprocess/step1.py | wenhuchen/KGPT | f898577d8e0ebbf48ea84915777c7b01e616ca3a | [
"MIT"
] | 119 | 2020-10-06T08:21:21.000Z | 2022-03-25T12:00:10.000Z | preprocess/step1.py | wenhuchen/KGPT | f898577d8e0ebbf48ea84915777c7b01e616ca3a | [
"MIT"
] | 7 | 2020-10-29T09:34:14.000Z | 2021-12-28T14:27:27.000Z | preprocess/step1.py | wenhuchen/KGPT | f898577d8e0ebbf48ea84915777c7b01e616ca3a | [
"MIT"
] | 16 | 2020-10-07T18:58:48.000Z | 2022-02-23T07:42:29.000Z | import json
import regex
import nltk.data
from nltk.tokenize import word_tokenize
import sys
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def split_paragraphs(text):
"""
Skip the title line, drop "Section::" headers, merge the remaining
paragraphs, and split them into sentences.
"""
splits = regex.split(r'\n+', text)
paras = []
for split in splits[1:]: # skip the titles
split = split.strip()
if len(split) == 0:
continue
if 'Section::' in split:
continue
paras.append(split)
paras = " ".join(paras)
return sent_detector.tokenize(paras)
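# e.g. split_paragraphs("Title\nFirst paragraph.\nSecond one.") returns
# ["First paragraph.", "Second one."] -- the leading split (the title) is skipped.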
fw = open('out-more.json', 'w')
with open('en.json', 'r') as f:
for i, line in enumerate(f):
data = json.loads(line)
entry = {"id": data['id'], "url": data['url'], 'title': data['title']}
outputs = []
if len(data['text']) > 50:
try:
sents = split_paragraphs(data['text'])
for sent in sents:
if len(sent) < 400:
output, ratio, count = split_sent(sent)
if count > 1 and ratio >= 0.10 and len(output) >= 8 and output[0][0][0].isupper():
text = [_[0] for _ in output]
hyperlink = [_[1] for _ in output]
outputs.append((text, hyperlink))
except Exception:
pass
if len(outputs) > 0:
entry['text'] = outputs
fw.write(json.dumps(entry) + '\n')
sys.stdout.write('finished {}/{} \r'.format(i, 5989879))
fw.close()
| 30.833333 | 106 | 0.498198 |
482eeb2176d1d54a0f7e399cf1d1c1710f4d9f12 | 454 | py | Python | mintools/zmqmin/client.py | jtimon/elements-explorer | 397089593e860c4bdceb3a1222687a9120db0022 | [
"MIT"
] | 9 | 2018-01-25T16:32:18.000Z | 2018-10-10T18:47:33.000Z | mintools/zmqmin/client.py | jtimon/elements-explorer | 397089593e860c4bdceb3a1222687a9120db0022 | [
"MIT"
] | 2 | 2018-03-13T20:50:33.000Z | 2018-03-13T21:01:22.000Z | mintools/zmqmin/client.py | jtimon/elements-explorer | 397089593e860c4bdceb3a1222687a9120db0022 | [
"MIT"
] | 2 | 2018-02-20T17:50:18.000Z | 2018-02-26T07:29:15.000Z | # Copyright (c) 2012-2018 The Mintools developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import zmq  # note: the pyzmq package installs its importable module as "zmq"
from .messenger import Messenger
| 26.705882 | 69 | 0.737885 |
482f26a433eaf7f306d04205a3bac702463a9adc | 1,879 | py | Python | scripts/filter_genes_matrix.py | fengwanwan/st_analysis | 24ef6326efce7ddb1d7cfe9497a6733e48da8331 | [
"MIT"
] | 4 | 2017-03-15T15:32:12.000Z | 2020-12-09T08:03:14.000Z | scripts/filter_genes_matrix.py | Coke-Zhang/st_analysis | 9ec446c0f1bff8e485f2016206b43dcdcf543119 | [
"MIT"
] | 1 | 2021-05-06T16:57:21.000Z | 2021-05-06T16:58:38.000Z | scripts/filter_genes_matrix.py | Coke-Zhang/st_analysis | 9ec446c0f1bff8e485f2016206b43dcdcf543119 | [
"MIT"
] | 4 | 2018-03-19T12:02:41.000Z | 2019-12-13T08:41:07.000Z | #! /usr/bin/env python
"""
Script that takes ST dataset (matrix of counts)
where the columns are genes and the rows
are spot coordinates
gene gene
XxY
XxY
And removes the columns of genes
matching the regular expression given as input.
@Author Jose Fernandez Navarro <jose.fernandez.navarro@scilifelab.se>
"""
import argparse
import sys
import os
import pandas as pd
import re
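# Example invocation (sketch; --filter-genes may be repeated, the patterns
# shown are illustrative):
#   python filter_genes_matrix.py --counts-matrix counts.tsv \
#       --filter-genes '^RP[SL]' --filter-genes '^MT-' --outfile filtered.tsv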
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--counts-matrix", required=True,
help="Matrix with gene counts (genes as columns)")
parser.add_argument("--outfile", help="Name of the output file")
parser.add_argument("--filter-genes", help="Regular expression for \
gene symbols to filter out. Can be given several times.",
default=None,
type=str,
action='append')
args = parser.parse_args()
main(args.counts_matrix, args.filter_genes, args.outfile)
| 34.796296 | 90 | 0.651943 |
482f35fcca776fd3b82f536d756e301830e31fbf | 83 | py | Python | libs/models/__init__.py | tonyngjichun/pspnet-pytorch | 75297aa4fdb4f7a712ef9185be1ec805044f8328 | [
"MIT"
] | 56 | 2017-12-07T12:29:14.000Z | 2021-05-14T16:45:59.000Z | libs/models/__init__.py | tonyngjichun/pspnet-pytorch | 75297aa4fdb4f7a712ef9185be1ec805044f8328 | [
"MIT"
] | 7 | 2017-12-26T09:00:23.000Z | 2019-01-14T03:55:56.000Z | libs/models/__init__.py | tonyngjichun/pspnet-pytorch | 75297aa4fdb4f7a712ef9185be1ec805044f8328 | [
"MIT"
] | 16 | 2017-12-20T00:36:51.000Z | 2020-12-31T07:41:06.000Z | from __future__ import absolute_import
from .resnet import *
from .pspnet import *
| 20.75 | 38 | 0.807229 |
482fb0fc0f3fd414d792e168bab4aaa39e2474d7 | 1,405 | py | Python | modules/tools/record_analyzer/common/distribution_analyzer.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
] | 3 | 2020-04-01T14:49:24.000Z | 2020-04-01T14:49:28.000Z | modules/tools/record_analyzer/common/distribution_analyzer.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
] | 7 | 2021-03-10T18:14:25.000Z | 2022-02-27T04:46:46.000Z | modules/tools/record_analyzer/common/distribution_analyzer.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
] | 2 | 2020-08-05T12:52:42.000Z | 2021-10-19T13:07:49.000Z | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from statistical_analyzer import PrintColors
| 36.025641 | 79 | 0.582206 |
483016a462d5e8b33445c247250dee4a2ae74ecd | 2,261 | py | Python | models/batch.py | scaleapi/sail | 1bd857b5db34cbd08c7d4e2476beafdb353a458d | [
"Apache-2.0"
] | 7 | 2021-03-10T23:37:12.000Z | 2022-01-13T01:14:58.000Z | models/batch.py | scaleapi/sail | 1bd857b5db34cbd08c7d4e2476beafdb353a458d | [
"Apache-2.0"
] | 2 | 2021-01-04T15:54:27.000Z | 2021-03-30T22:45:03.000Z | models/batch.py | scaleapi/sail | 1bd857b5db34cbd08c7d4e2476beafdb353a458d | [
"Apache-2.0"
] | 3 | 2021-07-31T04:03:12.000Z | 2021-10-03T05:51:48.000Z | from helpers.concurrency import execute
from scaleapi import exceptions
| 38.322034 | 145 | 0.62008 |
4830669ccefdde03889bd71b019ac1ba14c36c86 | 315 | py | Python | worker/xs.py | hoshimaemi/XZZ | 6d712906fa2f1fcf16155cfd5d89245ef8e0aff8 | [
"MIT"
] | 29 | 2020-02-27T13:49:48.000Z | 2021-02-26T15:44:14.000Z | worker/xs.py | hoshimaemi/XZZ | 6d712906fa2f1fcf16155cfd5d89245ef8e0aff8 | [
"MIT"
] | 3 | 2021-03-30T11:31:49.000Z | 2021-12-07T12:11:56.000Z | worker/xs.py | hoshimaemi/XZZ | 6d712906fa2f1fcf16155cfd5d89245ef8e0aff8 | [
"MIT"
] | 14 | 2020-02-29T07:25:12.000Z | 2021-01-03T05:12:25.000Z | from zzcore import StdAns, mysakuya
import requests
| 18.529412 | 37 | 0.533333 |
48331ce668f0e91220bd7b7d009bc8b5666778cd | 304 | py | Python | bot.py | matthewzhaocc/discord-server-status | c9c58271ab3f7142f7e827b88d5c960cc442b355 | [
"MIT"
] | null | null | null | bot.py | matthewzhaocc/discord-server-status | c9c58271ab3f7142f7e827b88d5c960cc442b355 | [
"MIT"
] | null | null | null | bot.py | matthewzhaocc/discord-server-status | c9c58271ab3f7142f7e827b88d5c960cc442b355 | [
"MIT"
] | null | null | null | #a discord bot for playing with CICD
#system dependencies
import os
#3rd party dependencies
import discord
TOKEN = os.environ.get("DISCORD_API_TOKEN")
client = discord.Client()
client.run(TOKEN) | 19 | 54 | 0.720395 |
4834a12f8b0a1adc974a9695986c5da1d9c04010 | 603 | py | Python | repost/api/schemas/user.py | pckv/fastapi-backend | 0f561528086ac3fdcabbf9efeac888421eeb66de | [
"MIT"
] | 9 | 2020-02-03T11:17:06.000Z | 2021-06-15T13:20:34.000Z | repost/api/schemas/user.py | pckv/fastapi-backend | 0f561528086ac3fdcabbf9efeac888421eeb66de | [
"MIT"
] | 40 | 2020-02-03T11:23:59.000Z | 2020-05-19T08:05:41.000Z | repost/api/schemas/user.py | pckv/fastapi-backend | 0f561528086ac3fdcabbf9efeac888421eeb66de | [
"MIT"
] | 1 | 2020-03-11T02:47:40.000Z | 2020-03-11T02:47:40.000Z | """API schemas for users."""
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
| 20.1 | 48 | 0.681592 |
483592e4049e6951c186723536311a58d0a2c2a3 | 1,459 | py | Python | gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
] | 408 | 2015-01-01T10:31:47.000Z | 2022-03-26T17:41:21.000Z | gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
] | 521 | 2015-01-08T14:45:54.000Z | 2022-03-24T11:15:22.000Z | gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
] | 158 | 2015-01-25T20:02:00.000Z | 2022-03-01T06:29:12.000Z | import re
from .._compat import integer_types, long
from .base import SQLAdapter
from . import adapters
| 32.422222 | 85 | 0.592186 |
4835ed45283fa22be5264491ed9fa12710bc4c24 | 54,436 | py | Python | search.py | Hawxo/GoWDiscordTeamBot | ebe062f88b8d39615ba871476471d466e6759e7a | [
"BSD-3-Clause"
] | null | null | null | search.py | Hawxo/GoWDiscordTeamBot | ebe062f88b8d39615ba871476471d466e6759e7a | [
"BSD-3-Clause"
] | null | null | null | search.py | Hawxo/GoWDiscordTeamBot | ebe062f88b8d39615ba871476471d466e6759e7a | [
"BSD-3-Clause"
] | null | null | null | import copy
import datetime
import importlib
import logging
import operator
import re
from calendar import different_locale
import translations
from data_source.game_data import GameData
from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, \
UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES
from models.bookmark import Bookmark
from models.toplist import Toplist
from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day
LOGLEVEL = logging.DEBUG
formatter = logging.Formatter('%(asctime)-15s [%(levelname)s] %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(LOGLEVEL)
log = logging.getLogger(__name__)
log.setLevel(LOGLEVEL)
log.addHandler(handler)
t = translations.Translations()
_ = t.get
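# `_` follows the gettext naming convention: it resolves a translation key for
# a language code, e.g. _('[SOULFORGE]', lang) -> the localized string.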
def get_levels(self, lang):
levels = [{
'level': level['level'],
'bonus': _(level['bonus'], lang),
} for level in self.levels]
return levels
def translate_toplist(self, toplist_id, lang):
toplist = self.toplists.get(toplist_id)
if not toplist:
return None
result = toplist.copy()
result['items'] = []
for item_search in toplist['items']:
items = self.search_troop(item_search, lang)
if not items:
items = self.search_weapon(item_search, lang)
if not items:
continue
result['items'].append(items[0])
return result
def get_toplist_troop_ids(self, items, lang):
result = []
for search_term in items.split(','):
items = self.search_troop(search_term, lang)
if not items:
items = self.search_weapon(search_term, lang)
if items:
result.append(str(items[0]['id']))
return result
def get_soulforge_weapon_image_data(self, search_term, date, switch, lang):
search_result = self.search_weapon(search_term, lang)
if len(search_result) != 1:
return
weapon = search_result[0].copy()
requirements = SOULFORGE_REQUIREMENTS[weapon['raw_rarity']].copy()
alternate_kingdom_id = weapon.get('event_faction')
if alternate_kingdom_id:
requirements = UNDERWORLD_SOULFORGE_REQUIREMENTS[weapon['raw_rarity']].copy()
jewels = []
for color in weapon['colors']:
color_code = COLORS.index(color)
filename = f'Runes_Jewel{color_code:02n}_full.png'
jewels.append({
'filename': filename,
'amount': requirements['jewels'],
'available_on': translate_day(color_code, lang),
'kingdoms': sorted([_(kingdom['name'], lang) for kingdom in self.kingdoms.values()
if 'primary_color' in kingdom
and color == kingdom['primary_color']
and kingdom['location'] == 'krystara']),
})
requirements['jewels'] = jewels
kingdom = self.kingdoms[weapon['kingdom_id']]
alternate_kingdom = None
alternate_kingdom_name = None
alternate_kingdom_filename = None
if alternate_kingdom_id:
alternate_kingdom = self.kingdoms[alternate_kingdom_id]
alternate_kingdom_name = _(alternate_kingdom['name'], lang)
alternate_kingdom_filename = alternate_kingdom['filename']
affixes = [{
'name': _(affix['name'], lang),
'description': _(affix['description'], lang),
'color': list(RARITY_COLORS.values())[i],
} for i, affix in enumerate(weapon['affixes'], start=1)]
mana_colors = ''.join([c.title() for c in weapon['colors']]).replace('Brown', 'Orange')
kingdom_filebase = self.kingdoms[weapon['kingdom_id']]['filename']
in_soulforge_text = _('[WEAPON_AVAILABLE_FROM_SOULFORGE]', lang)
if alternate_kingdom_id:
in_soulforge_text += ' (' + _(f'[{weapon["event_faction"]}_NAME]', lang) + ' ' + _(
'[FACTION_WEAPON]', lang) + ')'
date = get_next_monday_in_locale(date, lang)[0]
result = {
'switch': switch,
'name': weapon['name'],
'rarity_color': RARITY_COLORS[weapon['raw_rarity']],
'rarity': weapon['rarity'],
'filename': f'Spells/Cards_{weapon["spell_id"]}_full.png',
'description': weapon['spell']['description'],
'kingdom': weapon['kingdom'],
'alternate_kingdom': alternate_kingdom_name,
'kingdom_logo': f'Troopcardshields_{kingdom_filebase}_full.png',
'alternate_kingdom_logo': f'Troopcardshields_{alternate_kingdom_filename}_full.png',
'type': _(weapon['type'], lang),
'background': f'Background/{kingdom["filename"]}_full.png',
'gow_logo': 'Atlas/gow_logo.png',
'requirements': requirements,
'affixes': affixes,
'affix_icon': 'Atlas/affix.png',
'gold_medal': 'Atlas/medal_gold.png',
'mana_color': f'Troopcardall_{mana_colors}_full.png',
'mana_cost': weapon['spell']['cost'],
'stat_increases': {'attack': sum(weapon['attack_increase']),
'health': sum(weapon['health_increase']),
'armor': sum(weapon['armor_increase']),
'magic': sum(weapon['magic_increase'])},
'stat_icon': 'Atlas/{stat}.png',
'texts': {
'from_battles': _('[PET_LOOT_BONUS]', lang).replace('+%1% %2 ', '').replace('+%1 %2 ', ''),
'gem_bounty': _('[DUNGEON_OFFER_NAME]', lang),
'kingdom_challenges': f'{_("[KINGDOM]", lang)} {_("[CHALLENGES]", lang)}',
'soulforge': _('[SOULFORGE]', lang),
'resources': _('[RESOURCES]', lang),
'dungeon': _('[DUNGEON]', lang),
'dungeon_battles': _('[TASK_WIN_DUNGEON_BATTLES]', lang).replace('{0}', '3').replace('\x19', 's'),
'tier_8': _('[CHALLENGE_TIER_8_ROMAN]', lang),
'available': _('[AVAILABLE]', lang),
'in_soulforge': in_soulforge_text,
'n_gems': _('[GEMS_GAINED]', lang).replace('%1', '50'),
},
'date': date,
}
return result
def translate_drop_chances(self, data: dict, lang):
for key, item in data.copy().items():
if not self.is_untranslated(key):
continue
new_key = _(key, lang)
if key == '[KEYTYPE_5_TITLE]':
new_key = f'{new_key}*'
data[new_key] = item.copy()
if key != new_key:
del data[key]
if isinstance(data[new_key], dict):
self.translate_drop_chances(data[new_key], lang)
| 44.185065 | 118 | 0.557701 |
4836dce172471538808ff516434e702497a39d34 | 39,841 | py | Python | troposphere/sagemaker.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | [
"BSD-2-Clause"
] | null | null | null | troposphere/sagemaker.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | [
"BSD-2-Clause"
] | null | null | null | troposphere/sagemaker.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 51.0.0
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, double, integer
| 32.207761 | 206 | 0.680681 |
483774235763ac392213eaed3e87eadcdbd2e771 | 1,199 | py | Python | src/lib/divergence.py | evolutics/sparse-approximation | fda419b2ca0f6563a4668bae23ca0b94936ff8e8 | [
"MIT"
] | null | null | null | src/lib/divergence.py | evolutics/sparse-approximation | fda419b2ca0f6563a4668bae23ca0b94936ff8e8 | [
"MIT"
] | null | null | null | src/lib/divergence.py | evolutics/sparse-approximation | fda419b2ca0f6563a4668bae23ca0b94936ff8e8 | [
"MIT"
] | null | null | null | import math
from numpy import linalg
from scipy import stats
from scipy.spatial import distance
import numpy
def jensen_shannon_distance(p, Q):
"""Square root of Jensen-Shannon divergence."""
return numpy.apply_along_axis(lambda q: distance.jensenshannon(p, q), 0, Q)
def k_directed(p, Q):
"""See: Jianhua Lin. "Divergence Measures Based on the Shannon Entropy". 1991."""
return numpy.apply_along_axis(lambda q: stats.entropy(p, (p + q) / 2), 0, Q)
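# A minimal usage sketch (not part of the original module): `p` is a single
# distribution and each *column* of `Q` is a candidate distribution.
#
#   p = numpy.array([0.5, 0.3, 0.2])
#   Q = numpy.array([[0.5, 0.1], [0.3, 0.6], [0.2, 0.3]])
#   jensen_shannon_distance(p, Q)  # one distance per column of Q
#   k_directed(p, Q)               # K(p, q) = KL(p || (p + q) / 2) per column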
| 25.510638 | 85 | 0.683069 |
4838335f5aaabe1145c8cd0b7af080ad9ce72fb6 | 10,319 | py | Python | vad/data_models/voice_activity.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | [
"MIT"
] | 74 | 2021-02-22T17:35:52.000Z | 2022-03-29T03:08:12.000Z | vad/data_models/voice_activity.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | [
"MIT"
] | 1 | 2021-08-15T07:56:39.000Z | 2021-08-15T07:56:39.000Z | vad/data_models/voice_activity.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | [
"MIT"
] | 9 | 2021-07-22T16:46:11.000Z | 2022-03-27T13:19:24.000Z | import json
import math
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from pathlib import Path
from typing import List, Optional
import numpy as np
from vad.util.time_utils import (
format_timedelta_to_milliseconds,
format_timedelta_to_timecode,
parse_timecode_to_timedelta,
)
| 41.777328 | 100 | 0.560907 |
4839b9f176bff6bb0c25323ed01d0f68d5ef1760 | 1,807 | py | Python | face_recognition/project/schema.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
] | null | null | null | face_recognition/project/schema.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
] | null | null | null | face_recognition/project/schema.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
] | null | null | null | # coding: utf-8
SCHEMA_MAPPING = {
"persons": {
"type": "object",
"patternProperties": {
r"\d+": {
"type": "object",
"properties": {
"first_name": {"type": "string"},
"last_name": {"type": "string"},
},
"patternProperties": {
r".+": {"type": ["integer", "string"]}
},
"required": ["first_name", "last_name"]
}
}
},
"camera": {
"type": "object",
"properties": {
"camera_id": {"type": "integer"},
"camera_close_key": {"type": "string"},
"camera_frame_shape": {"type": "array", "items": {"type": "integer"}, "minItems": 3, "maxItems": 3}
},
"required": ["camera_id", "camera_close_key", "camera_frame_shape"]
},
"model_config": {
"type": "object",
"properties": {
"class_name": {"type": "string"},
"config": {
"type": "object",
"properties": {
"name": {"type": "string"},
"layers": {
"type": "array",
"items": {
"type": "object",
"properties": {
"class_name": {"type": "string"},
"config": {
"type": "object"
}
}
}
}
}
},
"keras_version": {"type": "string"},
"backend": {"type": "string", "enum": ["theano", "tensorflow"]}
}
}
}
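# Illustrative validation with the `jsonschema` package (an addition, not part
# of the original file; assumes each SCHEMA_MAPPING entry is used as a
# standalone JSON Schema):
#
#   import jsonschema
#   jsonschema.validate(
#       {"camera_id": 0, "camera_close_key": "q", "camera_frame_shape": [480, 640, 3]},
#       SCHEMA_MAPPING["camera"],
#   )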
| 30.116667 | 111 | 0.328168 |
483a75dbbf41e0a2382e74253427bd94ad78ce29 | 303 | py | Python | resources/www/scripts/recibido.py | miguelarman/Redes-de-comunicaciones-ii-practica1 | 8f90dddcf9025f7d9c08dfb6ca1aa8dc24e9fa13 | [
"MIT"
] | null | null | null | resources/www/scripts/recibido.py | miguelarman/Redes-de-comunicaciones-ii-practica1 | 8f90dddcf9025f7d9c08dfb6ca1aa8dc24e9fa13 | [
"MIT"
] | null | null | null | resources/www/scripts/recibido.py | miguelarman/Redes-de-comunicaciones-ii-practica1 | 8f90dddcf9025f7d9c08dfb6ca1aa8dc24e9fa13 | [
"MIT"
] | null | null | null | import sys
import urllib.parse as urlparse
print("Argumentos recibidos por STDIN: ")
try:
    for line in sys.stdin:
        url = 'foo.com/?' + line
        parsed = urlparse.urlparse(url)
        print('Received: {}'.format(urlparse.parse_qs(parsed.query)))
except Exception:
    ignorar = True  # deliberately ignore malformed input
| 21.642857 | 70 | 0.623762 |
483a82b33807937515011fa3de571cf7d20b8db3 | 8,620 | py | Python | gui/main.py | aman-v1729/CommonAudioVideoCLI | c2245a02bbafd1ff9899dba2b02f246f98538746 | [
"MIT"
] | null | null | null | gui/main.py | aman-v1729/CommonAudioVideoCLI | c2245a02bbafd1ff9899dba2b02f246f98538746 | [
"MIT"
] | 1 | 2020-05-14T13:20:45.000Z | 2020-05-14T19:08:06.000Z | gui/main.py | aman-v1729/CommonAudioVideoCLI | c2245a02bbafd1ff9899dba2b02f246f98538746 | [
"MIT"
] | 5 | 2020-05-17T17:00:43.000Z | 2020-07-25T06:19:57.000Z | import tkinter
import subprocess
from tkinter import filedialog, messagebox
import os
import pyqrcode
def clip_filename_with_extension(filename):
""" clips long file names """
clipped = filename[filename.rfind("/") + 1 :]
if len(clipped) > 15:
clipped = clipped[:6] + "..." + clipped[clipped.rfind(".") - 4 :]
return clipped
def select_vid_file():
""" Presents file dialog box to select .mp4/.mkv files """
global curr_dir
# print(curr_dir)
global vid_filename
filename = filedialog.askopenfilename(
initialdir=curr_dir,
title="Select Video File",
filetypes=[("Video", ".mp4 .mkv")],
)
if filename.endswith(".mp4") or filename.endswith(".mkv"):
vid_filename = filename
video_btn["text"] = clip_filename_with_extension(vid_filename)
else:
if len(vid_filename) > 0:
pass
else:
video_btn["text"] = "Choose File"
def select_sub_file():
""" Presents file dialog box to select .srt files """
global curr_dir
global sub_filename
filename = filedialog.askopenfilename(
initialdir=curr_dir,
title="Select Subtitle File",
filetypes=[("Subtitle", ".srt")],
)
if filename.endswith(".srt"):
sub_filename = filename
sub_btn["text"] = clip_filename_with_extension(sub_filename)
else:
if len(sub_filename) > 0:
pass
else:
sub_btn["text"] = "Choose File"
def change_sub_state():
""" Enable/Disable subtitle file """
state = allow_sub.get()
if state:
sub_btn["state"] = tkinter.NORMAL
else:
sub_btn["state"] = tkinter.DISABLED
def run_checks_before_play():
""" File selection checks before calling CLI """
global vid_filename, sub_filename
if not (vid_filename.endswith(".mp4")) and not (vid_filename.endswith(".mkv")):
return 1
if allow_sub.get() and not (sub_filename.endswith(".srt")):
return 2
return 0
def generate_qr():
""" Generates QR code for room link """
global link
global photo, qrImage, myQr
print(link)
top = tkinter.Toplevel()
top.title("QR Code")
qr_lbl = tkinter.Label(top)
myQr = pyqrcode.create(link)
qrImage = myQr.xbm(scale=6)
photo = tkinter.BitmapImage(data=qrImage)
qr_lbl.config(image=photo, state=tkinter.NORMAL)
qr_lbl.pack()
def copy_link():
""" Copies room link to clipboard """
global link
root.clipboard_append(link)
copy_link_btn["text"] = "Link Copied!"
def retrieve_link(bash_command):
""" Gets room link retrieved from the CLI """
import tkinter
global link
global curr_dir
print(curr_dir)
subprocess.Popen(bash_command)
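    # Poll until the CLI writes the invite link to disk (a simple file-based
    # handshake between this GUI and the spawned process).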
while not os.path.exists(curr_dir + "/invite_link.txt"):
root.after(2000)
f = open("invite_link.txt", "r")
link = f.readline()
print(link)
f.close()
os.remove("invite_link.txt")
tkinter.messagebox.showinfo(
"Success", "Room Creation Successful! Share the link or scan the QR to join!"
)
success_lbl.config(text="Share this link and enjoy: " + link, state=tkinter.NORMAL)
success_lbl.config(font=("Courier", 14))
success_lbl.grid(row=7, column=0, columnspan=6)
copy_link_btn.config(state=tkinter.NORMAL)
copy_link_btn.grid(row=8, column=0, columnspan=3, sticky=tkinter.E)
qr_gen_btn["state"] = tkinter.NORMAL
qr_gen_btn.grid(row=8, column=3, columnspan=3, sticky=tkinter.W)
def play():
""" Gathers widget configurations to create CLI command """
global curr_dir, vid_filename, sub_filename
err_status = run_checks_before_play()
if err_status == 0:
bash_command = []
bash_command.append("python3")
bash_command.append(curr_dir + "cli/main.py")
bash_command.append("-f")
bash_command.append(vid_filename)
if allow_sub.get():
bash_command.append("-s")
bash_command.append(sub_filename)
if not server.get():
bash_command.append("--web")
quality = audio_quality.get()
if quality == 0:
bash_command.append("--audio-quality")
bash_command.append("low")
elif quality == 2:
bash_command.append("--audio-quality")
bash_command.append("high")
"""
if(show_qr.get()):
bash_command.append('--qr')
"""
if host_control.get():
bash_command.append("--control")
print(bash_command)
for widget in root.winfo_children():
widget["state"] = tkinter.DISABLED
retrieve_link(bash_command)
elif err_status == 1:
tkinter.messagebox.showerror("ERROR", "No video file chosen")
elif err_status == 2:
tkinter.messagebox.showerror("ERROR", "No subtitle file chosen")
def on_closing():
""" Confirms session closing """
if messagebox.askokcancel(
"Quit",
"Closing this window will stop this session."
+ "Are you sure you want to quit?",
):
root.destroy()
if __name__ == "__main__":
global curr_dir
curr_dir = __file__
curr_dir = curr_dir[: curr_dir.rfind("gui/main.py")]
curr_dir = (
subprocess.run("pwd", capture_output=True, text=True).stdout.strip()
+ "/"
+ curr_dir
)
# Create root window
root = tkinter.Tk()
root.title("Common Audio Video GUI")
# Remove previously created links
if os.path.exists("invite_link.txt"):
os.remove("invite_link.txt")
# Place welcome label
wlcm_lbl = tkinter.Label(root, text="Welcome to Common Audio Video Host GUI!")
wlcm_lbl.grid(row=0, column=0, columnspan=5)
# Video File Selection
global vid_filename
vid_filename = ""
video_btn = tkinter.Button(root, text="Select Video File", command=select_vid_file)
video_btn.grid(row=1, column=0, columnspan=5)
# Subtitle File Check
allow_sub = tkinter.IntVar()
check_sub = tkinter.Checkbutton(
root,
text="Add subtitles:",
command=change_sub_state,
variable=allow_sub,
onvalue=1,
offvalue=0,
)
check_sub.deselect()
check_sub.grid(row=2, column=0, columnspan=2, sticky=tkinter.E)
# Subtitle File Selection
global sub_filename
sub_filename = ""
sub_btn = tkinter.Button(
root, text="Choose File", command=select_sub_file, state=tkinter.DISABLED
)
sub_btn.grid(row=2, column=2, columnspan=3, sticky=tkinter.W)
# Server Selection
server = tkinter.IntVar()
server.set(0)
radio_server_web = tkinter.Radiobutton(root, text="Web", variable=server, value=0)
radio_server_local = tkinter.Radiobutton(
root, text="Local", variable=server, value=1
)
tkinter.Label(root, text="Server: ").grid(row=3, column=0, columnspan=2)
radio_server_web.grid(row=3, column=2)
radio_server_local.grid(row=3, column=3)
# Audio Quality Selection
audio_quality = tkinter.IntVar()
audio_quality.set(1)
radio_quality_low = tkinter.Radiobutton(
root, text="Low", variable=audio_quality, value=0
)
radio_quality_medium = tkinter.Radiobutton(
root, text="Medium", variable=audio_quality, value=1
)
radio_quality_high = tkinter.Radiobutton(
root, text="High", variable=audio_quality, value=2
)
quality_lbl = tkinter.Label(root, text="Audio Quality: ")
quality_lbl.grid(row=4, column=0, columnspan=2)
radio_quality_low.grid(row=4, column=2)
radio_quality_medium.grid(row=4, column=3)
radio_quality_high.grid(row=4, column=4)
# Control
host_control = tkinter.IntVar()
check_control = tkinter.Checkbutton(
root, text="Only host can control", variable=host_control, onvalue=1, offvalue=0
)
check_control.deselect()
check_control.grid(row=5, column=0, columnspan=5)
"""
# Show QR
show_qr = tkinter.IntVar()
check_qr = tkinter.Checkbutton(
root, text="Show QR", variable=show_qr, onvalue=1, offvalue=0
)
check_qr.select()
check_qr.grid(row=5, column=3, columnspan=2)
"""
# Play Button
play_btn = tkinter.Button(root, text="PLAY!", command=play)
play_btn.grid(row=6, column=0, columnspan=5)
# Post room creation options
success_lbl = tkinter.Label(root)
copy_link_btn = tkinter.Button(root, text="Copy Link", command=copy_link)
qr_gen_btn = tkinter.Button(root, text="Generate QR", command=generate_qr)
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
| 28.448845 | 88 | 0.641763 |
483af36db857e32be1df5b6afda8e6fc42b22d40 | 2,170 | py | Python | virtex/core/profile.py | chrislarson1/virtex | 36eb47d1ace297951cae36edc8a00544b85fed79 | [
"Apache-2.0"
] | 5 | 2020-06-17T06:22:32.000Z | 2022-03-04T09:25:31.000Z | virtex/core/profile.py | virtexlabs/virtex | 36eb47d1ace297951cae36edc8a00544b85fed79 | [
"Apache-2.0"
] | null | null | null | virtex/core/profile.py | virtexlabs/virtex | 36eb47d1ace297951cae36edc8a00544b85fed79 | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------------
# Copyright 2021 Virtex authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# -------------------------------------------------------------------
import asyncio
from functools import wraps
from typing import Callable, Any
from virtex.core.timing import now, async_now
def profile(profile_fn,
*fn_args,
tstamp_fn: Callable[[float, float], Any],
loop: asyncio.BaseEventLoop = None):
"""
Parameters
----------
profile_fn: ``Callable[Any, Any]``
Wrapped function
fn_args: ``Tuple[Any]``
Wrapped function arguments
tstamp_fn: ``Callable[[float, float], Any]``
A function that accepts a start_time,end_time
argument pair and returns the profile value
loop: ``Optional[asyncio.BaseEventLoop]``
Event loop to be used for async functions
"""
return _execute
| 31.911765 | 69 | 0.602765 |
483bb446decbf48fa9ae87d928153944790671cf | 4,855 | py | Python | apps/molecular_generation/JT_VAE/src/mol_tree.py | agave233/PaddleHelix | e5578f72c2a203a27d9df7da111f1ced826c1429 | [
"Apache-2.0"
] | 454 | 2020-11-21T01:02:45.000Z | 2022-03-29T12:53:40.000Z | apps/molecular_generation/JT_VAE/src/mol_tree.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 161 | 2020-12-12T06:35:54.000Z | 2022-03-27T11:31:13.000Z | apps/molecular_generation/JT_VAE/src/mol_tree.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 108 | 2020-12-07T09:01:10.000Z | 2022-03-31T14:42:29.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MolTree"""
import rdkit
import rdkit.Chem as Chem
from src.chemutils import get_clique_mol, tree_decomp, get_mol, get_smiles, set_atommap, enum_assemble, decode_stereo
from src.vocab import Vocab
def dfs(node, fa_idx):
"""dfs"""
max_depth = 0
for child in node.neighbors:
if child.idx == fa_idx: continue
max_depth = max(max_depth, dfs(child, node.idx))
return max_depth + 1
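# NOTE: the MolTree class (the junction-tree decomposition of a molecule,
# exposing `.nodes` whose items carry per-clique `.smiles`) is omitted from
# this excerpt; the __main__ block below assumes it is defined above.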
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', required=True)
parser.add_argument('--vocab_path', required=True)
args = parser.parse_args()
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
with open(args.train_path, 'r') as f:
data = f.read().splitlines()
cset = set()
for item in data:
smiles = item.split()[0]
mol = MolTree(smiles)
for c in mol.nodes:
cset.add(c.smiles)
with open(args.vocab_path, 'w') as f:
for c in cset:
f.write(c + '\n')
| 30.923567 | 117 | 0.603502 |
484313fcfa513337e375cc555180add4dbd721a7 | 1,663 | py | Python | torch/indicator/vision/object_detection/iou.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | 1 | 2018-12-09T06:09:29.000Z | 2018-12-09T06:09:29.000Z | torch/indicator/vision/object_detection/iou.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | torch/indicator/vision/object_detection/iou.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | # coding = utf-8
from abc import ABCMeta, abstractmethod
import torch
from Putil.torch.indicator.vision.object_detection import box
##@brief iou
# @note compute the intersection-over-union between two box sets
# @return Tensor the IoU
##@brief boxes are shaped [batch, box, ...] with each box given as
# [top_left_x, top_left_y, width, height]; the returned IoU is shaped
# [batch, 1, ...]; ground-truth boxes padded as [0, 0, 0, 0] are ignored,
# and the IoU against the gt is averaged per batch into a MeanIoU
# @note
##@brief
# @note
4843692979b67bbb7eade27d08ade8ca10f18066 | 2,012 | py | Python | magPi_05_mountains.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | [
"Unlicense"
] | null | null | null | magPi_05_mountains.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | [
"Unlicense"
] | null | null | null | magPi_05_mountains.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | [
"Unlicense"
] | null | null | null | # today is 389f
# the python pit
# magPi - 05
# MOUNTAINS
import os, pygame; from pygame.locals import *
pygame.init(); clock = pygame.time.Clock()
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.display.set_caption("Mountains")
screen=pygame.display.set_mode([600,382],0,32)
sky = pygame.Surface((600,255))
r=0; g=64; b=128
for l in range (0,255):
pygame.draw.rect(sky,(r,g,b),(0,l-1,600,l))
r=r+1;g=g+1;b=b+1
if r>=255: r=255
if g>=255: g=255
if b>=255: b=255
ground = pygame.Surface((600,128))
r=192; g=255; b=192
for l in range (0,128):
pygame.draw.rect(ground,(r,g,b),(0,l-2,600,l))
r=r-2;g=g-2;b=b-2
if r<=0: r=0
if g<=0: g=0
if b<=0: b=0
# Add in an extra surface for the mountains
mountain = pygame.Surface((600,128))
mountain.set_colorkey([0,0,0]) # Black is transparent
r=96; g=64; b=255
for l in range (0,128):
pygame.draw.rect(mountain,(r,g,b),(0,l-2,600,l))
r=r+2;g=g+2;b=b+2
if r>=255: r=255
if g>=255: g=255
if b>=255: b=255
# Draw some black (Transparent) polygons to create mountain peaks
# The screen is 600 wide so I've drawn 10 polygons at 60 pixels wide each
pygame.draw.polygon(mountain,[0,0,0],[(0,0),(60,0),(60,10),(0,40)])
pygame.draw.polygon(mountain,[0,0,0],[(60,0),(120,0),(120,30),(60,10)])
pygame.draw.polygon(mountain,[0,0,0],[(120,0),(180,0),(180,20),(120,30)])
pygame.draw.polygon(mountain,[0,0,0],[(180,0),(240,0),(240,50),(180,20)])
pygame.draw.polygon(mountain,[0,0,0],[(240,0),(300,0),(300,40),(240,50)])
pygame.draw.polygon(mountain,[0,0,0],[(300,0),(360,0),(360,10),(300,40)])
pygame.draw.polygon(mountain,[0,0,0],[(360,0),(420,0),(420,35),(360,10)])
pygame.draw.polygon(mountain,[0,0,0],[(420,0),(480,0),(480,45),(420,35)])
pygame.draw.polygon(mountain,[0,0,0],[(480,0),(540,0),(540,42),(480,45)])
pygame.draw.polygon(mountain,[0,0,0],[(540,0),(600,0),(600,15),(540,42)])
screen.blit(sky,(0,0))
screen.blit(ground,(0,255))
screen.blit(mountain,(0,128))
pygame.display.update()
pygame.time.wait(30000) | 34.101695 | 73 | 0.638171 |
48461f7075c6cb1cc7aff2cd4d853dffd50a16bd | 6,041 | py | Python | bots/parkour/reports.py | Marcin1396/parkour | 25d7d888b178eb7860a897e6df7578f2de0a729a | [
"MIT"
] | null | null | null | bots/parkour/reports.py | Marcin1396/parkour | 25d7d888b178eb7860a897e6df7578f2de0a729a | [
"MIT"
] | null | null | null | bots/parkour/reports.py | Marcin1396/parkour | 25d7d888b178eb7860a897e6df7578f2de0a729a | [
"MIT"
] | null | null | null | """
Handles reports
"""
from parkour.env import env
from parkour.utils import normalize_name
import asyncio
import aiotfm
import time
| 24.556911 | 96 | 0.640953 |
48473f9998c2721254601aaa70efd1a6c575862d | 3,053 | py | Python | data_analysis_scripts/mouse_et_ephys_viz.py | idc9/mvmm_sim | 5819d9ff95e36310536fd436bba50baba4f0ca71 | [
"MIT"
] | null | null | null | data_analysis_scripts/mouse_et_ephys_viz.py | idc9/mvmm_sim | 5819d9ff95e36310536fd436bba50baba4f0ca71 | [
"MIT"
] | null | null | null | data_analysis_scripts/mouse_et_ephys_viz.py | idc9/mvmm_sim | 5819d9ff95e36310536fd436bba50baba4f0ca71 | [
"MIT"
] | null | null | null | from joblib import load
from os.path import join
import argparse
import numpy as np
import matplotlib.pyplot as plt
from mvmm_sim.simulation.sim_viz import save_fig
from mvmm_sim.data_analysis.utils import load_data
from mvmm_sim.simulation.utils import make_and_get_dir
from mvmm_sim.mouse_et.MouseETPaths import MouseETPaths
from mvmm_sim.mouse_et.raw_ephys_loading import load_raw_ephys
from mvmm_sim.mouse_et.ephys_viz import get_ephys_super_data,\
plot_top_clust_ephys_curves, plot_cluster_ephys_curve
parser = argparse.\
ArgumentParser(description='Cluster interpretation.')
parser.add_argument('--results_dir', default=None,
help='Directory to save results.')
parser.add_argument('--fpaths', nargs='+',
help='Paths to data sets.')
args = parser.parse_args()
inches = 8
n_top_clust = 10
results_dir = args.results_dir
fpaths = args.fpaths
fitting_dir = join(results_dir, 'model_fitting')
ephys_viz_dir = join(results_dir, 'interpret', 'bd_mvmm', 'ephys_pca_feats')
# load models and data
models = load(join(fitting_dir, 'selected_models'))
view_data, dataset_names, sample_names, view_feat_names = load_data(*fpaths)
# load raw ephys data
orig_data_dir = join(MouseETPaths().raw_data_dir, 'inh_patchseq_spca_files',
'orig_data_csv')
ephys_raw = load_raw_ephys(orig_data_dir, concat=False)
for k in ephys_raw.keys():
ephys_raw[k] = ephys_raw[k].loc[sample_names]
print(k, ephys_raw[k].shape)
n_datasets = len(ephys_raw)
# get data for plotting
v = 1
cluster_super_means, super_data_means, super_data_stds, y_cnts = \
get_ephys_super_data(model=models['bd_mvmm'].final_.view_models_[v],
fit_data=view_data[v],
ephys_raw=ephys_raw)
clust_labels = ['cluster_{}'.format(cl_idx + 1)
for cl_idx in range(len(y_cnts))]
# plot top several clusters
plot_top_clust_ephys_curves(cluster_super_means,
y_cnts=y_cnts,
overall_means=super_data_means,
overall_stds=super_data_stds,
clust_labels=clust_labels,
n_to_show=n_top_clust,
inches=inches)
save_fig(join(ephys_viz_dir, 'ephys_curves_top_clust.png'))
# plot each (non-trival) cluster
# non_trivial_clusters = y_cnts[y_cnts >= 5].index.values
non_trivial_clusters = y_cnts[y_cnts >= 0].index.values
save_dir = make_and_get_dir(ephys_viz_dir, 'cluster_curves')
for cl_idx in non_trivial_clusters:
label = clust_labels[cl_idx]
values = {}
for name in cluster_super_means.keys():
values[name] = cluster_super_means[name][cl_idx]
plt.figure(figsize=(2 * n_datasets * inches, inches))
plot_cluster_ephys_curve(values,
overall_means=super_data_means,
overall_stds=super_data_stds,
y_label=label)
save_fig(join(save_dir, '{}_ephys_curve.png'.format(label)))
| 32.827957 | 76 | 0.689158 |
48474668430bd56c9be0fa4e96a14ac44c7e0f55 | 1,831 | py | Python | gimmemotifs/commands/match.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | gimmemotifs/commands/match.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | gimmemotifs/commands/match.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <simon.vanheeringen@gmail.com>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from __future__ import print_function
from gimmemotifs.comparison import MotifComparer
from gimmemotifs.motif import pwmfile_to_motifs, Motif
from gimmemotifs.plot import match_plot
| 38.145833 | 111 | 0.588749 |
4847f5739e2a2a4fe3f2279bc69fc734031f35e3 | 5,610 | py | Python | rest-service/manager_rest/rest/resources_v3/users.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | rest-service/manager_rest/rest/resources_v3/users.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | rest-service/manager_rest/rest/resources_v3/users.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | #########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from flask_security import current_user
from manager_rest import constants
from manager_rest.storage import models, user_datastore
from manager_rest.security.authorization import authorize
from manager_rest.security import (SecuredResource,
MissingPremiumFeatureResource)
from manager_rest.manager_exceptions import BadParametersError
from .. import rest_decorators, rest_utils
from ..responses_v3 import UserResponse
try:
from cloudify_premium.multi_tenancy.secured_tenant_resource \
import SecuredMultiTenancyResource
except ImportError:
SecuredMultiTenancyResource = MissingPremiumFeatureResource
| 34.207317 | 79 | 0.659893 |
484898b58a6b8f0e2cf8c6f249de3cb0f85b7504 | 6,659 | py | Python | src/m2_run_this_on_laptop.py | Petersl13/99-CapstoneProject-201920 | 4ec25ebf064e93745b9280a09c9212f8b16f76a1 | [
"MIT"
] | null | null | null | src/m2_run_this_on_laptop.py | Petersl13/99-CapstoneProject-201920 | 4ec25ebf064e93745b9280a09c9212f8b16f76a1 | [
"MIT"
] | null | null | null | src/m2_run_this_on_laptop.py | Petersl13/99-CapstoneProject-201920 | 4ec25ebf064e93745b9280a09c9212f8b16f76a1 | [
"MIT"
] | null | null | null | """
Capstone Project. Code to run on a LAPTOP (NOT the robot).
Displays the Graphical User Interface (GUI) and communicates with the robot.
Authors: Your professors (for the framework)
and Nathalie Grier.
Winter term, 2018-2019.
"""
import mqtt_remote_method_calls as com
import tkinter
from tkinter import ttk
import shared_gui
import m2_sprint_3
import rosebot
def main():
"""
This code, which must run on a LAPTOP:
1. Constructs a GUI for my part of the Capstone Project.
2. Communicates via MQTT with the code that runs on the EV3 robot.
"""
# -------------------------------------------------------------------------
# Construct and connect the MQTT Client:
# -------------------------------------------------------------------------
mqtt_sender = com.MqttClient()
mqtt_sender.connect_to_ev3()
# -------------------------------------------------------------------------
# The root TK object for the GUI:
# -------------------------------------------------------------------------
root = tkinter.Tk()
root.title('CSSE 120, Nathalie Grier, Winter 2018-19')
# -------------------------------------------------------------------------
# The main frame, upon which the other frames are placed.
# -------------------------------------------------------------------------
main_frame = ttk.Frame(root, padding=10, borderwidth=5, relief='groove')
main_frame.grid()
# -------------------------------------------------------------------------
# Sub-frames for the shared GUI that the team developed:
# -------------------------------------------------------------------------
#teleop_frame, arm_fram, control_frame, go_straight_frame, beep_frame, color_frame, go_straight, camera_frame, sprint_3 = get_shared_frames(main_frame, mqtt_sender)
sprint_3, control_frame = new_shared_frames(main_frame, mqtt_sender)
# -------------------------------------------------------------------------
# Frames that are particular to my individual contributions to the project.
# -------------------------------------------------------------------------
# DONE: Implement and call get_my_frames(...)
# -------------------------------------------------------------------------
# Grid the frames.
# -------------------------------------------------------------------------
#grid_frames(teleop_frame, arm_fram, control_frame, go_straight_frame, beep_frame, color_frame, go_straight, camera_frame, sprint_3)
new_grid_frames(sprint_3, control_frame)
# -------------------------------------------------------------------------
# The event loop:
# -------------------------------------------------------------------------
root.mainloop()
def sprint_3_nathalie(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
has Entry and Button objects that control the EV3 robot's Arm
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief='ridge')
frame.grid()
frame_label = ttk.Label(frame, text='Sprint 3 Nathalie')
frame_label.grid(row=0, column=1)
sprint_3_button = ttk.Button(frame, text='Sprint 3')
sprint_3_button.grid(row=2, column=0)
sprint_3_button["command"] = lambda: handle_sprint_3(mqtt_sender, speed_entry)
speed_entry = ttk.Entry(frame, width=8)
speed_label = ttk.Label(frame, text='Speed:')
speed_entry.grid(row=2, column=1)
speed_label.grid(row=1, column=1)
bark_button = ttk.Button(frame, text='Bark!')
bark_button.grid(row=2, column=2)
bark_button["command"] = lambda: handle_bark(mqtt_sender)
trick_1_button = ttk.Button(frame, text='Trick 1')
trick_2_button = ttk.Button(frame, text='Trick 2')
trick_1_button.grid(row=3, column=0)
trick_2_button.grid(row=3, column=2)
trick_1_button["command"] = lambda: handle_trick_1(mqtt_sender, speed_entry)
trick_2_button["command"] = lambda: handle_trick_2(mqtt_sender, speed_entry)
return frame
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main() | 40.357576 | 168 | 0.59138 |
4848f46a84c4346593a1d98c3f7f6dead3b394ab | 2,905 | py | Python | alphamind/benchmarks/data/neutralize.py | rongliang-tech/alpha-mind | 39f720974c637d17e185e445dc05c9fc4863a241 | [
"MIT"
] | 186 | 2017-11-27T01:26:44.000Z | 2022-03-28T16:11:33.000Z | alphamind/benchmarks/data/neutralize.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | 2 | 2017-12-19T02:47:36.000Z | 2021-01-09T05:25:18.000Z | alphamind/benchmarks/data/neutralize.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | 65 | 2017-11-27T01:26:47.000Z | 2022-03-17T10:50:52.000Z | # -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
from sklearn.linear_model import LinearRegression
from alphamind.data.neutralize import neutralize
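# NOTE: benchmark_neutralize and benchmark_neutralize_with_groups are omitted
# from this excerpt; the __main__ block below assumes they are defined above
# (presumably timing `neutralize` against the sklearn LinearRegression import).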
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
| 35.864198 | 102 | 0.561102 |
484bd025db2a6036894e42cd251bae437f17440a | 329 | py | Python | python/__init__.py | seangal/xAODAnaHelpers | 49f15c8525bf4aed9beceec2c58e58964d57e034 | [
"Apache-2.0"
] | null | null | null | python/__init__.py | seangal/xAODAnaHelpers | 49f15c8525bf4aed9beceec2c58e58964d57e034 | [
"Apache-2.0"
] | null | null | null | python/__init__.py | seangal/xAODAnaHelpers | 49f15c8525bf4aed9beceec2c58e58964d57e034 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from . import logging as xAH_logging
try:
from .config import Config
except ImportError:
xAH_logging.logger.warning("xAH::Config could not be imported.")
__version__ = "1.0.0"
__all__ = ["utils", "config"]
| 21.933333 | 68 | 0.723404 |
484e5a10424681e3b5a649a30275352d6bd27762 | 8,118 | py | Python | process/extract.py | kogakenji/kasatomaru | 6e3cd36ea54a5e7c8042d17beed4675f210f1a36 | [
"MIT"
] | null | null | null | process/extract.py | kogakenji/kasatomaru | 6e3cd36ea54a5e7c8042d17beed4675f210f1a36 | [
"MIT"
] | 2 | 2021-03-31T19:40:45.000Z | 2021-12-13T20:34:12.000Z | process/extract.py | kogakenji/kasatomaru | 6e3cd36ea54a5e7c8042d17beed4675f210f1a36 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import lxml.html
import pathlib
import db
import datetime
from concurrent.futures import ThreadPoolExecutor
import threading
# Define the lock globally
lock = threading.Lock()
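# The Ship and Person containers used in get_family_content() are missing from
# this excerpt; the minimal stand-ins below are an assumption inferred from usage:
class Ship:
    def __init__(self, name, leave_date, arrival_date, destination, farm, station):
        self.name = name
        self.leave_date = leave_date
        self.arrival_date = arrival_date
        self.destination = destination
        self.farm = farm
        self.station = station
class Person:
    def __init__(self, name, surname, name_kanji, surname_kanji, ship):
        self.name = name
        self.surname = surname
        self.name_kanji = name_kanji
        self.surname_kanji = surname_kanji
        self.ship = ship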
def files_list(start, end):
"""Generate url list with given start and end of indexes"""
resultlist = []
for i in range(start, end):
resultlist.append(f"page_{i}.html")
return resultlist
def extract_main_pages():
"""Extracts content from main pages """
pages = files_list(1, 49278)
# There was a problematic file: 44663.html removed #&...
print(len(pages))
for page in pages:
path = pathlib.Path.cwd().parent / "main_files" / page
print(path)
with open(str(path), encoding="ISO-8859-1") as p:
soup = BeautifulSoup(p.read(), 'html.parser')
# soup = BeautifulSoup(p.read(), 'lxml')
# table = soup.find_all("table", bgcolor="#FFFFFF")
# print(table)
data = soup.find_all("tr", {'class': 'texto'})
for i, d in enumerate(data):
tds = data[i].find_all('td')
ship = tds[0].a.contents[0].strip()
link_family = "http://www.museubunkyo.org.br/ashiato/web2/" + tds[1].a.get("href")
family_id_register = link_family[link_family.find("=") + 1:link_family.index("&")]
leave_date = tds[1].a.contents[0].strip()
leave_date = datetime.datetime.strptime(leave_date, '%m/%d/%Y').strftime('%d/%m/%y')
arrive_date = tds[1].a.contents[2].strip()
arrive_date = datetime.datetime.strptime(arrive_date, '%m/%d/%Y').strftime('%d/%m/%y')
province = tds[2].a.contents[0].strip()
destination = tds[3].a.contents[0].strip()
surname = tds[4].a.contents[0][0:4].strip()
name = tds[5].a.contents[0].strip()
print(
f"Ship: {ship} - leave_date: {leave_date} - arrive_date: {arrive_date} - province: {province} - destination: {destination} - surname: {surname} - name: {name}")
# print(f"link_family: {link_family} - idRegistro: {id_register}")
db.insert_person(name, surname, province, ship, destination, leave_date, arrive_date, link_family,
family_id_register)
def extract_jp_pages():
"""Extracts content from main pages """
pages = files_list(1, 49277)
# There was a problematic file: 44663.html removed #&...
print(len(pages))
for page in pages:
path = pathlib.Path.cwd().parent / "jp_files" / "jp" /page
print(path)
with open(str(path), encoding="ISO-8859-1") as p:
soup = BeautifulSoup(p.read(), 'html.parser')
# soup = BeautifulSoup(p.read(), 'lxml')
# table = soup.find_all("table", bgcolor="#FFFFFF")
# print(table)
data = soup.find_all("tr", {'class': 'texto'})
for i, d in enumerate(data):
tds = data[i].find_all('td')
ship = tds[0].a.contents[0].strip()
link_family = "http://www.museubunkyo.org.br/ashiato/web2/" + tds[1].a.get("href")
family_id_register = link_family[link_family.find("=") + 1:link_family.index("&")]
leave_date = tds[1].a.contents[0].strip()
leave_date = datetime.datetime.strptime(leave_date, '%m/%d/%Y').strftime('%d/%m/%y')
arrive_date = tds[1].a.contents[2].strip()
arrive_date = datetime.datetime.strptime(arrive_date, '%m/%d/%Y').strftime('%d/%m/%y')
province = tds[2].a.contents[0].strip()
destination = tds[3].a.contents[0].strip()
surname = tds[4].a.contents[0][0:4].strip()
name = tds[5].a.contents[0].strip()
try:
print(
f"Ship: {ship} - leave_date: {leave_date} - arrive_date: {arrive_date} - province: {province} - destination: {destination} - surname: {surname} - name: {name}")
# print(f"link_family: {link_family} - idRegistro: {id_register}")
db.insert_person(name, surname, province, ship, destination, leave_date, arrive_date, link_family,
family_id_register)
except Exception as exp:
print(exp)
pass
def get_family_content(id_family_register):
# id, name, surname, id_family_register, link_family = family
path = pathlib.Path.cwd().parent / "families_files" / "families" / f"page_{id_family_register[0]}.html"
print(f"caminho do arquivo: {path}")
with open(str(path), encoding="ISO-8859-1") as p:
soup = BeautifulSoup(p.read(), "html.parser")
# print(soup)
# print("=================fazenda================")
td = soup.find_all("span", {'class': 'titulo'})
for titulo in td:
if titulo.get_text() == "Navio:":
ship = titulo.parent.get_text().split(': ')[1]
if titulo.get_text() == "Destino:":
destination = titulo.parent.get_text().split(': ')[1]
if titulo.get_text() == "Partida:":
leave_date = titulo.parent.get_text().split(': ')[1]
leave_date = datetime.datetime.strptime(leave_date, '%m/%d/%Y').strftime('%d/%m/%y')
if titulo.get_text() == "Chegada:":
arrival_date = titulo.parent.get_text().split(': ')[1]
arrival_date = datetime.datetime.strptime(arrival_date, '%m/%d/%Y').strftime('%d/%m/%y')
if titulo.get_text() == "Fazenda:":
farm = titulo.parent.get_text().split(': ')[1]
if titulo.get_text() == "Estao:":
station = titulo.parent.get_text().split(': ')[1]
ship_info = Ship(ship, leave_date, arrival_date, destination, farm, station)
# print("===================pessoas=====================")
data = soup.find_all("tr", {'class': 'texto'})
for d in data:
record = d.find_all("td")
            fields = [name.get_text() for name in record]
            p = Person(fields[1], fields[0], fields[3], fields[2], ship_info)
lock.acquire(True)
try:
db.update_jp_family(p.name, p.surname, p.name_kanji, p.surname_kanji, p.ship.name, p.ship.destination,
p.ship.leave_date, p.ship.arrival_date, p.ship.farm, p.ship.station)
except Exception as err:
print(err)
pass
lock.release()
def extract_family_content():
"""Extracts content from family pages"""
families = db.person_info()
print(f"TOTAL SIZE: {len(families)}")
with ThreadPoolExecutor(max_workers=3) as executor:
# executor.map(get_family_content, path)
executor.map(get_family_content, families)
if __name__ == "__main__":
# extract_main_pages()
extract_family_content()
# extract_jp_pages()
| 44.604396 | 184 | 0.572555 |
484f03e9c4b7ff5aefbc6845368e72fc3dfe1209 | 114 | py | Python | tests/shunit/data/bad_i18n_newline_5.py | nicole331/TWLight | fab9002e76868f8a2ef36f9279c777de34243b2c | [
"MIT"
] | 67 | 2017-12-14T22:27:48.000Z | 2022-03-13T18:21:31.000Z | tests/shunit/data/bad_i18n_newline_5.py | nicole331/TWLight | fab9002e76868f8a2ef36f9279c777de34243b2c | [
"MIT"
] | 433 | 2017-03-24T22:51:23.000Z | 2022-03-31T19:36:22.000Z | tests/shunit/data/bad_i18n_newline_5.py | Mahuton/TWLight | 90b299d07b0479f21dc90e17b8d05f5a221b0de1 | [
"MIT"
] | 105 | 2017-06-23T03:53:41.000Z | 2022-03-30T17:24:29.000Z | # Single-quoted string is preceded and succeeded by newlines.
# Translators: This is a helpful comment.
_(
'5'
)
| 16.285714 | 61 | 0.736842 |
48511c78e308c11777c5277149036d4e3f1a72d0 | 9,090 | py | Python | deciphon/protein_profile.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
] | null | null | null | deciphon/protein_profile.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
] | 1 | 2021-07-02T10:24:19.000Z | 2021-07-02T10:24:19.000Z | deciphon/protein_profile.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
] | null | null | null | from __future__ import annotations
from math import log
from typing import List, Type, Union
from imm import MuteState, Sequence, lprob_add, lprob_zero
from nmm import (
AminoAlphabet,
AminoLprob,
BaseLprob,
CodonLprob,
CodonMarg,
DNAAlphabet,
FrameState,
RNAAlphabet,
codon_iter,
)
from .codon_table import CodonTable
from .hmmer_model import HMMERModel
from .model import AltModel, EntryDistr, Node, NullModel, SpecialNode, Transitions
from .profile import Profile, ProfileID
__all__ = ["ProteinProfile", "create_profile"]
# def search(self, sequence: SequenceABC) -> SearchResults:
# self._set_target_length_model(len(sequence))
# alt_results = self._alt_model.viterbi(sequence, self.window_length)
# def create_fragment(
# seq: SequenceABC, path: Path, homologous: bool
# ):
# return ProteinFragment(seq, path, homologous)
# search_results = SearchResults(sequence, create_fragment)
# for alt_result in alt_results:
# subseq = alt_result.sequence
# # TODO: temporary fix for reading from binary file
# # and consequently alt and null model having different alphabets
# s = Sequence.create(bytes(subseq), self._null_model.hmm.alphabet)
# viterbi_score0 = self._null_model.loglikelihood(s)
# if len(alt_result.path) == 0:
# viterbi_score1 = lprob_invalid()
# else:
# viterbi_score1 = self._alt_model.loglikelihood(alt_result.sequence,
# alt_result.path)
# score = viterbi_score1 - viterbi_score0
# window = Interval(subseq.start, subseq.start + len(subseq))
# search_results.append(
# score, window, alt_result.path, viterbi_score1, viterbi_score0
# )
# return search_results
# def create_profile(
# hmm: HMMERModel,
# base_abc: Union[RNAAlphabet, DNAAlphabet],
# window_length: int = 0,
# epsilon: float = 0.1,
# ) -> ProteinProfile:
# amino_abc = hmm.alphabet
# assert isinstance(amino_abc, AminoAlphabet)
# lprobs = lprob_normalize(hmm.insert_lprobs(0))
# null_aminot = AminoLprob.create(amino_abc, lprobs)
# factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
# nodes: List[Node] = []
# for m in range(1, hmm.model_length + 1):
# lprobs = lprob_normalize(hmm.match_lprobs(m))
# M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# lprobs = lprob_normalize(hmm.insert_lprobs(m))
# I = factory.create(f"I{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# D = MuteState.create(f"D{m}".encode(), base_abc)
# nodes.append(Node(M, I, D))
# trans: List[Transitions] = []
# for t in hmm.transitions:
# t.normalize()
# trans.append(t)
# profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
# prof = ProteinProfile.create(
# profid, factory, null_aminot, nodes, trans, EntryDistr.UNIFORM
# )
# prof.window_length = window_length
# return prof
| 31.344828 | 85 | 0.627063 |
48514c4855c82f6511561bc091163063091c1e9c | 664 | py | Python | ptranking/ltr_adhoc/util/one_hot_utils.py | junj2ejj/ptranking.github.io | 06fa9751dd2eca89749ba4bb9641e4272cfc30a1 | [
"MIT"
] | 1 | 2020-09-24T10:38:53.000Z | 2020-09-24T10:38:53.000Z | ptranking/ltr_adhoc/util/one_hot_utils.py | junj2ejj/ptranking.github.io | 06fa9751dd2eca89749ba4bb9641e4272cfc30a1 | [
"MIT"
] | null | null | null | ptranking/ltr_adhoc/util/one_hot_utils.py | junj2ejj/ptranking.github.io | 06fa9751dd2eca89749ba4bb9641e4272cfc30a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import torch
from ptranking.ltr_global import global_gpu as gpu
def get_one_hot_reprs(batch_stds):
""" Get one-hot representation of batch ground-truth labels """
batch_size = batch_stds.size(0)
hist_size = batch_stds.size(1)
int_batch_stds = batch_stds.type(torch.cuda.LongTensor) if gpu else batch_stds.type(torch.LongTensor)
hot_batch_stds = torch.cuda.FloatTensor(batch_size, hist_size, 3) if gpu else torch.FloatTensor(batch_size, hist_size, 3)
hot_batch_stds.zero_()
hot_batch_stds.scatter_(2, torch.unsqueeze(int_batch_stds, 2), 1)
return hot_batch_stds
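# Illustrative call (added; not in the original module). Assumes graded
# relevance labels in {0, 1, 2}, matching the hard-coded depth of 3 above:
#
#   batch_stds = torch.tensor([[0., 2., 1.]])
#   get_one_hot_reprs(batch_stds).shape  # -> torch.Size([1, 3, 3])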
| 30.181818 | 125 | 0.74247 |
485441df6c93c795b69160386a1e913eee4699da | 5,108 | py | Python | src/data_module.py | enningxie/lightning-semantic-matching | 156ce3d40c53436b8166679c718b80f45782fe37 | [
"MIT"
] | 2 | 2020-10-21T01:02:22.000Z | 2021-07-29T01:56:53.000Z | src/data_module.py | enningxie/lightning-semantic-matching | 156ce3d40c53436b8166679c718b80f45782fe37 | [
"MIT"
] | null | null | null | src/data_module.py | enningxie/lightning-semantic-matching | 156ce3d40c53436b8166679c718b80f45782fe37 | [
"MIT"
] | null | null | null | # Created by xieenning at 2020/10/19
from argparse import ArgumentParser, Namespace
from typing import Optional, Union, List
from pytorch_lightning import LightningDataModule
from transformers import BertTokenizer
from transformers import ElectraTokenizer
from transformers.utils import logging
import torch
from torch.utils.data import DataLoader, TensorDataset
from src.data_processor import SemanticMatchingProcessor, convert_examples_to_features
logger = logging.get_logger(__name__)
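# NOTE: the SemanticMatchingDataModule class (a LightningDataModule built on
# SemanticMatchingProcessor) is omitted from this excerpt; the smoke test
# below assumes it is defined above.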
if __name__ == '__main__':
tmp_parser = ArgumentParser()
tmp_parser.add_argument(
"--model_name_or_path",
type=str,
default="/Data/public/pretrained_models/pytorch/chinese-bert-wwm-ext"
)
tmp_parser = SemanticMatchingDataModule.add_data_specific_args(tmp_parser)
hparams = tmp_parser.parse_args()
tmp_data_module = SemanticMatchingDataModule(hparams)
tmp_data_module.prepare_data()
tmp_data_module.setup()
train_dataloader = tmp_data_module.val_dataloader()
for batch in train_dataloader:
print(type(batch))
print(batch)
print('break point.')
print('break point.')
| 39.292308 | 108 | 0.634495 |
48544b3690b1859057fd2e593fbf385719c5db3e | 14,076 | py | Python | mainwin.py | hatmann1944/pyqt-http-file-svr | 3e95a222dc7d662921da44654aadb1721cba0382 | [
"Apache-2.0"
] | 1 | 2015-08-27T13:22:42.000Z | 2015-08-27T13:22:42.000Z | mainwin.py | hatmann1944/pyqt-http-file-svr | 3e95a222dc7d662921da44654aadb1721cba0382 | [
"Apache-2.0"
] | null | null | null | mainwin.py | hatmann1944/pyqt-http-file-svr | 3e95a222dc7d662921da44654aadb1721cba0382 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/hgfs/tmpcode/pyqt-http/untitled.ui'
#
# Created: Fri Jun 5 10:59:33 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import socket
import signal
import errno
import sys
import os
import platform
import time
#from sendfile import sendfile
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
from PIL import ImageQt
import qrcode
import socket
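# WhichPlatform() is called below but its definition is missing from this
# excerpt; a plausible stand-in (an assumption) based on the stdlib:
def WhichPlatform():
    return platform.system().lower()  # e.g. "linux", "windows", "darwin"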
if WhichPlatform() == "linux":
import fcntl
import struct
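    # get_ip_address() is likewise missing from this excerpt; the classic
    # SIOCGIFADDR recipe is a likely candidate (an assumption):
    def get_ip_address(ifname):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )[20:24])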
if __name__ == '__main__':
global localIP
#localIP = socket.gethostbyname(socket.gethostname())
if WhichPlatform() != "windows":
localIP = get_ip_address("eth0")
print "local ip:%s "%localIP
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| 32.210526 | 157 | 0.586957 |
4854e27a28f0ec8896c437afdc84226fabdac5c2 | 905 | py | Python | monzo/handlers/echo.py | petermcd/monzo-api | e7b09d7564d07d00c0d0031b300f72e4479d8690 | [
"MIT"
] | 1 | 2022-02-08T23:13:56.000Z | 2022-02-08T23:13:56.000Z | monzo/handlers/echo.py | petermcd/monzo-api | e7b09d7564d07d00c0d0031b300f72e4479d8690 | [
"MIT"
] | 12 | 2021-09-21T20:09:50.000Z | 2022-03-13T14:39:02.000Z | monzo/handlers/echo.py | petermcd/monzo-api | e7b09d7564d07d00c0d0031b300f72e4479d8690 | [
"MIT"
] | 1 | 2021-12-05T17:47:33.000Z | 2021-12-05T17:47:33.000Z | """Class to echo credentials."""
from monzo.handlers.storage import Storage
| 29.193548 | 82 | 0.583425 |
4854e666dca1f05f5b35de7678011b69bdfaadb9 | 359 | py | Python | grappelli/settings.py | theatlantic/django-grappelli-old | f4a5f10a2e68024873556d4cc249cf0351eb1335 | [
"BSD-3-Clause"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/grappelli/settings.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/site-packages/grappelli/settings.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # coding: utf-8
# DJANGO IMPORTS
from django.conf import settings
# Admin Site Title
ADMIN_HEADLINE = getattr(settings, "GRAPPELLI_ADMIN_HEADLINE", 'Grappelli')
ADMIN_TITLE = getattr(settings, "GRAPPELLI_ADMIN_TITLE", 'Grappelli')
# Link to your main admin site (the default '/admin/' keeps the leading and trailing slashes)
ADMIN_URL = getattr(settings, "GRAPPELLI_ADMIN_URL", '/admin/') | 29.916667 | 75 | 0.768802 |
4855ae459d96ebb92658af5b5f4e917cfa5c95f9 | 638 | py | Python | epikjjh/baekjoon/2178.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
] | 3 | 2019-05-19T13:44:39.000Z | 2019-07-03T11:15:20.000Z | epikjjh/baekjoon/2178.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
] | 7 | 2019-05-06T02:37:26.000Z | 2019-06-29T07:28:02.000Z | epikjjh/baekjoon/2178.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
] | 1 | 2019-07-28T06:24:54.000Z | 2019-07-28T06:24:54.000Z | import sys
input = sys.stdin.readline
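# conv() is used below but missing from this excerpt; a minimal stand-in
# (an assumption) that turns a maze row such as "1011" into a list of ints:
def conv(s):
    return [int(ch) for ch in s]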
n,m = map(int,input().split())
arr = [conv(input().split()[0]) for i in range(n)]
visit = [[0]*m for i in range(n)]
visit[0][0] = 1
direction = [(0,1),(0,-1),(1,0),(-1,0)]
queue = [[0,0]]
while queue:
y,x = queue.pop(0)
if y==n-1 and x==m-1:
print(visit[y][x])
break
for i in range(4):
n_y = y+direction[i][0]
n_x = x+direction[i][1]
if 0<=n_y<n and 0<=n_x<m and arr[n_y][n_x] and not visit[n_y][n_x]:
visit[n_y][n_x] = visit[y][x] + 1
queue.append([n_y,n_x]) | 26.583333 | 75 | 0.525078 |
485611bfa6d80f65f56625abee3ae8772d391fbe | 2,877 | py | Python | DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | f91cec403b09d6ca060c41bf0147fb3a15fac1fc | [
"Apache-2.0"
] | 283 | 2018-01-27T21:51:21.000Z | 2022-03-07T11:23:44.000Z | DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | f91cec403b09d6ca060c41bf0147fb3a15fac1fc | [
"Apache-2.0"
] | 100 | 2018-01-28T18:02:41.000Z | 2021-11-10T11:00:38.000Z | DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | f91cec403b09d6ca060c41bf0147fb3a15fac1fc | [
"Apache-2.0"
] | 79 | 2018-01-28T17:57:38.000Z | 2022-03-21T11:44:16.000Z | from ..utils import run
import logging
logger = logging.getLogger(__name__)
def process_one_package(path, package, python_version="3"):
"""Get details about one precise python package in the given image.
    :param path: path where the docker image filesystem is expanded.
:type path: string
:param package: name of the python package to get info from.
:type package: string
    :param python_version: version of python to use. Can be "2" or "3"; defaults to "3".
:type python_version: string
:return: list containing package name, version and size
:rtype: list[string, string, int]
"""
command = f"sudo chroot {path} pip{python_version} show {package}"
info = get_ipython().getoutput(command)
for line in info:
if "Name" in line:
name = line.split(" ").pop()
if "Version" in line:
version = line.split(" ").pop()
if "Location" in line:
location = line.split(" ").pop()
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name}").pop()
# If the folder does not exist, try lowercase
if "cannot access" in result:
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name.lower()}").pop()
    # If the lowercase folder does not exist either
if "cannot access" not in result:
size = int(result.split('\t').pop(0))
# List the files by hand
else:
command = f"sudo chroot {path} pip{python_version} show {package} -f"
info = get_ipython().getoutput(command)
flag = False
size = 0
for line in info:
if flag:
command = f"du {path}{location}/{line.strip()}"
size += int(get_ipython().getoutput(command).pop().split('\t').pop(0))
if 'Files' in line:
flag = True
return [name, version, size]
def get_python_packages_info(path, python_version="3"):
"""Get details about all python packages in an image filesystem.
    :param path: path where the docker image filesystem is expanded.
:type path: string
    :param python_version: version of python to use. Can be "2" or "3"; defaults to "3".
:type python_version: string
:return: list containing lists of each package's name, version and size
:rtype: list[list[string, string, int]]
"""
command = f"sudo chroot {path} pip{python_version} list --format freeze --no-cache-dir 2>/dev/null"
packages = [package.split('==')
for package in get_ipython().getoutput(command)]
package_list = []
for package in packages:
try:
package_list.append(process_one_package(path, package[0]))
except Exception as e:
logger.error("Error processing python packages", package[0], e)
pass
return package_list
| 37.855263 | 103 | 0.620438 |
4856168e71f578517034764f4b9110679f5820fe | 24 | py | Python | src/maho/modules/__init__.py | evangelos-ch/maho-bot | 458c3ed0e4cb4d8edd300441b2defbc481aaf3f3 | [
"MIT"
] | null | null | null | src/maho/modules/__init__.py | evangelos-ch/maho-bot | 458c3ed0e4cb4d8edd300441b2defbc481aaf3f3 | [
"MIT"
] | null | null | null | src/maho/modules/__init__.py | evangelos-ch/maho-bot | 458c3ed0e4cb4d8edd300441b2defbc481aaf3f3 | [
"MIT"
] | 1 | 2021-02-16T13:06:56.000Z | 2021-02-16T13:06:56.000Z | """Maho bot modules."""
| 12 | 23 | 0.583333 |
485cca825ed78a1668753f45f923d308e840da2c | 6,785 | py | Python | backend/dc_tests/api_views.py | gitter-badger/djangochannel | f9e33254739457c461e84b66879172007512f9b0 | [
"BSD-3-Clause"
] | 2 | 2021-11-29T15:34:24.000Z | 2021-12-02T14:47:20.000Z | backend/dc_tests/api_views.py | gitter-badger/djangochannel | f9e33254739457c461e84b66879172007512f9b0 | [
"BSD-3-Clause"
] | null | null | null | backend/dc_tests/api_views.py | gitter-badger/djangochannel | f9e33254739457c461e84b66879172007512f9b0 | [
"BSD-3-Clause"
] | null | null | null | from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, JsonResponse
from django.views.generic.base import View
from django.contrib.auth.mixins import LoginRequiredMixin
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from .models import (
TestCategory,
Test,
Question,
PossibleAnswer,
AnswersCounter
)
from .serializers import (
TestCategorySerializer,
TestSerializer,
QuestionSerializer,
PossibleAnswerSerializer
)
from backend.courses.models import Task, RealizationTask, Course
from backend.courses.api_views import CompletedTasks
from backend.utils.api import BlankGetAPIView
# class QuestionsInTest(BlankGetAPIView):
# """
#     Questions belonging to a test,
#     accepts: pk, value: id of the test,
# """
# permission_classes = [permissions.IsAuthenticated]
# model = Question
# serializer = QuestionSerializer
# filter_name = 'test_id'
# order_params = 'id'
| 32.464115 | 113 | 0.627708 |
485d74659bc61cba2ba9b5ae45bb87b9fe1df6b3 | 2,066 | py | Python | sdk/python/pulumi_kubernetes/apps/v1/ControllerRevision.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/apps/v1/ControllerRevision.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/apps/v1/ControllerRevision.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
] | null | null | null | import pulumi
import pulumi.runtime
from ... import tables
| 44.913043 | 100 | 0.703291 |
485ec5eb7e878a442433e3d945a0ad573fe3057e | 1,479 | py | Python | backend/python_scripts/feedback_frequency.py | bartaliskrisztian/sapifeedback | a63e38c0b767458509e47c1d5ccad0f6ce21a285 | [
"MIT"
] | null | null | null | backend/python_scripts/feedback_frequency.py | bartaliskrisztian/sapifeedback | a63e38c0b767458509e47c1d5ccad0f6ce21a285 | [
"MIT"
] | null | null | null | backend/python_scripts/feedback_frequency.py | bartaliskrisztian/sapifeedback | a63e38c0b767458509e47c1d5ccad0f6ce21a285 | [
"MIT"
] | null | null | null | import sys
import json
import pandas as pd
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import os
import base64
if __name__ == '__main__':
main()
| 23.47619 | 70 | 0.619337 |
485f584dda3b7ed9cbcd49b969e57d33ae96c239 | 6,720 | py | Python | tempest/tests/lib/services/placement/test_resource_providers_client.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | tempest/tests/lib/services/placement/test_resource_providers_client.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | tempest/tests/lib/services/placement/test_resource_providers_client.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.placement import resource_providers_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
| 36.721311 | 78 | 0.654315 |
485f7ffc14de09acdf65c094b7c9e15395d4ca1b | 1,001 | py | Python | problems/095.py | JoshKarpel/Euler | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | 1 | 2017-09-20T22:26:24.000Z | 2017-09-20T22:26:24.000Z | problems/095.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | problems/095.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
] | null | null | null | from problems import utils, mymath
if __name__ == '__main__':
    print(solve())
| 26.342105 | 87 | 0.632368 |
4860d4c2fef20a3559333f3c07fba155be5e079a | 12,166 | py | Python | core/utils.py | jojo23333/mcan-vqa | 294cf672155a3c01d148450afc6542412a8837e6 | [
"Apache-2.0"
] | null | null | null | core/utils.py | jojo23333/mcan-vqa | 294cf672155a3c01d148450afc6542412a8837e6 | [
"Apache-2.0"
] | null | null | null | core/utils.py | jojo23333/mcan-vqa | 294cf672155a3c01d148450afc6542412a8837e6 | [
"Apache-2.0"
] | null | null | null | import copy
import logging
import re
import torch
import json
from fvcore.common.checkpoint import (
get_missing_parameters_message,
get_unexpected_parameters_message,
)
from core.data.data_utils import ans_stat
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict):
"""
Match names between the two state-dict, and update the values of model_state_dict in-place with
copies of the matched tensor in ckpt_state_dict.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
# used for logging
max_len_model = max(len(key) for key in model_keys) if model_keys else 1
max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1
log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
# logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
print(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
print(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
model_state_dict[key_model] = value_ckpt.clone()
if key_ckpt in matched_keys: # already added to matched_keys
print(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
print(
log_str_template.format(
key_model,
max_len_model,
original_keys[key_ckpt],
max_len_ckpt,
tuple(shape_in_model),
)
)
matched_model_keys = matched_keys.values()
matched_ckpt_keys = matched_keys.keys()
# print warnings about unmatched keys on both side
unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys]
if len(unmatched_model_keys):
print(get_missing_parameters_message(unmatched_model_keys))
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys]
if len(unmatched_ckpt_keys):
print(
get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys)
)
import numpy as np
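# Illustrative check of the suffix matching above (added for clarity, not part
# of the original file; the key names below are hypothetical examples):
if __name__ == "__main__":
    _model_sd = {"backbone.body.res2.conv1.weight": torch.zeros(3)}
    _ckpt_sd = {"res2.conv1.weight": torch.ones(3)}
    align_and_update_state_dicts(_model_sd, _ckpt_sd)
    # the model entry now holds a copy of the matching checkpoint tensor
    assert bool(_model_sd["backbone.body.res2.conv1.weight"].eq(1).all())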
| 39.888525 | 102 | 0.616472 |
486361edc3e5c1d568dba14a5be4788c38396ea5 | 6,589 | py | Python | spid_cie_oidc/entity/trust_chain_operations.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
] | 4 | 2022-03-08T09:05:13.000Z | 2022-03-16T17:59:43.000Z | spid_cie_oidc/entity/trust_chain_operations.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
] | 64 | 2022-03-08T01:11:40.000Z | 2022-03-31T17:23:49.000Z | spid_cie_oidc/entity/trust_chain_operations.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
] | 8 | 2022-03-09T12:00:08.000Z | 2022-03-31T13:52:14.000Z | import logging
from django.utils import timezone
from typing import Union
from .exceptions import InvalidTrustchain, TrustchainMissingMetadata
from .models import FetchedEntityStatement, TrustChain
from .statements import EntityConfiguration, get_entity_configurations
from .settings import HTTPC_PARAMS
from .trust_chain import TrustChainBuilder
from .utils import datetime_from_timestamp
logger = logging.getLogger(__name__)
def trust_chain_builder(
subject: str,
trust_anchor: EntityConfiguration,
httpc_params: dict = HTTPC_PARAMS,
required_trust_marks: list = []
) -> Union[TrustChainBuilder, bool]:
"""
Trust Chain builder
"""
tc = TrustChainBuilder(
subject,
trust_anchor=trust_anchor,
required_trust_marks=required_trust_marks,
httpc_params=httpc_params
)
tc.start()
if not tc.is_valid:
logger.error(
"The tree of trust cannot be validated for "
f"{tc.subject}: {tc.tree_of_trust}"
)
return False
else:
return tc
def get_or_create_trust_chain(
subject: str,
trust_anchor: str,
httpc_params: dict = HTTPC_PARAMS,
required_trust_marks: list = [],
force: bool = False,
) -> Union[TrustChain, None]:
"""
    returns a TrustChain model object if any is available:
    if available, it returns it;
    if not available, it creates a new one;
    if available but expired, it returns the expired one;
    if the force flag is set to True, it renews the trust chain, updates it
    and returns the updated one
"""
fetched_trust_anchor = FetchedEntityStatement.objects.filter(
sub=trust_anchor, iss=trust_anchor
)
if not fetched_trust_anchor or fetched_trust_anchor.first().is_expired or force:
jwts = get_entity_configurations([trust_anchor], httpc_params=httpc_params)
ta_conf = EntityConfiguration(jwts[0], httpc_params=httpc_params)
data = dict(
exp=datetime_from_timestamp(ta_conf.payload["exp"]),
iat=datetime_from_timestamp(ta_conf.payload["iat"]),
statement=ta_conf.payload,
jwt=ta_conf.jwt,
)
if not fetched_trust_anchor:
# trust to the anchor should be absolute trusted!
# ta_conf.validate_by_itself()
fetched_trust_anchor = FetchedEntityStatement.objects.create(
sub=ta_conf.sub, iss=ta_conf.iss, **data
)
else:
fetched_trust_anchor.update(
exp=datetime_from_timestamp(ta_conf.payload["exp"]),
iat=datetime_from_timestamp(ta_conf.payload["iat"]),
statement=ta_conf.payload,
jwt=ta_conf.jwt,
)
fetched_trust_anchor = fetched_trust_anchor.first()
else:
fetched_trust_anchor = fetched_trust_anchor.first()
ta_conf = fetched_trust_anchor.get_entity_configuration_as_obj()
tc = TrustChain.objects.filter(sub=subject, trust_anchor__sub=trust_anchor).first()
if tc and not tc.is_active:
        # if manually disabled by staff
return None
elif force or not tc or tc.is_expired:
trust_chain = trust_chain_builder(
subject=subject,
trust_anchor=ta_conf,
required_trust_marks=required_trust_marks
)
if not trust_chain:
raise InvalidTrustchain(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} is not found"
)
elif not trust_chain.is_valid:
raise InvalidTrustchain(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} is not valid"
)
elif not trust_chain.final_metadata:
raise TrustchainMissingMetadata(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} doesn't have any metadata"
)
dumps_statements_from_trust_chain_to_db(trust_chain)
tc = TrustChain.objects.filter(
sub=subject, trust_anchor__sub=trust_anchor
)
data = dict(
exp=trust_chain.exp_datetime,
processing_start = timezone.localtime(),
chain=trust_chain.serialize(),
metadata=trust_chain.final_metadata,
parties_involved=[i.sub for i in trust_chain.trust_path],
status="valid",
trust_marks=[
{"id": i.id, "trust_mark": i.jwt}
for i in trust_chain.verified_trust_marks
],
is_active=True,
)
if tc:
tc.update(**data)
tc = tc.first()
else:
tc = TrustChain.objects.create(
sub=subject,
trust_anchor=fetched_trust_anchor,
**data,
)
return tc
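# Illustrative usage (not part of the original module; the URLs below are
# hypothetical). Repeated calls reuse the stored TrustChain until it expires
# or until force=True triggers a rebuild:
#
#   tc = get_or_create_trust_chain(
#       subject="https://rp.example.org",
#       trust_anchor="https://ta.example.org",
#   )
#   if tc and tc.is_valid:
#       final_metadata = tc.metadata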
| 32.29902 | 87 | 0.610563 |
486394bb559615b84fa49567fcdb6a63df1d44d1 | 19,445 | py | Python | code.py | FoamyGuy/CircuitPython_CSV_TileMap_Game | 4cf1661dd7db1cecd434e9fba6e07eb375ffc06d | [
"MIT"
] | 1 | 2020-05-14T02:35:09.000Z | 2020-05-14T02:35:09.000Z | code.py | FoamyGuy/CircuitPython_CSV_TileMap_Game | 4cf1661dd7db1cecd434e9fba6e07eb375ffc06d | [
"MIT"
] | null | null | null | code.py | FoamyGuy/CircuitPython_CSV_TileMap_Game | 4cf1661dd7db1cecd434e9fba6e07eb375ffc06d | [
"MIT"
] | null | null | null | import board
import displayio
import adafruit_imageload
from displayio import Palette
from adafruit_pybadger import PyBadger
import time
# Direction constants for comparison
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
# how long to wait between rendering frames
FPS_DELAY = 1/30
# how many tiles can fit on the screen. Tiles are 16x16
SCREEN_HEIGHT_TILES = 8
SCREEN_WIDTH_TILES = 10
# hold the map state as it came out of the csv. Only holds non-entities.
ORIGINAL_MAP = {}
# hold the current map state if/when it changes. Only holds non-entities.
CURRENT_MAP = {}
# dictionary with tuple keys that map to tile type values
# e.x. {(0,0): "left_wall", (1,1): "floor"}
CAMERA_VIEW = {}
# how far offset the camera is from the CURRENT_MAP
# used to determine where things are at in the camera view vs. the MAP
CAMERA_OFFSET_X = 0
CAMERA_OFFSET_Y = 0
# list of sprite objects, one for each entity
ENTITY_SPRITES = []
# Dictionary with touple keys that map to lists of entity objects.
# Each one has the index of the sprite in the ENTITY_SPRITES list
# and the tile type string
ENTITY_SPRITES_DICT = {}
# list of entities that need to be on the screen currently based on the camera view
NEED_TO_DRAW_ENTITIES = []
# hold the location of the player in tile coordinates
PLAYER_LOC = (0,0)
# return from CURRENT_MAP the tile name of the tile of the given coords
# return from TILES dict the tile object with stats and behavior for the tile at the given coords.
# check the can_walk property of the tile at the given coordinates
# behavior function that allows the player to push the entity
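# NOTE: the implementations of the four helpers described above were lost in
# extraction; the following is a minimal reconstruction based on how they are
# used in this file (names and off-map handling are assumptions).
def get_tile(coords):
    # tiles outside the map fall back to "" (non-walkable filler)
    return CURRENT_MAP.get((coords[0], coords[1]), "")
def get_tile_obj(coords):
    return TILES[get_tile(coords)]
def is_tile_moveable(tile_coords):
    return TILES[get_tile(tile_coords)]['can_walk']
def allow_push(to_coords, from_coords, entity_obj):
    # direction the entity is being pushed, one tile at a time
    push_x = to_coords[0] - from_coords[0]
    push_y = to_coords[1] - from_coords[1]
    push_to_coords = (to_coords[0] + push_x, to_coords[1] + push_y)
    # only push onto a walkable tile that holds no other entity
    if not is_tile_moveable(push_to_coords) or push_to_coords in ENTITY_SPRITES_DICT:
        return False
    # move the entity within the lookup dictionary (the caller iterates this
    # list; with a single entity per tile, as in the bundled map, this is safe)
    ENTITY_SPRITES_DICT[to_coords].remove(entity_obj)
    if not ENTITY_SPRITES_DICT[to_coords]:
        del ENTITY_SPRITES_DICT[to_coords]
    ENTITY_SPRITES_DICT.setdefault(push_to_coords, []).append(entity_obj)
    return True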
# main dictionary that maps tile type strings to objects.
# each one stores the sprite_sheet index and any necessary
# behavioral stats like can_walk or before_move
TILES = {
# empty strings default to floor and no walk.
"": {
"sprite_index": 7,
"can_walk": False
},
"floor": {
"sprite_index": 7,
"can_walk": True
},
"top_wall": {
"sprite_index": 4,
"can_walk": False
},
"top_right_wall": {
"sprite_index": 5,
"can_walk": False
},
"top_left_wall": {
"sprite_index": 3,
"can_walk": False
},
"bottom_right_wall": {
"sprite_index": 11,
"can_walk": False
},
"bottom_left_wall": {
"sprite_index": 9,
"can_walk": False
},
"right_wall": {
"sprite_index": 8,
"can_walk": False
},
"left_wall": {
"sprite_index": 6,
"can_walk": False
},
"bottom_wall": {
"sprite_index": 10,
"can_walk": False
},
"robot": {
"sprite_index": 1,
"can_walk": True,
"entity": True,
"before_move": allow_push
},
"heart": {
"sprite_index": 2,
"can_walk": True,
"entity": True,
},
"player": {
"sprite_index": 0,
"entity": True,
}
}
# Badger object for easy button handling
badger = PyBadger()
# display object variable
display = board.DISPLAY
# Load the sprite sheet (bitmap)
sprite_sheet, palette = adafruit_imageload.load("/castle_sprite_sheet.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
# make bright pink be transparent so entities can be drawn on top of map tiles
palette.make_transparent(5)
# Create the castle TileGrid
castle = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width = 10,
height = 8,
tile_width = 16,
tile_height = 16)
# Create a Group to hold the sprites and add it
sprite_group = displayio.Group(max_size=48)
# Create a Group to hold the castle and add it
castle_group = displayio.Group()
castle_group.append(castle)
# Create a Group to hold the sprite and castle
group = displayio.Group()
# Add the sprite and castle to the group
group.append(castle_group)
group.append(sprite_group)
# Open and read raw string from the map csv file
f = open("map.csv", 'r')
map_csv_str = f.read()
f.close()
# split the raw string into lines
map_csv_lines = map_csv_str.replace("\r", "").split("\n")
# set the WIDTH and HEIGHT variables.
# this assumes the map is rectangular.
MAP_HEIGHT = len(map_csv_lines)
MAP_WIDTH = len(map_csv_lines[0].split(","))
#print(TILES.keys())
#print(map_csv_lines)
# loop over each line storing index in y variable
for y, line in enumerate(map_csv_lines):
# ignore empty line
if line != "":
# loop over each tile type separated by commas, storing index in x variable
for x, tile_name in enumerate(line.split(",")):
print("%s '%s'" % (len(tile_name), str(tile_name)))
# if the tile exists in our main dictionary
if tile_name in TILES.keys():
# if the tile is an entity
if 'entity' in TILES[tile_name].keys() and TILES[tile_name]['entity']:
# set the map tiles to floor
ORIGINAL_MAP[x,y] = "floor"
CURRENT_MAP[x,y] = "floor"
# if it's the player
if tile_name == "player":
# Create the sprite TileGrid
sprite = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width = 1,
height = 1,
tile_width = 16,
tile_height = 16,
default_tile = TILES[tile_name]['sprite_index'])
# set the position of sprite on screen
sprite.x = x*16
sprite.y = y*16
# set position in x,y tile coords for reference later
PLAYER_LOC = (x,y)
# add sprite to the group
sprite_group.append(sprite)
else: # not the player
                    # Create the sprite TileGrid
                    entity_sprite = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
                                                       width = 1,
                                                       height = 1,
                                                       tile_width = 16,
                                                       tile_height = 16,
                                                       default_tile = TILES[tile_name]['sprite_index'])
                    # set the position of sprite on screen
                    # default to offscreen
                    entity_sprite.x = -16
                    entity_sprite.y = -16
                    # add the sprite object to ENTITY_SPRITES list
                    ENTITY_SPRITES.append(entity_sprite)
#print("setting entity_sprites_dict[%s,%s]" % (x,y))
# create an entity obj
entity_obj = {
"entity_sprite_index": len(ENTITY_SPRITES) - 1,
"map_tile_name": tile_name
}
# if there are no entities at this location yet
if (x,y) not in ENTITY_SPRITES_DICT:
# create a list and add it to the dictionary at the x,y location
ENTITY_SPRITES_DICT[x, y] = [entity_obj]
else:
# append the entity to the existing list in the dictionary
ENTITY_SPRITES_DICT[x, y].append(entity_obj)
else: # tile is not entity
# set the tile_name into MAP dictionaries
ORIGINAL_MAP[x, y] = tile_name
CURRENT_MAP[x, y] = tile_name
else: # tile type wasn't found in dict
print("tile: %s not found in TILES dict" % tile_name)
# add all entity sprites to the group
for entity in ENTITY_SPRITES:
sprite_group.append(entity)
# Add the Group to the Display
display.show(group)
# variables to store previous value of button state
prev_up = False
prev_down = False
prev_left = False
prev_right = False
# helper function returns true if player is allowed to move given direction
# based on can_walk property of the tiles next to the player
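# Sketch reconstruction of the stripped helper described above; it assumes the
# check is purely the can_walk flag of the destination tile.
def can_player_move(direction):
    if direction == UP:
        check_coords = (PLAYER_LOC[0], PLAYER_LOC[1] - 1)
    elif direction == DOWN:
        check_coords = (PLAYER_LOC[0], PLAYER_LOC[1] + 1)
    elif direction == RIGHT:
        check_coords = (PLAYER_LOC[0] + 1, PLAYER_LOC[1])
    else:  # LEFT
        check_coords = (PLAYER_LOC[0] - 1, PLAYER_LOC[1])
    return is_tile_moveable(check_coords)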
# set the appropriate tiles into the CAMERA_VIEW dictionary
# based on given starting coords and size
# draw the current CAMERA_VIEW dictionary and the ENTITY_SPRITES_DICT
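# Sketch reconstructions of the two stripped functions described above; the
# signatures come from the main loop below, the clipping details are assumptions.
def set_camera_view(startx, starty, width, height):
    global CAMERA_OFFSET_X, CAMERA_OFFSET_Y
    CAMERA_OFFSET_X = startx
    CAMERA_OFFSET_Y = starty
    for y in range(starty, starty + height):
        for x in range(startx, startx + width):
            CAMERA_VIEW[x - startx, y - starty] = CURRENT_MAP.get((x, y), "")
def draw_camera_view():
    # map tiles first
    for (x, y), tile_name in CAMERA_VIEW.items():
        castle[x, y] = TILES[tile_name]['sprite_index']
    # park every entity sprite offscreen, then place the visible ones
    for entity_sprite in ENTITY_SPRITES:
        entity_sprite.x = -16
        entity_sprite.y = -16
    for (x, y), entity_list in ENTITY_SPRITES_DICT.items():
        screen_x = x - CAMERA_OFFSET_X
        screen_y = y - CAMERA_OFFSET_Y
        if 0 <= screen_x < SCREEN_WIDTH_TILES and 0 <= screen_y < SCREEN_HEIGHT_TILES:
            for entity_obj in entity_list:
                entity_sprite = ENTITY_SPRITES[entity_obj["entity_sprite_index"]]
                entity_sprite.x = screen_x * 16
                entity_sprite.y = screen_y * 16
    # finally position the player relative to the camera
    sprite.x = (PLAYER_LOC[0] - CAMERA_OFFSET_X) * 16
    sprite.y = (PLAYER_LOC[1] - CAMERA_OFFSET_Y) * 16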
# variable to store timestamp of last drawn frame
last_update_time = 0
# variables to store movement offset values
x_offset = 0
y_offset = 0
# main loop
while True:
# auto dim the screen
badger.auto_dim_display(delay=10)
# set the current button values into variables
cur_up = badger.button.up
cur_down = badger.button.down
cur_right = badger.button.right
cur_left = badger.button.left
# check for up button press / release
if not cur_up and prev_up:
if can_player_move(UP):
x_offset = 0
y_offset = - 1
# check for down button press / release
if not cur_down and prev_down:
if can_player_move(DOWN):
x_offset = 0
y_offset = 1
# check for right button press / release
if not cur_right and prev_right:
if can_player_move(RIGHT):
x_offset = 1
y_offset = 0
# check for left button press / release
if not cur_left and prev_left:
if can_player_move(LEFT):
print("can_move left")
x_offset = -1
y_offset = 0
# if any offset is not zero then we need to process player movement
if x_offset != 0 or y_offset != 0:
# variable to store if player is allowed to move
can_move = False
# coordinates the player is moving to
moving_to_coords = (PLAYER_LOC[0] + x_offset, PLAYER_LOC[1] + y_offset)
# tile name of the spot player is moving to
moving_to_tile_name = CURRENT_MAP[moving_to_coords[0], moving_to_coords[1]]
# if there are entity(s) at spot the player is moving to
if moving_to_coords in ENTITY_SPRITES_DICT:
print("found entity(s) where we are moving to")
# loop over all entities at the location player is moving to
for entity_obj in ENTITY_SPRITES_DICT[moving_to_coords]:
print("checking entity %s" % entity_obj["map_tile_name"])
# if the entity has a before_move behavior function
if "before_move" in TILES[entity_obj["map_tile_name"]].keys():
print("calling before_move %s, %s, %s" % (moving_to_coords,PLAYER_LOC,entity_obj))
# call the before_move behavior function act upon it's result
if TILES[entity_obj["map_tile_name"]]['before_move'](moving_to_coords,PLAYER_LOC,entity_obj):
# all the movement if it returned true
can_move = True
else:
# break and don't allow movement if it returned false
                        break
else: # entity does not have a before_move function
# allow movement
can_move = True
if can_move:
# set the player loc variable to the new coords
PLAYER_LOC = moving_to_coords
else: # no entities at the location player is moving to
# set player loc variable to new coords
PLAYER_LOC = moving_to_coords
# reset movement offset variables
y_offset = 0
x_offset = 0
# set previos button values for next iteration
prev_up = cur_up
prev_down = cur_down
prev_right = cur_right
prev_left = cur_left
# current time
now = time.monotonic()
# if it has been long enough based on FPS delay
if now > last_update_time + FPS_DELAY:
# if player is past x tile coordinate 4
if PLAYER_LOC[0] > 4:
# set camera to player location offset by 4
set_camera_view(int(PLAYER_LOC[0]-4),0,10,8)
else:
# set camera to 0,0
set_camera_view(0,0,10,8)
# draw the camera
draw_camera_view()
# store the last update time
last_update_time = now
| 37.038095 | 123 | 0.581846 |
4866676df99cb56da6528e0c45d5fc2aef3aec92 | 13,162 | py | Python | tools/harness/tests/compiler-rt_builtins.py | Harvard-PRINCESS/barrelfish-trunk-mirror | 1c98195d123046d985bb3952a591297c2ef6fdf9 | [
"MIT"
] | 4 | 2017-09-16T01:23:48.000Z | 2017-09-22T08:02:47.000Z | tools/harness/tests/compiler-rt_builtins.py | Harvard-PRINCESS/barrelfish-trunk-mirror | 1c98195d123046d985bb3952a591297c2ef6fdf9 | [
"MIT"
] | null | null | null | tools/harness/tests/compiler-rt_builtins.py | Harvard-PRINCESS/barrelfish-trunk-mirror | 1c98195d123046d985bb3952a591297c2ef6fdf9 | [
"MIT"
] | 1 | 2020-03-06T15:48:10.000Z | 2020-03-06T15:48:10.000Z | ##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import tests
from common import TestCommon
from results import PassFailMultiResult
# lists of tests to run for compiler-rt
vector_fp_tests = [
"compiler-rt/test/builtins/Unit/adddf3vfp_test",
"compiler-rt/test/builtins/Unit/addsf3vfp_test",
"compiler-rt/test/builtins/Unit/divdf3vfp_test",
"compiler-rt/test/builtins/Unit/divsf3vfp_test",
"compiler-rt/test/builtins/Unit/eqdf2vfp_test",
"compiler-rt/test/builtins/Unit/eqsf2vfp_test",
"compiler-rt/test/builtins/Unit/extebdsfdf2vfp_test",
"compiler-rt/test/builtins/Unit/fixdfsivfp_test",
"compiler-rt/test/builtins/Unit/fixsfsivfp_test",
"compiler-rt/test/builtins/Unit/fixunsdfsivfp_test",
"compiler-rt/test/builtins/Unit/fixunssfsivfp_test",
"compiler-rt/test/builtins/Unit/floatsidfvfp_test",
"compiler-rt/test/builtins/Unit/floatsisfvfp_test",
"compiler-rt/test/builtins/Unit/floatunssidfvfp_test",
"compiler-rt/test/builtins/Unit/floatunssisfvfp_test",
"compiler-rt/test/builtins/Unit/gedf2vfp_test",
"compiler-rt/test/builtins/Unit/gesf2vfp_test",
"compiler-rt/test/builtins/Unit/gtdf2vfp_test",
"compiler-rt/test/builtins/Unit/gtsf2vfp_test",
"compiler-rt/test/builtins/Unit/ledf2vfp_test",
"compiler-rt/test/builtins/Unit/lesf2vfp_test",
"compiler-rt/test/builtins/Unit/ltdf2vfp_test",
"compiler-rt/test/builtins/Unit/ltsf2vfp_test",
"compiler-rt/test/builtins/Unit/muldf3vfp_test",
"compiler-rt/test/builtins/Unit/mulsf3vfp_test",
"compiler-rt/test/builtins/Unit/nedf2vfp_test",
"compiler-rt/test/builtins/Unit/negdf2vfp_test",
"compiler-rt/test/builtins/Unit/negsf2vfp_test",
"compiler-rt/test/builtins/Unit/nesf2vfp_test",
"compiler-rt/test/builtins/Unit/subdf3vfp_test",
"compiler-rt/test/builtins/Unit/subsf3vfp_test",
"compiler-rt/test/builtins/Unit/truncdfsf2vfp_test",
"compiler-rt/test/builtins/Unit/unorddf2vfp_test",
"compiler-rt/test/builtins/Unit/unordsf2vfp_test",
]
fp_tests = [
"compiler-rt/test/builtins/Unit/absvdi2_test",
"compiler-rt/test/builtins/Unit/absvsi2_test",
"compiler-rt/test/builtins/Unit/absvti2_test",
"compiler-rt/test/builtins/Unit/addtf3_test",
"compiler-rt/test/builtins/Unit/addvdi3_test",
"compiler-rt/test/builtins/Unit/addvsi3_test",
"compiler-rt/test/builtins/Unit/addvti3_test",
"compiler-rt/test/builtins/Unit/ashldi3_test",
"compiler-rt/test/builtins/Unit/ashlti3_test",
"compiler-rt/test/builtins/Unit/ashrdi3_test",
"compiler-rt/test/builtins/Unit/ashrti3_test",
"compiler-rt/test/builtins/Unit/bswapdi2_test",
"compiler-rt/test/builtins/Unit/bswapsi2_test",
# "compiler-rt/test/builtins/Unit/clear_cache_test",
"compiler-rt/test/builtins/Unit/clzdi2_test",
"compiler-rt/test/builtins/Unit/clzsi2_test",
"compiler-rt/test/builtins/Unit/clzti2_test",
"compiler-rt/test/builtins/Unit/cmpdi2_test",
"compiler-rt/test/builtins/Unit/cmpti2_test",
"compiler-rt/test/builtins/Unit/comparedf2_test",
"compiler-rt/test/builtins/Unit/comparesf2_test",
"compiler-rt/test/builtins/Unit/ctzdi2_test",
"compiler-rt/test/builtins/Unit/ctzsi2_test",
"compiler-rt/test/builtins/Unit/ctzti2_test",
"compiler-rt/test/builtins/Unit/divdc3_test",
"compiler-rt/test/builtins/Unit/divdi3_test",
"compiler-rt/test/builtins/Unit/divmodsi4_test",
"compiler-rt/test/builtins/Unit/divsc3_test",
"compiler-rt/test/builtins/Unit/divsi3_test",
# "compiler-rt/test/builtins/Unit/divtc3_test",
"compiler-rt/test/builtins/Unit/divtf3_test",
"compiler-rt/test/builtins/Unit/divti3_test",
"compiler-rt/test/builtins/Unit/divxc3_test",
# "compiler-rt/test/builtins/Unit/enable_execute_stack_test",
"compiler-rt/test/builtins/Unit/eqtf2_test",
"compiler-rt/test/builtins/Unit/extenddftf2_test",
# "compiler-rt/test/builtins/Unit/extendhfsf2_test",
"compiler-rt/test/builtins/Unit/extendsftf2_test",
"compiler-rt/test/builtins/Unit/ffsdi2_test",
"compiler-rt/test/builtins/Unit/ffsti2_test",
"compiler-rt/test/builtins/Unit/fixdfdi_test",
"compiler-rt/test/builtins/Unit/fixdfti_test",
"compiler-rt/test/builtins/Unit/fixsfdi_test",
"compiler-rt/test/builtins/Unit/fixsfti_test",
"compiler-rt/test/builtins/Unit/fixtfdi_test",
"compiler-rt/test/builtins/Unit/fixtfsi_test",
"compiler-rt/test/builtins/Unit/fixtfti_test",
# this errors on 0X1P+64
#"compiler-rt/test/builtins/Unit/fixunsdfdi_test",
"compiler-rt/test/builtins/Unit/fixunsdfsi_test",
"compiler-rt/test/builtins/Unit/fixunsdfti_test",
# this errors on 0X1P+64
#"compiler-rt/test/builtins/Unit/fixunssfdi_test",
"compiler-rt/test/builtins/Unit/fixunssfsi_test",
"compiler-rt/test/builtins/Unit/fixunssfti_test",
"compiler-rt/test/builtins/Unit/fixunstfdi_test",
"compiler-rt/test/builtins/Unit/fixunstfsi_test",
"compiler-rt/test/builtins/Unit/fixunstfti_test",
"compiler-rt/test/builtins/Unit/fixunsxfdi_test",
"compiler-rt/test/builtins/Unit/fixunsxfsi_test",
"compiler-rt/test/builtins/Unit/fixunsxfti_test",
"compiler-rt/test/builtins/Unit/fixxfdi_test",
"compiler-rt/test/builtins/Unit/fixxfti_test",
"compiler-rt/test/builtins/Unit/floatdidf_test",
"compiler-rt/test/builtins/Unit/floatdisf_test",
"compiler-rt/test/builtins/Unit/floatditf_test",
"compiler-rt/test/builtins/Unit/floatdixf_test",
"compiler-rt/test/builtins/Unit/floatsitf_test",
"compiler-rt/test/builtins/Unit/floattidf_test",
"compiler-rt/test/builtins/Unit/floattisf_test",
"compiler-rt/test/builtins/Unit/floattixf_test",
"compiler-rt/test/builtins/Unit/floatundidf_test",
"compiler-rt/test/builtins/Unit/floatundisf_test",
"compiler-rt/test/builtins/Unit/floatunditf_test",
"compiler-rt/test/builtins/Unit/floatundixf_test",
"compiler-rt/test/builtins/Unit/floatunsitf_test",
"compiler-rt/test/builtins/Unit/floatuntidf_test",
"compiler-rt/test/builtins/Unit/floatuntisf_test",
"compiler-rt/test/builtins/Unit/floatuntixf_test",
# "compiler-rt/test/builtins/Unit/gcc_personality_test",
"compiler-rt/test/builtins/Unit/getf2_test",
"compiler-rt/test/builtins/Unit/gttf2_test",
"compiler-rt/test/builtins/Unit/letf2_test",
"compiler-rt/test/builtins/Unit/lshrdi3_test",
"compiler-rt/test/builtins/Unit/lshrti3_test",
"compiler-rt/test/builtins/Unit/lttf2_test",
"compiler-rt/test/builtins/Unit/moddi3_test",
"compiler-rt/test/builtins/Unit/modsi3_test",
"compiler-rt/test/builtins/Unit/modti3_test",
"compiler-rt/test/builtins/Unit/muldc3_test",
"compiler-rt/test/builtins/Unit/muldi3_test",
"compiler-rt/test/builtins/Unit/mulodi4_test",
"compiler-rt/test/builtins/Unit/mulosi4_test",
"compiler-rt/test/builtins/Unit/muloti4_test",
"compiler-rt/test/builtins/Unit/mulsc3_test",
"compiler-rt/test/builtins/Unit/multc3_test",
"compiler-rt/test/builtins/Unit/multf3_test",
"compiler-rt/test/builtins/Unit/multi3_test",
"compiler-rt/test/builtins/Unit/mulvdi3_test",
"compiler-rt/test/builtins/Unit/mulvsi3_test",
"compiler-rt/test/builtins/Unit/mulvti3_test",
"compiler-rt/test/builtins/Unit/mulxc3_test",
"compiler-rt/test/builtins/Unit/negdi2_test",
"compiler-rt/test/builtins/Unit/negti2_test",
"compiler-rt/test/builtins/Unit/negvdi2_test",
"compiler-rt/test/builtins/Unit/negvsi2_test",
"compiler-rt/test/builtins/Unit/negvti2_test",
"compiler-rt/test/builtins/Unit/netf2_test",
"compiler-rt/test/builtins/Unit/paritydi2_test",
"compiler-rt/test/builtins/Unit/paritysi2_test",
"compiler-rt/test/builtins/Unit/parityti2_test",
"compiler-rt/test/builtins/Unit/popcountdi2_test",
"compiler-rt/test/builtins/Unit/popcountsi2_test",
"compiler-rt/test/builtins/Unit/popcountti2_test",
"compiler-rt/test/builtins/Unit/powidf2_test",
"compiler-rt/test/builtins/Unit/powisf2_test",
"compiler-rt/test/builtins/Unit/powitf2_test",
"compiler-rt/test/builtins/Unit/powixf2_test",
"compiler-rt/test/builtins/Unit/subtf3_test",
"compiler-rt/test/builtins/Unit/subvdi3_test",
"compiler-rt/test/builtins/Unit/subvsi3_test",
"compiler-rt/test/builtins/Unit/subvti3_test",
# "compiler-rt/test/builtins/Unit/trampoline_setup_test",
# "compiler-rt/test/builtins/Unit/truncdfhf2_test",
"compiler-rt/test/builtins/Unit/truncdfsf2_test",
# "compiler-rt/test/builtins/Unit/truncsfhf2_test",
"compiler-rt/test/builtins/Unit/trunctfdf2_test",
"compiler-rt/test/builtins/Unit/trunctfsf2_test",
"compiler-rt/test/builtins/Unit/ucmpdi2_test",
"compiler-rt/test/builtins/Unit/ucmpti2_test",
"compiler-rt/test/builtins/Unit/udivdi3_test",
"compiler-rt/test/builtins/Unit/udivmoddi4_test",
"compiler-rt/test/builtins/Unit/udivmodsi4_test",
"compiler-rt/test/builtins/Unit/udivmodti4_test",
"compiler-rt/test/builtins/Unit/udivsi3_test",
"compiler-rt/test/builtins/Unit/udivti3_test",
"compiler-rt/test/builtins/Unit/umoddi3_test",
"compiler-rt/test/builtins/Unit/umodsi3_test",
"compiler-rt/test/builtins/Unit/umodti3_test",
"compiler-rt/test/builtins/Unit/unordtf2_test",
]
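# NOTE: the CompilerRTBuiltinsAbstract base class referenced below was lost in
# extraction; this is a minimal hedged sketch of what the generated test
# classes rely on (the finish string and error matching are assumptions).
class CompilerRTBuiltinsAbstract(TestCommon):
    def get_finish_string(self):
        # the trailing usleeptest module prints this once the batch completes
        return "usleeptest_done"
    def process_data(self, testdir, rawiter):
        # a run passes when no test reported an error in its output
        errors = [line for line in rawiter if "error" in line.lower()]
        return PassFailMultiResult(self.name, errors)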
def get_modules_tpl(ts, self, build, machine):
'''Function template for get_modules() for each compiler-rt test case'''
modules = super(CompilerRTBuiltinsAbstract, self).get_modules(build, machine)
for m in ts:
if machine.name.startswith("panda") and \
(m.endswith("floatdisf_test") or m.endswith("floatdidf_test")):
# Skip failing test on pandaboard
continue
modules.add_module(m)
modules.add_module("usleeptest", [ "5" ])
return modules
def chunker(seq, size):
'''Helper function: this takes a sequence `seq` and splits it up into
`size`-sized chunks, except for the last chunk which is just the <= size
long remainder of the sequence'''
return (seq[pos:pos+size] for pos in xrange(0, len(seq), size))
# generate test-cases with <=CHUNK_SIZE compiler-rt tests each
CHUNK_SIZE=35
# array just to keep the class objects somewhere
compiler_rt_tests_classes = []
for i, ts in enumerate(chunker(fp_tests, CHUNK_SIZE)):
# append new class to our array
compiler_rt_tests_classes.append(
# this is essentially the decorator @tests.add_test
tests.add_test(
# type is the (built-in) base-class for python classes, here we
# construct classes by calling its constructor
# signature of type constructor:
# type(classname, baseclass tuple, dict with methods/attributes)
type('CompilerRTBuiltins%d' % (i+1),
(CompilerRTBuiltinsAbstract,),
{ 'name': 'compiler-rt-fp%d' % (i+1),
# partially bind the get_modules() template to select the
# right set of tests. Note the ts=ts in the lambda
# arguments, this prevents python's default late-binding
# for closure arguments.
'get_modules':
lambda s, b, m, ts=ts: get_modules_tpl(ts, s, b, m)})))
| 50.429119 | 81 | 0.677557 |
4866eb2646559988f7b4c029e556146f3b4e3f4a | 4,090 | py | Python | api_base.py | mpalazzolo/API-Base | b0a7c3ba9bb5add59a43d3dec36354318478e226 | [
"MIT"
] | null | null | null | api_base.py | mpalazzolo/API-Base | b0a7c3ba9bb5add59a43d3dec36354318478e226 | [
"MIT"
] | null | null | null | api_base.py | mpalazzolo/API-Base | b0a7c3ba9bb5add59a43d3dec36354318478e226 | [
"MIT"
] | null | null | null | import requests
from requests.exceptions import HTTPError
import time
| 30.75188 | 114 | 0.526406 |
486787d9c3efbc67538ff3c74ea68506a0623fb8 | 1,143 | py | Python | plasmasm/arch/X64.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
] | 1 | 2021-02-28T21:31:18.000Z | 2021-02-28T21:31:18.000Z | plasmasm/arch/X64.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
] | null | null | null | plasmasm/arch/X64.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2011-2020 Airbus, Louis.Granboulan@airbus.com
containers = { 'ELF': 'X86_64', 'MACHO': 'X86_64' }
try:
from plasmasm.python.compatibility import set
except ImportError:
pass
from plasmasm.arch.I386 import opcodes as opcodes_x86
x64_att_opcodes = set([
'jmpq', 'callq', 'retq', 'popq', 'pushq',
'movq', 'cmpq', 'testq', 'leaq', 'btq', 'bswapq',
'notq', 'orq', 'xorq', 'andq', 'bsfq', 'bslq', 'bsrq',
'rolq', 'rorq', 'sarq', 'salq', 'shrq', 'shlq', 'sbbq',
'negq', 'decq', 'incq', 'adcq', 'addq', 'subq',
'mulq', 'divq', 'imulq', 'idivq', 'shldq', 'shrdq',
'cltq', 'cqto', 'movabsq', 'movsbq', 'movslq', 'movswq',
'insq', 'movsq', 'outsq', 'lodsq', 'stosq', 'cmpsq', 'scasq',
'pextrq', 'pinsrq',
'cvtsi2sdq', 'cvtsi2ssq', 'cvttsd2siq', 'cvttss2siq',
])
suffix = [ 'a', 'ae', 'b', 'be', 'c', 'e', 'g', 'ge', 'l', 'le', 'nb', 'nc', 'ne', 'np', 'ns', 'nz', 'p', 's', ]
x64_att_opcodes.update(set([ 'cmov'+s+'q' for s in suffix ]))
del suffix
x64_att_opcodes.update(opcodes_x86['I386-att'])
opcodes = {
'X64-att': x64_att_opcodes,
}
| 42.333333 | 112 | 0.551181 |
4868d79bbf2ff6bbae4f4cb4d9abf9fab912436f | 724 | py | Python | ansiblelater/rules/CheckScmInSrc.py | ankitdobhal/ansible-later | a107cd2821e310fd459a7f9b802d5794f2b96f35 | [
"MIT"
] | 38 | 2020-10-14T09:40:58.000Z | 2022-03-17T10:45:22.000Z | ansiblelater/rules/CheckScmInSrc.py | ankitdobhal/ansible-later | a107cd2821e310fd459a7f9b802d5794f2b96f35 | [
"MIT"
] | 188 | 2020-09-29T09:43:54.000Z | 2022-03-04T08:45:42.000Z | ansiblelater/rules/CheckScmInSrc.py | ankitdobhal/ansible-later | a107cd2821e310fd459a7f9b802d5794f2b96f35 | [
"MIT"
] | 4 | 2021-02-10T03:35:19.000Z | 2022-01-17T15:54:39.000Z | from ansible.parsing.yaml.objects import AnsibleMapping
from ansiblelater.standard import StandardBase
| 30.166667 | 82 | 0.627072 |
486936b454230e71425f5f21ffabf8c3b40a119e | 595 | py | Python | DMOJ/CCC/slot machine.py | eddiegz/Personal-C | f7869826216e5c665f8f646502141f0dc680e545 | [
"MIT"
] | 3 | 2021-05-15T08:18:09.000Z | 2021-05-17T04:41:57.000Z | DMOJ/CCC/slot machine.py | eddiegz/Personal-C | f7869826216e5c665f8f646502141f0dc680e545 | [
"MIT"
] | null | null | null | DMOJ/CCC/slot machine.py | eddiegz/Personal-C | f7869826216e5c665f8f646502141f0dc680e545 | [
"MIT"
] | null | null | null | quarter=int(input())
p1=int(input())
p2=int(input())
p3=int(input())
time=0
while quarter>0:
    if quarter == 0:
        continue
    p1+=1
    quarter-=1
    time+=1
    if p1==35:
        quarter+=30
        p1=0
    if quarter == 0:
        continue
    time+=1
    p2+=1
    quarter-=1
    if p2==100:
        p2=0
        quarter+=60
    if quarter == 0:
        continue
    p3+=1
    time+=1
    quarter-=1
    if p3==10:
        quarter+=9
        p3=0
print(f'Martha plays {time} times before going broke.')
| 16.081081 | 56 | 0.438655 |
4869a5e537b1616b1387d41f76532922834d0c3e | 327 | py | Python | project/app/migrations/0003_auto_20210125_0924.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
] | null | null | null | project/app/migrations/0003_auto_20210125_0924.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
] | null | null | null | project/app/migrations/0003_auto_20210125_0924.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-25 16:24
from django.db import migrations
| 18.166667 | 47 | 0.590214 |
486abe98f15277d75707a2bda0dddf48de43bab7 | 28,203 | py | Python | cinder/volume/drivers/emc/emc_vmax_provision.py | kazum/cinder | 370b8e60c3166b289c8da924a227dd1bc63f8b8a | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/emc/emc_vmax_provision.py | kazum/cinder | 370b8e60c3166b289c8da924a227dd1bc63f8b8a | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/emc/emc_vmax_provision.py | kazum/cinder | 370b8e60c3166b289c8da924a227dd1bc63f8b8a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
| 42.731818 | 79 | 0.587987 |
486b39af5811634dc06771353577ccba06dfa1ca | 9,840 | py | Python | tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
] | 1,351 | 2015-01-03T08:25:40.000Z | 2022-03-31T09:14:08.000Z | tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
] | 7,009 | 2015-01-14T16:22:45.000Z | 2022-03-31T17:18:04.000Z | tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
] | 901 | 2015-01-11T19:21:08.000Z | 2022-03-18T18:21:33.000Z | '''
Test redirection behavior to invalid addresses
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import re
import os
import socket
import sys
Test.Summary = '''
Test redirection behavior to invalid addresses
'''
Test.ContinueOnFail = False
Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir, 'tcp_client.py'))
dns = Test.MakeDNServer('dns')
# This record is used in each test case to get the initial redirect response from the origin that we will handle.
dnsRecords = {'iwillredirect.test': ['127.0.0.1']}
host = socket.gethostname()
ipv4addrs = set()
try:
ipv4addrs = set([ip for
(family, _, _, _, (ip, *_)) in
socket.getaddrinfo(host, port=None) if
socket.AF_INET == family])
except socket.gaierror:
pass
ipv6addrs = set()
try:
ipv6addrs = set(["[{0}]".format(ip.split('%')[0]) for
(family, _, _, _, (ip, *_)) in
socket.getaddrinfo(host, port=None) if
socket.AF_INET6 == family and 'fe80' != ip[0:4]]) # Skip link-local addresses.
except socket.gaierror:
pass
origin = Test.MakeOriginServer('origin', ip='0.0.0.0')
ArbitraryTimestamp = '12345678'
# This is for cases when the content is actually fetched from the invalid address.
request_header = {
'headers': ('GET / HTTP/1.1\r\n'
'Host: *\r\n\r\n'),
'timestamp': ArbitraryTimestamp,
'body': ''}
response_header = {
'headers': ('HTTP/1.1 204 No Content\r\n'
'Connection: close\r\n\r\n'),
'timestamp': ArbitraryTimestamp,
'body': ''}
origin.addResponse('sessionfile.log', request_header, response_header)
# Map scenarios to trafficserver processes.
trafficservers = {}
data_dirname = 'generated_test_data'
data_path = os.path.join(Test.TestDirectory, data_dirname)
os.makedirs(data_path, exist_ok=True)
def normalizeForAutest(value):
'''
autest uses "test run" names to build file and directory names, so we must transform them in case there are incompatible or
annoying characters.
This means we can also use them in URLs.
'''
if not value:
return None
return re.sub(r'[^a-z0-9-]', '_', value, flags=re.I)
def makeTestCase(redirectTarget, expectedAction, scenario):
'''
Helper method that creates a "meta-test" from which autest generates a test case.
:param redirectTarget: The target address of a redirect from origin to be handled.
:param scenario: Defines the ACL to configure and the addresses to test.
'''
config = ','.join(':'.join(t) for t in sorted((addr.name.lower(), action.name.lower()) for (addr, action) in scenario.items()))
normRedirectTarget = normalizeForAutest(redirectTarget)
normConfig = normalizeForAutest(config)
tr = Test.AddTestRun('With_Config_{0}_Redirect_to_{1}'.format(normConfig, normRedirectTarget))
if trafficservers:
tr.StillRunningAfter = origin
tr.StillRunningAfter = dns
else:
tr.Processes.Default.StartBefore(origin)
tr.Processes.Default.StartBefore(dns)
if config not in trafficservers:
trafficservers[config] = Test.MakeATSProcess('ts_{0}'.format(normConfig), enable_cache=False)
trafficservers[config].Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'http|dns|redirect',
'proxy.config.http.number_of_redirections': 1,
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL',
'proxy.config.url_remap.remap_required': 0,
'proxy.config.http.redirect.actions': config,
'proxy.config.http.connect_attempts_timeout': 5,
'proxy.config.http.connect_attempts_max_retries': 0,
})
tr.Processes.Default.StartBefore(trafficservers[config])
else:
tr.StillRunningAfter = trafficservers[config]
testDomain = 'testdomain{0}.test'.format(normRedirectTarget)
# The micro DNS server can't tell us whether it has a record of the domain already, so we use a dictionary to avoid duplicates.
# We remove any surrounding brackets that are common to IPv6 addresses.
if redirectTarget:
dnsRecords[testDomain] = [redirectTarget.strip('[]')]
# A GET request parameterized on the config and on the target.
request_header = {
'headers': ('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'
'Host: *\r\n\r\n').
format(normConfig, normRedirectTarget),
'timestamp': ArbitraryTimestamp,
'body': ''}
# Returns a redirect to the test domain for the given target & the port number for the TS of the given config.
response_header = {
'headers': ('HTTP/1.1 307 Temporary Redirect\r\n'
'Location: http://{0}:{1}/\r\n'
'Connection: close\r\n\r\n').
format(testDomain, origin.Variables.Port),
'timestamp': ArbitraryTimestamp,
'body': ''}
origin.addResponse('sessionfile.log', request_header, response_header)
# Generate the request data file.
command_path = os.path.join(data_path, tr.Name)
with open(command_path, 'w') as f:
f.write(('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'
'Host: iwillredirect.test:{2}\r\n\r\n').
format(normConfig, normRedirectTarget, origin.Variables.Port))
# Set the command with the appropriate URL.
port = trafficservers[config].Variables.port
dir_path = os.path.join(data_dirname, tr.Name)
tr.Processes.Default.Command = \
(f"bash -o pipefail -c '{sys.executable} tcp_client.py 127.0.0.1 {port} "
f"{dir_path} | head -n 1'")
tr.Processes.Default.ReturnCode = 0
# Generate and set the 'gold file' to check stdout
goldFilePath = os.path.join(data_path, '{0}.gold'.format(tr.Name))
with open(goldFilePath, 'w') as f:
f.write(expectedAction.value['expectedStatusLine'])
tr.Processes.Default.Streams.stdout = goldFilePath
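# The ActionE and AddressE enum definitions were stripped from this copy of
# the file; the sketch below is a reconstruction consistent with how they are
# used in the scenarios and loops that follow. The concrete addresses and
# status lines are assumptions, not the original values.
class ActionE(Enum):
    # 'expectedStatusLine' is matched against the first line of the response.
    Return = {'expectedStatusLine': 'HTTP/1.1 307 Temporary Redirect\r\n'}
    Reject = {'expectedStatusLine': 'HTTP/1.1 403 Forbidden\r\n'}
    Follow = {'expectedStatusLine': 'HTTP/1.1 204 No Content\r\n'}
    Break = {'expectedStatusLine': 'HTTP/1.1 502 Cannot find server.\r\n'}
class AddressE(Enum):
    # Each member holds example addresses of that class to redirect to;
    # Default carries no addresses and only selects a scenario's fallback.
    Private = ['10.0.0.1', '[fc00::4]']
    Loopback = ['127.0.0.1']
    Multicast = ['224.0.1.1', '[ff0e::1]']
    Linklocal = ['169.254.0.1', '[fe80::1]']
    Routable = ['93.184.216.34', '[2606:2800:220:1:248:1893:25c8:1946]']
    Self = list(ipv4addrs | ipv6addrs)
    Default = None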
scenarios = [
{
# Follow to loopback, but alternately reject/return others.
AddressE.Private: ActionE.Reject,
AddressE.Loopback: ActionE.Follow,
AddressE.Multicast: ActionE.Reject,
AddressE.Linklocal: ActionE.Return,
AddressE.Routable: ActionE.Reject,
AddressE.Self: ActionE.Return,
AddressE.Default: ActionE.Reject,
},
{
# Follow to loopback, but alternately reject/return others, flipped from the previous scenario.
AddressE.Private: ActionE.Return,
AddressE.Loopback: ActionE.Follow,
AddressE.Multicast: ActionE.Return,
AddressE.Linklocal: ActionE.Reject,
AddressE.Routable: ActionE.Return,
AddressE.Self: ActionE.Reject,
AddressE.Default: ActionE.Return,
},
{
# Return loopback, but reject everything else.
AddressE.Loopback: ActionE.Return,
AddressE.Default: ActionE.Reject,
},
{
# Reject loopback, but return everything else.
AddressE.Loopback: ActionE.Reject,
AddressE.Default: ActionE.Return,
},
{
# Return everything.
AddressE.Default: ActionE.Return,
},
]
for scenario in scenarios:
for addressClass in AddressE:
if not addressClass.value:
# Default has no particular addresses to test.
continue
for address in addressClass.value:
expectedAction = scenario[addressClass] if addressClass in scenario else scenario[AddressE.Default]
makeTestCase(redirectTarget=address, expectedAction=expectedAction, scenario=scenario)
# Test redirects to names that cannot be resolved.
makeTestCase(redirectTarget=None, expectedAction=ActionE.Break, scenario=scenario)
dns.addRecords(records=dnsRecords)
# Make sure this runs only after local files have been created.
Test.Setup.Copy(data_path)
| 38.893281 | 131 | 0.66626 |
486d212547e00f7831ca70c40d4c968f71b4de71 | 4,575 | py | Python | LynkCoHelper/lynco_regist_wrok.py | 21haoshaonian/LynkCoHelper | b4e5d67583190bf09fe44902499c3a99463b4df5 | [
"MIT"
] | null | null | null | LynkCoHelper/lynco_regist_wrok.py | 21haoshaonian/LynkCoHelper | b4e5d67583190bf09fe44902499c3a99463b4df5 | [
"MIT"
] | null | null | null | LynkCoHelper/lynco_regist_wrok.py | 21haoshaonian/LynkCoHelper | b4e5d67583190bf09fe44902499c3a99463b4df5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import threading
import time
import base64
from lynkco_app_request import lynkco_app_request
from com.uestcit.api.gateway.sdk.auth.aes import aes as AES
from sms_request import sms_request
import json
import sys
import os
import re | 39.782609 | 113 | 0.551257 |
486e93474bb833e6c69bd39f0e367b929c5bddaf | 2,442 | py | Python | acestream/ACEStream/Core/RequestPolicy.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
] | null | null | null | acestream/ACEStream/Core/RequestPolicy.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
] | null | null | null | acestream/ACEStream/Core/RequestPolicy.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
] | 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | #Embedded file name: ACEStream\Core\RequestPolicy.pyo
from ACEStream.Core.simpledefs import *
from ACEStream.Core.exceptions import *
from ACEStream.Core.BitTornado.BT1.MessageID import *
DEBUG = False
MAX_QUERIES_FROM_RANDOM_PEER = 1000
| 30.525 | 99 | 0.700246 |
4871ff697124412845f2fa5d890fac9d6f0735fb | 1,172 | py | Python | appserver.py | XplosiveX/webfortune | bdcde9e4fc703c05a5520db3a4623103aeb77028 | [
"Apache-2.0"
] | null | null | null | appserver.py | XplosiveX/webfortune | bdcde9e4fc703c05a5520db3a4623103aeb77028 | [
"Apache-2.0"
] | null | null | null | appserver.py | XplosiveX/webfortune | bdcde9e4fc703c05a5520db3a4623103aeb77028 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, request, session, redirect, url_for, jsonify, abort
import os
import subprocess
import uuid
app = Flask(__name__)
app.secret_key = str(uuid.uuid4().hex)
intropre = '<pre style="border:black solid 4px; border-radius: 12.5px; background:silver; opacity: 0.65; margin-left:auto; margin-right:auto;height:100%;height:65%;overflow:auto; text-align:center; font-size:16px;">'
| 34.470588 | 216 | 0.705631 |
4872759cf120b5248551d5a5595288fb0852c2a9 | 1,356 | py | Python | auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | [
"MIT"
] | 340 | 2015-06-05T12:32:26.000Z | 2022-03-30T18:41:30.000Z | auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | [
"MIT"
] | 179 | 2015-05-26T00:35:07.000Z | 2022-03-18T17:16:37.000Z | auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | [
"MIT"
] | 151 | 2015-01-27T11:49:01.000Z | 2022-03-03T14:26:09.000Z | import unittest
import mock
from ...management.stats import Stats
| 31.534884 | 86 | 0.640855 |
4874bbd204b2fd95be7d58eace358a2bc329365d | 2,038 | py | Python | JavPy/utils/common.py | generaljun/JavPy | e2b5488631c0979c643a2f86ba4cd8bb1709e2f8 | [
"Apache-2.0"
] | 1 | 2020-07-30T08:48:17.000Z | 2020-07-30T08:48:17.000Z | JavPy/utils/common.py | liqiang0330/JavPy | e2b5488631c0979c643a2f86ba4cd8bb1709e2f8 | [
"Apache-2.0"
] | null | null | null | JavPy/utils/common.py | liqiang0330/JavPy | e2b5488631c0979c643a2f86ba4cd8bb1709e2f8 | [
"Apache-2.0"
] | 2 | 2020-07-30T06:30:23.000Z | 2020-07-30T08:48:19.000Z | import datetime
import functools
import re
from typing import Iterable
version = "0.6"
_class_name_pattern = re.compile(r"\.(.+?)\s")
| 22.898876 | 87 | 0.598135 |
4875786274d1dcdef100393c55e236d7510c92a2 | 10,457 | py | Python | Foundry_Manager_v2.py | MrVauxs/Foundry-Selection-Menu | 13f9164595c3c11fe01e5d44cd35bcc79b6a34df | [
"MIT"
] | 5 | 2020-09-26T10:16:17.000Z | 2022-01-06T14:31:54.000Z | Foundry_Manager_v2.py | MrVauxs/Foundry-Selection-Menu | 13f9164595c3c11fe01e5d44cd35bcc79b6a34df | [
"MIT"
] | null | null | null | Foundry_Manager_v2.py | MrVauxs/Foundry-Selection-Menu | 13f9164595c3c11fe01e5d44cd35bcc79b6a34df | [
"MIT"
] | 1 | 2020-09-07T23:36:17.000Z | 2020-09-07T23:36:17.000Z | import requests
from bottle import route, run, template,ServerAdapter,redirect
import subprocess
from html.parser import HTMLParser
import threading
import time
ssl_cert=None #XYZ - 'fullchain.pem'
ssl_key=None #XYZ - 'privkey.pem'
world_mapping={"URL_Path":["world-folder","Name To Be Shown"], "URL_Path2":["world-folder-2","Name To Be Shown Two: Electric Boogaloo"]} #XYZ - Repeatable until the HTML page doesn't handle it.
foundry_base="http://blank.com" #XYZ
foundry_port=30000 #XYZ
foundry_url=foundry_base+":"+str(foundry_port)
foundry_directory="C:\Program Files\FoundryVTT" #XYZ - The directory has to point to /resources/app
idle_logout=300 #XYZ- Seconds - time to shut down foundry if at login screen and 0 users
##Populate this automatically from module configuration, can probably get pictures etc but who has time?
## A bunch of threading stuff
def get_logged_in_players(timeout=0.1):
    r=requests.get(foundry_url+"/join",timeout=timeout)
    par=AwfulScrape_nPlayers()
    par.feed(r.text)
    return par.nPlayers
def _get_world_url(item):
    return "<p> > <a href='/"+item[0]+"' >" + item[1][1]+"</a> </p>"
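# The AwfulScrape_nPlayers HTMLParser subclass was stripped from this copy; a
# minimal sketch follows, assuming the /join page exposes the active player
# count in an element with id="player-count" (the real Foundry markup may differ).
class AwfulScrape_nPlayers(HTMLParser):
    def __init__(self):
        super().__init__()
        self.nPlayers = 0
        self._in_count = False
    def handle_starttag(self, tag, attrs):
        self._in_count = ("id", "player-count") in attrs
    def handle_data(self, data):
        if self._in_count and data.strip().isdigit():
            self.nPlayers = int(data.strip())
            self._in_count = False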
server=bottleManager()
| 33.516026 | 280 | 0.589557 |
48769d3fe736152c54bf8b09ad3360ea09bd2080 | 1,181 | py | Python | scripts/12865.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
] | null | null | null | scripts/12865.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
] | null | null | null | scripts/12865.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
] | null | null | null |
"""
TAG: 0-1 Knapsack Problem, Dynamic Programming (DP), O(nW)
References:
- https://www.geeksforgeeks.org/0-1-knapsack-problem-dp-10/
weights and values of n items, capacity -> max value
"""
N, W = map(int, input().split())  # number of items, capacity
weights = []
values = []
for i in range(N):
    w, v = map(int, input().split())
    weights.append(w)
    values.append(v)
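# The O(nW) DP implementation was missing from this copy; below is a standard
# bottom-up version consistent with the docstring and the call that follows.
def knapsack(W, weights, values, n):
    # dp[w] = best value achievable with capacity w using the items seen so far
    dp = [0] * (W + 1)
    for i in range(n):
        # iterate capacity downwards so each item is used at most once (0-1)
        for w in range(W, weights[i] - 1, -1):
            dp[w] = max(dp[w], dp[w - weights[i]] + values[i])
    return dp[W]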
print(knapsack(W, weights, values, N))
# Naive
"""
def knapsack(W, weights, values, n):
    if n == 0 or W == 0:  # base
        return 0
    if weights[n-1] > W:
        return knapsack(W, weights, values, n-1)
    else:
        return max(
            values[n-1] + knapsack(W - weights[n-1], weights, values, n-1),
            knapsack(W, weights, values, n-1)
        )
"""
| 21.87037 | 83 | 0.516511 |
4876b544334a9fdacdb07da711e4c0eb80787e3b | 339 | py | Python | tests/test_parse.py | fphammerle/duplitab | 8dcea2dbcb7f44405cdff24e24f598d338bdcea0 | [
"MIT"
] | 1 | 2021-02-24T11:45:49.000Z | 2021-02-24T11:45:49.000Z | tests/test_parse.py | fphammerle/duplitab | 8dcea2dbcb7f44405cdff24e24f598d338bdcea0 | [
"MIT"
] | null | null | null | tests/test_parse.py | fphammerle/duplitab | 8dcea2dbcb7f44405cdff24e24f598d338bdcea0 | [
"MIT"
] | null | null | null | import pytest
import datetime
import duplitab
| 30.818182 | 79 | 0.769912 |
4877da686c21f0b39bdbcc627ce13221392e0654 | 5,597 | py | Python | cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
] | 248 | 2019-02-17T05:31:22.000Z | 2022-03-30T04:57:20.000Z | cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
] | 130 | 2019-02-10T19:35:55.000Z | 2022-03-31T10:58:39.000Z | cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
] | 70 | 2019-02-15T19:04:29.000Z | 2022-03-27T12:58:50.000Z | # Author: True Price <jtprice@cs.unc.edu>
# A core-attachment based method to detect protein complexes in PPI networks
# Wu, Li, Kwoh, Ng (2009)
# http://www.biomedcentral.com/1471-2105/10/169
from collections import defaultdict
from itertools import combinations
import functools
# return average degree and density for a graph
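# (sketch of the stripped helper; the name is assumed, g maps node -> neighbor set)
__graph_avg_and_density = lambda g: (
    sum(len(n) for n in g.values()) / float(len(g)),
    (sum(len(n) for n in g.values()) / float(len(g) * (len(g) - 1))) if len(g) > 1 else 0.0,
)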
# return core nodes, given a graph and its average degree
__get_core_nodes = lambda g, avg: set(v for v, n in g.items() if len(n) >= avg)
# return NA score
__NA_score = lambda a, b: float(len(a & b) ** 2) / (len(a) * len(b))
| 33.315476 | 88 | 0.517063 |
48782a7be4c6875b12d933fb4c7555216fa0e180 | 2,745 | py | Python | appengine/findit/waterfall/test/revert_and_notify_culprit_pipeline_test.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | 1 | 2018-01-02T05:47:07.000Z | 2018-01-02T05:47:07.000Z | appengine/findit/waterfall/test/revert_and_notify_culprit_pipeline_test.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/waterfall/test/revert_and_notify_culprit_pipeline_test.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.constants import DEFAULT_QUEUE
from common.waterfall import failure_type
from gae_libs.pipeline_wrapper import pipeline_handlers
from waterfall import create_revert_cl_pipeline
from waterfall.create_revert_cl_pipeline import CreateRevertCLPipeline
from waterfall.revert_and_notify_culprit_pipeline import (
RevertAndNotifyCulpritPipeline)
from waterfall.send_notification_for_culprit_pipeline import (
SendNotificationForCulpritPipeline)
from waterfall.test import wf_testcase
| 36.118421 | 77 | 0.676867 |
4878680107622e5788667d3e8c86e78f88548e8c | 2,368 | py | Python | labs/lab2/lm_model/models/LSTM.py | luyuliu/CSE-5194 | 52970106c21b30e64d4cf1df26bec09929494060 | [
"MIT"
] | 1 | 2020-12-04T18:07:54.000Z | 2020-12-04T18:07:54.000Z | labs/lab2/lm_model/models/LSTM.py | luyuliu/CSE-5194 | 52970106c21b30e64d4cf1df26bec09929494060 | [
"MIT"
] | 1 | 2019-11-15T22:05:22.000Z | 2019-12-01T03:41:14.000Z | labs/lab2/lm_model/models/LSTM.py | luyuliu/CSE-5194 | 52970106c21b30e64d4cf1df26bec09929494060 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
| 34.823529 | 128 | 0.649916 |
4879e9953736c5be4e8f8c3cffbf391ebb052c79 | 229 | py | Python | Day 17/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
] | 9 | 2021-03-02T12:16:24.000Z | 2021-03-26T11:06:08.000Z | Day 17/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
] | 65 | 2021-03-02T04:57:47.000Z | 2021-04-02T19:31:30.000Z | Day 17/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
] | 94 | 2021-03-02T04:42:28.000Z | 2021-06-28T10:38:20.000Z | # To print the Fibonacci series up to a given number n.
first = 0
second = 1
n = int(input())
print("Fibonacci Series:")
for i in range(0, n):
    print(first, end=", ")
    nxt = second + first
    first = second
    second = nxt
| 19.083333 | 50 | 0.628821 |
487a8dc97f3ca00e23834b0ac346fc97195d5b14 | 3,578 | py | Python | taotao-cloud-python/taotao-cloud-oldboy/day84-PerfectCRM/PerfectCRM/kingadmin/permissions.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-oldboy/day84-PerfectCRM/PerfectCRM/kingadmin/permissions.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-oldboy/day84-PerfectCRM/PerfectCRM/kingadmin/permissions.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z | from django.core.urlresolvers import resolve
from django.shortcuts import render,redirect,HttpResponse
from kingadmin.permission_list import perm_dic
from django.conf import settings
| 38.473118 | 106 | 0.579374 |
487a9c7212eb09b59a26433079cfd900f9387fb7 | 7,894 | py | Python | wserver_qdk/tests/main_test.py | PunchyArchy/wserver_qdk | cd29785710cb9f21efb2fc35fa395b1f693b854e | [
"MIT"
] | null | null | null | wserver_qdk/tests/main_test.py | PunchyArchy/wserver_qdk | cd29785710cb9f21efb2fc35fa395b1f693b854e | [
"MIT"
] | null | null | null | wserver_qdk/tests/main_test.py | PunchyArchy/wserver_qdk | cd29785710cb9f21efb2fc35fa395b1f693b854e | [
"MIT"
] | null | null | null | """ . """
import unittest
from wserver_qdk.main import WServerQDK
from wserver_qdk import tools
import uuid
| 43.373626 | 75 | 0.60375 |