blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9e69c2013364f4ed257b3f46392ed2611a7163b4 | c44e28b14de66ca7a995c361ddec7add6c0c4775 | /back/celery_local.py | e934b6bdc24a0488ec8abbd0571cf6b56813a9fa | [] | no_license | WnP/taiga-stack | 27791318c87ba9cc7482542a2a1efa6c93c86112 | c68cb634d1d2af64bf8e0ca084a0b7a94986e75f | refs/heads/master | 2020-03-31T18:05:42.306457 | 2018-10-16T17:51:07 | 2018-10-16T17:51:07 | 152,445,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from .celery import * # noqa
# To use celery in memory
# task_always_eager = True
broker_url = "amqp://taiga:secret@tasks.rabbitmq:5672/celery"
result_backend = 'redis://tasks.redis:6379/0'
| [
"steevechailloux@gmail.com"
] | steevechailloux@gmail.com |
72ed7287238b03b37d9fa4ab713ae57f4e995441 | 85e907873cb4ed2e663427b4d37c85661dc4ad2c | /listita/wsgi.py | 6df54f098d7ba9cfee913cc7e207dec931908b63 | [] | no_license | gabrieltron/lista | 4f6c7e620a7325462ca16a8f7102449a7827face | c56186533cb6b58a8e3702108b10d5e554dbf417 | refs/heads/master | 2021-07-13T22:31:51.392260 | 2017-10-19T21:39:47 | 2017-10-19T21:39:47 | 104,925,635 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
WSGI config for listita project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "listita.settings")
application = get_wsgi_application()
| [
"joao_trombeta@hotmail.com"
] | joao_trombeta@hotmail.com |
2f83c5cac7624e5304cf2aa0459939e100999280 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_085/ch2_2020_07_09_22_36_13_402167.py | c82e823dcb2277b1070d6ac039df23e7849b4131 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | def calcula_velocidade_media(d,t):
return d/t | [
"you@example.com"
] | you@example.com |
bd690590a1b4049110cba3759fc56053d302b5d2 | 92fa526d5ca2e31f5908f17bb95c529b09ac9995 | /old/models/socket-t/client.py | 6edd2d446253a63ab16744b1bcd6f1ff4ca2109d | [] | no_license | KevinLongkk/IFit | 55e1f7f872dbdd28e23066c9b96465315061ca2a | 438fbdbd63d7cf4059038623e2739fc0a860c26b | refs/heads/master | 2020-03-21T02:08:49.538064 | 2018-06-20T04:53:56 | 2018-06-20T04:53:56 | 137,982,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,067 | py | ###########################################################################
#*- -*#
#*- coding : UTF-8 -*#
#*- function : as a client to send and receiver file -*#
#*- connectHost : 192.168.1.8 -*#
#*- connectPort : 4518 -*#
#*- author : pwn_w -*#
#*- -*#
###########################################################################
import os,struct
from socket import *
s = socket(AF_INET,SOCK_STREAM) #定义socket类型
s.connect(('192.168.1.8',4518)) #创建连接
#--------------------------------发送文件---------------------------------#
def sendFile():
filepath = input('Input the video you want to send:\r\n')
s.sendall(bytes(filepath,encoding="utf8"))
filepath = str(s.recv(1024), encoding="utf8")
if os.path.isfile(filepath):
fileinfo_size=struct.calcsize('128sl')
fhead = struct.pack('128sl',os.path.basename(filepath).encode('utf-8'),os.stat(filepath).st_size)
s.send(fhead)
print ('client filepath: ',filepath)
fo = open(filepath,'rb')
while True:
filedata = fo.read(1024)
if not filedata:
break
s.send(filedata)
fo.close()
print ('send over...')
else:
print ('no such file')
#-------------------------------------------------------------------------#
#--------------------------------接收文件---------------------------------#
def receiverFile():
try:
filename = input('Which file you want to download:\r\n') #输入要下载的文件
s.sendall(bytes(filename,encoding="utf8"))
s.settimeout(600)
fileinfo_size=struct.calcsize('128sl') #打包规则
buf = s.recv(fileinfo_size)
if buf:
filename,filesize =struct.unpack('128sl',buf)
filename=filename.decode('utf-8')
filename_f = filename.strip('\00')
filenewname = os.path.join('/home/aston/',(filename_f))
print ('file new name is %s, filesize is %s' %(filenewname,filesize))
recvd_size = 0 #定义接收了的文件大小
file = open(filenewname,'wb')
print ('stat receiving...')
while not recvd_size == filesize:
if filesize - recvd_size > 1024:
rdata = s.recv(1024)
recvd_size += len(rdata)
else:
rdata = s.recv(filesize - recvd_size)
recvd_size = filesize
file.write(rdata)
file.close()
print ('receive done')
except socket.timeout:
print('timeout')
#-------------------------------------------------------------------------#
#--------------------------------浏览文件---------------------------------#
def scanFile():
directory = input('Which directory you want to scan:\r\n') #发送要浏览的目录
s.sendall(bytes(directory,encoding="utf8"))
acceptFile = str(s.recv(1024),encoding = "utf8")
print("".join(("List:",acceptFile)))
#-------------------------------------------------------------------------#
#--------------------------------执行函数---------------------------------#
def work():
Mode = input('Upload(U)、Download(D)、Scan(S) or Quit(Q):\r\n') #输入工作模式
s.sendall(bytes(Mode,encoding="utf8")) #发送至工作模式至服务器
if (Mode=='D'): #下载文件
receiverFile()
elif(Mode=='U'): #上传文件
sendFile()
elif(Mode=='S'): #浏览目录文件
scanFile()
elif(Mode=='Q'): #退出服务器
exit(0)
else: #提示输入错误工作模式
print('Invalid value, please input again...')
#-------------------------------------------------------------------------#
while True:
work()
s.close()
| [
"1247594945@qq.com"
] | 1247594945@qq.com |
3fe33e070c2c0d75811a238d675bdd0dfbcf1773 | dc84012d21ce76dc5300659d7fd6d711bd1534eb | /tests/transports_test.py | 7973552568555e934c847d2b745a83738228197a | [
"ISC"
] | permissive | jia3857/py-restclient | 64d7f885c76fe0a901fd26f0a33306a9dcd71e85 | e44c4041b439bba31f4738974229d5f09e86f2ba | refs/heads/master | 2021-08-22T21:01:33.350528 | 2017-12-01T05:08:19 | 2017-12-01T08:29:18 | 112,697,315 | 0 | 0 | null | 2017-12-01T05:06:45 | 2017-12-01T05:06:45 | null | UTF-8 | Python | false | false | 6,935 | py | # -*- coding: utf-8 -
#
# Copyright (c) 2008 (c) Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import socket
import threading
import unittest
import urlparse
from restclient.transport import CurlTransport
from restclient.rest import Resource, RestClient, RequestFailed, \
ResourceNotFound, Unauthorized
from _server_test import HOST, PORT, run_server_test
class HTTPClientTestCase(unittest.TestCase):
httptransport = CurlTransport()
def setUp(self):
run_server_test()
self.url = 'http://%s:%s' % (HOST, PORT)
self.res = Resource(self.url, self.httptransport)
def tearDown(self):
self.res = None
def testGet(self):
result = self.res.get()
self.assert_(result == "welcome")
def testUnicode(self):
result = self.res.get('/unicode')
self.assert_(result == u"éàù@")
def testUrlWithAccents(self):
result = self.res.get('/éàù')
self.assert_(result == "ok")
self.assert_(self.res.response.status == 200)
def testUrlUnicode(self):
result = self.res.get(u'/test')
self.assert_(result == "ok")
self.assert_(self.res.response.status == 200)
result = self.res.get(u'/éàù')
self.assert_(result == "ok")
self.assert_(self.res.response.status == 200)
def testGetWithContentType(self):
result = self.res.get('/json', headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200)
def bad_get():
result = self.res.get('/json', headers={'Content-Type': 'text/plain'})
self.assertRaises(RequestFailed, bad_get)
def testGetWithContentType2(self):
res = Resource(self.url, self.httptransport,
headers={'Content-Type': 'application/json'})
result = res.get('/json')
self.assert_(res.response.status == 200)
def testNotFound(self):
def bad_get():
result = self.res.get("/unknown")
self.assertRaises(ResourceNotFound, bad_get)
def testGetWithQuery(self):
result = self.res.get('/query', test="testing")
self.assert_(self.res.response.status == 200)
def testGetBinary(self):
import imghdr
import tempfile
res = Resource('http://e-engura.org', self.httptransport)
result = res.get('/images/logo.gif')
self.assert_(res.response.status == 200)
fd, fname = tempfile.mkstemp(suffix='.gif')
f = os.fdopen(fd, "wb")
f.write(result)
f.close()
self.assert_(imghdr.what(fname) == 'gif')
def testSimplePost(self):
result = self.res.post(payload="test")
self.assert_(result=="test")
def testPostByteString(self):
result = self.res.post('/bytestring', payload="éàù@")
self.assert_(result == u"éàù@")
def testPostUnicode(self):
result = self.res.post('/unicode', payload=u"éàù@")
self.assert_(result == u"éàù@")
def testPostWithContentType(self):
result = self.res.post('/json', payload="test",
headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200 )
def bad_post():
return self.res.post('/json', payload="test",
headers={'Content-Type': 'text/plain'})
self.assertRaises(RequestFailed, bad_post)
def testEmptyPost(self):
result = self.res.post('/empty', payload="",
headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200 )
result = self.res.post('/empty',headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200 )
def testPostWithQuery(self):
result = self.res.post('/query', test="testing")
self.assert_(self.res.response.status == 200)
def testSimplePut(self):
result = self.res.put(payload="test")
self.assert_(result=="test")
def testPutWithContentType(self):
result = self.res.put('/json', payload="test",
headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200 )
def bad_put():
return self.res.put('/json', payload="test",
headers={'Content-Type': 'text/plain'})
self.assertRaises(RequestFailed, bad_put)
def testEmptyPut(self):
result = self.res.put('/empty', payload="",
headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200 )
result = self.res.put('/empty',headers={'Content-Type': 'application/json'})
self.assert_(self.res.response.status == 200 )
def testPuWithQuery(self):
result = self.res.put('/query', test="testing")
self.assert_(self.res.response.status == 200)
def testHead(self):
result = self.res.head('/ok')
self.assert_(self.res.response.status == 200)
def testDelete(self):
result = self.res.delete('/delete')
self.assert_(self.res.response.status == 200)
def testFileSend(self):
content_length = len("test")
import StringIO
content = StringIO.StringIO("test")
result = self.res.post('/json', payload=content,
headers={
'Content-Type': 'application/json',
'Content-Length': str(content_length)
})
self.assert_(self.res.response.status == 200 )
def testAuth(self):
httptransport = self.httptransport
httptransport.add_credentials("test", "test")
res = Resource(self.url, httptransport)
result = res.get('/auth')
self.assert_(res.response.status == 200)
httptransport.add_credentials("test", "test2")
def niettest():
res = Resource(self.url, httptransport)
result = res.get('/auth')
self.assertRaises(Unauthorized, niettest)
if __name__ == '__main__':
from _server_test import run_server_test
run_server_test()
unittest.main()
| [
"yjaaidi@gmail.com"
] | yjaaidi@gmail.com |
8eb3b583ba00c21e0e51f30d62670c1da9f518e3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/87/usersdata/171/57992/submittedfiles/contido.py | 9b0e89a4443abb2d93f0a13c24567c49b26e2254 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # -*- coding: utf-8 -*-
def quantidade(lista,lista2):
cont=0
for i in range(0,len(lista),1):
if lista[i]==lista2[i]:
cont=cont+1
return(cont)
n=int(input('digite o numero de elemento:'))
lista1=[]
for i in range(1,n+1,1):
cont=0
valor1=int(input('digite o numero á ser colocado na lista 1:'))
lista1.append(valor1)
n=int(input('digite o numero de elemento:'))
lista2=[]
for i in range(1,n+1,1):
cont=0
valor2=int(input('digite o numero á ser colocado na lista 2:'))
lista2.append(valor2)
if quantidade(lista1,lista2):
print(cont)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5c45c17aefbe4fca29f0d7af21bc35890672183a | 71d7c7c7abb33f3a0b36e401cbb546b1f7135fb8 | /myproject/myproject/blacklist/models.py | 582409b8bfd2bcb320763bab6cdb4c797e868931 | [] | no_license | Naveen-yelamakuru/News_website | 4de542b16f447294b755b380fa8f4ec027773541 | 05884b9a7bbaa1af1fa9c0c3a9796736fbf4bf4d | refs/heads/master | 2023-04-01T06:59:26.384010 | 2021-04-07T06:27:28 | 2021-04-07T06:27:28 | 355,433,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class BlackList(models.Model):
ip = models.CharField(max_length=10)
date = models.CharField(max_length=12,default="-")
def __str__(self):
return self.ip
| [
"yelamakurunaveen12@gmail.com"
] | yelamakurunaveen12@gmail.com |
744d50f28a2c94ad5282605b6d3bb4517f7916ea | 0466559817d3a1be9409da2c83db99c4db3bacfe | /hubcheck/conf/config.py | b68b5bd69b995419b02cbc759340b2a456a15ce1 | [
"MIT"
] | permissive | ken2190/hubcheck | 955cf9b75a1ee77e28256dfd3a780cfbc17de961 | 2ff506eb56ba00f035300862f8848e4168452a17 | refs/heads/master | 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import os
from pkg_resources import resource_filename
class Config(object):
data_dir = resource_filename('hubcheck','data')
profiles_dir = resource_filename('hubcheck','profiles')
# user configuration variables
screenshot_dir = None
video_dir = None
config_filename = None
tdfname = ''
tdpass = ''
highlight_web_elements = False
scroll_to_web_elements = False
log_locator_updates = False
log_widget_attachments = False
proxy = None
hub_hostname = None
hub_version = None
tool_container_version = None
default_workspace_toolname = None
apps_workspace_toolname = None
# full path of the hub config file, used by toolcheck
configpath = None
settings = Config()
| [
"telldsk@gmail.com"
] | telldsk@gmail.com |
72e9740d464cc851759637f97394bbe32c8a4474 | c4cd9e23563072e810e90ada59904dc5247c92fb | /p_library/admin.py | d38deca44f1a68b27d9e4c4fdedeb749b5eb41c0 | [] | no_license | riderufa/GGilmanovD411 | 130524818c6036fd9d5d29726e5dcabb8d68e2b6 | 59f05fec1b8dad5a6014e12f3f8c323ddb9143ed | refs/heads/master | 2020-09-29T02:50:42.918242 | 2019-12-10T10:00:11 | 2019-12-10T10:00:11 | 226,932,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | from django.contrib import admin
from p_library.models import Book, Author, Publishing
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
# @staticmethod
# def author_full_name(obj):
# return obj.author.full_name
# author_full_name.admin_order_field = 'publishing'
list_display = ('title', 'publishing', 'author_full_name')
fields = ('ISBN', 'title', 'description', 'year_release', 'author', 'publishing', 'price')
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
@staticmethod
def author_full_name(obj):
return obj.full_name
list_display = ('author_full_name', )
@admin.register(Publishing)
class PublishingAdmin(admin.ModelAdmin):
@staticmethod
def publishing_name(obj):
return obj.publishing_name
list_display = ('publishing_name', ) | [
"noreply@github.com"
] | noreply@github.com |
c4b1887ea606f6c95c6222db1351a682d8dad691 | 59eecdf59d632d21b94adcad01b5af1fc1f9ed72 | /utils/balon_al_arco.py | d8d9fe8e0655b292fffe0aed8810a046c7938a99 | [] | no_license | AnthonyCAS/vsss2015_Strategy | 8caa668191651cee757a84cac47fa0ce509d9200 | 48ff71218dc55136369a9d24f7f510b675b80e77 | refs/heads/master | 2020-04-12T08:07:02.468659 | 2016-10-02T23:04:23 | 2016-10-02T23:04:23 | 42,489,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,733 | py | import sys
import pygame
from vsss.serializer import VsssSerializerSimulator, VsssSerializerReal
from vsss.strategy import TeamStrategyBase
from vsss.data import VsssOutData
from vsss.controller import Controller
from vsss.position import RobotPosition, Position
from vsss.trajectory import TrajectorySCurve
from vsss.visualizer import VsssVisualizer
from vsss.move import Move
from vsss.colors import *
from vsss.vsss_math.arithmetic import *
trajectory = None
my_side = 0
class TrajectoryVisualizer(VsssVisualizer):
def extra_visualize(self):
global trajectory
if trajectory is not None:
trajectory = [self.to_screen(x).tonp() for x in trajectory]
pygame.draw.lines(self.screen, GREEN, False, trajectory, 1)
for p in trajectory:
pygame.draw.circle(self.screen, GREEN, p, 3, 3)
pygame.draw.circle(self.screen, BLUE, trajectory[1], 3, 3)
class BalonAlArcoStrategy(TeamStrategyBase):
CONTROL_SERVER = ('0.0.0.0', 9003)
serializer_class = VsssSerializerSimulator
do_visualize = True
latency = 100
print_iteration_time = False
visualizer_class = TrajectoryVisualizer
def set_up(self):
super(BalonAlArcoStrategy, self).set_up()
self.controller = Controller()
self.traj = TrajectorySCurve(r=10)
def strategy(self, data):
global trajectory
print "TEAM", data.teams
team = data.teams[self.team]
ball = data.ball
if my_side == 0:
goal = Position(-80,0)
else:
goal = Position(80,0)
Abg = ball.angle_to(goal) # angulo ball to goal
obj = RobotPosition(ball.x, ball.y, Abg)
current = team[1]
if current.distance_to(ball) <= 8:
new_obj = move_by_radius(ball.tonp(), 10, Abg)
obj = RobotPosition(new_obj[0], new_obj[1], Abg)
move = self.controller.go_with_trajectory(obj, current)
trajectory = self.traj.get_trajectory(obj, current, 10)
out_data = VsssOutData(moves=[
Move(0,0),
move,
Move(0,0),
])
return out_data
if __name__ == '__main__':
def help():
return """Ejecute el script de cualquiera de las 2 formas, una para cada equipo:
./vision_test 0 <puerto>
./vision_test 1 <puerto>"""
# Help the user if he doesn't know how to use the command
if len(sys.argv) != 3:
print help()
sys.exit()
elif sys.argv[1] != '0' and sys.argv[1] != '1':
print help()
sys.exit()
my_side = int(sys.argv[1])
strategy = BalonAlArcoStrategy(my_side, 3, this_server=("0.0.0.0", int(sys.argv[2])))
strategy.run()
| [
"jose@amigocloud.com"
] | jose@amigocloud.com |
35169d9ca2b214f9e10d0d57ba79f111fb07bed2 | 630ed534a7859ff79dee2f4c797a410952470551 | /input_analyser.py | b7d90ad3d64af94b058ebe0b6fd3cc5a85b588d3 | [] | no_license | shubh3695/Python-Question-Set-Generator | c98bd1bc52405f2df163f89254ca94c72d63033e | 609a2617cddf1e66da9646a1841de22b12e1de0d | refs/heads/master | 2020-06-23T07:01:58.731368 | 2019-07-24T04:16:21 | 2019-07-24T04:16:21 | 198,550,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | from json import load
from models import Question, QuestionSet, Requirements, LoadedModel
##########################
# Generic Input Mapper from JSon to RequiredModel
##########################
class InputHandler:
def __init__(self):
with open('input4.json') as json_file:
self.data = load(json_file)
self.loaded_model: LoadedModel = LoadedModel()
self.map_json()
def map_json(self):
questions = []
for question in self.data["questions"]:
questions.append(Question(question["id"], question["difficulty"], question["marks"]))
question_set = QuestionSet(self.data["questionsSize"], questions)
requirements = self.map_total_marks(self.data["requirements"])
self.loaded_model = LoadedModel(question_set, requirements)
def get_loaded_results(self) -> LoadedModel:
return self.loaded_model
@staticmethod
def map_total_marks(requirement) -> Requirements:
total = requirement["total"]
return Requirements(total, requirement["easy"] * 0.01 * total, requirement["medium"] * 0.01 * total,
requirement["hard"] * total * 0.01)
| [
"shubh3695@gmail.com"
] | shubh3695@gmail.com |
8afef6f24d1c969ecf405eacaf0d931750e41313 | aa1d9d60de89f92351cf37d9e1f4df04cd9b0449 | /gcp/pubsub_client.py | a7e143bc87ecbc11f493f8042faa6d0a344fe6a0 | [] | no_license | zhoufengzd/python | 7ca0de97fa0050f4984d866c87ea0f3069ad3cfa | 036e2e17fde91c5ef46b22af00fc73e41bf75428 | refs/heads/master | 2022-10-13T23:10:10.538705 | 2022-09-25T03:51:26 | 2022-09-25T03:51:26 | 209,899,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,376 | py | #!/usr/bin/env python
## utility to write / read messages with pubsub
import argparse
import os
from google.cloud import pubsub_v1 as pubsub
class PubSubClient:
def __init__(self, gcp_project_id):
self.gcp_project_id = gcp_project_id
self.reader = pubsub.SubscriberClient()
self.writer = pubsub.PublisherClient()
def write(self, topic, data, time_out=None):
""" Publishes data to a Pub/Sub topic. """
topic_path = self.writer.topic_path(self.gcp_project_id, topic)
payload = data
if os.path.exists(data):
with open(data, mode='rb') as inf:
payload = inf.read()
try:
future = self.writer.publish(topic_path, payload)
message_id = future.result(timeout=time_out)
print(f"Published data to {topic}: {message_id}")
except Exception as e:
print(str(e))
def _process_message(self, message):
print(f"Received {message}.")
message.ack()
print(f"Acknowledged {message.message_id}.")
def _check_subscription(self, topic_path, reader_path):
"""Create a new pull subscription on the given topic."""
response = self.writer.list_topic_subscriptions(request={"topic": topic_path})
for rdrpath in response:
if rdrpath == reader_path:
return
subscription = self.reader.create_subscription(request={"name": reader_path, "topic": topic_path})
print('Subscription created: {}'.format(subscription))
def read(self, topic, subscription_id=None, time_out=None):
if not subscription_id:
subscription_id = topic + "-sub"
topic_path = self.reader.topic_path(self.gcp_project_id, topic)
reader_path = self.reader.subscription_path(self.gcp_project_id, subscription_id)
try:
self._check_subscription(topic_path, reader_path)
future = self.reader.subscribe(reader_path, callback=self._process_message)
future.result(timeout=time_out)
except Exception as e:
print(str(e))
future.cancel()
future.result() # Block until the shutdown is complete.
self.reader.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("action", help="s|sub|read, or w|write|pub")
parser.add_argument("--topic", help="Pub/Sub topic ID")
parser.add_argument("--data", default="data.json", help="Pub data file or payload string")
parser.add_argument("--timeout", default=None, help="read or write timeout")
parser.add_argument("--subscription_id", help="Pub/Sub subscription ID. Default to {topic}-sub")
parser.add_argument("--project_id", help="Google Cloud project ID")
args = parser.parse_args()
if not args.project_id:
args.project_id = os.environ.get("GOOGLE_CLOUD_PROJECT", os.environ.get("PROJECT"))
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", None):
print("Error! GOOGLE_APPLICATION_CREDENTIALS is expected")
exit(1)
client = PubSubClient(args.project_id)
if args.action in ["s", "sub", "r", "read"]:
client.read(args.topic, args.subscription_id, int(args.timeout) if args.timeout else None)
elif args.action in ["w", "write", "p", "pub"]:
client.write(args.topic, args.data)
| [
"feng.zhou@clearlabs.com"
] | feng.zhou@clearlabs.com |
9390d1ba58dfdb8793881a2da5c58a7d8c865296 | c5aa4c8c482925448634cf202140eb32e32500bb | /flash guided non-flash image denoising/code/DN_train_npy.py | d40a2e9b6a78f37809e8c538894edc2dfd239a74 | [] | no_license | JingyiXu404/CU-Net-plus | e2c2636e71ec2ab8064ae6880f4afd89fa157542 | aaf7e3eba938ec00297962cbd1c4d62fdb4b37c1 | refs/heads/main | 2023-08-14T07:37:45.991839 | 2021-09-20T03:09:16 | 2021-09-20T03:09:16 | 370,957,706 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import torch.utils.data as data
from glob import glob
import matplotlib.pyplot as plt
from torchvision import transforms
#import cv2
from PIL import Image
import random
import os
import numpy as np
import torch
class cudataset(data.Dataset):
def __init__(self,target_name,guide_name,label_name):
super(cudataset, self).__init__()
self.noise = np.load('trainset/DN/'+target_name+'.npy')#(batch,height,width,c)
self.noise = np.transpose(self.noise, (0, 3, 1, 2))
self.noise_t = torch.from_numpy(self.noise)
self.gt = np.load('trainset/DN/'+label_name+'.npy') # (batch,height,width,c)
self.gt = np.transpose(self.gt, (0, 3, 1, 2))
self.gt_t = torch.from_numpy(self.gt)
self.guide = np.load('trainset/DN/'+guide_name+'.npy') # (batch,height,width,c)
self.guide = np.transpose(self.guide, (0, 3, 1, 2))
self.guide_t = torch.from_numpy(self.guide)
def __getitem__(self, item):
img_noise = self.noise_t[item]
img_gt = self.gt_t[item]
img_guide = self.guide_t[item]
return (img_noise, img_gt,img_guide)
def __len__(self):
return len(self.noise)
if __name__ =='__main__':
dataset=cudataset('data_x','data_y','label')
dataloader=data.DataLoader(dataset,batch_size=1)
for b1,(img_L,img_H,img_guide) in enumerate(dataloader):
print(b1)
print(img_L.shape,img_H.shape,img_guide.shape) | [
"noreply@github.com"
] | noreply@github.com |
e273b347e2345994f64bb95af98c82b139510d43 | 97099104ac872920fd32b7149bcb3f785ce7da15 | /src/data_ingestion.py | f8c7583b9e1fa8f645eac37d7f4d65250013cebf | [] | no_license | aj-1000/data-team-coding-exercise | a78c8ea8263bb48ba838d05eccc3e2328083ca69 | 3d2311c2e42690aa235d8ae3e766a1e8f2722fca | refs/heads/master | 2023-08-19T09:25:17.839061 | 2021-09-27T17:20:20 | 2021-09-27T17:20:20 | 406,456,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | import pandas as pd
import pandera as pa
import numpy as np
from fastlogging import LogInit
logger = LogInit(pathName='logs/data_ingestion.log')
def check_dataframe_schema(df: pd.DataFrame, schema: pa.DataFrameSchema) -> None:
"""A function to assert that the dataframe has the types expected in the
schema"""
validate_df = schema.validate(df)
def ingest_components_file(file: str) -> None:
"A function to load, type check and save the components data from a file"
schema = pa.DataFrameSchema({
"componentId": pa.Column(str),
"colour": pa.Column(str),
"costPrice": pa.Column(float)
})
df = pd.read_csv(file)
check_dataframe_schema(df, schema)
df.to_pickle('./data/df_components.pkl')
def ingest_orders_file(file: str):
"A function to load, type check and save the orders data from a file"
schema = pa.DataFrameSchema({
"timestamp": pa.Column(np.datetime64),
"orderId": pa.Column(str),
"units": pa.Column(object)
})
df = pd.read_json(file, lines=True)
check_dataframe_schema(df, schema)
df.to_pickle('./data/df_orders.pkl')
def main(components_file: str, orders_file: str) -> None:
"Call the data ingestion functions"
logger.info("STARTING DATA INGESTION")
ingest_components_file(components_file)
ingest_orders_file(orders_file)
logger.info("FINISHED DATA INGESTION")
if __name__ == '__main__':
try:
main('./data/components.csv', './data/orders.json.txt')
except Exception as e:
logger.exception(e) | [
"adafejaja@gmail.com"
] | adafejaja@gmail.com |
8bcc7d5dab776217c266b88d9884fc3a7e5a583d | 1bcb966740f47c0edc23e9b05afec55f2bcae36a | /app/game/appinterface/packageInfo.py | 92697a828967b4ecf9a95dbc7d38fdf70e8c3d66 | [] | no_license | East196/diabloworld | 0d2e9dbf650aa86fcc7b9fc1ef49912e79adb954 | d7a83a21287ed66aea690ecb6b73588569478be6 | refs/heads/master | 2021-05-09T12:15:31.640065 | 2018-02-04T15:16:54 | 2018-02-04T15:16:54 | 119,007,609 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | #coding:utf8
'''
Created on 2011-4-13
@author: sean_lan
'''
from app.game.core.PlayersManager import PlayersManager
def getItemsInEquipSlotNew(dynamicId,characterId):
'''获取角色的装备栏信息
@param dynamicId: int 客户端的id
@param characterId: int 角色的id
'''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
equipmentList = player.pack.getEquipmentSlotItemList()
keys_copy = dict(equipmentList)
equipmentList_copy = []
for position in range(1,7):
item = keys_copy.get(position,None)
if item:
_item = {}
_item['itemid'] = item.baseInfo.id
_item['icon'] = item.baseInfo.getItemTemplateInfo().get('icon',0)
_item['tempid'] = item.baseInfo.getItemTemplateId()
_item['exp'] = item.exp
iteminfo = {'pos':position,'item':_item}
equipmentList_copy.append(iteminfo)
playerInfo = player.formatInfoForWeiXin()
data = {}
data['equip'] = equipmentList_copy
data['attack'] = playerInfo['attack']
data['fangyu'] = playerInfo['fangyu']
data['minjie'] = playerInfo['minjie']
return {'result':True,'message':u'','data':data}
def UserItemNew(dynamicId,characterId,tempid):
'''使用物品
'''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
data = player.pack.equipEquipmentByItemId(tempid)
return data
def GetPackageInfo(dynamicId,characterId):
'''获取包裹的信息
'''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
data = player.pack.getPackageItemList()
return data
def unloadedEquipment_new(dynamicId, characterId, itemId):
'''卸下装备
'''
player = PlayersManager().getPlayerByID(characterId)
if not player or not player.CheckClient(dynamicId):
return {'result':False,'message':""}
data = player.pack.unloaded(itemId)
return data
| [
"2901180515@qq.com"
] | 2901180515@qq.com |
da7a4c4583429538ef54bd4b92b0617d73b31115 | 785e3597b632b7f504572730779eaf92eaa5e5fa | /SSAFYcrawl_TEST.py | 2f64f65b3da29def4053915bced78037a1313cb5 | [] | no_license | zzangkkmin/AlgorithmPracticeZone | 7fd4f2986a98c93b0e0cf5e40b30283c2a338d49 | a1ee937379d8c78d24fffd98cdadd781710d7680 | refs/heads/master | 2020-04-22T19:00:13.436309 | 2019-11-25T07:41:03 | 2019-11-25T07:41:03 | 170,594,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py | #use pip
# pip install bs4 --user
# pip install lxml --user
# pip install requests --user
from bs4 import BeautifulSoup as bs
import requests

# Step 1: fetch the login form page to obtain a CSRF token.
form_url ="https://edu.ssafy.com/comm/login/SecurityLoginForm.do"
# SECURITY NOTE(review): this header hard-codes a live session cookie and a
# user e-mail; credentials below are also in plain text. These should be
# rotated and moved to environment variables / a config file, never committed.
form_header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Cookie': 'WMONID=gObw0yoeO8C; toast.pagefit-mode=view-all; lgnId=zzangkkmin@naver.com; JSESSIONID_HAKSAF=zk_sGYjtSHjC6k_PgQ9ACNvroYrKn-zep6EcGzjNUuBIjRj9OtU5!-604896005!278776549',
'Host': 'edu.ssafy.com',
'Pragma': 'no-cache',
'Referer': 'https://edu.ssafy.com/comm/login/SecurityLogoutForm.do',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}
# A Session keeps cookies between the form GET and the login POST.
session = requests.session()
response = session.get(form_url,headers=form_header)
# Extract the CSRF token embedded in a <meta name="_csrf"> tag.
soup = bs(response.text, 'lxml')
csrf_token = soup.select_one('meta[name="_csrf"]')['content']
# Step 2: POST the login form with the harvested CSRF token.
login_url = "https://edu.ssafy.com/comm/login/SecurityLoginCheck.do"
# SECURITY NOTE(review): plaintext credentials — rotate and externalize.
login_data = {
'userId' : 'zzangkkmin@naver.com',
'userPwd' : 'ryghkseo!@3',
'idSave' : 'on'
}
login_header ={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://edu.ssafy.com',
'Referer': 'https://edu.ssafy.com/comm/login/SecurityLoginForm.do',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'X-CSRF-TOKEN': csrf_token,
'X-Requested-With': 'XMLHttpRequest'
}
res = session.post(login_url,headers=login_header,data=login_data)
print(res.headers)
# Step 3: download e-book page images 1..105, zero-padding the local
# filenames to three digits so they sort correctly.
target_URL = "http://edu.ssafy.com/data/upload_files/crossUpload/openLrn/ebook/unzip/A20181231142031414/assets/page-images/page-7573c40c-33216b63-"
cd_p = "C:/CRTEST/"
pointer = 1
while pointer < 106:
    target = target_URL + str(pointer) + ".jpg"
    if pointer<10:
        t_imgName = cd_p + "00" + str(pointer) + ".jpg"
    elif pointer<100:
        t_imgName = cd_p + "0" + str(pointer) + ".jpg"
    else:
        t_imgName = cd_p + str(pointer) + ".jpg"
    response = session.get(target)
    with open(t_imgName,"wb") as f:
        f.write(response.content)
        f.close()
    pointer = pointer + 1
| [
"zzangkkmin@naver.com"
] | zzangkkmin@naver.com |
e5c59a146854f20e9180454a28c88279d02c0350 | dabddb39b88cee43b4b6ad5a38aa54b0d31ada21 | /main.py | 871a48e563d743446ef20ed1b5522181d72f0298 | [] | no_license | hcw-00/Inference_simple | f1bc8b34a39208159b6c47f84d65e3439256cbc6 | ce4d901931bffad3d81e7540024904dcd339ddaf | refs/heads/master | 2022-11-20T19:06:09.782513 | 2020-07-23T04:49:18 | 2020-07-23T04:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | import argparse
import os
import tensorflow as tf
tf.set_random_seed(20)
from model import infer
def get_params():
    """Build and parse the command-line options for the inference run.

    Returns:
        argparse.Namespace with ``dataset_dir`` (str, default ``'dataset'``)
        and ``batch_size`` (int, default ``1``).
    """
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('--dataset_dir', dest='dataset_dir', default='dataset',
                     help='path of the dataset')
    cli.add_argument('--batch_size', dest='batch_size', type=int, default=1,
                     help='# images in batch')
    return cli.parse_args()
def main(params):
    """Run inference: open a TF session, build the model, and evaluate it.

    :param params: dict of CLI options (see ``get_params``), e.g.
        ``dataset_dir`` and ``batch_size``.
    """
    with tf.Session() as sess:
        model = infer(sess, params)
        model.test(params)
if __name__ == '__main__':
    # vars() turns the argparse Namespace into the plain dict main() expects.
    params = vars(get_params())
    main(params)
| [
"21908600@ncbnet.co.kr"
] | 21908600@ncbnet.co.kr |
10e46eb0dc43fc62e6216ee6b9a96cbf96e67b7a | 4b7d1f2da5da0a9f03a58f59161f7eec12fc2f60 | /q6_find_ascii_char.py | a33f048db5d620b42000c0d23556cea45fc92322 | [] | no_license | leeliren/practical_1 | 6b4d6e89c8520ce75e2af879dbe655da3f70f6c1 | ed33f677af1c2316bdb52f0d56f449b401c765e9 | refs/heads/master | 2021-01-10T08:37:01.411830 | 2016-02-26T04:15:19 | 2016-02-26T04:15:19 | 51,885,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | # q6_find_ascii_char.py
# Program that displays the character for an ASCII code.
# Read the ASCII code from the user.
acsii = input("enter acsii value: ")
# Convert the numeric ASCII code to its character.
character = chr(int(acsii))
# Output the character.
print(character)
| [
"ncs3101sta@N3101SMCS5863.SCHOOLS.MOE.EDU.SG"
] | ncs3101sta@N3101SMCS5863.SCHOOLS.MOE.EDU.SG |
c12227791c9532c511adc49b611291c60354dc51 | 948d84d2e3fc04e353a11384d8570308174242f5 | /5-Pythonda Koşul İfadeleri/if-else-demo-2.py | ee6cf34554eeaa7723e51f9eedd857630f2067ee | [] | no_license | omerfarukcelenk/PythonMaster | a0084a800b8a41cd2ad538a7ca3687c26dc679ec | 0db8f8b0ea2e1c2d810c542068cfcf1a3615f581 | refs/heads/main | 2023-04-16T17:42:05.501904 | 2021-04-26T21:19:27 | 2021-04-26T21:19:27 | 361,896,109 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | '''
1- Girilen bir sayının 0-100 arasında olup olmadığını kontrol ediniz.
sayi = float(input('sayı: '))
if (sayi > 0) and (sayi<=100):
print('sayı 0-100 arasında.')
else:
print('sayı 0-100 arasında değildir.')
'''
'''
2- Girilen bir sayının pozitif çift sayı olup olmadığını kontrol ediniz.
sayi = int(input('sayı: '))
if (sayi > 0):
if (sayi % 2 ==0):
print('girilen sayı pozitif çift sayıdır.')
else:
print('girilen sayı pozitif ancak sayı tek.')
else:
print('girilen sayı negatif sayı.')
'''
'''
3- Email ve parola bilgileri ile giriş kontrolü yapınız.
email = 'email@sadikturan.com'
password = 'abc123'
girilenEmail = input('email: ')
girilenPassword = input('password: ')
if (girilenEmail == email):
if (girilenPassword == password):
print('uygulamaya giriş başarılı.')
else:
print('parolanız yanlış')
else:
print('email bilginiz yanlış')
'''
'''
4- Girilen 3 sayıyı büyüklük olarak karşılaştırınız.
a = int(input('a: '))
b = int(input('b: '))
c = int(input('c: '))
if (a > b) and (a > c):
print(f'a en büyük sayıdır.')
elif (b > a) and (b > c):
print(f'b en büyük sayıdır.')
elif (c > a) and (c > b):
print(f'c en büyük sayıdır.')
'''
'''
5- Kullanıcıdan 2 vize (%60) ve final (%40) notunu alıp ortalama hesaplayınız.
Eğer ortalama 50 ve üstündeyse geçti değilse kaldı yazdırın.
a-) Ortamalama 50 olsa bile final notu en az 50 olmalıdır.
b-) Finalden 70 alındığında ortalamanın önemi olmasın.
vize1 = float(input('vize 1: '))
vize2 = float(input('vize 2: '))
final = float(input('final : '))
ortalama = ((vize1+vize2)/2)*0.6 + (final * 0.4)
result = (ortalama>=50) and (final>=50)
result = (ortalama >=50) or (final>=70)
** durum-1
if (ortalama>=50):
if (final>=50):
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı')
else:
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız. Finalden en az 50 almalısınız.')
else:
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız')
** durum-2
if (ortalama >=50):
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı')
else:
if (final>=70):
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı. Finalden en az 70 aldığınız için geçtiniz.')
else:
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız')
'''
'''
6- Kişinin ad, kilo ve boy bilgilerini alıp kilo indekslerini hesaplayınız.
Formül: (Kilo / boy uzunluğunun karesi)
Aşağıdaki tabloya göre kişi hangi gruba girmektedir.
0-18.4 => Zayıf
18.5-24.9 => Normal
25.0-29.9 => Fazla Kilolu
30.0-34.9 => Şişman (Obez)
name = input('adınız: ')
kg = float(input('kilonuz: '))
hg = float(input('boyunuz: '))
index = (kg) / (hg ** 2)
if (index >= 0) and (index<=18.4):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen zayıf.')
elif (index>18.4) and (index<=24.9):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen normal.')
elif (index>24.9) and (index<=29.9):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen kilolu.')
elif (index>=29.9) and (index<=45.9):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen obez.')
else:
print('bilgileriniz yanlış.')
'''
| [
"omerfar0133@gmail.com"
] | omerfar0133@gmail.com |
aa893d19b93fa4e46eae5303e87793c9a2afed4f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1093.py | fa244fab36bc58d7529d2c7242771c1da73f5714 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | def isPalindrome(num):
if num - int(num) != 0:
return False
num = str(int(num))
l = len(num)
for i in xrange(l):
if num[i] != num[l-i-1]:
return False
return True
def allFairAndSquare(a, b):
    """Return every n in [a, b] (inclusive) that is "fair and square":
    both n and its square root read the same forwards and backwards."""
    return [n for n in xrange(a, b + 1)
            if isPalindrome(n) and isPalindrome(n ** 0.5)]
# --- Code Jam "Fair and Square" driver (Python 2: uses xrange) ---
f = open('C-small-attempt1.in', 'r')
g = open('output.txt', 'w')
# First input line: number of test cases.
n = int(f.readline())
count = 1
# Precompute all fair-and-square numbers up to 1000 once.
all = allFairAndSquare(1, 1000)
while count <= n:
    # Each case line: the inclusive range [a, b].
    rng = f.readline().split()
    a, b = int(rng[0]), int(rng[1])
    # 1000 acts as a "not found" sentinel for both indices.
    x, y = 1000, 1000
    # x = index of first precomputed value >= a.
    for i in xrange(len(all)):
        if all[i] >= a:
            x = i
            break
    # y = index of first precomputed value > b (searched from x).
    for i in xrange(x, len(all)):
        if all[i] > b:
            y = i
            break
    total = 0
    if x == 1000:
        total = 0
    elif y == 1000:
        # No value exceeds b: everything from x to the end is in range.
        y = len(all)
        total = y-x
    else:
        total = y-x
    g.write("Case #" + str(count) + ": " + str(total) + '\n')
    count += 1
f.close()
g.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4531bc24b5d028b9ae04e0702a12afe5f44ac009 | 5079ba8f44eb52f96da6063ea88477c765b5a82e | /exceptions/customexceptions.py | f79dfdc25fbae94743d98cec4edf60cffc206339 | [] | no_license | navdeep710/imapfilesync | e5747e724b26eaefa692be3d8f43a971a114e329 | 86056986b75d7ca13ab89b630cf844fb9bef52c0 | refs/heads/master | 2022-12-09T20:50:32.240855 | 2018-09-13T07:12:26 | 2018-09-13T07:12:26 | 146,305,090 | 0 | 0 | null | 2022-12-08T02:47:41 | 2018-08-27T13:57:58 | Python | UTF-8 | Python | false | false | 82 | py | class Queuenotfound(Exception):
pass
class Emailnotfound(Exception):
pass | [
"navdeep.bits@gmail.com"
] | navdeep.bits@gmail.com |
bb6a13c53d923939882c90a3722dcb0ee6f65008 | ecee6e84ba18100b621c7e06f493ae48e44a34fe | /build/navigation/nav_core/catkin_generated/pkg.installspace.context.pc.py | 7154b8261ec9e1dfe4b7533a24c5c418fed5f7a6 | [] | no_license | theleastinterestingcoder/Thesis | 6d59e06b16cbe1588a6454689248c88867de2094 | 3f6945f03a58f0eff105fe879401a7f1df6f0166 | refs/heads/master | 2016-09-05T15:30:26.501946 | 2015-05-11T14:34:15 | 2015-05-11T14:34:15 | 31,631,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by catkin from pkg.context.pc.in — do not edit by hand.
# These constants feed the generated pkg-config (.pc) file for this package.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/alfred/quan_ws/install/include".split(';') if "/home/alfred/quan_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;geometry_msgs;tf;costmap_2d".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "nav_core"
PROJECT_SPACE_DIR = "/home/alfred/quan_ws/install"
PROJECT_VERSION = "1.13.0"
| [
"quanzhou64@gmail.com"
] | quanzhou64@gmail.com |
1adb61062d3e07b521e6e4b41ec14ddca7000d72 | 458d136f89388f3b6a98559f01addda5db3cf400 | /common/tablib/tablib.py | c6f82696d5b8c15a7242af0ff3817edeb55d88b6 | [] | no_license | secretleojoy/hack4tech-h | 35626f11f655392cbc71208a63e8999f121dafbb | 8c46f80e0c71d0e6bff939780e5b1ad202fafb99 | refs/heads/main | 2023-06-17T08:19:20.411688 | 2021-06-06T12:45:41 | 2021-06-06T12:45:41 | 385,784,799 | 0 | 0 | null | 2021-07-14T01:58:08 | 2021-07-14T01:58:08 | null | UTF-8 | Python | false | false | 10,261 | py | from collections import OrderedDict
from .format import registry
class Row:
    """Internal row container for :class:`Dataset`; mainly used for filtering."""

    __slots__ = ['_row', 'tags']

    def __init__(self, row=None, tags=None):
        # Copy both inputs so the Row owns its own mutable state.
        self._row = [] if row is None else list(row)
        self.tags = [] if tags is None else list(tags)

    def __iter__(self):
        return iter(self._row)

    def __len__(self):
        return len(self._row)

    def __repr__(self):
        return repr(self._row)

    def __getitem__(self, index):
        return self._row[index]

    def __setitem__(self, index, value):
        self._row[index] = value

    def __delitem__(self, index):
        del self._row[index]

    def __getstate__(self):
        # Pickle support: __slots__ classes have no __dict__, so capture
        # each slot explicitly.
        return {name: getattr(self, name) for name in self.__slots__}

    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)

    def rpush(self, value):
        """Append *value* at the right end of the row."""
        self._row.append(value)

    def append(self, value):
        """Alias for :meth:`rpush`."""
        self.rpush(value)

    def insert(self, index, value):
        """Insert *value* before position *index*."""
        self._row.insert(index, value)

    def __contains__(self, item):
        return item in self._row

    @property
    def tuple(self):
        """Tuple snapshot of the row's values."""
        return tuple(self._row)
class Dataset:
    """The :class:`Dataset` object is the heart of Tablib. It provides all core
    functionality.
    Usually you create a :class:`Dataset` instance in your main module, and append
    rows as you collect data. ::
        data = tablib.Dataset()
        data.headers = ('name', 'age')
        for (name, age) in some_collector():
            data.append((name, age))
    Setting columns is similar. The column data length must equal the
    current height of the data and headers must be set. ::
        data = tablib.Dataset()
        data.headers = ('first_name', 'last_name')
        data.append(('hareesh', 'Hack'))
        data.append(('hacker', 'India'))
        data.append_col((90, 67), header='age')
    You can also set rows and headers upon instantiation. This is useful if
    dealing with dozens or hundreds of :class:`Dataset` objects. ::
        headers = ('first_name', 'last_name')
        data = [('hareesh', 'Hack'), ('hacker', 'India')]
        data = tablib.Dataset(*data, headers=headers)
    :param \\*args: (optional) list of rows to populate Dataset
    :param headers: (optional) list strings for Dataset header row
    :param title: (optional) string to use as title of the Dataset
    .. admonition:: Format Attributes Definition
        If you look at the code, the various output/import formats are not
        defined within the :class:`Dataset` object. To add support for a new format, see
        :ref:`Adding New Formats <newformats>`.
    """
    def __init__(self, *args, **kwargs):
        # Each positional argument becomes one Row of data.
        self._data = list(Row(arg) for arg in args)
        self.__headers = None
        # ('title', index) tuples
        self._separators = []
        # (column, callback) tuples
        self._formatters = []
        self.headers = kwargs.get('headers')
        self.title = kwargs.get('title')
    def __len__(self):
        # len(dataset) is the number of rows.
        return self.height
    def _validate(self, row=None, col=None, safety=False):
        """Assures size of every row in dataset is of proper proportions."""
        if row:
            is_valid = (len(row) == self.width) if self.width else True
        elif col:
            if len(col) < 1:
                is_valid = True
            else:
                is_valid = (len(col) == self.height) if self.height else True
        else:
            # Neither row nor col given: check every stored row's width.
            is_valid = all(len(x) == self.width for x in self._data)
        if is_valid:
            return True
        # With safety=True the caller gets False instead of an exception.
        if not safety:
            raise InvalidDimensions
        return False
    def _package(self, dicts=True, ordered=True):
        """Packages Dataset into lists of dictionaries for transmission."""
        # TODO: Dicts default to false?
        # NOTE(review): this copies the outer list only — the Row objects are
        # shared with self._data, so applying formatters below mutates the
        # stored rows in place. Confirm that is intended.
        _data = list(self._data)
        if ordered:
            dict_pack = OrderedDict
        else:
            dict_pack = dict
        # Execute formatters
        if self._formatters:
            for row_i, row in enumerate(_data):
                for col, callback in self._formatters:
                    try:
                        # col is None means "apply callback to every column".
                        if col is None:
                            for j, c in enumerate(row):
                                _data[row_i][j] = callback(c)
                        else:
                            _data[row_i][col] = callback(row[col])
                    except IndexError:
                        raise InvalidDatasetIndex
        if self.headers:
            if dicts:
                # One mapping (header -> value) per row.
                data = [dict_pack(list(zip(self.headers, data_row)))
                        for data_row in _data]
            else:
                data = [list(self.headers)] + list(_data)
        else:
            data = [list(row) for row in _data]
        return data
    def _get_headers(self):
        """An *optional* list of strings to be used for header rows and attribute names.
        This must be set manually. The given list length must equal :class:`Dataset.width`.
        """
        return self.__headers
    def _set_headers(self, collection):
        """Validating headers setter."""
        self._validate(collection)
        if collection:
            try:
                self.__headers = list(collection)
            except TypeError:
                # NOTE(review): re-raising a bare TypeError loses the original
                # message; the except clause is effectively a no-op.
                raise TypeError
        else:
            self.__headers = None
    headers = property(_get_headers, _set_headers)
    def _get_dict(self):
        """A native Python representation of the :class:`Dataset` object. If headers have
        been set, a list of Python dictionaries will be returned. If no headers have been
        set, a list of tuples (rows) will be returned instead.
        A dataset object can also be imported by setting the `Dataset.dict` attribute: ::
            data = tablib.Dataset()
            data.dict = [{'age': 90, 'first_name': 'Kenneth', 'last_name': 'Reitz'}]
        """
        return self._package()
    def _set_dict(self, pickle):
        """A native Python representation of the Dataset object. If headers have been
        set, a list of Python dictionaries will be returned. If no headers have been
        set, a list of tuples (rows) will be returned instead.
        A dataset object can also be imported by setting the :class:`Dataset.dict` attribute. ::
            data = tablib.Dataset()
            data.dict = [{'age': 90, 'first_name': 'Kenneth', 'last_name': 'Reitz'}]
        """
        if not len(pickle):
            return
        # if list of rows
        if isinstance(pickle[0], list):
            self.wipe()
            for row in pickle:
                self.append(Row(row))
        # if list of objects
        elif isinstance(pickle[0], dict):
            self.wipe()
            # Keys of the first dict become the headers.
            self.headers = list(pickle[0].keys())
            for row in pickle:
                self.append(Row(list(row.values())))
        else:
            raise UnsupportedFormat
    dict = property(_get_dict, _set_dict)
    @property
    def height(self):
        """The number of rows currently in the :class:`Dataset`.
        Cannot be directly modified.
        """
        return len(self._data)
    @property
    def width(self):
        """The number of columns currently in the :class:`Dataset`.
        Cannot be directly modified.
        """
        try:
            return len(self._data[0])
        except IndexError:
            # No rows yet: fall back to the header length, or 0 if unset.
            try:
                return len(self.headers)
            except TypeError:
                return 0
    def export(self, format, **kwargs):
        """
        Export :class:`Dataset` object to `format`.
        :param format: export format
        :param kwargs: (optional) custom configuration to the format `export_set`.
        """
        fmt = registry.get_format(format)
        if not hasattr(fmt, 'export_set'):
            raise Exception('Format {} cannot be exported.'.format(format))
        return fmt.export_set(self, **kwargs)
    # ----
    # Rows
    # ----
    def insert(self, index, row, tags=None):
        """Inserts a row to the :class:`Dataset` at the given index.
        Rows inserted must be the correct size (height or width).
        The default behaviour is to insert the given row to the :class:`Dataset`
        object at the given index.
        """
        if tags is None:
            tags = list()
        self._validate(row)
        self._data.insert(index, Row(row, tags=tags))
    def rpush(self, row, tags=None):
        """Adds a row to the end of the :class:`Dataset`.
        See :class:`Dataset.insert` for additional documentation.
        """
        if tags is None:
            tags = list()
        self.insert(self.height, row=row, tags=tags)
    def append(self, row, tags=None):
        """Adds a row to the :class:`Dataset`.
        See :class:`Dataset.insert` for additional documentation.
        """
        if tags is None:
            tags = list()
        self.rpush(row, tags)
    def extend(self, rows, tags=None):
        """Adds a list of rows to the :class:`Dataset` using
        :class:`Dataset.append`
        """
        if tags is None:
            tags = list()
        for row in rows:
            self.append(row, tags)
    # ----
    # Misc
    # ----
    def remove_duplicates(self):
        """Removes all duplicate rows from the :class:`Dataset` object
        while maintaining the original order."""
        seen = set()
        # seen.add() returns None (falsy), so the condition keeps the first
        # occurrence of each row and drops later duplicates.
        self._data[:] = [row for row in self._data if
                         not (tuple(row) in seen or seen.add(tuple(row)))]
    def wipe(self):
        """Removes all content and headers from the :class:`Dataset` object."""
        # NOTE(review): _separators and _formatters are not reset here —
        # confirm whether wipe() should clear them too.
        self._data = list()
        self.__headers = None
# Register the built-in import/export formats with the shared registry at import time.
registry.register_builtins()
class InvalidDimensions(Exception):
    """Raised by Dataset._validate when a row or column does not match the dataset's current size."""
class InvalidDatasetIndex(Exception):
    """Raised by Dataset._package when a formatter's column index is outside the dataset."""
class UnsupportedFormat(NotImplementedError):
    """Raised by Dataset._set_dict when the imported data is neither a list of rows nor a list of dicts."""
| [
"vhhareesh947@gmail.com"
] | vhhareesh947@gmail.com |
83a53505bd883a0049978dbdc0ef84340b64458c | ea1e0d6d8a7bd97d7b179f4e7857149012466ecc | /fiches/views.py | 56ad0e8013f4ed8ddb5ced7d18aa14a6d05e15e4 | [] | no_license | eloigrau/templatedjango | 97575b5a098ec24a25b67a9f86a918d70f9e45a4 | 12a1768e92c43469d0127e7202af83e1d4a04321 | refs/heads/master | 2022-12-11T11:48:33.579366 | 2021-03-28T19:30:47 | 2021-03-28T19:30:47 | 237,098,214 | 0 | 0 | null | 2022-12-08T01:23:31 | 2020-01-29T22:51:54 | JavaScript | UTF-8 | Python | false | false | 9,795 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect
from django.urls import reverse_lazy
from .models import Fiche, CommentaireFiche, Choix, Atelier
from .forms import FicheForm, CommentaireFicheForm, FicheChangeForm, AtelierForm, AtelierChangeForm
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView, UpdateView, DeleteView
from django.utils.timezone import now
from pacte.models import Suivis
from actstream import actions, action
def accueil(request):
    """Render the static landing page of the "fiches" app."""
    return render(request, 'fiches/accueil.html')
@login_required
def ajouterFiche(request):
    """Create a new Fiche from POSTed form data.

    On success, logs the creation in the activity stream and redirects to
    the new fiche; otherwise re-renders the creation form.
    """
    form = FicheForm(request.POST or None)
    if form.is_valid():
        fiche = form.save(request.user)
        # Record the event with django-activity-stream.
        action.send(request.user, verb='fiche_nouveau', action_object=fiche, url=fiche.get_absolute_url(),
                    description="a ajouté la fiche: '%s'" % fiche.titre)
        return redirect(fiche.get_absolute_url())
    return render(request, 'fiches/fiche_ajouter.html', { "form": form, })
@login_required
def ajouterAtelier(request, fiche_slug):
    """Attach a new Atelier to the Fiche identified by *fiche_slug*.

    On success, logs the event and redirects to the parent fiche;
    otherwise re-renders the atelier creation form.
    """
    form = AtelierForm(request.POST or None)
    if form.is_valid():
        fiche = Fiche.objects.get(slug=fiche_slug)
        form.save(fiche)
        action.send(request.user, verb='fiche_ajouter_atelier', action_object=fiche, url=fiche.get_absolute_url(),
                    description="a ajouté un atelier à la fiche: '%s'" % fiche.titre)
        return redirect(fiche.get_absolute_url())
    return render(request, 'fiches/atelier_ajouter.html', { "form": form, })
# @login_required
class ModifierFiche(UpdateView):
    """Edit an existing Fiche (looked up by slug).

    Stamps ``date_modification`` and logs the edit to the activity stream
    before redirecting to the fiche's page.
    """
    model = Fiche
    form_class = FicheChangeForm
    template_name_suffix = '_modifier'
    # fields = ['user','site_web','description', 'competences', 'adresse', 'avatar', 'inscrit_newsletter']
    def get_object(self):
        # Slug-based lookup instead of the default pk lookup.
        return Fiche.objects.get(slug=self.kwargs['slug'])
    def form_valid(self, form):
        self.object = form.save()
        self.object.date_modification = now()
        self.object.save()
        action.send(self.request.user, verb='fiche_modifier', action_object=self.object, url=self.object.get_absolute_url(),
                    description="a modifié la fiche: '%s'" % self.object.titre)
        return HttpResponseRedirect(self.object.get_absolute_url())
    def save(self):
        # NOTE(review): UpdateView does not define save(); this super() call
        # looks like dead code — confirm before relying on it.
        return super(ModifierFiche, self).save()
class ModifierAtelier(UpdateView):
    """Edit an existing Atelier (looked up by slug).

    Stamps ``date_modification``, logs the edit, and redirects to the
    parent fiche's page.
    """
    model = Atelier
    form_class = AtelierChangeForm
    template_name_suffix = '_modifier'
    def get_object(self):
        # Slug-based lookup instead of the default pk lookup.
        return Atelier.objects.get(slug=self.kwargs['slug'])
    def form_valid(self, form):
        self.object = form.save()
        self.object.date_modification = now()
        self.object.save()
        action.send(self.request.user, verb='fiche_atelier_modifier', action_object=self.object, url=self.object.get_absolute_url(),
                    description="a modifié l'atelier: '%s'" % self.object.titre)
        return HttpResponseRedirect(self.object.fiche.get_absolute_url())
        #return redirect('lireFiche', slug=self.object.fiche.slug)
    def get_success_url(self):
        return self.object.fiche.get_absolute_url()
    def save(self):
        # NOTE(review): UpdateView does not define save(); this super() call
        # looks like dead code — confirm before relying on it.
        return super(ModifierAtelier, self).save()
class SupprimerFiche(DeleteView):
    """Delete a Fiche (looked up by slug) and return to the fiche index."""
    model = Fiche
    success_url = reverse_lazy('fiches:index')
    template_name_suffix = '_supprimer'
    # fields = ['user','site_web','description', 'competences', 'adresse', 'avatar', 'inscrit_newsletter']
    def get_object(self):
        # Slug-based lookup instead of the default pk lookup.
        return Fiche.objects.get(slug=self.kwargs['slug'])
@login_required
def lireFiche(request, slug):
    """Display one Fiche with its ateliers and comments.

    A valid POSTed comment is saved, the fiche's "last message" preview is
    updated, the event is logged, and the page is reloaded via redirect
    (POST/redirect/GET).
    """
    fiche = get_object_or_404(Fiche, slug=slug)
    commentaires = CommentaireFiche.objects.filter(fiche=fiche).order_by("date_creation")
    ateliers = Atelier.objects.filter(fiche=fiche).order_by("date_creation")
    form_comment = CommentaireFicheForm(request.POST or None)
    if form_comment.is_valid():
        comment = form_comment.save(commit=False)
        comment.fiche = fiche
        comment.auteur_comm = request.user
        # Cache a short "(author) text..." preview on the fiche itself.
        fiche.date_dernierMessage = comment.date_creation
        fiche.dernierMessage = ("(" + str(comment.auteur_comm) + ") " + str(comment.commentaire))[:96] + "..."
        fiche.save()
        comment.save()
        url = fiche.get_absolute_url()
        action.send(request.user, verb='fiche_message', action_object=fiche, url=url,
                    description="a réagi à la fiche: '%s'" % fiche.titre)
        return redirect(request.path)
    return render(request, 'fiches/lireFiche.html', {'fiche': fiche, 'ateliers':ateliers, 'form': form_comment, 'commentaires':commentaires},)
def lireAtelier(request, slug):
    """Display a single Atelier identified by its slug (404 if absent)."""
    context = {'atelier': get_object_or_404(Atelier, slug=slug)}
    return render(request, 'fiches/lireAtelier.html', context)
def lireAtelier_id(request, id):
    """Display a single Atelier identified by its primary key (404 if absent)."""
    context = {'atelier': get_object_or_404(Atelier, id=id)}
    return render(request, 'fiches/lireAtelier.html', context)
class ListeFiches(ListView):
    """Paginated fiche index, filterable by ?categorie=, ?mc= (tag) and
    sortable by ?ordreTri=."""
    model = Fiche
    context_object_name = "fiche_list"
    template_name = "fiches/index.html"
    paginate_by = 30
    def get_queryset(self):
        params = dict(self.request.GET.items())
        qs = Fiche.objects.all()
        if "categorie" in params:
            qs = qs.filter(categorie=params['categorie'])
        if "mc" in params:
            if params['mc']=="essentiels":
                qs = qs.filter(tags__name__in=["essentiel",])
            else:
                # NOTE(review): iterating the 'mc' string yields single
                # characters, so this matches one-character tag names only —
                # verify against how callers build the 'mc' parameter.
                qs = qs.filter(tags__name__in=[cat for cat in params['mc']])
        if "ordreTri" in params:
            qs = qs.order_by(params['ordreTri'])
        else:
            qs = qs.order_by('categorie', 'numero', '-date_dernierMessage', )
        return qs
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Only offer categories that actually occur in the database.
        cat= Fiche.objects.order_by('categorie').values_list('categorie', flat=True).distinct()
        context['categorie_list'] = [x for x in Choix.type_fiche if x[0] in cat]
        context['typeFiltre'] = "aucun"
        context['suivis'], created = Suivis.objects.get_or_create(nom_suivi="articles")
        context['ordreTriPossibles'] = ['-date_creation', '-date_dernierMessage', 'categorie', 'titre' ]
        if 'categorie' in self.request.GET:
            context['typeFiltre'] = "categorie"
            # NOTE(review): [0] raises IndexError for an unknown categorie —
            # confirm the value is always validated upstream.
            context['categorie_courante'] = [x[1] for x in Choix.type_fiche if x[0] == self.request.GET['categorie']][0]
        if 'ordreTri' in self.request.GET:
            context['typeFiltre'] = "ordreTri"
        if "mc" in self.request.GET:
            context['typeFiltre'] = "mc"
        return context
class ListeAteliers(ListView):
    """Paginated atelier index; filters mirror ListeFiches but go through
    the parent fiche (``fiche__``) relation."""
    model = Atelier
    context_object_name = "atelier_list"
    template_name = "fiches/index_ateliers.html"
    paginate_by = 30
    def get_queryset(self):
        params = dict(self.request.GET.items())
        qs = Atelier.objects.all()
        if "categorie" in params:
            qs = qs.filter(fiche__categorie=params['categorie'])
        if "mc" in params:
            if params['mc']=="essentiels":
                qs = qs.filter(fiche__tags__name__in=["essentiel",])
            else:
                # NOTE(review): iterating the 'mc' string yields single
                # characters — same concern as in ListeFiches.
                qs = qs.filter(fiche__tags__name__in=[cat for cat in params['mc']])
        if "ordreTri" in params:
            qs = qs.order_by(params['ordreTri'])
        else:
            qs = qs.order_by('fiche__numero', 'fiche__categorie',)
        return qs
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # Only offer categories that actually occur in the database.
        cat= Fiche.objects.order_by('categorie').values_list('categorie', flat=True).distinct()
        context['categorie_list'] = [x for x in Choix.type_fiche if x[0] in cat]
        context['typeFiltre'] = "aucun"
        context['suivis'], created = Suivis.objects.get_or_create(nom_suivi="articles")
        context['ordreTriPossibles'] = ['-date_creation', '-date_dernierMessage', 'categorie', 'titre' ]
        if 'categorie' in self.request.GET:
            context['typeFiltre'] = "categorie"
            # NOTE(review): [0] raises IndexError for an unknown categorie —
            # confirm the value is always validated upstream.
            context['categorie_courante'] = [x[1] for x in Choix.type_fiche if x[0] == self.request.GET['categorie']][0]
        if 'ordreTri' in self.request.GET:
            context['typeFiltre'] = "ordreTri"
        if "mc" in self.request.GET:
            context['typeFiltre'] = "mc"
        return context
def voirFicheTest(request):
    """Demo view: display a hard-coded test Fiche (no login required).

    Behaves like ``lireFiche`` for that fiche: anonymous visitors get a
    read-only page (no comment form); authenticated users can POST a
    comment, which updates the fiche's "last message" preview and reloads
    the page via redirect (POST/redirect/GET).

    Fixes vs. previous version: the identical ``render`` call that was
    duplicated in both branches is emitted once, and the bare ``except:``
    (which also swallowed KeyboardInterrupt/SystemExit) is narrowed.
    """
    try:
        fiche = get_object_or_404(Fiche, slug="44429d18-6e5e-4609-bb5d-84797a50dad4")
    except Exception:
        # Fall back to a second fixed slug if the primary test fiche is missing.
        fiche = get_object_or_404(Fiche, slug="ezar")
    commentaires = CommentaireFiche.objects.filter(fiche=fiche).order_by("date_creation")
    ateliers = Atelier.objects.filter(fiche=fiche).order_by("date_creation")
    # Anonymous visitors get no comment form.
    form_comment = None
    if request.user.is_authenticated:
        form_comment = CommentaireFicheForm(request.POST or None)
        if form_comment.is_valid():
            comment = form_comment.save(commit=False)
            comment.fiche = fiche
            comment.auteur_comm = request.user
            # Cache a short "(author) text..." preview on the fiche itself.
            fiche.date_dernierMessage = comment.date_creation
            fiche.dernierMessage = ("(" + str(comment.auteur_comm) + ") " + str(comment.commentaire))[:96] + "..."
            fiche.save()
            comment.save()
            return redirect(request.path)
    return render(request, 'fiches/lireFiche.html', {'fiche': fiche, 'ateliers':ateliers, 'form': form_comment, 'commentaires':commentaires},)
| [
"marchelibre@gmx.fr"
] | marchelibre@gmx.fr |
6512252e8f437e9237f39ae69a605cf3e891bd9e | 713720fdc5ead7c8a13476da847e335202d6df03 | /scripts/sorno_realdate.py | 8136704bbfee2e999e640ae3742c2b921d9bf7fc | [
"Apache-2.0"
] | permissive | hermantai/sorno-py-scripts | 26f72ce4f4711e2b3ee57f60c30ee1d9c314304d | 6d1ccaba3c7a259a3bbbe13b39c3abfc951789f8 | refs/heads/master | 2022-04-30T13:02:20.037450 | 2022-04-22T03:29:30 | 2022-04-22T03:30:57 | 16,879,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | #!/usr/bin/env python
"""sorno_realdate.py prints the human readable date for timestamps
Example:
$ sorno_realdate.py 1455223642 1455223642000 1455223642000000 1455223642000000000
1455223642: 2016-02-11 12:47:22-0800 PST in s
1455223642000: 2016-02-11 12:47:22-0800 PST in ms
1455223642000000: 2016-02-11 12:47:22-0800 PST in us
1455223642000000000: 2016-02-11 12:47:22-0800 PST in ns
Copyright 2016 Heung Ming Tai
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import sys
from sornobase import datetimeutil
# strftime pattern for all console output, e.g. "2016-02-11 12:47:22-0800 PST".
_datetime_format = "%Y-%m-%d %H:%M:%S%z %Z"
class RealDateApp(object):
    """Prints a human-readable local datetime for each CLI argument."""
    def __init__(self, args):
        # args: argparse.Namespace with a `numbers` list of strings.
        self.args = args
    def run(self):
        """Print one output line per input; bad inputs never abort the loop."""
        for n in self.args.numbers:
            try:
                n = int(n)
                # Integer input: treated as an epoch timestamp; the helper
                # also reports the inferred unit (s/ms/us/ns).
                dt, unit = datetimeutil.number_to_local_datetime(n)
                print(
                    "%s: %s in %s" % (
                        n,
                        dt.strftime(_datetime_format),
                        unit,
                    )
                )
            except ValueError:
                # Non-integer input: let the helper guess the datetime.
                try:
                    print(
                        "%s: %s" % (
                            n,
                            datetimeutil.guess_local_datetime(n).strftime(
                                _datetime_format
                            ),
                        )
                    )
                except:
                    # NOTE(review): bare except keeps the loop best-effort but
                    # also catches KeyboardInterrupt/SystemExit — consider
                    # narrowing to `except Exception`.
                    print("%s: invalid datetime" % n)
def parse_args(cmd_args):
    """Parse *cmd_args* (argv without the program name) into a Namespace.

    The parser's description is the module docstring truncated at the
    copyright notice; one or more positional ``numbers`` are required.
    """
    description = __doc__.split("Copyright 2016")[0].strip()
    arg_parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument("numbers", metavar="number", nargs="+")
    return arg_parser.parse_args(cmd_args)
def main():
    """CLI entry point: parse argv and run the app."""
    args = parse_args(sys.argv[1:])
    app = RealDateApp(args)
    # run() returns None, so sys.exit(None) exits with status 0.
    sys.exit(app.run())
if __name__ == '__main__':
    main()
| [
"htaihm@gmail.com"
] | htaihm@gmail.com |
e5ba62b5f8329a38c865fda67e7a23bb28064ad6 | b0232c7856879f720ca9f64ca4df5096a0c34356 | /factoryTests.py | 774a67c13adc3b380ca174425d61317a4dd5a8f9 | [] | no_license | extramask93/CEVA_code_gen | 2d7d614741edfe68fd9a29f4ae6960ae15985ebf | e2630c096840605d7f986c1db863ea3d213eb6a6 | refs/heads/master | 2020-03-23T15:05:22.158796 | 2018-09-06T16:10:57 | 2018-09-06T16:10:57 | 141,721,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import pytest
from decoder import TokenFactory, RegularRegister, VectorRegister, RegularImmediateToken, MultiTokenVPU, ModuRegister
def test_factorytest():
a= TokenFactory("rA.ui")
assert isinstance(a,RegularRegister)
a = TokenFactory("vrB1.s16")
assert isinstance(a, VectorRegister)
a = TokenFactory("#uimmA32")
assert isinstance(a,RegularImmediateToken)
a = TokenFactory("#immB5")
assert isinstance(a, RegularImmediateToken)
a = TokenFactory("(rN.ui+rM.i).di")
assert isinstance(a,MultiTokenVPU)
a= TokenFactory("moduA.ui")
assert isinstance(a, ModuRegister)
assert a.GetType() == "UI" | [
"jozwiak.damian02@gmail.com"
] | jozwiak.damian02@gmail.com |
9a9e8b7d9284574442ab7b8a10207055a4e065fd | fa07f9ff0c833746a4195a9092f5831e1126b684 | /03逻辑回归/tool/Read_Minist_Tool.py | 20e00b911aaddbd62cbe3738177e435b941c794e | [] | no_license | shiqiuwang/ML_basic_model | 76c3b755dda772031bfba22860ee61bb2ea286fc | b6d7350332f3ef32ccc5dc69f81b629c5bcdd349 | refs/heads/master | 2023-03-23T10:23:08.130357 | 2021-03-20T16:43:30 | 2021-03-20T16:43:30 | 348,405,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,341 | py | # coding=utf-8
import numpy as np
import struct
import matplotlib.pyplot as plt
# 训练集文件
train_images_idx3_ubyte_file = 'data/minist/train-images.idx3-ubyte'
# 训练集标签文件
train_labels_idx1_ubyte_file = 'data/minist/train-labels.idx1-ubyte'
# 测试集文件
test_images_idx3_ubyte_file = 'data/minist/t10k-images.idx3-ubyte'
# 测试集标签文件
test_labels_idx1_ubyte_file = 'data/minist/t10k-labels.idx1-ubyte'
def decode_idx3_ubyte(idx3_ubyte_file):
"""
解析idx3文件的通用函数
:param idx3_ubyte_file: idx3文件路径
:return: 数据集
"""
# 读取二进制数据
bin_data = open(idx3_ubyte_file, 'rb').read()
# 解析文件头信息,依次为魔数、图片数量、每张图片高、每张图片宽
offset = 0
fmt_header = '>iiii' #因为数据结构中前4行的数据类型都是32位整型,所以采用i格式,但我们需要读取前4行数据,所以需要4个i。我们后面会看到标签集中,只使用2个ii。
magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
print('魔数:%d, 图片数量: %d张, 图片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))
# 解析数据集
image_size = num_rows * num_cols
offset += struct.calcsize(fmt_header) #获得数据在缓存中的指针位置,从前面介绍的数据结构可以看出,读取了前4行之后,指针位置(即偏移位置offset)指向0016。
print(offset)
fmt_image = '>' + str(image_size) + 'B' #图像数据像素值的类型为unsigned char型,对应的format格式为B。这里还有加上图像大小784,是为了读取784个B格式数据,如果没有则只会读取一个值(即一副图像中的一个像素值)
print(fmt_image,offset,struct.calcsize(fmt_image))
images = np.empty((num_images, num_rows, num_cols))
#plt.figure()
for i in range(num_images):
if (i + 1) % 10000 == 0:
print('已解析 %d' % (i + 1) + '张')
print(offset)
images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
#print(images[i])
offset += struct.calcsize(fmt_image)
# plt.imshow(images[i],'gray')
# plt.pause(0.00001)
# plt.show()
#plt.show()
return images
def decode_idx1_ubyte(idx1_ubyte_file):
"""
解析idx1文件的通用函数
:param idx1_ubyte_file: idx1文件路径
:return: 数据集
"""
# 读取二进制数据
bin_data = open(idx1_ubyte_file, 'rb').read()
# 解析文件头信息,依次为魔数和标签数
offset = 0
fmt_header = '>ii'
magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))
# 解析数据集
offset += struct.calcsize(fmt_header)
fmt_image = '>B'
labels = np.empty(num_images)
for i in range(num_images):
if (i + 1) % 10000 == 0:
print ('已解析 %d' % (i + 1) + '张')
labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
offset += struct.calcsize(fmt_image)
return labels
def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
"""
TRAINING SET IMAGE FILE (train-images-idx3-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
:param idx_ubyte_file: idx文件路径
:return: n*row*col维np.array对象,n为图片数量
"""
return decode_idx3_ubyte(idx_ubyte_file)
def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
"""
TRAINING SET LABEL FILE (train-labels-idx1-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 60000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
The labels values are 0 to 9.
:param idx_ubyte_file: idx文件路径
:return: n*1维np.array对象,n为图片数量
"""
return decode_idx1_ubyte(idx_ubyte_file)
def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
"""
TEST SET IMAGE FILE (t10k-images-idx3-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 10000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
:param idx_ubyte_file: idx文件路径
:return: n*row*col维np.array对象,n为图片数量
"""
return decode_idx3_ubyte(idx_ubyte_file)
def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
"""
TEST SET LABEL FILE (t10k-labels-idx1-ubyte):
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 10000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
The labels values are 0 to 9.
:param idx_ubyte_file: idx文件路径
:return: n*1维np.array对象,n为图片数量
"""
return decode_idx1_ubyte(idx_ubyte_file)
| [
"2714049089@qq.com"
] | 2714049089@qq.com |
7e6b235d3a48117b856cbec0dd3b7ae0d244ff98 | d5bf16ecfba523d15eb2fc467c85c639a511a555 | /createRenderFilesForDepths.py | 0944bcffba98560958361d2a9ace8b8c0c6a2487 | [] | no_license | chenkeshuai/TransparentShapeDatasetCreation | b5bbedcff5eb2bd8c25d51c40c756b252033a862 | 9c1ca853e02cec26ce1ca86b7a73deeff24625d9 | refs/heads/master | 2022-11-21T22:24:20.674760 | 2020-07-14T04:08:33 | 2020-07-14T04:08:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,681 | py | import os.path as osp
import numpy as np
import glob
import xml.etree.ElementTree as et
from xml.dom import minidom
import argparse
def addShape(root, name ):
shape = et.SubElement(root, 'shape' )
shape.set('id', '{0}_object'.format(name) )
shape.set('type', 'obj' )
stringF = et.SubElement(shape, 'string' )
stringF.set('name', 'filename' )
stringF.set('value', '{0}'.format(name) )
bsdf = et.SubElement(shape, 'bsdf')
bsdf.set('id', 'mat')
bsdf.set('type', 'dielectric' )
specularR = et.SubElement(bsdf, 'rgb')
specularR.set('name', 'specularReflectance')
specularR.set('value', '1.0 1.0 1.0')
specularT = et.SubElement(bsdf, 'rgb')
specularT.set('name', 'specularTransmittance')
specularT.set('value', '1.0 1.0 1.0')
intInd = et.SubElement(bsdf, 'float')
intInd.set('name', 'intIOR')
intInd.set('value', '%.4f' % intIOR )
extInd = et.SubElement(bsdf, 'float')
extInd.set('name', 'extIOR')
extInd.set('value', '%.4f' % extIOR )
return root
def transformToXml(root ):
rstring = et.tostring(root, 'utf-8')
pstring = minidom.parseString(rstring)
xmlString = pstring.toprettyxml(indent=" ")
xmlString= xmlString.split('\n')
xmlString = [x for x in xmlString if len(x.strip()) != 0 ]
xmlString = '\n'.join(xmlString )
return xmlString
def addSensor(root, fovValue, imWidth, imHeight, sampleCount):
camera = et.SubElement(root, 'sensor')
camera.set('type', 'perspective')
fov = et.SubElement(camera, 'float')
fov.set('name', 'fov')
fov.set('value', '%.4f' % (fovValue) )
fovAxis = et.SubElement(camera, 'string')
fovAxis.set('name', 'fovAxis')
fovAxis.set('value', 'x')
film = et.SubElement(camera, 'film')
film.set('type', 'hdrfilm')
width = et.SubElement(film, 'integer')
width.set('name', 'width')
width.set('value', '%d' % (imWidth) )
height = et.SubElement(film, 'integer')
height.set('name', 'height')
height.set('value', '%d' % (imHeight) )
sampler = et.SubElement(camera, 'sampler')
sampler.set('type', 'adaptive')
sampleNum = et.SubElement(sampler, 'integer')
sampleNum.set('name', 'sampleCount')
sampleNum.set('value', '%d' % (sampleCount) )
return root
parser = argparse.ArgumentParser()
parser.add_argument('--imNum', type=int, default=75, help='the number of images')
parser.add_argument('--imWidth', type=int, default=480, help='image width')
parser.add_argument('--imHeight', type=int, default=360, help='image height')
parser.add_argument('--sampleCount', type=int, default=64, help='the number of samples')
parser.add_argument('--fov', type=float, default=63.4149, help='the field of view in x axis')
parser.add_argument('--dist', type=float, default=3.0, help='the distance from camera to the target point')
parser.add_argument('--mode', default='train')
parser.add_argument('--fileRoot', default='./Shapes/', help='path to the file root')
parser.add_argument('--rs', default = 0, type=int, help='the starting point')
parser.add_argument('--re', default = 10, type=int, help='the end point')
parser.add_argument('--intIOR', type=float, default=1.4723, help='the index of refraction of glass')
parser.add_argument('--extIOR', type=float, default=1.0003, help='the index of refraction of air')
opt = parser.parse_args()
print(opt )
imNum = opt.imNum
imHeight = opt.imHeight
imWidth = opt.imWidth
fovValue = opt.fov
sampleCount = opt.sampleCount
dist = opt.dist
rs = opt.rs
re = opt.re
fileRoot = opt.fileRoot
intIOR = opt.intIOR
extIOR = opt.extIOR
mode = opt.mode
shapes = glob.glob(osp.join(fileRoot, mode, 'Shape*' ) )
for n in range(rs, min(re, len(shapes) ) ):
shapeRoot = osp.join(fileRoot, mode, 'Shape__%d' % n )
print('%d/%d: %s' % (n, min(re, len(shapes) ), shapeRoot ) )
if osp.isfile(osp.join(shapeRoot, 'depth.xml') ):
print('Warning: %s already exists' % shapeRoot )
continue
# Create rendering file for Depth maps
root = et.Element('scene')
root.set('version', '0.5.0')
integrator = et.SubElement(root, 'integrator')
integrator.set('type', 'path')
# Add shape
addShape(root, 'object.obj' )
# Add sensor
root = addSensor(root, fovValue, imWidth, imHeight, sampleCount)
# Output xml file
xmlString = transformToXml(root )
with open(osp.join(shapeRoot, 'depth.xml'), 'w') as xmlOut:
xmlOut.write(xmlString )
# Generate camera file
target = np.array([0.0, 0.0, 0.0], dtype = np.float32 )
originSeed = np.random.random( (imNum, 2) ).astype(np.float32 )
phi = originSeed[:, 0:1] * np.pi * 2
theta = np.arccos(2 * originSeed[:, 1:2] - 1 )
origin = np.concatenate([
np.sin(theta ) * np.cos(phi ),
np.sin(theta ) * np.sin(phi ),
np.cos(theta ) ], axis=1 )
origin = origin * dist
xaxis = np.zeros((imNum * 3), dtype=np.float32 )
indexAxis = np.arange(0, imNum ) * 3 + np.argmin(np.abs(origin), axis=1 )
xaxis[indexAxis ] = 1.0
xaxis = xaxis.reshape(imNum, 3 )
up = np.concatenate([
xaxis[:, 1:2] * origin[:, 2:3] - xaxis[:, 2:3] * origin[:, 1:2],
xaxis[:, 2:3] * origin[:, 0:1] - xaxis[:, 0:1] * origin[:, 2:3],
xaxis[:, 0:1] * origin[:, 1:2] - xaxis[:, 1:2] * origin[:, 0:1]
], axis = 1 )
up = up / np.sqrt(np.sum(up * up, axis = 1) )[:, np.newaxis ]
with open(osp.join(shapeRoot, 'depthCam.txt'), 'w') as camOut:
camOut.write('%d\n' % imNum )
for n in range(0, imNum ):
camOut.write('%.4f %.4f %.4f\n' % (origin[n, 0], origin[n, 1], origin[n, 2] ) )
camOut.write('%.4f %.4f %.4f\n' % (target[0], target[1], target[2] ) )
camOut.write('%.4f %.4f %.4f\n' % (up[n, 0], up[n, 1], up[n, 2] ) )
# Generate log file
with open(osp.join(shapeRoot, 'depthLogPoint.txt'), 'w') as logOut:
for n in range(0, imNum ):
logOut.write('%d %d %d\n' % (0, 0, n+1) )
y = up[n, :]
z = origin[n, :]
x = np.cross(y, z)
y = y / np.sqrt(np.sum(y*y ) )
z = z / np.sqrt(np.sum(z*z ) )
x = x / np.sqrt(np.sum(x*x ) )
rot = np.concatenate([x[:, np.newaxis], \
y[:, np.newaxis], z[:, np.newaxis] ], axis=1 )
logOut.write('%.6f %.6f %.6f %.6f\n' % (rot[0, 0], rot[0, 1], rot[0, 2], origin[n, 0]) )
logOut.write('%.6f %.6f %.6f %.6f\n' % (rot[1, 0], rot[1, 1], rot[1, 2], origin[n, 1]) )
logOut.write('%.6f %.6f %.6f %.6f\n' % (rot[2, 0], rot[2, 1], rot[2, 2], origin[n, 2]) )
logOut.write('%.6f %.6f %.6f %.6f\n' % (0, 0, 0, 1) )
| [
"zhl378@eng.ucsd.edu"
] | zhl378@eng.ucsd.edu |
46fb50be72246b32790681ebd25f5daffa7354ac | 649c17052feedff130bcb3283a0c3ae9a66c9669 | /tests/test_iris_tree.py | 7b2465ee3b96607988029ee2984062e31bae0e2f | [
"MIT"
] | permissive | drevilslab/pykitml | abb04d4aaafe3712ec24e9a9e38ed0b65c4cbb8d | 1c3e50cebcdb6c4da63979ef9a812b44d23a4857 | refs/heads/master | 2023-01-16T03:29:14.762110 | 2020-11-02T11:03:55 | 2020-11-02T11:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | from pykitml.testing import pktest_graph, pktest_nograph
@pktest_graph
def test_iris_tree():
import numpy as np
import pykitml as pk
from pykitml.datasets import iris
# Load iris data set
inputs_train, outputs_train, inputs_test, outputs_test = iris.load()
# Create model
tree_iris_classifier = pk.DecisionTree(4, 3, max_depth=5, feature_type=['continues']*4)
# Train
tree_iris_classifier.train(inputs_train, outputs_train)
# Save it
pk.save(tree_iris_classifier, 'tree_iris_classifier.pkl')
# Print accuracy
accuracy = tree_iris_classifier.accuracy(inputs_train, outputs_train)
print('Train accuracy:', accuracy)
accuracy = tree_iris_classifier.accuracy(inputs_test, outputs_test)
print('Test accuracy:', accuracy)
# Plot confusion matrix
tree_iris_classifier.confusion_matrix(inputs_test, outputs_test,
gnames=['Setosa', 'Versicolor', 'Virginica'])
# Plot decision tree
tree_iris_classifier.show_tree()
# Assert accuracy
assert (tree_iris_classifier.accuracy(inputs_train, outputs_train)) >= 98
@pktest_nograph
def test_predict_iris_tree():
import numpy as np
import pykitml as pk
# Predict type of species with
# sepal-length sepal-width petal-length petal-width
# 5.8, 2.7, 3.9, 1.2
input_data = np.array([5.8, 2.7, 3.9, 1.2])
# Load the model
tree_iris_classifier = pk.load('tree_iris_classifier.pkl')
# Get output
tree_iris_classifier.feed(input_data)
model_output = tree_iris_classifier.get_output_onehot()
# Print result
print(model_output)
if __name__ == '__main__':
try:
test_iris_tree.__wrapped__()
test_predict_iris_tree.__wrapped__()
except AssertionError:
pass | [
"vishnu.vish.shankar@gmail.com"
] | vishnu.vish.shankar@gmail.com |
f6c5e331691fb14a8ca15b8f18c3deaa42168845 | 55813c77666d798e806a280f3305c6e6c6531df9 | /desktop/main.py | 91963db43d6eb5d80423c2217ecd5e533ef0aba5 | [] | no_license | cwwang2/EmbeddedProto_Example_STM32_UART | d80ec11ecc81bd8f6e938e71c49f566067bb805d | d63952eb5aad5b15f9922aa5b6be8f695cdf0a62 | refs/heads/master | 2023-03-25T03:34:22.560786 | 2021-03-23T13:10:15 | 2021-03-23T13:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,840 | py | #
# Copyright (C) 2020 Embedded AMS B.V. - All Rights Reserved
#
# This file is part of Embedded Proto.
#
# Embedded Proto is open source software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, version 3 of the license.
#
# Embedded Proto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Embedded Proto. If not, see <https://www.gnu.org/licenses/>.
#
# For commercial and closed source application please visit:
# <https://EmbeddedProto.com/license/>.
#
# Embedded AMS B.V.
# Info:
# info at EmbeddedProto dot com
#
# Postal address:
# Johan Huizingalaan 763a
# 1066 VH, Amsterdam
# the Netherlands
#
import serial
import argparse
from generated import uart_messages_pb2
def print_control_keys():
print("Command keys are:")
print("To move around press:")
print(" w")
print("a s d")
print("")
print("To try to grab your price press \"g\"")
print("If your done press \"Q\"")
def process_cmd_input():
send_command = True
quit = False
msg = uart_messages_pb2.Command()
msg.value = 1
char = input("Next command: ")
if "w" == char:
msg.button = uart_messages_pb2.Command.Up
elif "s" == char:
msg.button = uart_messages_pb2.Command.Down
elif "a" == char:
msg.button = uart_messages_pb2.Command.Left
elif "d" == char:
msg.button = uart_messages_pb2.Command.Right
elif "g" == char:
msg.button = uart_messages_pb2.Command.Grab
elif "Q" == char:
msg.button = uart_messages_pb2.Command.Stop
# Stop the loop
quit = True
else:
send_command = False
print_control_keys()
if send_command:
return msg, quit
else:
return None, quit
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--com', default="/dev/ttyACM0", help='The desired comport to open')
args = parser.parse_args()
# Try to open the serial port with the default baud rate.
with serial.Serial(args.com, 115200, timeout=1) as ser:
print_control_keys()
running = True
while running:
command, quit = process_cmd_input()
running = not quit
if command:
b = bytearray()
# Serialize the command message and send it over uart.
command_str = command.SerializeToString()
# First send the length of the message.
l = len(command_str)
b.extend(l.to_bytes(1, byteorder='little'))
# Next send the actual data
b.extend(command_str)
ser.write(b)
# Await a reply.
# First the length of the message.
length_bytes = ser.read(1)
length = int.from_bytes(length_bytes, byteorder="little")
if 0 < length:
# Next the actual data
bytes = ser.read(length)
# Check if we have received all bytes.
if length == len(bytes):
reply = uart_messages_pb2.Reply()
reply.ParseFromString(bytes)
# Do something with the reply.
if reply.price:
print("We have a winner!")
else:
print("x pos: " + str(reply.x_pos))
print("y pos: " + str(reply.y_pos))
| [
"Bart@EmbeddedAMS.nl"
] | Bart@EmbeddedAMS.nl |
e7a9f35ebb77c9eba8512429cdbcd7299268d354 | 954e077df69e90f3f3742276d0644485514b2de5 | /PyBank/main.py | 42053b8d79d5dd2d5b3cba5f207f27bc76e916a8 | [] | no_license | Vilma0228/python-challenge | d5cce1bbfcacfacd80daaf39067882c540d51c8b | 4fddf4f507d5c314f328b2f839c7f4e611dd2a04 | refs/heads/master | 2020-03-14T15:08:12.242787 | 2018-05-05T23:31:10 | 2018-05-05T23:31:10 | 131,669,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | import os
import csv
#Files to load
PyBankpath = "budget_data_1.csv"
#Read the csv and convert to dictionaries
with open (PyBankpath) as revenue_data:
reader = csv.reader(revenue_data)
#Use the next to skip first title row
next (reader)
revenue = []
date = []
rev_change = []
#Loop to generate sum of column 1
for row in reader:
revenue.append(float(row[1]))
date.append(row[0])
print("Financial Analysis")
print("-----------------------------------")
print("Total Months:", len(date))
print("Total Revenue: $", sum(revenue))
#Loop total of difference between all row of column "Revenue" and found Total Revenue change.
for i in range(1,len(revenue)):
rev_change.append(revenue[i] - revenue[i-1])
avg_rev_change = sum(rev_change)/len(rev_change)
# Also found out max revenue change and min revenue change.
max_rev_change = max(rev_change)
min_rev_change = min(rev_change)
max_rev_change_date = str(date[rev_change.index(max(rev_change))])
min_rev_change_date = str(date[rev_change.index(min(rev_change))])
print("Avereage Revenue Change: $", round(avg_rev_change))
print("Greatest Increase in Revenue:", max_rev_change_date,"($", max_rev_change,")")
print("Greatest Decrease in Revenue:", min_rev_change_date,"($", min_rev_change,")")
| [
"vilma0228@gmail.com"
] | vilma0228@gmail.com |
8785bfa65e79ad99e0867513d746ace94f2107d6 | 43d33fe29c2d75b4612d01e804ff9aa96683e592 | /day3/part2.py | bef750872601af0ccf84e0c5a5ebbc32c1b9b250 | [
"Unlicense"
] | permissive | fvdnabee/aoc20 | b2a7d0950086e3ab61821f23de6c3055cb0cafde | e4c9d31aaead998bad9f0a53612ab731b66933da | refs/heads/main | 2023-02-05T16:01:15.342636 | 2020-12-25T13:27:49 | 2020-12-25T13:27:49 | 318,639,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | with open('input') as fp:
grid = [line[:-1] for line in fp]
grid_width = len(grid[-1])
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
slopes_trees = []
slopes_trees_multiplied = 1
for r, l in slopes:
n_trees = 0
x, y = (0, 0)
while y < len(grid):
if grid[y][x % grid_width] == '#':
n_trees += 1
x += r
y += l
slopes_trees_multiplied *= n_trees
slopes_trees.append(n_trees)
print(slopes_trees)
print(slopes_trees_multiplied)
| [
"floris@vdna.be"
] | floris@vdna.be |
3c60cb7e069f959fd27d7c4354d02b1fad18bef8 | b31674c022fd03aee6b0d3f2c1a3b4c901b26c42 | /Scripts/environment.py | 5ca5b935c43c919822881bec6de9d7bb57e9aa77 | [] | no_license | sdfgeoff/geodesic-chess | d033512ecd4b90428901a6d287de3f477862927d | ddba711af70bb8f443b98c2a1822f8e5c87638b2 | refs/heads/master | 2021-01-13T05:12:52.109560 | 2017-05-15T03:39:06 | 2017-05-15T03:39:06 | 81,260,952 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | import bge
def test(cont):
'''This is used to test the enviroment class from Camera.blend. It checks
to see if the game object exists, and if it does not, runs the environment'''
# When you create a game, it puts itself in the global dictionary
if 'GAME' not in bge.logic.globalDict:
# Make this function run every frame by adjusting the sensor driving it
cont.sensors[0].usePosPulseMode = True
cont.sensors[0].skippedTicks = 0
# If the environment object does not exist, create it
if 'ENVIRONMENT' not in cont.owner:
cont.owner['ENVIRONMENT'] = Environment(cont.owner)
else:
# Otherwise run
cont.owner['ENVIRONMENT'].update()
class Environment(object):
'''This object represents the backdrop. It has to follow the position
of the active_camera.'''
def __init__(self, enviroment_object):
# This obj is the backdrop for the scene
self.obj = enviroment_object
def update(self):
'''Move the object to match the position of the camera'''
# Get the camera from the environments scene
camera = self.obj.scene.active_camera
# Match their positions
self.obj.worldPosition = camera.worldPosition
| [
"sdfgeoff@gmail.com"
] | sdfgeoff@gmail.com |
bd6fe3a09008d2af126731dc0dce5e67b38920d3 | 5b89963fedfd6caed58c9f08085abf4ccdf07d3e | /MLP_from_scratch/prediction.py | 94b73fb7cba56a80fad395c0709ac38ad0f6b9c0 | [
"MIT"
] | permissive | THINK989/Neural-Network-from-Scratch | 0c0158e6327b6c95c66313fba5a4d83188904f93 | cf410d45c4306b835b354bb19fb3051575d486fc | refs/heads/master | 2020-07-06T10:57:09.591170 | 2020-05-01T15:29:35 | 2020-05-01T15:29:35 | 202,993,986 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | import numpy as np
from propagation import *
def predict(X, y, parameters):
"""
This function is used to predict the results of a n-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
n = len(parameters) // 2 # number of layers in the neural network
p = np.zeros((1,m))
# Forward propagation
probas, caches = forward_propagation(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, probas.shape[1]):
if probas[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
#print results
#print ("predictions: " + str(p))
#print ("true labels: " + str(y))
print("Accuracy: " + str(np.sum((p == y)/m)))
return p
| [
"noreply@github.com"
] | noreply@github.com |
c5af1b1c066c1a49573ef543d9894567d5909df2 | 9e482a31dd8c1661ad1953d7fbd24a532306f58c | /Plays/Play10/medium_batch_normalization.py | 2d0166d6dce71a2e1ec99d39a56e366341b49c99 | [] | no_license | margaridav27/feup-fpro | 49a66f6643c83adb948ff110f522948f43508519 | e805e08d0fdd273db272300e3e9676c585030f23 | refs/heads/master | 2023-01-23T01:16:11.534704 | 2020-11-25T10:48:00 | 2020-11-25T10:48:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 10:14:35 2019
@author: Margarida Viera
"""
#solution using generator
def batch_norm(alist, batch_size):
while len(alist) > 0:
batch = []
l = alist.copy()
if len(alist) < batch_size:
batch_size = len(alist)
for i in range(batch_size):
batch.append(l[i])
alist.remove(l[i])
from statistics import median
med = median(batch)
for n in range(len(batch)):
batch[n] -= med
yield batch
#solution using normal function return
def batch_norm(alist, batch_size):
batches = []
while len(alist) > batch_size:
batches.append(alist[:batch_size])
alist = alist[batch_size:]
if len(alist) != 0:
batches.append(alist)
from statistics import median
for batch in batches:
med = median(batch)
for n in range(len(batch)):
batch[n] -= med
return batches
print(batch_norm([10, 1, -12, 5, 1, 3, 7, 3, 3], 5)) | [
"up201907907@fe.up.pt"
] | up201907907@fe.up.pt |
f1df3479147b367dfc6cc0d007b4386d3a0e7fa8 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/express_route_circuit_authorization_py3.py | b4bb3df4dd52a8366e8cb7b9b1d60fa77e023bf0 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,559 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ExpressRouteCircuitAuthorization(SubResource):
"""Authorization in an ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param authorization_key: The authorization key.
:type authorization_key: str
:param authorization_use_status: AuthorizationUseStatus. Possible values
are: 'Available' and 'InUse'. Possible values include: 'Available',
'InUse'
:type authorization_use_status: str or
~azure.mgmt.network.v2017_03_01.models.AuthorizationUseStatus
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'authorization_use_status': {'key': 'properties.authorizationUseStatus', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, authorization_key: str=None, authorization_use_status=None, provisioning_state: str=None, name: str=None, **kwargs) -> None:
super(ExpressRouteCircuitAuthorization, self).__init__(id=id, **kwargs)
self.authorization_key = authorization_key
self.authorization_use_status = authorization_use_status
self.provisioning_state = provisioning_state
self.name = name
self.etag = None
| [
"noreply@github.com"
] | noreply@github.com |
20c78b60b815c01583da61d8d071a7b4e1735589 | dbf8768bb3818b4003f2e34ff561afb235a3734a | /Python/Templates/Django/ProjectTemplates/Python/Web/PollsDjango/app-admin.py | 898eb59ef40b51c7c1a179375f53430f2d5f5b8c | [
"Apache-2.0"
] | permissive | wjk/PTVS | bf3880198ba35ae34b12872a86fe2b03d2a82180 | 184b6711a8700a7f9d78f6d6ac3b225f81a8b8b8 | refs/heads/master | 2020-12-14T16:11:40.486645 | 2020-01-17T20:45:15 | 2020-01-17T20:45:15 | 234,801,602 | 1 | 0 | Apache-2.0 | 2020-01-18T21:41:27 | 2020-01-18T21:41:26 | null | UTF-8 | Python | false | false | 669 | py | """
Customizations for the Django administration interface.
"""
from django.contrib import admin
from app.models import Choice, Poll
class ChoiceInline(admin.TabularInline):
"""Choice objects can be edited inline in the Poll editor."""
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
"""Definition of the Poll editor."""
fieldsets = [
(None, {'fields': ['text']}),
('Date information', {'fields': ['pub_date']}),
]
inlines = [ChoiceInline]
list_display = ('text', 'pub_date')
list_filter = ['pub_date']
search_fields = ['text']
date_hierarchy = 'pub_date'
admin.site.register(Poll, PollAdmin)
| [
"steve.dower@microsoft.com"
] | steve.dower@microsoft.com |
1f9b7ac363f4e5ba9f5d25efa087f724e9cfa14e | 67254af1798c9715e55a0a1457ddf4520210d22f | /2021届秋招/leetcode/链表/k个一组实现链表.py | 7ef2beb9af6cec115525bf45301eee097da38c4c | [] | no_license | Aries5522/daily | 0a3160494263871c361c1efa07e47899a7f2b4f7 | d31a2c92e62b1699cda2ffa39395df5b372445b8 | refs/heads/master | 2021-08-10T11:49:17.954283 | 2020-09-07T15:39:41 | 2020-09-07T15:39:41 | 217,962,036 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def list2Node(nums):
root = cur = ListNode(None)
for i in nums:
node = ListNode(i)
cur.next = node
cur = cur.next
return root.next
def print_(head):
res = []
while head:
res.append(head.val)
head = head.next
if len(res) > 15:
break
print(res)
def kgropu_reverse(head, k):
if k == 1: return head
res = pre=ListNode(None)
res.next = head
len_ = 0
cur = head
while cur: ##先看链表长度
len_ += 1
cur = cur.next
cur = head
# print(len_)
for i in range(len_ // k): ##总的翻转次数
for j in range(k - 1): ## 每次内部翻转
temp = cur.next
cur.next = temp.next
temp.next = pre.next
pre.next = temp
pre = cur
cur = cur.next
return res.next
print_(kgropu_reverse(list2Node([1, 2, 3, 4, 5]), 3))
| [
"Aries5522"
] | Aries5522 |
1d67381581a090003013177bdf95c0a14cd224f9 | c7a8be023b072f8a9eab0404556e37ba4ba8857e | /My_Django_Stuff/myDjangoEnv/lib/python3.6/_weakrefset.py | e4ebae2415877da84a02d1bd8072301185336225 | [] | no_license | jbastawrose120/DjangoTutorial | efdbe6c83b7b6ba9024866aee11f5a0e66708f21 | 20a03fb98da26adfc2559cd95598a305b275540b | refs/heads/master | 2020-04-30T05:06:11.414533 | 2019-05-24T02:14:07 | 2019-05-24T02:14:07 | 176,622,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | F:/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/_weakrefset.py | [
"jbastawrose120@gmail.com"
] | jbastawrose120@gmail.com |
762c929c9132c0c6dcb7e70375cba733b0af371f | 8e408d2efa879cc60bf2898e6705c91aae303097 | /class10/timeseries.py | 51c23d6a5dde47115a376edef7c5d4738769c4df | [] | no_license | mschachter/ml-biophysics-2014 | ea1f05a68657f7a163511f3e16be835b94e530c3 | d120e2acb76734655b5a4dc48b4cb36620b8dad8 | refs/heads/master | 2020-05-30T14:04:37.931556 | 2014-11-06T18:48:11 | 2014-11-06T18:48:11 | 24,829,676 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,778 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
def generate_sine_wave(freqs=[1.0, 2.0, 5.0], duration=5.0, sample_rate=1e3, plot=True):
#generate vector that represents time
num_samps = int(duration*sample_rate)
t = np.arange(num_samps) / sample_rate
#generate sine wave
y = np.zeros([len(t)])
for freq in freqs:
y += np.sin(2*np.pi*t*freq)
if plot:
#plot the sine wave
plt.figure()
plt.plot(t, y, 'c-', linewidth=2.0)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
return t,y
def generate_ar_process(weights=[-0.3, 0.5], duration=0.050, sample_rate=1e3, plot=True):
#generate vector that represents time
num_samps = int(duration*sample_rate)
t = np.arange(num_samps) / sample_rate
#generate the series
y = np.zeros([num_samps])
#generate a random starting point
y[0] = np.random.randn()
for k in range(1, num_samps):
#determine the number of previous time points available
nw = min(k+1, len(weights))
#multiply each weight by the previous point in the series
for j in range(nw):
y[k] += y[k-(j+1)]*weights[j]
if plot:
plt.figure()
plt.plot(t, y, 'g-', linewidth=2.0)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
return t,y
def run(transition_function, initial_value, nsteps=100):
""" Simulate a system using a difference equation.
transition_function: The right hand side of the difference equation.
initial_value: The starting value for the state.
nsteps: The number of steps to run the system for.
"""
x = np.zeros([nsteps])
x[0] = initial_value
for k in range(1, nsteps):
x[k] = transition_function(x[k-1])
return x
def difference_equation_examples(logistic_map_r=3.86):
    """ Show some examples of difference equations in a 3-panel figure.

        Panel 1: x[t+1] = cos(x[t]), which converges to a fixed point.
        Panel 2: the logistic map with r fixed at 3.57 (onset of chaos);
            note this panel does NOT use the function's parameter.
        Panel 3: the logistic map with r = logistic_map_r.

        logistic_map_r: The value of r to give the logistic
            map in the third plot only. Defaults to 3.86, which
            creates chaotic dynamics.
    """
    plt.figure()
    plt.subplot(3, 1, 1)
    #fixed-point example: iterating cos settles near 0.739
    x = run(np.cos, initial_value=1.0, nsteps=20)
    plt.plot(x, 'k-')
    plt.axis('tight')
    plt.title('$x_{t+1} = cos(x_t)$')
    plt.subplot(3, 1, 2)
    #hard-coded r for the middle panel (edge of the chaotic regime)
    r = 3.57
    logistic_map = lambda x: r*x*(1.0 - x)
    x = run(logistic_map, initial_value=0.5, nsteps=75)
    plt.plot(x, 'k-')
    plt.axis('tight')
    plt.title('$x_{t+1} = %0.6f x_t (1 - x_t)$' % r)
    plt.subplot(3, 1, 3)
    #caller-controlled r for the bottom panel
    r = logistic_map_r
    logistic_map = lambda x: r*x*(1.0 - x)
    x = run(logistic_map, initial_value=0.5, nsteps=175)
    plt.plot(x, 'k-')
    plt.axis('tight')
    plt.title('$x_{t+1} = %0.6f x_t (1 - x_t)$' % r)
def plot_power_spectrum(x, sample_rate=1.0):
    """ Plot |FFT(x)|^2 against frequency for the positive frequencies only.

        x: 1-d time series.
        sample_rate: samples per second; used to scale the frequency axis.

        Opens a new matplotlib figure; returns nothing.
    """
    #take the fourier transform of the time series x
    xft = np.fft.fft(x)
    freq = np.fft.fftfreq(len(x), d=1.0/sample_rate)
    #keep strictly positive frequencies (drops DC and, for even-length
    #input, the negative-signed Nyquist bin)
    findex = freq > 0.0
    #square the magnitude of the fourier transform to get
    #the power spectrum
    ps = np.abs(xft)**2
    #make a plot
    plt.figure()
    plt.plot(freq[findex], ps[findex], 'g-')
    plt.ylabel('Power')
    plt.xlabel('Frequency')
def autocorrelation_function(y, lags=range(20)):
    """Compute the autocorrelation of *y* at the requested lags.

    For each lag L the value is the Pearson correlation coefficient
    between y[:-L] and y[L:] (full series for L == 0).

    Returns (acf, lags) where acf is a numpy array aligned with lags.
    """
    values = []
    for lag in lags:
        #correlation between the series and its lagged copy
        corr = np.corrcoef(y[:len(y) - lag], y[lag:])
        values.append(corr[0, 1])
    return np.array(values), lags
def get_system_data(nsteps=500):
    """Generate data from linearly filtered, smoothed Gaussian noise.

    Returns (input, output, filter, intercept): the normalized smooth
    input, the filtered output, the true impulse response used, and the
    constant offset added to the output.
    """
    #white noise, smoothed by a Hanning window
    noise = np.random.randn(nsteps)
    window = np.hanning(30)
    smooth_input = np.convolve(noise, window, mode='same')
    #scale so the input lies in [-1, 1]
    smooth_input /= np.abs(smooth_input).max()
    #pass the input through a fixed oscillating FIR filter plus an offset
    impulse_response = [1.0, 0.5, 0.0, -0.5, -1.0, -0.25, 0.0, 0.25, 0.0, -0.05, 0.0, 0.05]
    bias = 0.7
    output = np.convolve(smooth_input, impulse_response, mode='same') + bias
    return smooth_input, output, impulse_response, bias
def fit_linear_filter(input, output, nlags):
    """Estimate a causal linear filter from input to output by ridge regression.

    Each row of the design matrix holds the nlags input samples preceding
    one output sample; the fitted coefficients are reversed so they line
    up with np.convolve's kernel ordering.

    Returns (filter_weights, bias).
    """
    #design matrix: one window of lagged input per usable output sample
    X = np.array([input[i - nlags:i] for i in range(nlags, len(output))])
    #matching targets (the first nlags outputs lack full history)
    y = output[nlags:]
    #regularized least squares
    model = Ridge(alpha=1)
    model.fit(X, y)
    #reverse the weights into convolution-kernel order; bias separate
    return model.coef_[::-1], model.intercept_
if __name__ == '__main__':
    #Earlier class demos, kept disabled; uncomment one at a time to rerun.
    #t,y = generate_sine_wave()
    #t,y = generate_ar_process(sample_rate=5e3)
    #difference_equation_examples()
    #sr = 1e3
    #t,y = generate_sine_wave(freqs=[2.0, 57.0, 143.0], duration=5.0, sample_rate=1e3)
    #plot_power_spectrum(y, sample_rate=1e3)
    #The triple-quoted block below is a disabled ACF demo (string literal
    #used as a block comment); it is never executed.
    """
    plt.figure()
    #plot the ACF of random noise
    y = np.random.randn(1000)
    plt.subplot(2, 2, 1)
    acf,lags = autocorrelation_function(y, lags=range(50))
    plt.plot(lags, acf, 'k-', linewidth=2.0)
    plt.title('ACF of Random Noise')
    plt.axis('tight')
    #plot the ACF of a sine wave
    t,y = generate_sine_wave(freqs=[2.0, 57.0, 143.0], duration=5.0, sample_rate=1e3, plot=False)
    plt.subplot(2, 2, 2)
    acf,lags = autocorrelation_function(y, lags=range(50))
    plt.plot(lags, acf, 'k-', linewidth=2.0)
    plt.title('ACF of Sum of Sine Wave')
    plt.axis('tight')
    #plot the ACF of the logistic map
    logistic_map = lambda x: 3.86*x*(1.0 - x)
    y = run(logistic_map, initial_value=0.5, nsteps=500)
    acf,lags = autocorrelation_function(y, lags=range(100))
    plt.subplot(2, 2, 3)
    plt.plot(lags, acf, 'k-', linewidth=2.0)
    plt.title('ACF of Logistic Map (r=3.86)')
    plt.axis('tight')
    #plot the ACF of another logistic map
    logistic_map = lambda x: 3.7*x*(1.0 - x)
    y = run(logistic_map, initial_value=0.5, nsteps=500)
    acf,lags = autocorrelation_function(y, lags=range(100))
    plt.subplot(2, 2, 4)
    plt.plot(lags, acf, 'k-', linewidth=2.0)
    plt.title('ACF of Logistic Map (r=3.7)')
    plt.axis('tight')
    """
    #r = 3.86
    #logistic_map = lambda x: r*x*(1.0 - x)
    #y = run(logistic_map, initial_value=0.5, nsteps=2000)
    #plot_power_spectrum(y, sample_rate=1.0)
    #Active demo: fit a linear filter to synthetic input/output data and
    #compare the recovered filter and prediction against ground truth.
    #generate the input/output data
    input,output,the_filter,the_intercept = get_system_data()
    #fit a linear filter to the input/output data
    pred_filter,pred_intercept = fit_linear_filter(input, output, 12)
    #generate a predicted output from the input using a convolution
    pred_output = np.convolve(input, pred_filter, mode='same') + pred_intercept
    #compute the correlation coefficient between the predicted and actual output
    C = np.corrcoef(output, pred_output)
    cc = C[0, 1]
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.plot(input, 'k-', linewidth=2.0)
    plt.plot(output, 'r-', linewidth=2.0)
    plt.xlabel('Time')
    plt.legend(['Input', 'Output'])
    plt.axis('tight')
    plt.subplot(2, 2, 2)
    plt.plot(the_filter, 'bo-', linewidth=2.0)
    plt.plot(pred_filter, 'co-', linewidth=2.0)
    plt.xlabel('Lag')
    plt.legend(['Actual', 'Predicted'])
    plt.title('Filters')
    plt.axis('tight')
    plt.subplot(2, 2, 3)
    plt.plot(output, 'r-', linewidth=2.0)
    plt.plot(pred_output, 'g-', linewidth=2.0)
    plt.legend(['Actual Output', 'Predicted Output'])
    plt.xlabel('Time')
    plt.axis('tight')
    plt.title('cc=%0.2f' % cc)
    plt.show()
| [
"mike.schachter@gmail.com"
] | mike.schachter@gmail.com |
07fd478a0c99e3470575c12f1bb74ad945580d0c | e9a083fb04bf9061a2c49871cfbec9b37ff8f71b | /docs/source/conf.py | c609679b4ac4c8100904a33ce92c16726bc46c12 | [] | no_license | olaurino/rama | 7f86223d66f42c639672da6b8979eacaf56b28ed | 2c88ca2263ccbf6d0737fea0ac5dc0341d71c53a | refs/heads/master | 2021-01-25T14:49:32.330753 | 2018-06-04T14:25:27 | 2018-06-04T14:25:27 | 123,731,355 | 0 | 2 | null | 2018-05-08T13:25:28 | 2018-03-03T21:05:53 | Python | UTF-8 | Python | false | false | 5,608 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from rama.utils import Singleton
from sphinx.ext.autodoc import ClassDocumenter
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
# Sphinx build configuration for the Rama documentation.  Values commented
# out below are Sphinx defaults, kept for reference.
project = 'Rama'
copyright = '2018, Omar Laurino'
author = 'Omar Laurino'
# The short X.Y version (intentionally empty; only `release` is set)
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# html_theme_options = {}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ramadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Rama.tex', 'Rama Documentation',
     'Omar Laurino', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'rama', 'Rama Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Rama', 'Rama Documentation',
     author, 'Rama', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
class SingletonDocumenter(ClassDocumenter):
    """Autodoc documenter that renders rama Singleton instances as classes.

    Registered via setup() below so that ``.. auto<objtype>::`` directives
    pick up Singleton objects which autodoc would otherwise skip.
    """
    objtype = 'Singleton'
    directivetype = 'class'
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # claim only members that are instances of the project's Singleton
        return isinstance(member, Singleton)
def setup(app):
    """Sphinx extension hook: register the Singleton autodoc documenter."""
    app.add_autodocumenter(SingletonDocumenter)
| [
"olaurino@cfa.harvard.edu"
] | olaurino@cfa.harvard.edu |
36e2367cd04b5957640725d4b5e176a6e2d4a2ec | 60391b28bd2b11d7c905e8fc20ec049eb05e8f92 | /lib/tests.py | fc271cced34e21d28d15cdfb68d1c0b783ea417d | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | serl/topoblocktest | d2adc8c886f4f637da7377c2c2ea2cce35e8b497 | 32870f863e128414b0f0f9b786815f38c35d7c73 | refs/heads/master | 2022-07-01T12:23:10.327429 | 2019-11-02T14:51:32 | 2019-11-02T14:51:32 | 58,321,253 | 0 | 0 | MIT | 2022-06-20T08:20:52 | 2016-05-08T16:34:10 | Python | UTF-8 | Python | false | false | 9,666 | py | import textwrap
from .bash import CommandBlock
def begin():
    """Return the common bash prologue shared by every generated script.

    The script runs as root, so the wide-open umask lets the regular user
    inspect and delete the produced result files afterwards.
    """
    header = CommandBlock()
    for line in ('#!/bin/bash', 'umask 0000', ''):
        header += line
    return header
def counter_increment(**settings):
    """Return a bash snippet that bumps the persistent run counter.

    Reads {result_file}.count (tolerating a missing file) and writes the
    incremented value back.  Only the 'result_file' setting is used;
    extra keyword settings are ignored.
    """
    template = ('counter=$(cat {result_file}.count 2>/dev/null) ; '
                'echo $((counter + 1)) > {result_file}.count')
    return template.format(**settings)
def iperf2(**in_settings):
    """Build a bash CommandBlock running one iperf2 throughput test.

    Required settings: result_file, ns1 (server netns), ns2 (client netns).
    Optional: protocol ('tcp' or 'udp'; anything else silently becomes tcp),
    parallelism, packet_size ('default' or a number: --len for udp, --mss
    for tcp), duration (int seconds >= 5), tcpdump (capture on the server).
    On success the parsed CSV output is appended to {result_file}.iperf2,
    CPU stats to {result_file}.cpu, and the run counter is incremented.
    """
    defaults = {
        'tcpdump': False,
        'parallelism': 1,
        'protocol': 'tcp',
        '__udp_param': '',
        'packet_size': 'default',
        '__packet_size_param': '',
        'result_file': None,
        'ns1': None,
        'ns2': None,
        'duration': 30,
    }
    settings = defaults.copy()
    settings.update(in_settings)
    settings['counter_increment'] = counter_increment(**settings)
    if settings['protocol'] == 'udp':
        settings['__udp_param'] = '--udp --bandwidth 100G ' # will generate a warning on the server side, but we don't care
    else:
        settings['protocol'] = 'tcp'
    if settings['packet_size'] != 'default':
        if settings['protocol'] == 'udp':
            settings['__packet_size_param'] = '--len {} '.format(settings['packet_size'])
        else:
            settings['__packet_size_param'] = '--mss {} '.format(settings['packet_size'])
    for key in ('result_file', 'ns1', 'ns2'):
        if settings[key] is None:
            raise ValueError('{} missing'.format(key))
    settings['duration'] = int(settings['duration'])
    if settings['duration'] < 5:
        raise ValueError('Duration must be integer >= 5')
    settings['kill_after'] = settings['duration'] + 15
    settings['iostat_interval'] = 5
    # NOTE(review): integer division, so durations not divisible by 5
    # monitor the CPU for slightly less than the full run.
    settings['iostat_count'] = settings['duration'] // settings['iostat_interval']
    script = CommandBlock()
    script += textwrap.dedent("""
        mkdir -p `dirname {result_file}`
        ip netns exec {ns1} iperf -s {__udp_param} &>/dev/null & IPERF_PID=$!
        if [ "{tcpdump}" == True ]; then #tcpdump
            ip netns exec {ns1} tcpdump -s 96 -w {result_file}.pcap &>/dev/null & TCPDUMP_PID=$!
        fi
        server_addr=$(ip netns exec {ns1} ip addr show scope global | grep inet | cut -d' ' -f6 | cut -d/ -f1)
        echo -n "Running iperf2 over {protocol} (with {parallelism} clients)... "
        sleep 1
        (LC_ALL=C iostat -c {iostat_interval} {iostat_count} | awk 'FNR==3 {{ header = $0; print }} FNR!=1 && $0 != header && $0' > {result_file}.cpu.temp) & IOSTAT_PID=$! # CPU monitoring
        iperf2out="$(ip netns exec {ns2} timeout --signal=KILL {kill_after} iperf --time {duration} {__udp_param}{__packet_size_param} --client $server_addr --reportstyle C --parallel {parallelism})"
        wait $IOSTAT_PID
        expected_lines={parallelism}
        [ '{protocol}' == 'udp' ] && [ '{packet_size}' -ge 52 ] && expected_lines=$((expected_lines * 2))
        [ {parallelism} -gt 1 ] && expected_lines=$((expected_lines + 1))
        output_lines=$(echo "$iperf2out" | wc -l)
        if [ $expected_lines == $output_lines ]; then
            master_line=$(echo "$iperf2out" | grep ',-1,')
            echo measured $(numfmt --to=iec --suffix=b/s ${{master_line##*,}})
            echo 'begin' >> {result_file}.iperf2
            echo "$iperf2out" >> {result_file}.iperf2
            cat {result_file}.cpu.temp >> {result_file}.cpu
            {counter_increment}
            sleep 5 #let the load decrease
        else
            echo error
        fi
        rm {result_file}.cpu.temp
        kill $IPERF_PID $TCPDUMP_PID
        wait
        sleep 1
        """).format(**settings)
    return script
def iperf3(**in_settings):
    """Build a bash CommandBlock running one iperf3 throughput test.

    Required settings: result_file, ns1 (server netns), ns2 (client netns).
    Optional: protocol ('tcp'/'udp'), parallelism (iperf3 --parallel),
    packet_size ('default' or a number: --length for udp, --set-mss for
    tcp), duration (int seconds >= 5), tcpdump, zerocopy (--zerocopy),
    affinity (pins client/server to CPUs 0,1).  JSON output is appended to
    {result_file}.iperf3; CPU stats to {result_file}.cpu on success.
    """
    defaults = {
        'tcpdump': False,
        'parallelism': 1,
        'protocol': 'tcp',
        '__udp_param': '',
        'packet_size': 'default',
        '__packet_size_param': '',
        'result_file': None,
        'ns1': None,
        'ns2': None,
        'duration': 30,
        'zerocopy': False,
        '__zerocopy_param': '',
        'affinity': False,
        '__affinity_param': '',
    }
    settings = defaults.copy()
    settings.update(in_settings)
    settings['counter_increment'] = counter_increment(**settings)
    for key in ('result_file', 'ns1', 'ns2'):
        if settings[key] is None:
            raise ValueError('{} missing'.format(key))
    if settings['protocol'] == 'udp':
        settings['__udp_param'] = '--udp --bandwidth 0 '
    else:
        settings['protocol'] = 'tcp'
    if settings['packet_size'] != 'default':
        if settings['protocol'] == 'udp':
            settings['__packet_size_param'] = '--length {} '.format(settings['packet_size'])
        else:
            settings['__packet_size_param'] = '--set-mss {} '.format(settings['packet_size'])
    if settings['zerocopy']:
        settings['__zerocopy_param'] = '--zerocopy '
    if settings['affinity']:
        settings['__affinity_param'] = '--affinity 0,1 '
    settings['duration'] = int(settings['duration'])
    if settings['duration'] < 5:
        raise ValueError('Duration must be integer >= 5')
    settings['kill_after'] = settings['duration'] + 15
    settings['iostat_interval'] = 5
    settings['iostat_count'] = settings['duration'] // settings['iostat_interval']
    script = CommandBlock()
    script += textwrap.dedent("""
        mkdir -p `dirname {result_file}`
        ip netns exec {ns1} iperf3 -s --interval 0 &>/dev/null & IPERF_PID=$!
        if [ "{tcpdump}" == True ]; then #tcpdump
            ip netns exec {ns1} tcpdump -s 96 -w {result_file}.pcap &>/dev/null & TCPDUMP_PID=$!
        fi
        server_addr=$(ip netns exec {ns1} ip addr show scope global | grep inet | cut -d' ' -f6 | cut -d/ -f1)
        echo -n "Running iperf3 over {protocol} (with {parallelism} clients)... "
        sleep 1
        (LC_ALL=C iostat -c {iostat_interval} {iostat_count} | awk 'FNR==3 {{ header = $0; print }} FNR!=1 && $0 != header && $0' > {result_file}.cpu.temp) & IOSTAT_PID=$! # CPU monitoring
        ip netns exec {ns2} timeout --signal=KILL {kill_after} iperf3 --time {duration} --interval 0 {__affinity_param}{__zerocopy_param}{__udp_param}{__packet_size_param} --parallel {parallelism} --client $server_addr --json >> {result_file}.iperf3
        iperf_exitcode=$?
        wait $IOSTAT_PID
        if [ $iperf_exitcode == 0 ]; then
            echo success
            cat {result_file}.cpu.temp >> {result_file}.cpu
            {counter_increment}
            sleep 5 #let the load decrease
        else
            echo error
        fi
        rm {result_file}.cpu.temp
        kill $IPERF_PID $TCPDUMP_PID
        wait
        sleep 1
        """).format(**settings)
    return script
def iperf3m(**in_settings):
    """Build a bash CommandBlock running N parallel iperf3 server/client pairs.

    Unlike iperf3(), 'parallelism' here spawns one independent iperf3
    server (ports 5201+1..5201+N) and one client per stream, each writing
    its own JSON file {result_file}.iperf3.$i.  With affinity=True each
    client/server pair is pinned to distinct CPUs modulo nproc.  Other
    settings match iperf3(); there is no per-run success check, the
    counter is always incremented after the clients finish.
    """
    defaults = {
        'tcpdump': False,
        'parallelism': 1,
        'protocol': 'tcp',
        '__udp_param': '',
        'packet_size': 'default',
        '__packet_size_param': '',
        'result_file': None,
        'ns1': None,
        'ns2': None,
        'duration': 30,
        'zerocopy': False,
        '__zerocopy_param': '',
        'affinity': False,
    }
    settings = defaults.copy()
    settings.update(in_settings)
    settings['counter_increment'] = counter_increment(**settings)
    for key in ('result_file', 'ns1', 'ns2'):
        if settings[key] is None:
            raise ValueError('{} missing'.format(key))
    if settings['protocol'] == 'udp':
        settings['__udp_param'] = '--udp --bandwidth 0 '
    else:
        settings['protocol'] = 'tcp'
    if settings['packet_size'] != 'default':
        if settings['protocol'] == 'udp':
            settings['__packet_size_param'] = '--length {} '.format(settings['packet_size'])
        else:
            settings['__packet_size_param'] = '--set-mss {} '.format(settings['packet_size'])
    if settings['zerocopy']:
        settings['__zerocopy_param'] = '--zerocopy '
    settings['duration'] = int(settings['duration'])
    if settings['duration'] < 5:
        raise ValueError('Duration must be integer >= 5')
    settings['kill_after'] = settings['duration'] + 15
    settings['iostat_interval'] = 5
    settings['iostat_count'] = settings['duration'] // settings['iostat_interval']
    script = CommandBlock()
    script += textwrap.dedent("""
        mkdir -p `dirname {result_file}`
        for i in `seq {parallelism}`; do
            PORT=$((5201 + i))
            ip netns exec {ns1} iperf3 -s --interval 0 --port $PORT &>/dev/null & IPERF_PIDs="$IPERF_PIDs $!"
        done
        if [ "{tcpdump}" == True ]; then #tcpdump
            ip netns exec {ns1} tcpdump -s 96 -w {result_file}.pcap &>/dev/null & TCPDUMP_PID=$!
        fi
        server_addr=$(ip netns exec {ns1} ip addr show scope global | grep inet | cut -d' ' -f6 | cut -d/ -f1)
        echo "Running iperf3 over {protocol} (with {parallelism} servers and clients)... "
        sleep 1
        (LC_ALL=C iostat -c {iostat_interval} {iostat_count} | awk 'FNR==3 {{ header = $0; print }} FNR!=1 && $0 != header && $0' >> {result_file}.cpu) & IOSTAT_PID=$! # CPU monitoring
        for i in `seq {parallelism}`; do
            PORT=$((5201 + i))
            if [ "{affinity}" == True ]; then #affinity
                NPROC=$(nproc)
                PROC_CLIENT=$(( (i - 1) % NPROC ))
                PROC_SERVER=$(( (i - 1 + {parallelism}) % NPROC ))
                AFFINITY="--affinity $PROC_CLIENT,$PROC_SERVER"
            fi
            (ip netns exec {ns2} timeout --signal=KILL {kill_after} iperf3 --time {duration} --interval 0 $AFFINITY {__zerocopy_param}{__udp_param}{__packet_size_param} --client $server_addr --port $PORT --json >> {result_file}.iperf3.$i) &
            CLIENT_IPERF_PIDs="$CLIENT_IPERF_PIDs $!"
        done
        wait $IOSTAT_PID $CLIENT_IPERF_PIDs
        {counter_increment}
        sleep 5 #let the load decrease
        kill $IPERF_PIDs $TCPDUMP_PID
        wait
        sleep 1
        """).format(**settings)
    return script
| [
"livi@i3s.unice.fr"
] | livi@i3s.unice.fr |
79800f0401cbb5ee3dfdafd55b2f0532cd565719 | ab37cdd76b8d4da54ff1ce30b0fa2e3dfadd207f | /1001-1099/1008/1008.py | e2d869f2abde08e4f8eb119987332f9815576b5f | [] | no_license | hay86/timus | b163d94052d3dedd51c82f5c10874402f805c6e1 | 0d06073228c23538ca785938c862d2b5e08bda63 | refs/heads/master | 2023-03-08T06:34:28.707612 | 2021-02-20T14:38:48 | 2021-02-20T14:38:48 | 100,444,783 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | import sys
# 12x12 occupancy grid shared by one() and two(); problem coordinates fit
# within this range (assumption based on the fixed literal — verify against
# the original problem limits).
img = [[False for i in range(12)] for j in range(12)]
def one():
    """Encode mode: read n points from stdin, mark them in img, then BFS
    from the first point, printing one line per visited cell listing the
    directions (R/T/L/B) of its not-yet-queued neighbours.  Lines end with
    ',' except the final one, which is '.'.  Python 2 syntax."""
    for i in range(n):
        x, y = [int(j) for j in sys.stdin.readline().split()]
        img[x][y] = True
        if i == 0:
            x0, y0 = x, y
    # v tracks cells already queued so each is described exactly once
    v = [[False for i in range(12)] for j in range(12)]
    q = [(x0, y0)]
    v[x0][y0] = True
    print '%d %d' % (x0, y0)
    while len(q) > 0:
        x, y = q.pop(0)
        o = ''
        if img[x+1][y] and not v[x+1][y]:
            o += 'R'
            q.append((x+1, y))
            v[x+1][y] = True
        if img[x][y+1] and not v[x][y+1]:
            o += 'T'
            q.append((x, y+1))
            v[x][y+1] = True
        if img[x-1][y] and not v[x-1][y]:
            o += 'L'
            q.append((x-1, y))
            v[x-1][y] = True
        if img[x][y-1] and not v[x][y-1]:
            o += 'B'
            q.append((x, y-1))
            v[x][y-1] = True
        # an empty queue here means no directions were emitted for this
        # cell, so '.' safely terminates the listing
        if len(q) == 0:
            print '.'
        else:
            print o+','
def two():
    """Decode mode: replay the BFS direction lines from stdin (produced by
    one()), rebuilding img and the bounding box (xn..xm, yn..ym), then
    print the point count followed by every point in x-major order.
    Reads the globals x0, y0 set by the dispatcher.  Python 2 syntax."""
    xn, yn = x0, y0
    xm, ym = x0, y0
    count = 1
    q=[(x0, y0)]
    img[x0][y0] = True
    for line in sys.stdin:
        # '.' terminates the stream; terminator lines carry no directions
        if '.' in line:
            break
        x, y = q.pop(0)
        if 'R' in line:
            q.append((x+1, y))
            count += 1
            img[x+1][y] = True
            xm = max(xm, x+1)
        if 'T' in line:
            q.append((x, y+1))
            count += 1
            img[x][y+1] = True
            ym = max(ym, y+1)
        if 'L' in line:
            q.append((x-1, y))
            count += 1
            img[x-1][y] = True
            xn = min(xn, x-1)
        if 'B' in line:
            q.append((x, y-1))
            count += 1
            img[x][y-1] = True
            yn = min(yn, y-1)
    print count
    for x in range(xn, xm+1):
        for y in range(yn, ym+1):
            if img[x][y]:
                print x, y
# Mode dispatch: a header with a single number selects encoding (one());
# a coordinate pair selects decoding (two()).  n / x0,y0 are module
# globals consumed by those functions.
line = sys.stdin.readline()
if not ' ' in line:
    n = int(line)
    one()
else:
    x0, y0 = [int(y) for y in line.split()]
    two()
| [
"xukaifeng1986@gmail.com"
] | xukaifeng1986@gmail.com |
81a081122381d85928f0ad8fb3aab7f699135e78 | aca971629c16f16a4b0360669579d0751fd5da67 | /src/indelPlot.py | f896ef722c39a732f5391609743e33d9627abe56 | [
"MIT"
] | permissive | ngannguyen/referenceViz | 43769ded8cb3c77445391e26233352a61ed72744 | 6990a00739a712ccd1371e996229882252fa8f91 | refs/heads/master | 2021-01-01T06:26:33.975514 | 2012-03-22T21:34:54 | 2012-03-22T21:34:54 | 1,750,339 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,150 | py | #!/usr/bin/env python
"""
Create Snp plots
nknguyen at soe dot ucsc dot edu
Jun 15 2011
"""
import os, sys
from optparse import OptionParser
import xml.etree.ElementTree as ET
#from numpy import *
import libPlotting as libplot
import matplotlib.pyplot as pyplot
import matplotlib.pylab as pylab
from matplotlib.ticker import *
from matplotlib.font_manager import FontProperties
class Sample():
    """Per-sample indel statistics parsed from a <statsForSample> XML node.

    Mixed insertion+deletion events (indel) are folded into both the
    insertion and deletion totals.
    """
    def __init__(self, xmlnode):
        attrs = xmlnode.attrib
        self.name = attrs['sampleName']
        self.refname = attrs['referenceName']
        indel = float(attrs['totalInsertionAndDeletionPerAlignedBase'])
        # count events that are both insertion and deletion toward each total
        self.dels = float(attrs['totalDeletionPerAlignedBase']) + indel
        self.ins = float(attrs['totalInsertionPerAlignedBase']) + indel
        self.indel = indel
def readfiles(options):
    """Parse each XML file in options.files into a list of Sample objects.

    Skips the 'aggregate', 'ROOT' and empty sample names, plus anything
    listed in options.filteredSamples.  Returns one sample list per file,
    in the same order as options.files.
    """
    stats_per_file = []
    skip = ('aggregate', 'ROOT', '')
    for path in options.files:
        root = ET.parse(path).getroot()
        samples = [Sample(node) for node in root.findall('statsForSample')
                   if node.attrib['sampleName'] not in skip
                   and node.attrib['sampleName'] not in options.filteredSamples]
        stats_per_file.append(samples)
    return stats_per_file
def initOptions(parser):
    """Register indelPlot's own command-line options on *parser*."""
    parser.add_option('--outdir', dest='outdir', default='.',
                      help='Output directory')
    parser.add_option('--filteredSamples', dest='filteredSamples',
                      help='Hyphen separated list of samples that were filtered out (not to include in the plot)')
def checkOptions(args, options, parser):
    """Validate the positional XML paths and normalize options in place.

    Aborts via parser.error() when fewer than two files are given or a
    path does not exist.  Afterwards options.files holds the input paths
    and options.filteredSamples is always a list.
    """
    if len(args) < 2:
        parser.error('Please provide two snpStats xml files.\n')
    validated = []
    for path in args:
        if not os.path.exists(path):
            parser.error('File %s does not exist.\n' % path)
        validated.append(path)
    options.files = validated
    options.filteredSamples = (options.filteredSamples.split('-')
                               if options.filteredSamples else [])
def setAxes(fig, range1, range2):
    """Create two vertically stacked axes on *fig*.

    The heights are split in proportion range1:range2, with the range1
    axes on top.  Returns (top_axes, bottom_axes).
    """
    left, right = 0.12, 0.95
    bottom, top = 0.15, 0.95
    width = right - left
    gap = 0.015
    usable = top - bottom
    upper_height = (usable - gap) * (range1 / (range1 + range2))
    lower_height = usable - gap - upper_height
    # create the lower axes first, then the upper one above the gap
    lower_ax = fig.add_axes([left, bottom, width, lower_height])
    upper_ax = fig.add_axes([left, bottom + lower_height + gap, width, upper_height])
    return upper_ax, lower_ax
def drawPlot( options, samples1, samples2, type ):
    """Draw the per-sample insertion or deletion comparison scatter plot.

    samples1/samples2: Sample lists for the two references being compared.
    type: 'insertion' or 'deletion', selecting which rate to plot.

    Columns are the samples of samples1 sorted by decreasing rate, plus a
    self-comparison column for reference 1, an 'average' column, and the
    chimp outgroup (panTro3) last.  Writes the image via libplot and
    stores the output path in options.out.  Python 2 syntax.
    """
    #Sorted in decreasing order of errorPerSite in samples1
    if type == 'insertion':
        samples1 = sorted( samples1, key=lambda s:s.ins, reverse=True )
    else:
        samples1 = sorted( samples1, key=lambda s:s.dels, reverse=True )
    if len( samples1 ) < 1:
        return
    #remove chimpSample:
    #NOTE(review): if panTro3 is absent, chimpSample stays None and the
    #"Add chimp" section below would raise AttributeError — confirm the
    #inputs always contain panTro3.
    chimpSample = None
    for i, s in enumerate(samples1):
        if s.name == 'panTro3':
            chimpSample = samples1.pop(i)
            break
    refname1 = samples1[0].refname
    refname2 = samples2[0].refname
    y1data = [ s.ins for s in samples1 ]
    if type == 'deletion':
        y1data = [ s.dels for s in samples1 ]
    xticklabels = [ s.name for s in samples1 ]
    #indel of refname1 w.r.t itself (0)
    y1data.append(0)
    xticklabels.append(refname1)
    y2data = []
    for name in xticklabels:
        if name == refname2:#indel of refname2 w.r.t itself (0)
            y2data.append(0)
        #presumably samples2 does not itself contain refname2, otherwise
        #this could append twice for that column — verify upstream.
        for s in samples2:
            if s.name == name:
                if type == 'insertion':
                    y2data.append(s.ins)
                else:
                    y2data.append(s.dels)
                break
    if len(xticklabels) != len(y2data):
        sys.stderr.write("Input file 1 and 2 do not have the same set of samples\n")
        sys.exit( 1 )
    #add the average column (divisor excludes the zero self-comparison):
    num = 1
    y1avr = sum(y1data)/float(len(y1data) - 1)
    y1data.append(y1avr)
    xticklabels.append('average')
    y2avr = sum(y2data)/float(len(y2data) - 1)
    y2data.append(y2avr)
    print "%s Average: %s %f, %s %f" %(type, refname1, y1avr, refname2, y2avr)
    #Add chimp:
    samples1.append(chimpSample)
    if type == 'insertion':
        y1data.append( chimpSample.ins )
    else:
        y1data.append( chimpSample.dels )
    for s in samples2:
        if s.name == 'panTro3':
            if type == 'insertion':
                y2data.append(s.ins)
            else:
                y2data.append(s.dels)
    xticklabels.append("panTro3")
    #pad the y-limits slightly beyond the data
    minMajority = min( [min(y2data), min(y1data)] ) - 0.0001
    maxMajority = max( [max(y2data), max(y1data)] ) + 0.0001
    basename = os.path.basename(options.files[0])
    options.out = os.path.join( options.outdir, '%s_%s' %( type, basename.lstrip('pathStats').lstrip('_').rstrip('.xml') ) )
    fig, pdf = libplot.initImage( 11.2, 10.0, options )
    #ax, ax2 = setAxes(fig, maxOutlier - minOutlier, maxMajority - minMajority)
    ax2 = fig.add_axes( [0.15, 0.15, 0.8, 0.8] )
    l2 = ax2.plot( y2data, marker='.', markersize=14.0, linestyle='none', color="#E31A1C" )
    l1 = ax2.plot( y1data, marker='.', markersize=14.0, linestyle='none', color="#1F78B4" )
    #Legend
    fontP = FontProperties()
    fontP.set_size("x-small")
    legend = ax2.legend([l1, l2], [libplot.properName(refname1), libplot.properName(refname2)], 'upper right', numpoints=1, prop=fontP)
    legend._drawFrame = False
    ax2.set_ylim( minMajority, maxMajority )
    ax2.set_xlim( -0.5, len(xticklabels) -0.5 )
    ax2.spines['top'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    ax2.xaxis.tick_bottom()
    ax2.yaxis.set_ticks_position( 'left' )
    ax2.set_xticks( range( 0, len(xticklabels) ) )
    properxticklabels = [ libplot.properName(l) for l in xticklabels ]
    ax2.set_xticklabels( properxticklabels )
    for label in ax2.xaxis.get_ticklabels():
        label.set_rotation( 90 )
    ax2.yaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
    ax2.xaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
    ax2.set_xlabel( 'Samples' )
    title = 'Deletions'
    #if type == 'insertion':
    if type == 'insertion':
        ax2.set_ylabel( 'Insertions per site' )
        title = 'Insertions'
    else:
        ax2.set_ylabel( 'Deletions per site' )
    ax2.set_title( title )
    libplot.writeImage( fig, pdf, options )
def main():
    """Parse arguments, read both pathStats XML files, and emit the
    insertion and deletion comparison plots."""
    usage = ('Usage: %prog [options] file1.xml file2.xml\n\n')
    parser = OptionParser( usage = usage )
    initOptions( parser )
    libplot.initOptions( parser )
    options, args = parser.parse_args()
    checkOptions( args, options, parser )
    libplot.checkOptions( options, parser )
    statsList = readfiles( options )
    #statsList[0]/[1] correspond to file1/file2 (one sample list per file)
    drawPlot( options, statsList[0], statsList[1], 'insertion' )
    drawPlot( options, statsList[0], statsList[1], 'deletion' )
if __name__ == "__main__":
    main()
"nknguyen@soe.ucsc.edu"
] | nknguyen@soe.ucsc.edu |
b79cd911e54384cd26db6f16bcb60444a2e2f53c | dcbe98b20d12de456afdf1a0d7eafea03b347254 | /src/ast/ast_node.py | bc7ef5cf60cc6fe132964c1c25981810cdd51ce1 | [] | no_license | hborkows/TKOM | 6c8cc257700b729f38ff9b4b8b3ebf90322096a5 | 115e45d620629177968a4cdd707b00d8a148e547 | refs/heads/master | 2020-09-27T09:51:54.716090 | 2020-01-19T20:18:06 | 2020-01-19T20:18:06 | 226,489,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | from typing import List, Optional
class ASTNode(object):
    """Abstract base class for parsed AST nodes.

    Both hooks are stubs that return None; concrete node types are
    expected to override them.
    """

    def get_representation(self) -> str:
        """Return a printable representation of this node."""
        return None

    def get_children(self) -> Optional[List]:
        """Return this node's child nodes, if any."""
        return None
| [
"hubert.borkowski@hotmail.com"
] | hubert.borkowski@hotmail.com |
b7f93333d8f7f87274baefcfac762f864f7617c2 | f385e93fb799629318b6f5bbae1a3b29d62d8359 | /database/citations/asuncion2012a.py | ea381f3189a4abf25a7944dc6845c8cf0f359501 | [] | no_license | JoaoFelipe/ipaw-index | bf113649de497d2008922eb80f8ea3bf2cd6aba5 | f8fe329f0c35b11c84bd76e7b7da7a465d380a02 | refs/heads/master | 2020-03-17T19:51:13.892958 | 2018-05-18T00:54:08 | 2018-05-18T00:54:08 | 133,880,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # coding: utf-8
from snowballing.models import *
from snowballing import dbindex
# Register this module as the citation index file for the snowballing tool.
dbindex.last_citation_file = dbindex.this_file(__file__)
from ..work.y2012 import asuncion2012a
from ..work.y2016 import reddish2016a
# Record that reddish2016a cites asuncion2012a; ref/contexts left empty
# (no quoted passage captured for this citation yet).
DB(Citation(
    reddish2016a, asuncion2012a, ref="",
    contexts=[
    ],
))
| [
"joaofelipenp@gmail.com"
] | joaofelipenp@gmail.com |
1d8418fc6edd086d8a53c8ff9b3b5c18aeddbd9d | b1249f4bae1106570601870d9e50f946fb1600f2 | /0x01-python-if_else_loops_functions/0-positive_or_negative.py | a1fc89629667083b123b47fc3ece65490acb4010 | [] | no_license | Eltotino/holbertonschool-higher_level_programming | 275a1661589666194ffc3666bb9af8c1ec314983 | 5972c0d86f6a03f41861b35716276d79e1ccfe8b | refs/heads/main | 2023-06-22T23:36:54.168404 | 2021-07-12T15:42:14 | 2021-07-12T15:42:14 | 259,333,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #!/usr/bin/python3
import random
# Pick a random integer in [-10, 10] and print whether it is
# negative, zero, or positive (Holberton exercise; structure is fixed).
number = random.randint(-10, 10)
if (number < 0):
    print("{} is negative".format(number))
elif (number == 0):
    print("{} is zero".format(number))
else:
    print("{} is positive".format(number))
| [
"totojaber1@gmail.com"
] | totojaber1@gmail.com |
3eaa626d2ccbdec574056baced33f7ac3935a0e0 | 23acbd859a6f4ffa0296d092331a8ca83b79c543 | /users/migrations/0005_auto_20200830_1602.py | c0f1b48a2c2bd2a89dfc5a4ba28ca991d551e438 | [] | no_license | Timothy-Parteka/INVENTORY-MANAGEMENT-SYSTEM | 0c0986174f8d7457103b83d9fe2b35b354bf30a5 | bcd056c14b350e04abe0c82fd68f8789400ce45f | refs/heads/master | 2023-02-12T18:22:20.120662 | 2021-01-13T13:40:15 | 2021-01-13T13:40:15 | 276,723,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,794 | py | # Generated by Django 3.0.8 on 2020-08-30 13:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Department/Role lookup tables and
    extends CustomUser with name, id_number, department and role fields."""
    dependencies = [
        ('users', '0004_auto_20200701_0421'),
    ]
    operations = [
        # New lookup table: Department.
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('department', models.CharField(max_length=255)),
            ],
        ),
        # New lookup table: Role.
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(max_length=255)),
            ],
        ),
        # Nullable profile fields added to CustomUser.
        migrations.AddField(
            model_name='customuser',
            name='first_name',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='customuser',
            name='id_number',
            field=models.CharField(max_length=255, null=True, unique=True),
        ),
        migrations.AddField(
            model_name='customuser',
            name='last_name',
            field=models.CharField(max_length=255, null=True),
        ),
        # Foreign keys linking CustomUser to the new lookup tables.
        migrations.AddField(
            model_name='customuser',
            name='department',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Department'),
        ),
        migrations.AddField(
            model_name='customuser',
            name='role',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Role'),
        ),
    ]
| [
"timothymweshen@gmail.com"
] | timothymweshen@gmail.com |
2b64d7c87cb71a336307dcbab4db4d5d324b4e43 | 95f1541cac9e356b108fd4a7698dccd469f3996e | /backend/app/misc/tasks.py | 1b2ff7d1a667b0fef02eb0b51a72ae62a22e53c2 | [] | no_license | ramonsaraiva/multiplicae | f98c29e57cf53e358540fd590d55472311d9c382 | 29054abc83bad2309ab66698d7f3f3bbd683d570 | refs/heads/master | 2023-08-15T00:07:51.203417 | 2020-02-19T02:57:24 | 2020-02-19T02:57:24 | 238,992,039 | 2 | 1 | null | 2021-09-22T18:35:52 | 2020-02-07T18:00:24 | JavaScript | UTF-8 | Python | false | false | 114 | py | import dramatiq
@dramatiq.actor(
    queue_name='default',
    max_retries=1,
)
def healthcheck():
    # Minimal actor used to verify that the 'default' dramatiq queue is
    # processing work; retried at most once, returns 0 on success.
    return 0
| [
"ramonsaraiva@gmail.com"
] | ramonsaraiva@gmail.com |
6dff353a0bd671343ab8740d4532e77f168e7ee7 | c55a5a5f4b6acdb78a1c5e3cd722ecf5c4bb9257 | /CodeForces And Random Practice Problems/Pangarm.py | ea10d32e2d1ea0d6686cdb5d127a2eae6066d936 | [] | no_license | NazmulHayat/Competitive-Programming | 9aeba8c12e31b1c240710dea3314a674062ae584 | 1aa432b81b1dd223d5414d40ad5a2807899a536f | refs/heads/master | 2023-04-09T07:46:09.969017 | 2021-03-30T04:54:29 | 2021-03-30T04:54:29 | 289,615,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | lst = []
# Pangram check: read the word length and the word, print "YES" when the
# word contains every letter of the Latin alphabet (case-insensitive).
# The original version had a hard syntax error (`mylist =` with no value)
# and tested the number of whitespace-separated words instead of letters.
n = int(input())
s = input()
# A pangram needs all 26 letters, so anything shorter cannot qualify.
if n < 26:
    print("NO")
else:
    # Distinct letters of the word, case-folded to lower case.
    letters = {c for c in s.lower() if c.isalpha()}
    if len(letters) == 26:
        print("YES")
    else:
        print("NO")
| [
"hayatnazmul@gmail.com"
] | hayatnazmul@gmail.com |
d82ed1cfeb3e9cf9432826e65574bb198fceddb4 | 01dad4d1d2ffaf2fa070e99fe828d42f59a9f9d1 | /src/pycrop2ml_ui/packages/SQ_Energy_Balance/src/openalea/Penman.py | ab023da150792ac8a482b0e9d2ed39cd94ea4ed8 | [
"MIT",
"BSD-3-Clause"
] | permissive | AgriculturalModelExchangeInitiative/Pycrop2ml_ui | 5e210facf9689348bb57c16060967118b7c5f49a | 3d5d2b87a74f0be306056b71808286922fef2945 | refs/heads/master | 2023-06-24T13:52:39.933728 | 2023-06-17T00:17:26 | 2023-06-17T00:17:26 | 193,912,881 | 0 | 4 | MIT | 2023-02-25T13:26:57 | 2019-06-26T13:44:34 | Jupyter Notebook | UTF-8 | Python | false | false | 6,614 | py | # coding: utf8
import numpy
from math import *
def model_penman(evapoTranspirationPriestlyTaylor = 449.367,
         hslope = 0.584,
         VPDair = 2.19,
         psychrometricConstant = 0.66,
         Alpha = 1.5,
         lambdaV = 2.454,
         rhoDensityAir = 1.225,
         specificHeatCapacityAir = 0.00101,
         conductance = 598.685):
    """Penman evapotranspiration model (SiriusQuality2 energy balance).

    Used when wind and vapour pressure daily data are available; combines
    the Priestley-Taylor estimate with an aerodynamic (vapour pressure
    deficit) term.

    Reference: Martre et al., "Modelling energy balance in the wheat crop
    model SiriusQuality2: Evapotranspiration and canopy and soil
    temperature calculations", INRA/LEPSE Montpellier.

    Parameters:
        evapoTranspirationPriestlyTaylor: Priestley-Taylor ET [g m-2 d-1]
        hslope: slope of the saturated vapour pressure curve [hPa degC-1]
        VPDair: vapour pressure deficit of the air [hPa]
        psychrometricConstant: psychrometric constant
        Alpha: Priestley-Taylor proportionality constant
        lambdaV: latent heat of vaporization of water
        rhoDensityAir: density of air
        specificHeatCapacityAir: specific heat capacity of dry air
        conductance: boundary-layer conductance [m d-1]

    Returns:
        evapoTranspiration of Penman Monteith [g m-2 d-1]
    """
    # Radiative term: undo the Priestley-Taylor proportionality constant.
    radiative_term = evapoTranspirationPriestlyTaylor / Alpha
    # Aerodynamic term driven by the vapour pressure deficit of the air.
    aerodynamic_term = 1000.0 * (rhoDensityAir * specificHeatCapacityAir * VPDair * conductance / (lambdaV * (hslope + psychrometricConstant)))
    return radiative_term + aerodynamic_term
"ahmedmidingoyi@yahoo.fr"
] | ahmedmidingoyi@yahoo.fr |
c509a489439ea63675a461d4649b190defd866ce | 66cc457d251021dae6d875dbef97d2d2402ba619 | /main/urls.py | b663ff5e0f6ae78f2125722f54cce3c2f55a9d33 | [] | no_license | Jayapraveen/Simple-chatbot | eb3f40316b3c67bc4f682b2793689233840dd809 | a5117d5ed37b94d438ece5524d5934d400181847 | refs/heads/master | 2020-07-25T14:17:27.002689 | 2019-09-13T18:08:05 | 2019-09-13T18:08:05 | 208,320,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | from django.conf.urls import include,url
from django.contrib import admin
from chatterbot.ext.django_chatterbot import urls as chatterbot_urls
from main import views
from main.views import ChatterBotApiView
from django.views.decorators.csrf import csrf_exempt
# URL namespace for reversing this app's routes.
app_name = 'main'
# CSRF is exempted on the chat views so the widget can POST without a token.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', csrf_exempt(views.home), name='home'),
    url(r'^api/chatterbot/', csrf_exempt(ChatterBotApiView.as_view()), name='chatterbot'),
]
| [
"jayapraveen.ar.2015.cse@rajalakshmi.edu.in"
] | jayapraveen.ar.2015.cse@rajalakshmi.edu.in |
7a460b0c00e51c1c350cb5355df2ed784d08ca50 | 5a5b1cf029ef50c8f960cf283f7474046c5eebe8 | /static/datatool/split.py | 157985d42f365857e70387b5cc0737c9c01b72c4 | [] | no_license | chenjuntao/gisdemo | e337f6949c0eabe68f58d3011894551ac801edfd | 75a7b9a0e19d75ebf14a243ceb69d058def76106 | refs/heads/master | 2020-12-28T13:43:11.617876 | 2020-02-05T03:10:48 | 2020-02-05T03:10:48 | 238,354,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | # coding:utf-8
import csv
import os
# Split ./csv/allin.csv into chunk files of `page` rows under ./csv/in,
# repeating the header row at the top of every chunk.
dir = './csv/in'  # output directory (NOTE: shadows the builtin dir(); kept)
page = 30000  # data rows per chunk file
print('打开要写入的文件...')
fin = open('./csv/allin.csv', 'r', encoding='UTF-8')
f = csv.reader(fin)
num = page  # rows in the current chunk; starts "full" to force a new file
count = 0  # index of the next chunk file
f3 = None  # current output file handle
writer = None
csvHeader = []  # header row, captured from the first input row
for i in f:
    if len(csvHeader) == 0:
        csvHeader = i
    if num < page:
        writer.writerow(i)
        num += 1
    else:
        if f3 is not None:
            writer.writerow(i)
            # Bug fix: the original said `f3.close` (attribute access, no
            # call), so chunk files were never actually closed/flushed.
            f3.close()
        newFileName = dir + '/f' + str(count) + '.csv'
        print(newFileName)
        f3 = open(newFileName, 'w', newline='', encoding='UTF-8')
        writer = csv.writer(f3)
        writer.writerow(csvHeader)
        num = 0
        count += 1
if f3 is not None:
    f3.close()  # close the final chunk (previously leaked)
fin.close()  # close the input file (previously leaked)
print('拆分完成!')
"cjt@snp.net"
] | cjt@snp.net |
0284b2f0d100aa1fb76d5248cd791f88aa401612 | 8052c4237d2ef6da92c386d3199937b26a47cc46 | /ckuser/migrations/0021_auto_20190608_1148.py | 607f18e7ce94a45bdeffbf2cde0440119f75d74d | [] | no_license | prateekagarwal721/ck_backedn | 58f1039902bfb87b930986069c393c11ebef7c80 | 70e9449171153d5c7850999d25b4e5562c64d8f0 | refs/heads/master | 2020-06-01T00:16:25.144131 | 2019-09-12T16:43:17 | 2019-09-12T16:43:17 | 190,553,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering ckuser.date_joined.

    NOTE(review): the default is the fixed timestamp captured when the
    migration was generated (typical for auto-generated field defaults),
    not a callable such as timezone.now.
    """
    dependencies = [
        ('ckuser', '0020_auto_20190608_1147'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ckuser',
            name='date_joined',
            field=models.DateTimeField(default=datetime.datetime(2019, 6, 8, 11, 48, 32, 796411, tzinfo=utc), verbose_name='date joined'),
        ),
    ]
| [
"prateek.agarwal@sporthood.in"
] | prateek.agarwal@sporthood.in |
51d2268bc441dc7e0809af59b51d58def2641769 | 5f10ca2439551040b0af336fd7e07dcc935fc77d | /Binary tree/二叉树性质相关题目/求每层的宽度.py | dcef6bda78001250e1f9f1a433d3a54382bd13b5 | [] | no_license | catdog001/leetcode2.0 | 2715797a303907188943bf735320e976d574f11f | d7c96cd9a1baa543f9dab28750be96c3ac4dc731 | refs/heads/master | 2021-06-02T10:33:41.552786 | 2020-04-08T04:18:04 | 2020-04-08T04:18:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/13 13:06
# @Author : LI Dongdong
# @FileName: 求每层的宽度.py
''''''
'''
题目分析
1.要求:求每层中,左边第一个到右边第一个的宽度
2.理解:
3.类型:
4.确认输入输出及边界条件:
4.方法及方法分析:
time complexity order:
space complexity order:
'''
'''
思路:bfs + [node, index]
方法:
deque record node and index (2* index (+1))
traversal all nodes, calculate index dif of every level node by for loop
time complex:
space complex:
易错点:
'''
class TreeNode:
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None  # children start out absent
from collections import deque
class Solution:
    def widthOfBinaryTree(self, root: "TreeNode") -> list:
        """Return the width of every level of the tree (BFS).

        Each node is numbered as in a complete binary tree (left child of
        index i is 2*i, right child is 2*i + 1); the width of a level is
        last_index - first_index + 1.

        Fixes over the original: the annotation said ``-> int`` although a
        list is returned, the empty tree returned the int 0 instead of an
        empty list, and a dead ``width = 0`` initialiser was removed.
        """
        if not root:  # corner case: empty tree has no levels
            return []
        res = []
        queue = deque()
        queue.append([root, 0])
        while queue:
            width = len(queue)  # number of nodes on the current level
            left = queue[0][1]
            right = queue[-1][1]
            res.append(right - left + 1)
            for _ in range(width):  # consume exactly this level's nodes
                node, index = queue.popleft()
                if node.left:
                    queue.append([node.left, index * 2])
                if node.right:
                    queue.append([node.right, index * 2 + 1])
        return res
'''
思路:dfs + dic[level:index]
方法:
main: set level, index, dic
helper:
DFS scan every node, renew level and index = index * 2 (+ 1)
save dic[level] = [first node index, other node index]
time complex:
space complex:
易错点:dic[level] = max(index + 1, dic[level])
'''
# 求tree的每层的节点数,求哪一层具有最多节点数,节点数是多少
# input: root
# output: dic:key:每层序数,value:每层的node个数
class TreeNode:
    """Binary-tree node used by the DFS variant below."""

    def __init__(self, x):
        # Store the payload; both children begin as "missing".
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def widthOfBinaryTree(self, root: "TreeNode") -> dict:
        """Map each tree level to [first_index, last_index] via DFS.

        Node indices follow complete-binary-tree numbering (left = 2*i,
        right = 2*i + 1); because the walk is pre-order, the first visit
        on a level fixes the first index and later visits overwrite the
        last index.

        Fixes over the original: removed the dead ``self.res = 0``
        attribute, corrected the ``-> int`` annotation (a dict is
        returned), and the empty tree now returns {} instead of 0.
        """
        if not root:  # corner case: empty tree
            return {}
        dic = {}  # level -> [first index seen, last index seen]
        self.dfs(root, 0, 0, dic)
        return dic

    def dfs(self, root, level, index, dic):
        """Pre-order walk recording the first/last index of each level."""
        if not root:
            return
        if level in dic:
            dic[level][1] = index  # later visits update the last index
        else:
            dic[level] = [index, index]  # first node seen on this level
        self.dfs(root.left, level + 1, index * 2, dic)
        self.dfs(root.right, level + 1, index * 2 + 1, dic)
from collections import deque
def constructTree(nodeList):
    """Build a binary tree from a level-order (BFS) value list.

    ``None`` entries mark missing children (LeetCode's compact
    level-order format: children of a missing node are not listed).
    Returns the root TreeNode, or None for an empty/None-rooted list.

    Fixes over the original: an empty list no longer raises IndexError,
    a value of 0 is no longer treated as a missing node (the old
    ``if elem:`` test), and ``None`` children are no longer pushed into
    the work queue (which crashed on deeper trees when a None was
    popped and assigned to).
    """
    if not nodeList or nodeList[0] is None:
        return None
    root = TreeNode(nodeList[0])
    pending = deque([root])
    pos = 1
    while pending and pos < len(nodeList):
        parent = pending.popleft()
        # Left child, if present in the list and not a gap.
        if nodeList[pos] is not None:
            parent.left = TreeNode(nodeList[pos])
            pending.append(parent.left)
        pos += 1
        if pos >= len(nodeList):
            break
        # Right child.
        if nodeList[pos] is not None:
            parent.right = TreeNode(nodeList[pos])
            pending.append(parent.right)
        pos += 1
    return root
root = constructTree([1,2,3,None,5,6])
x = Solution()
x.widthOfBinaryTree(root) | [
"lidongdongbuaa@gmail.com"
] | lidongdongbuaa@gmail.com |
9b28a494fb6b543207c34e1d6e2846be94ab030c | fea5ab6175de05ede2808e26b72cb6de6ae52aba | /poker/validators/royal_flush_validator.py | f8f72202575e6b874cbe185d50f5c12738c1a85e | [] | no_license | phamin2001/Texas-Hold-em-Poker | 7fc71d285307a96b1e1577889c3c996b503d81e2 | 2299a25c2725b430c43e59879e903a656c8fca19 | refs/heads/master | 2022-11-12T14:02:42.491005 | 2020-07-07T06:18:04 | 2020-07-07T06:18:04 | 271,067,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | from poker.validators import StraightFlushValidator
class RoyalFlushValidator(StraightFlushValidator):
    """Detects a royal flush: a straight flush whose top card is an Ace."""

    def __init__(self, cards):
        self.cards = cards
        self.name = "Royal Flush"

    def is_valid(self):
        """Return True when the hand holds an Ace-high straight flush."""
        checker = StraightFlushValidator(cards=self.cards)
        if not checker.is_valid():
            return False
        # Mirrors the original assumption that valid_cards() returns the
        # run in ascending rank order, so [-1] is the highest card.
        return checker.valid_cards()[-1].rank == "Ace"

    def valid_cards(self):
        """Delegate to the straight-flush validator for the winning cards."""
        return StraightFlushValidator(cards=self.cards).valid_cards()
| [
"amin.pahlavani@gmail.com"
] | amin.pahlavani@gmail.com |
5ef60668d91237a987d9c330447fbc97da91786a | 65f1cccaa0df91fff1bd8786a0a5f305c145a21a | /even.py | 4af272baffb3dd775c5b3df6ca41b368d9312468 | [] | no_license | sandeepny441/tom_git | 16239bd71851f248104e5785acb0a84b3b9ed0cc | 56e4327676b08de0d9880ff5a392080792bdcbeb | refs/heads/master | 2023-05-31T14:59:53.260479 | 2021-06-27T20:44:24 | 2021-06-27T20:44:24 | 348,614,790 | 0 | 0 | null | 2021-06-27T20:44:24 | 2021-03-17T07:17:28 | HTML | UTF-8 | Python | false | false | 112 | py | def is_even(given_num):
if given_num % 2 == 0:
return 'even'
return 'Odd'
print(is_even(100))
| [
"sandeepny441@gmail.com"
] | sandeepny441@gmail.com |
55a5c60123c11c03dbae53d05a091e926626cf67 | 4807c5ce0bcbb7f3c678f21a0e02f586ef1862e2 | /grainburn/grainburn.py | 913730da3de86f1db42d47328385357b2e9aa961 | [] | no_license | JacobRoth/JacobRothSketchbook | 91ecb74ac993304b2959448f71da61920c0b1d60 | c36c130dfc64a946adbdf88a2cb0b687de359e53 | refs/heads/master | 2021-03-12T23:15:17.217630 | 2014-04-01T20:24:56 | 2014-04-01T20:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import random
import math
def convertDown(image):
    """Collapse an RGB image to a 2-D fuel/chamber mask.

    A pixel whose channels are not all non-zero (``.all()`` False, which
    includes pure black) is classified as fuel (0); a fully non-zero
    pixel is chamber (1).

    Fixes over the original: the loops stopped at ``rows-1``/``columns-1``
    and never classified the last row/column, and the buffer was
    initialised with ``np.eye`` (identity matrix) instead of zeros,
    leaving stray diagonal 1s in the unprocessed cells.
    """
    rows, columns, _ = image.shape
    bufferedImage = np.zeros((rows, columns))
    for row in range(rows):
        for column in range(columns):
            if image[row][column].all() == 0:
                bufferedImage[row][column] = 0  # fuel
            else:
                bufferedImage[row][column] = 1  # chamber
    return np.copy(bufferedImage)
def scopeGrain():
    """Stub: intended to compute, for every fuel cell, the distance to the
    nearest chamber pixel (a draft is kept below as a triple-quoted
    string).  Currently does nothing.

    NOTE(review): depends on the module-level globals rows/columns/
    fuelgrain set by main(); the loops also stop at rows-1/columns-1,
    skipping the last row/column - confirm before enabling the draft.
    """
    for row in range(0,rows-1):
        for column in range(0,columns-1):
            if fuelgrain[row][column] == 1: # we found chamber!
                pass
            else:
                pass
            '''# we're analyzing fuel
            #let's analyze this for the nearest None (chamber) pixel
            distanceToChamber = 10000000000000 # arbitrarily large
            for rrr in range(0,rows-1):
                for ccc in range(0,columns-1):
                    if fuelgrain[rrr][ccc] is None:
                        sqrtMe = (ccc-column)*(ccc-column) + (rrr-row)*(rrr-row)
                        distanceCurrent = math.sqrt( sqrtMe)
                        if distanceCurrent < distanceToChamber:
                            distanceToChamber = distanceCurrent
            fuelgrain[row][column] = distanceToChamber'''
def main():
    """Load the sample grain image, binarise it, and show the result."""
    global fuelgrain, rows, columns
    fuelgrain = convertDown(mpimg.imread('samplegrain.png'))
    rows, columns = fuelgrain.shape
    #scopeGrain()
    figure, axes = plt.subplots()
    plt.imshow(fuelgrain)
    plt.show()
if __name__=="__main__":
    # Run the viewer only when executed as a script.
    main()
    #x = input(">")
| [
"rothj1459@gmail.com"
] | rothj1459@gmail.com |
8ed992846cecdd828e575dfa6c66da38336b9797 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/scatter/legendgrouptitle/font/_family.py | 630d0965793bea6d3cb6b80383b246586092f423 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 554 | py | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``scatter.legendgrouptitle.font.family``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="scatter.legendgrouptitle.font",
        **kwargs,
    ):
        # Extract the generated-validator options up front, with the same
        # defaults the original inline kwargs.pop(...) calls used.
        edit_type = kwargs.pop("edit_type", "style")
        no_blank = kwargs.pop("no_blank", True)
        strict = kwargs.pop("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            strict=strict,
            **kwargs,
        )
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
4cf602dd90dff5469578d17432152af4c4ed5a52 | b5758fdb0b0f757b045ae091e85007366a2b78ab | /opcua2mqtt.py | 5d66c60a3718e2e716e428e61bbe5ad012aff823 | [
"Apache-2.0"
] | permissive | tyrbonit/opcua2cloud | f5903e984ba325268fdf4ecd277b5f3677474818 | 32e1e745e4939f8d4fd51892d9a51230ffdfc198 | refs/heads/master | 2021-01-02T20:48:58.109135 | 2019-01-10T23:36:41 | 2019-01-10T23:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,930 | py | import sys
#sys.path.insert(0, "..")
import time
from opcua import Client
#import context # Ensures paho is in PYTHONPATH
import paho.mqtt.publish as publish
import awsiot
import redis
MQTT_MODE = 'simple' # simple,simplewithpass,awsiot
AWSIOT_AK = 'test'
AWSIOT_SK = 'test'
HOST = '127.0.0.1'
SAVE_TO_REDIS_EN = False
#------------------------
# normal_mqtt_publish
#------------------------
def normal_mqtt_publish(topic, payload, hostname='m2m.eclipse.org'):
    """Publish a single MQTT message to a broker (best effort).

    Args:
        topic: MQTT topic to publish to.
        payload: message body.
        hostname: broker host; defaults to the public Eclipse broker,
            matching the previously hard-coded value, so existing callers
            are unaffected.

    Errors are caught and printed rather than raised (the caller loops
    forever and must not die on transient network failures).
    """
    try:
        publish.single(topic, payload, hostname=hostname)
        # Log the actual broker/topic used (identical output to the old
        # fixed message for the default hostname and existing caller).
        print('publish data to ' + hostname + ' ' + topic + ' topic')
    except Exception as e:
        print('normal_mqtt_publish exception:', str(e))
#------------------------
# awsiot_mqtt_publish
#------------------------
def awsiot_mqtt_publish(topic, payload):
    """Publish one message through AWS IoT using the module-level
    AWSIOT_AK/AWSIOT_SK credentials; failures are printed, not raised."""
    try:
        mq_client = awsiot.awsiot_client(access_key=AWSIOT_AK, secret_key=AWSIOT_SK)
        mq_client.publish(topic, payload)
    except Exception as e:
        print('awsiot_mqtt_publish exception:', str(e))
if __name__ == "__main__":
    # Bridge loop: poll one OPC UA variable and forward it to MQTT
    # (and optionally Redis) once per second, reconnecting on errors.
    client = Client("opc.tcp://" + HOST + ":4840/freeopcua/server/")
    #client = Client("opc.tcp://localhost:4840/freeopcua/server/")
    # client = Client("opc.tcp://admin@localhost:4840/freeopcua/server/") #connect using a user
    r = redis.Redis(host=HOST, port=6379, db=0)
    while True:
        try:
            client.connect()
            # Client has a few methods to get proxy to UA nodes that should always be in address space such as Root or Objects
            root = client.get_root_node()
            print("Objects node is: ", root)
            # Node objects have methods to read and write node attributes as well as browse or populate address space
            print("Children of root are: ", root.get_children())
            # get a specific node knowing its node id
            #var = client.get_node(ua.NodeId(1002, 2))
            #var = client.get_node("ns=3;i=2002")
            #print(var)
            #var.get_data_value() # get value of node as a DataValue object
            #var.get_value() # get value of node as a python builtin
            #var.set_value(ua.Variant([23], ua.VariantType.Int64)) #set node value using explicit data type
            #var.set_value(3.9) # set node value using implicit data type
            # Now getting a variable node using its browse path
            myvar = root.get_child(["0:Objects", "2:MyObject", "2:MyVariable"])
            obj = root.get_child(["0:Objects", "2:MyObject"])
            client.disconnect()
            print("myvar is: ", myvar)
            print("myobj is: ", obj)
            #-------------------------
            # save opc ua data redis
            #-------------------------
            if SAVE_TO_REDIS_EN == True:
                try:
                    l = r.lpush('opcua_001',str(myvar))
                    print('save opcua data to redis, ret=',l)
                except Exception as e:
                    print('save opcua data to redis exception:',str(e))
                # Recreate the Redis connection after each push attempt.
                try:
                    r.close()
                    r = redis.Redis(host=HOST, port=6379, db=0)
                except:
                    pass
            # Stacked myvar access
            # print("myvar is: ", root.get_children()[0].get_children()[1].get_variables()[0].get_value())
            # Forward the sampled value to the configured MQTT backend.
            if MQTT_MODE == 'simple':
                normal_mqtt_publish('opcua2mqtt/',str(myvar))
            elif MQTT_MODE == 'awsiot':
                awsiot_mqtt_publish('opcua2mqtt/',str(myvar))
            else:
                print('no valid mqtt_mode defined')
        except Exception as e:
            print('exception 2:',str(e))
            # Best-effort disconnect so the next iteration can reconnect.
            try:
                client.disconnect()
            except:
                pass
        time.sleep(1)
| [
"aixi.wang@hotmail.com"
] | aixi.wang@hotmail.com |
12c48d5ff16675e2a8f6a2d902504de1ea724719 | 3540272eb4522c637fb293a924a6ad8d7b365718 | /tribune/news/models.py | 33b78f25b5e2b10758918424db9589c053a886ce | [] | no_license | nevooronni/tribune | b0e80a4613758690702fa88eb99f44f4e8e66a30 | 7218d3514277ce408128b4e8c66da5639cf7dec4 | refs/heads/master | 2021-08-14T16:55:46.280863 | 2017-11-16T08:41:18 | 2017-11-16T08:41:18 | 110,561,868 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | from django.db import models#import models class configured to allow us to communicate with the db
import datetime as dt
class Editor(models.Model):#created editor class inherits from model class
    """Newspaper editor: basic contact details, ordered by first name."""
    first_name = models.CharField(max_length = 30)#charfield is the sql equivalent to varchar a string field for small to large size strings
    last_name = models.CharField(max_length = 30)
    email = models.EmailField()
    phone_number = models.CharField(max_length = 10,blank = True)
    def save_editor(self):
        """Persist this editor (thin wrapper around Model.save)."""
        self.save()
    def delete_editor(self):
        """Remove this editor (thin wrapper around Model.delete)."""
        self.delete()
    #def display_all(self):
        #self.objects.all()
    #this updates our models so we can easily read it in the shell
    def __str__(self):#string representation of our model
        return self.first_name
    class Meta:
        ordering = ['first_name']
class tags(models.Model):
    """Article tag.

    NOTE(review): lowercase class name violates PEP 8 but is kept because
    Article.tags and existing migrations reference it.
    """
    name = models.CharField(max_length = 30)
    def __str__(self):
        return self.name
class Article(models.Model):
    """News article with editor, tags, publication timestamp and image."""
    title = models.CharField(max_length = 60)
    post = models.TextField()#textarea tag in html
    editor = models.ForeignKey(Editor)#foreign key column defines one to many relationship to editor
    tags = models.ManyToManyField(tags)#many to many relationship with the tags class
    pub_date = models.DateTimeField(auto_now_add=True)#timestamp to establish when the articles were published
    article_image = models.ImageField(upload_to = 'articles/')#image field takes upload_to argument defines where the image will be stored in the file system.
    #def save_article(self):
        #self.save()
    def __str__(self):
        return self.title
    @classmethod
    def todays_news(cls):
        """Articles published today (server-local date)."""
        today = dt.date.today()#module to get todays date
        news = cls.objects.filter(pub_date__date = today)#qeury db to filter articles by current date
        return news
    @classmethod
    def day_news(cls,date):#takes date object as an argument
        """Articles published on the given date."""
        news = cls.objects.filter(pub_date__date = date)
        return news
    @classmethod
    def search_by_title(cls,search_term):
        """Case-insensitive substring search over article titles."""
        news = cls.objects.filter(title__icontains=search_term)#will filter our model data using the __icontains filter will check if any word in the title field of our articles matches the search_term
        return news
| [
"nevooronni@gmail.com"
] | nevooronni@gmail.com |
5299ea0fe7a31a54d5deabcb2c8a6f8591fb6325 | 09c9600eccc223db658cf3ae4f5292333f22e3e7 | /orange_manage/apps.py | 22b81363b9618573a1a15de6cad9de58f8099247 | [] | no_license | sy159/order_background_management | d19bfd209475731dce24b6a26f824fe0ab43fb37 | 0f419376ed90aca85ef1f517927c7a2b1fc8e2b6 | refs/heads/master | 2022-03-08T17:48:10.000382 | 2019-11-27T12:16:57 | 2019-11-27T12:16:57 | 149,939,020 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.apps import AppConfig
class OrangeMangeConfig(AppConfig):
    """App configuration for the orange_manage app.

    NOTE(review): the class name spells "Mange" (likely a typo for
    "Manage") but is kept because INSTALLED_APPS/default_app_config may
    reference it by dotted path.
    """
    name = 'orange_manage'
| [
"snow@cqgynet.com"
] | snow@cqgynet.com |
2c0897784bb77d41c6320fddcb72b830ce8528cb | 53d23ecc25f53323e3cc2a22d2dc5fe6d9ffae57 | /electrum/plugins/gpg_integration/__init__.py | 258671bccf6d879b16f55cc1a1bc85ab1592361a | [
"MIT"
] | permissive | asfin/electrum | b10df9f7a51d9515d6a0ec03f7a4d2fdd7331f37 | 9abf5cda9b2906a0575b3629bccc4b1ceed8df49 | refs/heads/master | 2021-01-13T06:54:03.918146 | 2018-04-24T10:14:20 | 2018-09-07T14:57:00 | 34,674,696 | 2 | 2 | null | 2017-02-14T08:03:09 | 2015-04-27T15:27:36 | Python | UTF-8 | Python | false | false | 250 | py | from electrum.i18n import _
# Human-readable plugin name shown in Electrum's plugins dialog.
fullname = _('GPG integration')
# Description rendered as HTML in the plugin list.
description = ''.join([
    _('Allows to use your bitcoin private keys as familiar GPG keys'), '<br>',
    _('Open plugin window from main menu Tools->GPG tools')
])
# Only the Qt GUI provides a window for this plugin.
available_for = ['qt']
| [
"asfins@gmail.com"
] | asfins@gmail.com |
902d047c818eadc130281316b90cfde772634bd0 | 6b1aaded6a6d7ad8133eb93f5570d087b9ecefc0 | /57.py | a778a4be509d89c1f7027528296bead44d2f43f7 | [] | no_license | huangyingw/Leetcode-Python | 53a772e1ecf298c829c0f058faa54c420420b002 | 9513e215d40145a5f2f40095b459693c79c4b560 | refs/heads/master | 2021-07-16T10:10:02.457765 | 2020-07-01T05:35:21 | 2020-07-01T05:35:21 | 192,150,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    def insert(self, intervals, newInterval):
        """Insert newInterval into a sorted, disjoint interval list.

        :type intervals: List[Interval]
        :type newInterval: Interval
        :rtype: List[Interval]

        Note: newInterval is mutated in place to absorb any overlapping
        intervals, matching the original implementation.
        """
        merged = []
        pos = 0
        total = len(intervals)
        # Copy every interval that ends strictly before the new one starts.
        while pos < total and intervals[pos].end < newInterval.start:
            merged.append(intervals[pos])
            pos += 1
        # Fold every overlapping interval into newInterval.
        while pos < total and intervals[pos].start <= newInterval.end:
            if intervals[pos].start < newInterval.start:
                newInterval.start = intervals[pos].start
            if intervals[pos].end > newInterval.end:
                newInterval.end = intervals[pos].end
            pos += 1
        merged.append(newInterval)
        # Everything left lies strictly after the merged interval.
        merged.extend(intervals[pos:])
        return merged
"tiant@qualtrics.com"
] | tiant@qualtrics.com |
5ff636a6548041d3e2e3d0bf15865a94696a1f08 | 99364458e4fe84fc006fd90b7924915e23d2406f | /risk_assess/uncertain_agent/test_python_cpp_interface.py | 2636ca4ccddb59383a5ae78441da7716962fe570 | [] | no_license | yangkaia123/risk_assess | 27cf4991f0901960dc74983fe76d735054f6bcc1 | 8da6a2d7e4332ebc7ee8d73202c56688167daedb | refs/heads/master | 2023-05-15T09:04:52.220295 | 2020-07-12T20:23:38 | 2020-07-12T20:23:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | import ctypes
from ctypes import cdll
class AMS(ctypes.Structure):
    """ctypes mirror of the C ``AMS`` struct; every member is a C double.

    Field order must match the C declaration exactly (ABI layout).
    """

    _fields_ = [
        (member, ctypes.c_double)
        for member in (
            'E_x', 'E_y', 'E_xy', 'E2_x', 'E2_y',
            'E_xvs', 'E_xvc', 'E_yvs', 'E_yvc',
            'E_xs', 'E_xc', 'E_ys', 'E_yc',
            'E_v', 'E2_v',
        )
    ]
class ExogMoments(ctypes.Structure):
    """ctypes mirror of the C ``ExogMoments`` struct; all C doubles.

    Field order must match the C declaration exactly (ABI layout).
    """

    _fields_ = [
        (member, ctypes.c_double)
        for member in (
            'E_wv', 'E2_wv',
            'E_c', 'E2_c',
            'E_s', 'E2_s',
            'E_cs', 'E_cw', 'E_sw',
        )
    ]
def wrap_function(lib, funcname, restype, argtypes):
    """Look up ``funcname`` in ``lib`` and declare its C signature.

    Args:
        lib: loaded shared library (or any object exposing the function).
        funcname: name of the symbol to fetch.
        restype: ctypes return type to assign.
        argtypes: list of ctypes argument types to assign.

    Returns:
        The function object with ``restype``/``argtypes`` set.

    Fix: uses the builtin ``getattr`` instead of calling ``__getattr__``
    directly, which also works for objects that resolve attributes
    through normal lookup rather than a custom ``__getattr__`` hook.
    """
    func = getattr(lib, funcname)
    func.restype = restype
    func.argtypes = argtypes
    return func
lib = cdll.LoadLibrary('/home/allen/plan_verification_rss_2020/risk_assess/build/libtest_cpp_moment_dynamics.so')
# Bind the C entry point: AMS propagate_moments(AMS, ExogMoments).
test_fun = wrap_function(lib, 'propagate_moments', AMS, [AMS, ExogMoments])
# Zero-initialised structs used as benchmark inputs.
foo1 = AMS()
foo2 = ExogMoments()
import time
# Micro-benchmark: wall-clock time of 1000 propagation calls.
tstart = time.time()
for i in range(int(1e3)):
    bar = test_fun(foo1, foo2)
t_tot = time.time() - tstart
print(t_tot) | [
"allenw@mit.edu"
] | allenw@mit.edu |
844e243a462d989d07a4e429980a714f3f1d65f9 | 22f3a291ea76f63905fe781b4d5c94c1791ddb4c | /workflow/deploy/tensorflow-serving/src/app/entrypoint.py | 30549ff4564d901677cc0ebeb53277c300972cdb | [
"MIT"
] | permissive | bgulla/jetson | 529920442ca4ecfc4c49045510ff8f37515ea248 | e037a99c9ca29e6c9331babcacbc0d530c8ec762 | refs/heads/master | 2020-06-25T01:01:46.044669 | 2019-07-25T13:12:39 | 2019-07-25T13:12:39 | 199,147,932 | 1 | 1 | MIT | 2019-07-27T10:37:51 | 2019-07-27T10:04:15 | Python | UTF-8 | Python | false | false | 1,716 | py | import os
import signal
import subprocess
# Making sure to use virtual environment libraries
activate_this = "/home/ubuntu/tensorflow/bin/activate_this.py"
exec(open(activate_this).read(), dict(__file__=activate_this))

# Change directory to app
os.chdir("/app")

# Handles to the two child server processes (set once Popen succeeds).
# Fixes over the original: a stray bare `s` statement (NameError at
# startup) was removed, and the model-server command string was missing
# a space between "--model_base_path=/model/example" and
# "--rest_api_port", fusing the two flags into one broken argument.
tf_server = ""
api_server = ""
try:
    tf_server = subprocess.Popen(["tensorflow_model_server "
                                  "--model_base_path=/model/example "
                                  "--rest_api_port=9000 --model_name=example"],
                                 stdout=subprocess.DEVNULL,
                                 shell=True,
                                 preexec_fn=os.setsid)
    print("Started TensorFlow Serving ImageClassifier server!")
    api_server = subprocess.Popen(["uvicorn app.main:app --host 0.0.0.0 --port 80"],
                                  stdout=subprocess.DEVNULL,
                                  shell=True,
                                  preexec_fn=os.setsid)
    print("Started API server!")
    while True:
        print("Type 'exit' and press 'enter' OR press CTRL+C to quit: ")
        in_str = input().strip().lower()
        if in_str == 'q' or in_str == 'exit':
            print('Shutting down all servers...')
            os.killpg(os.getpgid(tf_server.pid), signal.SIGTERM)
            os.killpg(os.getpgid(api_server.pid), signal.SIGTERM)
            print('Servers successfully shutdown!')
            break
        else:
            continue
except KeyboardInterrupt:
    print('Shutting down all servers...')
    # Guard against Ctrl-C arriving before a server was actually started
    # (the placeholders "" have no .pid).
    if tf_server:
        os.killpg(os.getpgid(tf_server.pid), signal.SIGTERM)
    if api_server:
        os.killpg(os.getpgid(api_server.pid), signal.SIGTERM)
    print('Servers successfully shutdown!')
"helmuthva@googlemail.com"
] | helmuthva@googlemail.com |
bfe91353f94b7324769a5908cb44a049673dd6e2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/287/85098/submittedfiles/testes.py | 890ae53b050f276e8465ffbca31d1de6d83407c7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | # -*- coding: utf-8 -*-
from minha_bib import*
# Smoke test for the local helper library: multiply 2 by 3 and print it.
print(multiplicacao(2,3))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
67752a7742ec7157fc73cc678de0694074b589de | 9567b87d380540ca9c92de5fda66981029b831d9 | /Yaml Converter/MakeROSMap.py | 241629d9bb44cef6f4ab2c9ca656b8b7279d43e9 | [] | no_license | Dhaour9x/cSLAM | 782d97fcc5b03aa878604f9dab9caced2fc1873e | 78cf2a2847e02430f13cec924175a6d08c9aa32b | refs/heads/master | 2022-10-29T14:59:24.337726 | 2020-06-13T19:03:27 | 2020-06-13T19:03:27 | 272,068,722 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,189 | py | import math
import os.path
import cv2
#
# This is a start for the map program
#
import pywt.tests.data.generate_matlab_data
#import pywt
prompt = '> '
print("What is the name of your floor plan you want to convert to a ROS map:")
#file_name = eval(input(prompt))
file_name = input(prompt)
print("You will need to choose the x coordinates horizontal with respect to each other")
print("Double Click the first x point to scale")
#
# Read in the image
#
image = cv2.imread(file_name)
#
# Some variables
#
# ix/iy: last clicked pixel; x1/y1: the four calibration clicks;
# sx/sy are set later inside the mouse callback (scale factors).
ix, iy = -1, -1
x1 = [0, 0, 0, 0]
y1 = [0, 0, 0, 0]
font = cv2.FONT_HERSHEY_SIMPLEX
#
# mouse callback function
# This allows me to point and
# it prompts me from the command line
#
def draw_point(event, x, y, flags, param):
    """OpenCV mouse callback collecting four calibration double-clicks.

    Clicks 1-2 fix the horizontal (x) scale, clicks 3-4 the vertical (y)
    scale; after each pair the user types the real-world distance in meters.
    Communicates through module-level globals (ix, iy, x1, y1, sx, sy).
    NOTE(review): the global statement names ``y1n`` -- looks like a typo for
    ``y1``; it still works because ``y1`` is only mutated in place.
    """
    global ix, iy, x1, y1n, sx, sy
    if event == cv2.EVENT_LBUTTONDBLCLK:
        ix, iy = x, y
        print((ix, iy))
#
# Draws the point with lines around it so you can see it
#
        image[iy, ix] = (0, 0, 255)
        cv2.line(image, (ix + 2, iy), (ix + 10, iy), (0, 0, 255), 1)
        cv2.line(image, (ix - 2, iy), (ix - 10, iy), (0, 0, 255), 1)
        cv2.line(image, (ix, iy + 2), (ix, iy + 10), (0, 0, 255), 1)
        cv2.line(image, (ix, iy - 2), (ix, iy - 10), (0, 0, 255), 1)
#
# This is for the 4 mouse clicks and the x and y lengths
#
        if x1[0] == 0:
            # first x calibration click
            x1[0] = ix
            y1[0] = iy
            print("Double click a second x point")
        elif x1[0] != 0 and x1[1] == 0:
            # second x click: ask the real distance, derive x scale factor
            x1[1] = ix
            y1[1] = iy
            prompt = '> '
            print("What is the x distance in meters between the 2 points?")
            delta = float(input(prompt))
            #dx: float = math.sqrt((x1[1] - x1[0]) ** 2 + (y1[1] - y1[0]) ** 2) * .05
            # .05 presumably the target map resolution (m/pixel) -- matches the
            # "resolution: 0.050000" written to the YAML below; TODO confirm.
            dx = math.sqrt(((x1[1] - x1[0]) ** 2) + ((y1[1] - y1[0]) ** 2)) * .05
            sx = delta / dx
            print("You will need to choose the y coordinates vertical with respect to each other")
            print('Double Click a y point')
        elif x1[1] != 0 and x1[2] == 0:
            # first y calibration click
            x1[2] = ix
            y1[2] = iy
            print('Double click a second y point')
        else:
            # second y click: derive y scale, rescale the image and save it
            prompt = '> '
            print("What is the y distance in meters between the 2 points?")
            delay = float(input(prompt))
            x1[3] = ix
            y1[3] = iy
            dy = math.sqrt((x1[3] - x1[2]) ** 2 + (y1[3] - y1[2]) ** 2) * .05
            sy = delay / dy
            print((sx, sy))
            res = cv2.resize(image, None, fx=sx, fy=sy, interpolation=cv2.INTER_CUBIC)
            cv2.imwrite("ground.pgm", res);
            cv2.imshow("Image2", res)
# for i in range(0,res.shape[1],20):
# for j in range(0,res.shape[0],20):
# res[j][i][0] = 0
# res[j][i][1] = 0
# res[j][i][2] = 0
# cv2.imwrite("KEC_BuildingCorrectedDots.pgm",res)
# Show the image in a new window
# Open a file
# --- save the map image and its ROS YAML descriptor ---
# NOTE(review): because of the exit() at the end, the window setup and event
# loop below this block are unreachable as written.
prompt = ">"
print("what is the name of the map")
mapName = input(prompt)
prompt = '> '
print("Where is the desired location of the map and yaml file?")
print("NOTE: if this program is not run on the TurtleBot, Please input the file location of where the map should be "
      "saved on TurtleBot. The file will be saved at that location on this computer. Please then tranfer the files to "
      "TurtleBot.")
mapLocation = input(prompt)
completeFileNameMap = os.path.join(mapLocation, mapName + ".pgm")
completeFileNameYaml = os.path.join(mapLocation, mapName + ".yaml")
yaml = open(completeFileNameYaml, "w")
# NOTE(review): `pywt.tests.data.generate_matlab_data.res` looks like an IDE
# auto-import artifact standing in for the `res` local of draw_point; this
# attribute almost certainly does not exist at runtime -- verify upstream.
cv2.imwrite(completeFileNameMap, pywt.tests.data.generate_matlab_data.res)
#
# Write some information into the file
#
yaml.write("image: " + mapLocation + "/" + mapName + ".pgm\n")
yaml.write("resolution: 0.050000\n")
yaml.write("origin: [" + str(-1) + "," + str(-1) + ", 0.000000]\n")
yaml.write("negate: 0\noccupied_thresh: 0.65\nfree_thresh: 0.196")
yaml.close()
exit()
# Window + event loop (unreachable in practice: see the exit() above).
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.setMouseCallback('image', draw_point)
#
# Waiting for a Esc hit to quit and close everything
#
while 1:
    cv2.imshow('image', image)
    k = cv2.waitKey(20) & 0xFF
    if k == 27:
        # Esc: leave the loop and clean up
        break
    elif k == ord('a'):
        print("Done")
cv2.destroyAllWindows()
| [
"riadh.dhaoui@rub.de"
] | riadh.dhaoui@rub.de |
e7f5a53908b335955397208487dc62f8fbd7f14e | 5ccd5739764468538a1b52cc43dfbc800e480a82 | /Common_Word_Forms_1.py | cab16cb1a347b78118153858f693b6896ea3446a | [] | no_license | dhmo1900/Python-Base-Word-Form-Finder | 5cad5913bcb54b488625e6a11f0f8c644dd103cb | 8441b37472d3fdab495517af973785559d682626 | refs/heads/master | 2022-11-18T06:16:49.000407 | 2020-07-10T02:15:42 | 2020-07-10T02:15:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from urllib import request
from bs4 import BeautifulSoup
import nltk
import distance
import pprint
# Shared pretty-printer for the console output below.
pp = pprint.PrettyPrinter(indent=2)
def contiguous_string_matches(main_s, s_compare):
    """Return True if the shorter of the two strings is a prefix of the longer.

    Argument order does not matter: the pair is normalized so the longer
    string is compared against the shorter.  An empty string is a prefix of
    anything, so it always yields True.
    """
    if len(main_s) < len(s_compare):
        main_s, s_compare = s_compare, main_s
    # Same result as the original index-by-index loop, but delegates to the
    # C-level str.startswith fast path.
    return main_s.startswith(s_compare)
# Scrape the Merriam-Webster page for `word` and keep tokens that look like
# morphological variants: close in normalized edit distance AND sharing a
# prefix with the word.
csm = contiguous_string_matches
word = 'processability'  # NOTE(review): dead assignment, overwritten next line
word = 'resistant'
url = f'https://www.merriam-webster.com/dictionary/{word}'
dist = distance.nlevenshtein  # normalized Levenshtein distance in [0, 1]
html = request.urlopen(url).read().decode('utf8')
raw = BeautifulSoup(html, 'html.parser').get_text()
tokens = nltk.wordpunct_tokenize(raw) # nltk.word_tokenize
poss_tokens = [t.lower() for t in tokens
               if dist(t.lower(), word) <= 0.51
               and csm(word, t.lower())]
poss_tokens = sorted(set(poss_tokens))  # dedupe + stable output order
pp.pprint(poss_tokens)
###############################################################################
# url = "http://www.gutenberg.org/files/2554/2554-0.txt"
# response = request.urlopen(url)
# raw = response.read().decode('utf8')
# print(type(raw))
# print(len(raw))
# print(raw[:75])
| [
"thom.ives@gmail.com"
] | thom.ives@gmail.com |
def normalize_string(topic):
    """Convert each binary topic string into a 1-based list of '1' positions.

    E.g. '10101' -> [1, 3, 5].  The column count is taken from the first
    row, mirroring the original implementation.
    """
    width = 0 if not topic else len(topic[0])
    converted = []
    for row in topic:
        converted.append([col + 1 for col in range(width) if row[col] == '1'])
    return converted
def acmTeam(topic):
    """Return (max topics any pair knows, number of pairs achieving that max).

    Each element of `topic` is a '0'/'1' string; position k set means that
    person knows topic k.  A pair's knowledge is the union of their topics.

    Fixes/changes vs. the original:
      * leftover debug `print(pair_list)` removed;
      * `max(pair_list)` computed once instead of twice;
      * each row is turned into a bit mask so the pairwise union is a single
        OR + popcount instead of building a set, which also drops the
        dependency on the `normalize_string` helper.
    """
    masks = [int(t, 2) for t in topic]
    pair_topic_counts = [
        bin(masks[i] | masks[j]).count('1')
        for i in range(len(masks) - 1)
        for j in range(i + 1, len(masks))
    ]
    best = max(pair_topic_counts)
    return best, pair_topic_counts.count(best)
# Sample inputs; expected output for `topic` is (5, 2).
topic1 = ['10101', '11110', '00010']  # NOTE(review): unused
topic = ['10101' ,'11100', '11010', '00101']
print(acmTeam(topic))
"vishaldeoprasad@yahoo.com"
] | vishaldeoprasad@yahoo.com |
3f64960bfcafeb82116b2ca0f42b1115cc47d3b8 | 974e81ec41f32900f3bd633f251227ed352edd79 | /examples/01-simple.py | 18a9cd86210e21059edda91d9e801f77f44de2b6 | [
"MIT"
] | permissive | defcon201/wikipedia_ql | a3321f8bc43308b92fadfe40c1913d7644e8c666 | 5bae4ed8d1fbf9ba9c818d02a4a0905a97f31cd9 | refs/heads/main | 2023-08-25T22:33:12.148362 | 2021-10-13T19:26:03 | 2021-10-13T19:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Simple fetching of just a few properties from one pages
from pprint import pprint
from wikipedia_ql import media_wiki
wikipedia = media_wiki.Wikipedia(cache_folder='tmp/cache')
pprint(wikipedia.query(r'''
from "Pink Floyd" {
section[heading="Discography"] >> li {
a as "title";
text["\((.+)\)"] >> text-group[1] as "year";
}
}
'''))
| [
"zverok.offline@gmail.com"
] | zverok.offline@gmail.com |
0b65a0a577f66a922a2ce3ea908ed06aa11bc88f | 95a064d1cb2d9e7f4f4bbfa4eb0713e28b0a440f | /RPC/Rpc_Server.py | 71251b18db5154acd823602a17f10f8397416125 | [] | no_license | gopiprasanthpotipireddy/Python-RabbitMQ | 89302dc6b6cdaf27a44b792fda967d87894d55b0 | f2f3a9bb3c9d0cc0a5566a1fd94409fff3a6bbe6 | refs/heads/master | 2021-01-06T13:30:24.478384 | 2020-02-20T08:08:11 | 2020-02-20T08:08:11 | 241,343,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import pika
# Connect to the local RabbitMQ broker and make sure the RPC queue exists.
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
    """Return the n-th Fibonacci number (fib(0) = 0, fib(1) = 1).

    Iterative implementation: the original naive double recursion is
    exponential in n, which makes the RPC worker unusable for even
    moderately large requests; this version is O(n) time, O(1) memory.
    For n < 0 it returns n unchanged (the original recursed without bound).
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def on_request(ch, method, props, body):
    """RPC worker callback: compute fib of the request body and publish the reply."""
    request_value = int(body)
    print(" [.] fib(%s)" % request_value)
    answer = fib(request_value)
    # Route the reply to the caller's private queue, echoing its correlation id.
    reply_properties = pika.BasicProperties(correlation_id=props.correlation_id)
    ch.basic_publish(
        exchange='',
        routing_key=props.reply_to,
        properties=reply_properties,
        body=str(answer),
    )
    # Acknowledge only after the reply is published.
    ch.basic_ack(delivery_tag=method.delivery_tag)
# Fair dispatch: hand out at most one unacknowledged message at a time,
# then block forever serving requests from 'rpc_queue'.
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='rpc_queue', on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
channel.start_consuming()
"gopiprasanthp@virtusa.com"
] | gopiprasanthp@virtusa.com |
d2303e3bc513f042b2df38e56dd4badb36243be0 | 80d32b5911a1d9527c357d02690ba47cb362aca7 | /app/routes.py | a3f5b1f4632c0330de02a3c0da70813d268b46c7 | [] | no_license | Shikhar-SRJ/flaskapp_CI_CD | 0a12a4f11b10cef6dc630714b279a88c2f065f42 | 225a399b2ec98848cc8ff5ac11d5d0e5e2fc7b37 | refs/heads/main | 2023-08-23T17:18:50.183350 | 2021-10-25T05:54:37 | 2021-10-25T05:54:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,137 | py | from app import app, db
from flask import render_template, flash, url_for, redirect, request
from app.forms import LoginForm, RegistrationForm, EditProfileForm, EmptyForm, PostForm, ResetPasswordRequestForm, ResetPasswordForm
from flask_login import login_user, current_user, logout_user, login_required
from app.models import User, Post
from werkzeug.urls import url_parse
from datetime import datetime
from app.email import send_password_reset_email
@app.before_request
def before_request():
    """Stamp the logged-in user's last_seen time before every request."""
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        # No session.add needed: current_user is already tracked by the session.
        db.session.commit()
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Home page: new-post form plus a paginated feed of followed users' posts."""
    form = PostForm()
    if form.validate_on_submit():
        post = Post(body=form.post.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post is now live!')
        # Post/Redirect/Get: avoid duplicate submission on browser refresh.
        return redirect(url_for('index'))
    page = request.args.get('page', 1, type=int)
    # Third positional arg False is presumably error_out (out-of-range page
    # yields an empty list instead of 404) -- verify Flask-SQLAlchemy version.
    posts = current_user.followed_posts().paginate(page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('index', page=posts.next_num) if posts.has_next else None
    prev_url = url_for('index', page=posts.prev_num) if posts.has_prev else None
    return render_template('index.html', posts=posts.items, form=form, title='Home', next_url=next_url, prev_url=prev_url)
@app.route('/explore')
@login_required
def explore():
    """Global feed: every user's posts, newest first, paginated."""
    current_page = request.args.get('page', 1, type=int)
    newest_first = Post.query.order_by(Post.timestamp.desc())
    pagination = newest_first.paginate(current_page, app.config['POSTS_PER_PAGE'], False)
    next_url = None
    if pagination.has_next:
        next_url = url_for('explore', page=pagination.next_num)
    prev_url = None
    if pagination.has_prev:
        prev_url = url_for('explore', page=pagination.prev_num)
    return render_template('index.html', posts=pagination.items,
                           title='Explore', next_url=next_url,
                           prev_url=prev_url)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Sign a user in; honors the ?next= redirect set by @login_required."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if not user or not user.check_password(form.password.data):
            # Deliberately vague: don't reveal which of the two was wrong.
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Open-redirect guard: only follow relative URLs (empty netloc).
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', form=form)
@app.route('/logout')
def logout():
    """End the session and return to the home page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if not form.validate_on_submit():
        # Initial GET, or a POST that failed validation: (re)show the form.
        return render_template('register.html', form=form)
    new_user = User(username=form.username.data, email=form.email.data)
    new_user.set_password(form.password.data)
    db.session.add(new_user)
    db.session.commit()
    flash('You have successfully registered! Login to continue.')
    return redirect(url_for('login'))
@app.route('/user/<string:username>')
@login_required
def user(username):
    """Profile page showing the user's own posts, paginated."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    posts = user.posts.order_by(Post.timestamp.desc()).paginate(page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('user', username=user.username, page=posts.next_num) if posts.has_next else None
    prev_url = url_for('user', username=user.username, page=posts.prev_num) if posts.has_prev else None
    # EmptyForm supplies the CSRF token for the follow/unfollow buttons.
    form = EmptyForm()
    return render_template('user.html', user=user, posts=posts.items, form=form, prev_url=prev_url, next_url=next_url)
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the current user change their username and 'about me' text."""
    # Pass the current username so the form's validator can accept it unchanged.
    form = EditProfileForm(current_user.username)
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash('Your changes have been saved.')
        return redirect(url_for('edit_profile'))
    elif request.method == 'GET':
        # Initial GET: pre-fill; on a failed POST keep what the user typed.
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title='Edit Profile', form=form)
@app.route('/follow/<string:username>', methods=['POST'])
@login_required
def follow(username):
    """Start following `username` (POST-only; CSRF-protected via EmptyForm)."""
    form = EmptyForm()
    if not form.validate_on_submit():
        # Missing/invalid CSRF token: bail out to the home page.
        return redirect(url_for('index'))
    target = User.query.filter_by(username=username).first()
    if not target:
        flash(f'User {username} not found!')
        return redirect(url_for('index'))
    if target == current_user:
        flash('You cannot follow yourself')
        return redirect(url_for('user', username=username))
    current_user.follow(target)
    db.session.commit()
    flash(f'You are now following {username}')
    return redirect(url_for('user', username=username))
@app.route('/unfollow/<string:username>', methods=['POST'])
@login_required
def unfollow(username):
    """Stop following `username` (POST-only; CSRF-protected via EmptyForm)."""
    form = EmptyForm()
    if not form.validate_on_submit():
        # Missing/invalid CSRF token: bail out to the home page.
        return redirect(url_for('index'))
    target = User.query.filter_by(username=username).first()
    if not target:
        flash(f'User {username} not found!')
        return redirect(url_for('index'))
    if target == current_user:
        flash('You cannot unfollow yourself')
        return redirect(url_for('user', username=username))
    current_user.unfollow(target)
    db.session.commit()
    flash(f'You have stopped following {username}')
    return redirect(url_for('user', username=username))
@app.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
    """Start the password-reset flow by emailing a signed reset token."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = ResetPasswordRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            send_password_reset_email(user)
        # Same flash whether or not the address exists: don't leak accounts.
        flash('Check your email for the instructions to reset your password')
        return redirect(url_for('login'))
    return render_template('reset_password_request.html', title='Reset Password', form=form)
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Finish the reset flow: validate the emailed token, set a new password."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    user = User.verify_reset_password(token)
    if not user:
        # Invalid or expired token: fail quietly back to the home page.
        return redirect(url_for('index'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        user.set_password(form.password.data)
        db.session.commit()
        flash('Your password has been reset')
        return redirect(url_for('login'))
    return render_template('reset_password.html', form=form)
| [
"ptksuraj@gmail.com"
] | ptksuraj@gmail.com |
57b906c83a2d61619b54bb2afe90bb43616f21ce | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/Ladder_47_BigData/128. Hash Function.py | 6d7607ff24020011e66b6d9c8c4b6774644f2261 | [
"MIT"
] | permissive | ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | class Solution:
"""
@param key: A string you should hash
@param HASH_SIZE: An integer
@return: An integer
"""
def hashCode(self, key, HASH_SIZE):
code = 0
for ch in key:
code = (code * 33 + ord(ch)) % HASH_SIZE
return code | [
"mike.tc.chen101@gmail.com"
] | mike.tc.chen101@gmail.com |
5732900ceff22c44e5544b54d08656cd21a9af39 | a80e0ad5821b4d83164f16c0f04a0559cba4c3e8 | /papadimitriou/papadimitriou.py | cf902885d165167418c2414ae66c2029031d3867 | [] | no_license | vnck/2SATSolver | e5645e22f4cef5a1da058ef659e6cc2e333eda0f | a7a29e01f2adc050819bb141e4c13924baa7b4fb | refs/heads/master | 2020-04-01T04:51:02.144623 | 2018-10-31T08:17:55 | 2018-10-31T08:17:55 | 152,879,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,358 | py | import random
import os
import fileinput
import math
import time
# Shared state: `variables` holds the current 0/1 assignment (literal v maps
# to index abs(v) - 1).  `clauses` is unused at module level -- the solver
# builds its own local clause list.
clauses = []
variables = []
def papadimitriou(fileName):
    """Papadimitriou-style randomized local search on the 2-SAT file `fileName`.

    Returns the elapsed time in milliseconds when a satisfying assignment is
    found, or the string "err" when the flip budget (2 * n**2) is exhausted
    (the driver loop below checks for that sentinel).

    Fixes vs. the original:
      * the random-walk step flipped an entry of the local `clauses` list
        (`clauses[access] = 1 if clauses[access] == 0 else 0`) instead of the
        chosen variable's truth value -- the core bug of the algorithm;
      * the assignment is rebuilt fresh each call with exactly `varSize`
        entries; before, `varSize - 1` values were *appended* to the global
        list, under-allocating by one and leaking state between driver runs.
    """
    text = open(fileName).read()
    lines = [line.strip().split(" ") for line in text.split("\n")]
    varSize = int(lines[2][3])
    clauses = [[int(line[0]), int(line[1])] for line in lines[3:]]
    # Fresh random assignment of all varSize variables.
    del variables[:]
    for _ in range(varSize):
        variables.append(random.randint(0, 1))
    startTime = time.time()
    errClauses = returnErrClauses(clauses)
    k = 2 * varSize * varSize
    while len(errClauses) > 0 and k > 0:
        k -= 1
        unsat = random.choice(errClauses)
        varIndex = abs(random.choice(unsat))
        # Flip the chosen variable (not a clause entry).
        variables[varIndex - 1] = 1 - variables[varIndex - 1]
        errClauses = returnErrClauses(clauses)
    finishingTime = time.time() - startTime
    if len(errClauses) == 0:
        print("Satisfiable")
        return finishingTime * 1000
    else:
        print("Unsatisfiable")
        return "err"
def returnErrClauses(listClauses, assignment=None):
    """Return the clauses not satisfied by the assignment, as literal tuples.

    Each clause is a pair of non-zero literals; literal v refers to
    assignment[abs(v) - 1] and is negated when v < 0.  `assignment` defaults
    to the module-level `variables` list (backward compatible).

    Fixes vs. the original:
      * in the both-negated branch, `falseClauses.append(firstLit, secondLit)`
        passed two arguments to list.append -> TypeError at runtime; it now
        appends a tuple like the other branches;
      * the unreachable trailing `return 0` was removed;
      * the `trueClauses` bookkeeping was dead weight and is gone.
    """
    if assignment is None:
        assignment = variables  # module-level current truth assignment
    falseClauses = []
    for clause in listClauses:
        firstLit, secondLit = clause[0], clause[1]
        first = assignment[abs(firstLit) - 1]
        second = assignment[abs(secondLit) - 1]
        firstSat = (not first) if firstLit < 0 else bool(first)
        secondSat = (not second) if secondLit < 0 else bool(second)
        if not (firstSat or secondSat):
            falseClauses.append((firstLit, secondLit))
    return falseClauses
# Driver: 100 seeded runs over the same instance; report mean time (ms) and
# the number of runs that failed to find a satisfying assignment.
totalTime = 0
s = 100
errCount = 0
for i in range(s):
    random.seed(i)
    n = papadimitriou("ex500.cnf")
    if n == "err":
        errCount+=1
    else:
        totalTime += n
# NOTE(review): the mean divides by s even though failed runs contribute 0.
print("time: ", totalTime/s)
print("err: ", errCount)
"noreply@github.com"
] | noreply@github.com |
aaa856e1b4ceb5000f6ed483577d1b5f78a93e9b | 0ddcf5569f3214583ceb8ce65734d087bfe6e26f | /main.py | 3dcd6c61f282de0d01112405ddb4c6b2c9cc6a6e | [] | no_license | kubawirus/pythonCanteraTrial | ad8ebab2173e3761ee77afcef5864cc73d35b600 | f9455a312c0deb52f6fa189532c6a497fc4fb2e5 | refs/heads/master | 2023-05-01T05:12:27.325423 | 2021-05-11T12:24:44 | 2021-05-11T12:24:44 | 364,248,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | import cantera as ct
import numpy as np
| [
"k.wieremiejczuk@gmail.com"
] | k.wieremiejczuk@gmail.com |
bd788fa1d51cbbadecdea78fe391360278fae038 | d420f185ad12f0c963b0863a3ecaabbf5331527a | /HW5/reciprocal.py | 8f0529ed8228699bc12c8d3653d33a9aba73b557 | [
"MIT"
] | permissive | amitpatra/ISYE-6669-Deterministic-Optimization | b8357272a87a39554fe28e4074ccd60274cfa99a | 0d6eefaa34f3e8e909f4109504fc3e2212d8725f | refs/heads/main | 2023-03-21T04:54:32.678888 | 2020-12-20T19:47:08 | 2020-12-20T19:47:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py |
# Implement the Linear Reciprocal Maximization problem
import gurobipy as gp
import numpy as np
from gurobipy import *
# Student id: seeds the deterministic data generator below.
id = 903515184
# Invoke Gurobi to solve the SOCP
def solve_socp(id, m=400, n=50):
    """Minimize sum_i y_i subject to y_i >= 1 / (A_i x - b_i) and A_i x >= b_i.

    The reciprocal constraint y_i * (A_i x - b_i) >= 1 is modeled as the
    cone 4 + z1_i**2 <= z2_i**2 with z1 = y - (Ax - b), z2 = y + (Ax - b),
    since z2**2 - z1**2 = 4 * y * (Ax - b).
    """
    gm = gp.Model("reciprocal")
    A, b = get_data(id, m, n)
    # print(A.shape, b.shape) # (400, 50) (400,)
    x = gm.addVars(n, lb=-GRB.INFINITY, ub=GRB.INFINITY)
    y = gm.addVars(m, lb=0, ub=GRB.INFINITY)
    Ax = gm.addVars(m, lb=-GRB.INFINITY)  # Ax[mi] stands for (A x)_mi
    z1 = gm.addVars(m, lb=-GRB.INFINITY)
    z2 = gm.addVars(m, lb=0)
    gm.setObjective(sum([y[mi] for mi in range(m)]), GRB.MINIMIZE)
    gm.addConstrs(Ax[mi] == sum([A[mi, ni] * x[ni] for ni in range(n)]) for mi in range(m))
    gm.addConstrs((Ax[mi] - b[mi]) >= 0 for mi in range(m)) # Ax-b > 0
    gm.addConstrs(z1[mi] == y[mi] - Ax[mi] + b[mi] for mi in range(m))
    gm.addConstrs(z2[mi] == y[mi] + Ax[mi] - b[mi] for mi in range(m))
    gm.addConstrs(4 + z1[mi]*z1[mi] <= z2[mi]*z2[mi] for mi in range(m))
    # Solve the model
    gm.update()
    gm.optimize()
    return(gm)
# Generate the data (DO NOT MODIFY)
def get_data(id, m, n):
    """Deterministically build (A, b) seeded by the student id.

    Each row of Am is rescaled so its row-wise L1 norm equals bm
    (np.sqrt(x**2) is |x|); both A and b are negated on return.
    """
    np.random.seed(id)
    bm = 1 + 9 * np.random.rand(m)
    Am_base = 2 * (np.random.rand(m, n) - 0.5)
    Am_row_norm = np.sum(np.sqrt(Am_base ** 2), 1)
    scaling_factor = bm / Am_row_norm
    Am = np.multiply(Am_base.transpose(), scaling_factor).transpose()
    A = -Am
    b = -bm
    return(A, b)

# Run the homework model with the fixed student id.
solve_socp(id=id)
"noreply@github.com"
] | noreply@github.com |
c826ef5b94146713cbfb40ea8d2456a72ea50850 | 11137bde91389c04a95df6f6fdaf64f7f49f5f80 | /introduction_MIT/16_2图表会骗人.py | 582608abe22748eee617a7fb4bb7c90df1af46fb | [] | no_license | starschen/learning | cf3c5a76c867567bce73e9cacb2cf0979ba053d9 | 34decb8f9990117a5f40b8db6dba076a7f115671 | refs/heads/master | 2020-04-06T07:02:56.444233 | 2016-08-24T08:11:49 | 2016-08-24T08:11:49 | 39,417,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | #encoding:utf8
#16_2图表会骗人.py
#绘制房价
import pylab
def plotHousing(impression):
    '''Assumes impression is a string, one of 'flat', 'volatile' or 'fair';
    plots housing prices over time, with the axes chosen to create the
    requested visual impression of the same data.'''
    f=open('midWestHousingPrices.txt','r')
    #Each line of the file is: year quarter price
    #(data for the U.S. midwest region)
    labels,prices=([],[])
    for line in f:
        year,quarter,price=line.split(' ')
        label=year[2:4]+'\n Q'+quarter[1]
        labels.append(label)
        prices.append(float(price)/1000)  # show prices in $1,000s
    quarters=pylab.arange(len(labels))  # x positions of the bars
    width=0.8  # bar width
    if impression=='flat':
        # Log-scaled y axis flattens the apparent variation.
        pylab.semilogy()
    pylab.bar(quarters,prices,width)
    pylab.xticks(quarters+width/2.0,labels)
    pylab.title('Housing Prices in U.S. Midwest')
    pylab.xlabel('Quarter')
    pylab.ylabel('Average Price($1,000\'s)')
    # y-limits chosen per impression: wide range hides swings, narrow
    # range exaggerates them.
    if impression=='flat':
        pylab.ylim(10,10**3)
    elif impression =='volatile':
        pylab.ylim(180,220)
    elif impression=='fair':
        pylab.ylim(150,250)
    else:
        raise ValueError
# Same data rendered three ways, one figure per impression.
plotHousing('flat')
pylab.figure()
plotHousing('volatile')
pylab.figure()
plotHousing('fair')
pylab.show()
| [
"stars_chenjiao@163.com"
] | stars_chenjiao@163.com |
ea29d994f42ebba9d6dc03fdd1e97c8201db4622 | 5eacabadc016ff67b913d66b88a5002865ff71e4 | /tests/archives/cvt_archive_test.py | db2b4925c8ccd9d5d0cde2a569dc41f08278a6a3 | [
"MIT"
] | permissive | Nikitaskla/pyribs | 8e3d0ac580130c5389826f7c518591842accaa15 | ef289a930e7a8a51286cf657f7e4b29551277350 | refs/heads/master | 2023-07-03T16:59:55.656682 | 2021-08-01T09:39:51 | 2021-08-01T09:39:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,825 | py | """Tests for the CVTArhive."""
import unittest
import numpy as np
import pytest
from ribs.archives import AddStatus, CVTArchive
from .conftest import get_archive_data
# pylint: disable = redefined-outer-name
@pytest.fixture
def data(use_kd_tree):
    """Data for CVT Archive tests."""
    # Parametrized by the use_kd_tree fixture from conftest: same archive
    # data, with nearest-centroid lookup via k-D tree or brute force.
    return (get_archive_data("CVTArchive-kd_tree")
            if use_kd_tree else get_archive_data("CVTArchive-brute_force"))


def assert_archive_elite(archive, solution, objective_value, behavior_values,
                         centroid, metadata):
    """Asserts that the archive has one specific elite."""
    assert len(archive) == 1
    elite = list(archive)[0]
    assert np.isclose(elite.sol, solution).all()
    assert np.isclose(elite.obj, objective_value).all()
    assert np.isclose(elite.beh, behavior_values).all()
    # The elite's index identifies the cell, hence its centroid.
    assert np.isclose(archive.centroids[elite.idx], centroid).all()
    assert elite.meta == metadata
def test_samples_bad_shape(use_kd_tree):
    # The behavior space is 2D but samples are 3D.
    with pytest.raises(ValueError):
        CVTArchive(10, [(-1, 1), (-1, 1)],
                   samples=[[-1, -1, -1], [1, 1, 1]],
                   use_kd_tree=use_kd_tree)


def test_properties_are_correct(data):
    assert np.all(data.archive.lower_bounds == [-1, -1])
    assert np.all(data.archive.upper_bounds == [1, 1])

    points = [[0.5, 0.5], [-0.5, 0.5], [-0.5, -0.5], [0.5, -0.5]]
    # Ordering is not guaranteed, so compare as multisets.
    unittest.TestCase().assertCountEqual(data.archive.samples.tolist(), points)
    unittest.TestCase().assertCountEqual(data.archive.centroids.tolist(),
                                         points)


def test_custom_centroids(use_kd_tree):
    centroids = np.array([[-0.25, -0.25], [0.25, 0.25]])
    archive = CVTArchive(centroids.shape[0], [(-1, 1), (-1, 1)],
                         custom_centroids=centroids,
                         use_kd_tree=use_kd_tree)
    archive.initialize(solution_dim=3)
    # With custom centroids, CVT sampling is skipped entirely.
    assert archive.samples is None
    assert (archive.centroids == centroids).all()


def test_custom_centroids_bad_shape(use_kd_tree):
    with pytest.raises(ValueError):
        # The centroids array should be of shape (10, 2) instead of just (1, 2),
        # hence a ValueError will be raised.
        CVTArchive(10, [(-1, 1), (-1, 1)],
                   custom_centroids=[[0.0, 0.0]],
                   use_kd_tree=use_kd_tree)
@pytest.mark.parametrize("use_list", [True, False], ids=["list", "ndarray"])
def test_add_to_archive(data, use_list):
    # Adding should behave the same for Python lists and ndarrays.
    if use_list:
        status, value = data.archive.add(list(data.solution),
                                         data.objective_value,
                                         list(data.behavior_values),
                                         data.metadata)
    else:
        status, value = data.archive.add(data.solution, data.objective_value,
                                         data.behavior_values, data.metadata)

    assert status == AddStatus.NEW
    # For a previously empty cell, the reported value is the objective itself.
    assert np.isclose(value, data.objective_value)
    assert_archive_elite(data.archive_with_elite, data.solution,
                         data.objective_value, data.behavior_values,
                         data.centroid, data.metadata)
def test_add_and_overwrite(data):
    """Test adding a new solution with a higher objective value."""
    arbitrary_sol = data.solution + 1
    arbitrary_metadata = {"foobar": 12}
    high_objective_value = data.objective_value + 1.0

    status, value = data.archive_with_elite.add(arbitrary_sol,
                                                high_objective_value,
                                                data.behavior_values,
                                                arbitrary_metadata)
    assert status == AddStatus.IMPROVE_EXISTING
    # Improvement value is the delta over the displaced elite.
    assert np.isclose(value, high_objective_value - data.objective_value)

    assert_archive_elite(data.archive_with_elite, arbitrary_sol,
                         high_objective_value, data.behavior_values,
                         data.centroid, arbitrary_metadata)


def test_add_without_overwrite(data):
    """Test adding a new solution with a lower objective value."""
    arbitrary_sol = data.solution + 1
    arbitrary_metadata = {"foobar": 12}
    low_objective_value = data.objective_value - 1.0

    status, value = data.archive_with_elite.add(arbitrary_sol,
                                                low_objective_value,
                                                data.behavior_values,
                                                arbitrary_metadata)
    assert status == AddStatus.NOT_ADDED
    # Even on rejection, the (negative) delta is reported.
    assert np.isclose(value, low_objective_value - data.objective_value)

    # The archive keeps the original elite untouched.
    assert_archive_elite(data.archive_with_elite, data.solution,
                         data.objective_value, data.behavior_values,
                         data.centroid, data.metadata)
| [
"noreply@github.com"
] | noreply@github.com |
def nb_year(p0, percent, aug, p):
    """Return how many whole years a town of p0 inhabitants needs to reach
    at least p, growing by `percent` percent plus `aug` newcomers per year."""
    rate = percent / 100
    population, years = p0, 0
    while population < p:
        # Same evaluation order as the original: (pop + pop*rate) + aug.
        population = population + population * rate + aug
        years += 1
    return years
| [
"noreply@github.com"
] | noreply@github.com |
3f7c1ecec5f580016a5068bd48f1de2040b2bf6b | 0a801544da5ad2f1969348512f7def8fa9176c7d | /backend/simplicite_23801/urls.py | 25d91cd59bdf8a1ad01544b6a94abcbe42abf6ab | [] | no_license | crowdbotics-apps/simplicite-23801 | 75477b76531c2c53992f74183e9e2e80aefd22e4 | e6a6f569c61449e50988201cf58cc5203d23e039 | refs/heads/master | 2023-02-11T12:27:04.807579 | 2021-01-13T03:01:57 | 2021-01-13T03:01:57 | 329,176,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,668 | py | """simplicite_23801 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL table: home app, allauth/rest_auth authentication, admin, and the
# per-app pages plus their "api/v1/" REST routes.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("task.api.v1.urls")),
    path("task/", include("task.urls")),
    path("api/v1/", include("task_profile.api.v1.urls")),
    path("task_profile/", include("task_profile.urls")),
    path("api/v1/", include("tasker_business.api.v1.urls")),
    path("tasker_business/", include("tasker_business.urls")),
    path("api/v1/", include("location.api.v1.urls")),
    path("location/", include("location.urls")),
    path("api/v1/", include("wallet.api.v1.urls")),
    path("wallet/", include("wallet.urls")),
    path("api/v1/", include("task_category.api.v1.urls")),
    path("task_category/", include("task_category.urls")),
    path("home/", include("home.urls")),  # NOTE(review): home.urls is also mounted at "" above
]

# Admin site branding.
admin.site.site_header = "Simplicite"
admin.site.site_title = "Simplicite Admin Portal"
admin.site.index_title = "Simplicite Admin"

# swagger
api_info = openapi.Info(
    title="Simplicite API",
    default_version="v1",
    description="API documentation for Simplicite App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
df437ae369a37495789bc3dcddaf0479010d41ea | 728416d6e4dba67a0a85276f76bd59d3e857a284 | /project/urls.py | 0786cd4530e24ae874365301c1e96fa0b4aa8a99 | [] | no_license | samilani/avacado | c80a4c21480d86b1c6cde05245464ed077d90221 | c08e41f851e6e575775ef52750089013f386cd1a | refs/heads/master | 2020-06-06T16:26:11.183489 | 2013-11-12T12:35:16 | 2013-11-12T12:35:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.conf.urls import patterns, url
from project import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index')) | [
"milani.ahmad@gmail.com"
] | milani.ahmad@gmail.com |
4d909ba4892e6c9c564466ba0ea7fe903b3857ab | 8bc7ba8eb10e30b38f2bcf00971bfe540c9d26b7 | /paxes_cinder/k2aclient/v1/virtualswitch_manager.py | 6d956eb5bdf9ebcdab6aba2f1378e4c6151dbbdc | [
"Apache-2.0"
] | permissive | windskyer/k_cinder | f8f003b2d1f9ca55c423ea0356f35a97b5294f69 | 000ee539ee4842a158071d26ee99d12c7c0a87da | refs/heads/master | 2021-01-10T02:19:51.072078 | 2015-12-08T15:24:33 | 2015-12-08T15:24:33 | 47,629,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | #
#
# =================================================================
# =================================================================
"""VirtualSwitch interface."""
from paxes_cinder.k2aclient import base
from paxes_cinder.k2aclient.v1 import k2uom
class VirtualSwitchManager(base.ManagerWithFind):
    """Manage :class:`VirtualSwitch` resources."""
    # Resource type instantiated by list/get results (the original docstring
    # said ClientNetworkAdapter -- a copy/paste slip; this line is the truth).
    resource_class = k2uom.VirtualSwitch

    def new(self):
        """Return a new, unsaved VirtualSwitch bound to this manager."""
        return self.resource_class(self, None)

    def list(self, managedsystem, xa=None):
        """Get a list of all VirtualSwitch for a particular
        ManagedSystem accessed through a particular hmc.

        :param managedsystem: ID of the owning ManagedSystem.
        :param xa: optional transaction/correlation id forwarded to k2.
        :rtype: list of :class:`VirtualSwitch`.
        """
        return self._list("/rest/api/uom/ManagedSystem/%s/VirtualSwitch"
                          % managedsystem, xa=xa)

    def get(self, managedsystem, virtualswitch, xa=None):
        """Given managedsystem, get a specific VirtualSwitch.

        :param virtualswitch: The ID of the :class:`VirtualSwitch`.
        :rtype: :class:`VirtualSwitch`
        """
        return self._get("/rest/api/uom/ManagedSystem/%s/VirtualSwitch/%s"
                         % (managedsystem, virtualswitch,),
                         xa=xa)

    def delete(self, managedsystem, virtualswitch, xa=None):
        """Delete the specified instance given the parent instance."""
        return self._delete("uom",
                            managedsystem,
                            child=virtualswitch,
                            xa=xa)

    def deletebyid(self, managedsystem_id, virtualswitch_id, xa=None):
        """Delete the specified instance given parent and child ids."""
        return self._deletebyid("uom",
                                "ManagedSystem",
                                managedsystem_id,
                                child_type=k2uom.VirtualSwitch,
                                child_id=virtualswitch_id,
                                xa=xa)
| [
"leidong@localhost"
] | leidong@localhost |
0bdd466da74a7ca034ecd75ac4369832698933fc | cf582a6c4a45db58666dc2de66cfb7cdc50151e4 | /account/authentication.py | 704682ce691f91ac4cdf86b326a676cce94b66c9 | [] | no_license | NankuF/bookmarks | 361def76d0d3ad42ff0103371a576f4cd4bddaaf | 88cd24cc6909211b3bfc90960891965c2e4e5605 | refs/heads/master | 2023-08-20T06:57:01.098014 | 2021-10-24T10:55:08 | 2021-10-24T10:55:08 | 413,335,312 | 2 | 0 | null | 2021-10-08T06:01:39 | 2021-10-04T08:24:05 | Python | UTF-8 | Python | false | false | 1,450 | py | from django.contrib.auth.models import User
class EmailAuthBackend(object):
"""Выполняет аутентификацию пользователя по email
Идентификационные данные сначала будут проверены ModelBackend ( см. в settings.py).
Если этот бэкэнд не вернет объект пользователя, Django попробует аутентифи-
цировать его с помощью нашего класса, EmailAuthBackend"""
def authenticate(self, request, username=None, password=None):
try:
# Если в форме в поле username вы ввели email, то сработает этот бэкенд
# например вы ввели username=val@val.com, User.objects.get(email=val@val.com)
# важно, чтобы на фронте в поле <input... name='username' - было написано username.
# Если там написано не username, то ф-я не сработает.
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| [
"71669154+NankuF@users.noreply.github.com"
] | 71669154+NankuF@users.noreply.github.com |
0aa438918ed4b7436f3f4cf20ae451e99c599526 | 676edefaffabb933fe9336cf6c23aa049b6962b4 | /mysite/cats/models.py | b74bb144c9ec6ebc480b5adf2e2c672e4cc65abd | [] | no_license | danielbaraniak/zadania-rekrutacyjne | 917b2785e0d8e867e66ebe610f4deb4f147f8984 | fba379c4688275f0328012cffef525bfb1e93848 | refs/heads/main | 2023-08-25T14:37:02.430574 | 2021-10-25T00:50:34 | 2021-10-25T00:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | from django.contrib.auth.models import User
from django.db import models
class Cat(models.Model):
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
name = models.CharField(max_length=50)
color = models.CharField(max_length=20)
is_male = models.BooleanField()
def get_prays_number(self):
return Prey.objects.filter(hunting__cat=self).count()
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if Cat.objects.filter(user=self.user).count() < 4:
super(Cat, self).save()
else:
raise Exception(f'{self.user} has already 4 cats. No more are allowed.')
def __str__(self):
return self.name
class Hunting(models.Model):
cat = models.ForeignKey(Cat, on_delete=models.CASCADE)
start_date = models.DateTimeField()
duration = models.DurationField()
def __str__(self):
return "{} at {}".format(self.cat.name, self.start_date.strftime("%m/%d/%Y %H:%M:%S"))
class PreyType(models.Model):
type_name = models.CharField(max_length=20)
def __str__(self):
return self.type_name
class Prey(models.Model):
type = models.ForeignKey(PreyType, on_delete=models.PROTECT)
hunting = models.ForeignKey(Hunting, on_delete=models.CASCADE)
def __str__(self):
return self.type.type_name
| [
"daniel@example.pl"
] | daniel@example.pl |
0509f599fc55aff09422b32059466ba37f55ba10 | 4b7deb5769a689a333efe7b3a32a6a0ac69d67bc | /windows/samples/python/sample_open3d_pclviewer.py | bb7249a9ea684bcdb953b157657f6deb39961a14 | [] | no_license | ZC1231/SDK | cadc4b884b2f05fbd0d4b72448ea60e76c00fbae | 13bc36b5ce8a76a30ad4d469a00e8df75203abfb | refs/heads/master | 2022-11-29T06:12:49.086402 | 2020-08-11T12:38:33 | 2020-08-11T12:38:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | import sys, os
import numpy as np
import time
import open3d as o3d
import dmcam
# -- init the lib with default log file
dmcam.init(None)
# -- init with specified log file
# dmcam.init("test.log")
# -- set debug level
dmcam.log_cfg(dmcam.LOG_LEVEL_INFO, dmcam.LOG_LEVEL_DEBUG, dmcam.LOG_LEVEL_NONE)
# -- list device
print(" Scanning dmcam device ..")
devs = dmcam.dev_list()
if devs is None:
print(" No device found")
sys.exit(1)
print("found %d device" % len(devs))
for i in range(len(devs)):
print("#%d: %s" % (i, dmcam.dev_get_uri(devs[i], 256)[0]))
print(" Open dmcam device ..")
# open the first device
dev = dmcam.dev_open(devs[0])
# Or open by URI
# dev = dmcam.dev_open_by_uri(br"xxx")
assert dev is not None
# - set capture config -
cap_cfg = dmcam.cap_cfg_t()
cap_cfg.cache_frames_cnt = 10 # framebuffer = 10
cap_cfg.on_error = None # use cap_set_callback_on_error to set cb
cap_cfg.on_frame_rdy = None # use cap_set_callback_on_frame_ready to set cb
cap_cfg.en_save_replay = False # True = save replay, False = not save
cap_cfg.en_save_dist_u16 = False # True to save dist stream for openni replay
cap_cfg.en_save_gray_u16 = False # True to save gray stream for openni replay
cap_cfg.fname_replay = os.fsencode("dm_replay.oni") # set replay filename
dmcam.cap_config_set(dev, cap_cfg)
# dmcam.cap_set_callback_on_frame_ready(dev, on_frame_rdy)
# dmcam.cap_set_callback_on_error(dev, on_cap_err)
print(" Set paramters ...")
wparams = {
dmcam.PARAM_INTG_TIME: dmcam.param_val_u(),
dmcam.PARAM_FRAME_RATE: dmcam.param_val_u(),
dmcam.PARAM_MOD_FREQ: dmcam.param_val_u(),
}
# set dual frequency modulation, note that mod_freq0 should always be larger than mod_freq1
# you can trivially set mod_freq1 to zeros, if single frequency modulation is desired
wparams[dmcam.PARAM_MOD_FREQ].mod_freq0=100000000
wparams[dmcam.PARAM_MOD_FREQ].mod_freq1=36000000
wparams[dmcam.PARAM_INTG_TIME].intg.intg_us = 1000
wparams[dmcam.PARAM_FRAME_RATE].frame_rate.fps = 20
amp_min_val = dmcam.filter_args_u()
amp_min_val.min_amp = 80
if not dmcam.filter_enable(dev, dmcam.DMCAM_FILTER_ID_AMP, amp_min_val,
sys.getsizeof(amp_min_val)):
print("set amp to %d %% failed" % amp_min_val.min_amp)
if not dmcam.param_batch_set(dev, wparams):
print(" set parameter failed")
print(" Start capture ...")
dmcam.cap_start(dev)
f = bytearray(640 * 480 * 4 * 2)
print(" sampling 100 frames ...")
count = 0
run = True
vis = o3d.visualization.Visualizer()
vis.create_window()
opt = vis.get_render_option()
opt.background_color = np.asarray([0, 0, 0])
opt.point_size = 1
opt.show_coordinate_frame = True
pcd = o3d.geometry.PointCloud()
bind_flag = False
while run:
# get one frame
finfo = dmcam.frame_t()
ret = dmcam.cap_get_frames(dev, 1, f, finfo)
# print("get %d frames" % ret)
if ret > 0:
w = finfo.frame_info.width
h = finfo.frame_info.height
print(" frame @ %d, %d, %dx%d" %
(finfo.frame_info.frame_idx, finfo.frame_info.frame_size, w, h))
dist_cnt, dist = dmcam.frame_get_distance(dev, w * h, f, finfo.frame_info)
gray_cnt, gray = dmcam.frame_get_gray(dev, w * h, f, finfo.frame_info)
_, pcloud = dmcam.frame_get_pcl(dev, w * h*3, dist, w, h, None)
pcd.points=o3d.utility.Vector3dVector(pcloud.reshape(-1, 3))
if not bind_flag:
vis.add_geometry(pcd)
bind_flag = True
else:
vis.update_geometry(pcd)
vis.poll_events()
vis.update_renderer()
#o3d.visualization.draw_geometries([pcd])
# dist = dmcam.raw2dist(int(len(f) / 4), f)
# gray = dmcam.raw2gray(int(len(f) / 4), f)
else:
break
time.sleep(0.01)
# break
vis.destroy_window()
print(" Stop capture ...")
dmcam.cap_stop(dev)
print(" Close dmcam device ..")
dmcam.dev_close(dev)
dmcam.uninit()
| [
"U1@ip-192-168-254-95.cn-northwest-1.compute.internal"
] | U1@ip-192-168-254-95.cn-northwest-1.compute.internal |
fcb2435eb093fb368a090ef2efed05fd1120970b | 2e83b09448d2756ac0c8f77568086763391a43c8 | /trackback.py | 6014ed7933bc4f0d5720fe1f24124721456b4d4e | [] | no_license | raghvendra98/Computer-vision-OpenCV | f298cee5600bff7abfdaf9cc69ae1793affeb923 | 084b8f4061b3871a2103680af73301629f18df32 | refs/heads/master | 2023-02-17T01:45:53.735727 | 2021-01-16T13:42:31 | 2021-01-16T13:42:31 | 320,251,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import numpy as np
import cv2 as cv
def nothing(x):
print(x)
# Create a black image, a window
cv.namedWindow('image')
cv.createTrackbar('CP', 'image', 10, 400, nothing)
switch = 'color/gray'
cv.createTrackbar(switch, 'image', 0, 1, nothing)
while(1):
img = cv.imread('trex.png')
pos = cv.getTrackbarPos('CP', 'image')
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img, str(pos), (50, 150), font, 3, (0, 0, 255), 10)
k = cv.waitKey(1) & 0xFF
if k == 27:
break
s = cv.getTrackbarPos(switch, 'image')
if s == 0:
pass
else:
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
img = cv.imshow('image',img)
cv.destroyAllWindows() | [
"50679225+raghvendra98@users.noreply.github.com"
] | 50679225+raghvendra98@users.noreply.github.com |
a4e7a4cadea98270cebf7bd8ab170c2f030ac42c | f0ec40da9043db89265b2f90f7c77f896b9344aa | /instaclone/migrations/0004_auto_20170724_0943.py | 8a0c180cc08c42cdb1e0a091134b137a40d1d4c7 | [] | no_license | shivajaat/InstaClone | 9c5547837ec639195a93f316dcd82703d2e0b621 | c0cabe48abde151920c5a480738523acee819a84 | refs/heads/master | 2021-01-02T08:35:36.497159 | 2017-08-01T17:16:11 | 2017-08-01T17:16:11 | 99,025,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 04:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instaclone', '0003_auto_20170724_0940'),
]
operations = [
migrations.AlterField(
model_name='usermodel',
name='email',
field=models.CharField(max_length=40, unique=True),
),
migrations.AlterField(
model_name='usermodel',
name='username',
field=models.CharField(max_length=20, unique=True),
),
]
| [
"shivajaat21@gmail.com"
] | shivajaat21@gmail.com |
e463337960661352eec76273356b1323176686ca | 04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29 | /170/Solution.py | 1ece9965f93617191e3d6e484aeefd64c54f0c67 | [] | no_license | zhangruochi/leetcode | 6f739fde222c298bae1c68236d980bd29c33b1c6 | cefa2f08667de4d2973274de3ff29a31a7d25eda | refs/heads/master | 2022-07-16T23:40:20.458105 | 2022-06-02T18:25:35 | 2022-06-02T18:25:35 | 78,989,941 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | """
Design and implement a TwoSum class. It should support the following operations: add and find.
add - Add the number to an internal data structure.
find - Find if there exists any pair of numbers which sum is equal to the value.
Example 1:
add(1); add(3); add(5);
find(4) -> true
find(7) -> false
Example 2:
add(3); add(1); add(2);
find(3) -> true
find(6) -> false
"""
from collections import defaultdict
class TwoSum:
def __init__(self):
"""
Initialize your data structure here.
"""
self.count = 0
self.numbers = defaultdict(list)
def add(self, number):
"""
Add the number to an internal data structure..
:type number: int
:rtype: void
"""
self.numbers[number].append(self.count)
self.count += 1
def find(self, value):
"""
Find if there exists any pair of numbers which sum is equal to the value.
:type value: int
:rtype: bool
"""
for num, indexs in self.numbers.items():
tmp = value - num
if tmp in self.numbers:
if tmp == num and len(indexs) > 1:
return True
if tmp != num:
return True
return False
class TwoSum(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.nums = {}
def add(self, number):
"""
Add the number to an internal data structure..
:type number: int
:rtype: None
"""
if number not in self.nums:
self.nums[number] = 1
else:
self.nums[number] += 1
def find(self, value):
"""
Find if there exists any pair of numbers which sum is equal to the value.
:type value: int
:rtype: bool
"""
for key in self.nums:
second = value - key
if ((key != second) and second in self.nums) or (key == second and self.nums[key] > 1):
return True
return False
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value) | [
"zrc720@gmail.com"
] | zrc720@gmail.com |
d4e630393d97b23b24b61c540310af9eced66716 | 4d4fcde3efaa334f7aa56beabd2aa26fbcc43650 | /server/src/uds/core/managers/userservice/comms.py | e2b06381d46b8a78ab640c8bf0c609b7fff00dda | [] | no_license | xezpeleta/openuds | a8b11cb34eb0ef7bb2da80f67586a81b2de229ef | 840a7a02bd7c9894e8863a8a50874cdfdbf30fcd | refs/heads/master | 2023-08-21T17:55:48.914631 | 2021-10-06T10:39:06 | 2021-10-06T10:39:06 | 414,489,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,264 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2021 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L.U. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
"""
import os
import json
import base64
import tempfile
import logging
import typing
import requests
if typing.TYPE_CHECKING:
from uds.models import UserService
logger = logging.getLogger(__name__)
TIMEOUT = 2
class NoActorComms(Exception):
pass
class OldActorVersion(NoActorComms):
pass
def _requestActor(
userService: 'UserService',
method: str,
data: typing.Optional[typing.MutableMapping[str, typing.Any]] = None,
minVersion: typing.Optional[str] = None,
) -> typing.Any:
"""
Makes a request to actor using "method"
if data is None, request is done using GET, else POST
if no communications url is provided or no min version, raises a "NoActorComms" exception (or OldActorVersion, derived from NoActorComms)
Returns request response value interpreted as json
"""
url = userService.getCommsUrl()
if not url:
# logger.warning('No notification is made because agent does not supports notifications: %s', userService.friendly_name)
raise NoActorComms(
'No notification urls for {}'.format(userService.friendly_name)
)
minVersion = minVersion or '2.0.0'
version = userService.getProperty('actor_version') or '0.0.0'
if '-' in version or version < minVersion:
logger.warning(
'Pool %s has old actors (%s)', userService.deployed_service.name, version
)
raise OldActorVersion(
'Old actor version {} for {}'.format(version, userService.friendly_name)
)
url += '/' + method
proxy = userService.deployed_service.proxy
try:
if proxy:
r = proxy.doProxyRequest(url=url, data=data, timeout=TIMEOUT)
else:
verify: typing.Union[bool, str]
cert = userService.getProperty('cert')
# cert = '' # Untils more tests, keep as previous.... TODO: Fix this when fully tested
if cert:
# Generate temp file, and delete it after
verify = tempfile.mktemp('udscrt')
with open(verify, 'wb') as f:
f.write(cert.encode()) # Save cert
else:
verify = False
if data is None:
r = requests.get(url, verify=verify, timeout=TIMEOUT)
else:
r = requests.post(
url,
data=json.dumps(data),
headers={'content-type': 'application/json'},
verify=verify,
timeout=TIMEOUT,
)
if verify:
try:
os.remove(typing.cast(str, verify))
except Exception:
logger.exception('removing verify')
js = r.json()
if version >= '3.0.0':
js = js['result']
logger.debug('Requested %s to actor. Url=%s', method, url)
except Exception as e:
logger.warning(
'Request %s failed: %s. Check connection on destination machine: %s',
method,
e,
url,
)
js = None
return js
def notifyPreconnect(userService: 'UserService', userName: str, protocol: str) -> None:
"""
Notifies a preconnect to an user service
"""
ip, hostname = userService.getConnectionSource()
try:
_requestActor(
userService,
'preConnect',
{'user': userName, 'protocol': protocol, 'ip': ip, 'hostname': hostname},
)
except NoActorComms:
pass # If no preconnect, warning will appear on UDS log
def checkUuid(userService: 'UserService') -> bool:
"""
Checks if the uuid of the service is the same of our known uuid on DB
"""
try:
uuid = _requestActor(userService, 'uuid')
if (
uuid and uuid != userService.uuid
): # Empty UUID means "no check this, fixed pool machine"
logger.info(
'Machine %s do not have expected uuid %s, instead has %s',
userService.friendly_name,
userService.uuid,
uuid,
)
return False
except NoActorComms:
pass
return True # Actor does not supports checking
def requestScreenshot(userService: 'UserService') -> bytes:
"""
Returns an screenshot in PNG format (bytes) or empty png if not supported
"""
emptyPng = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=='
try:
png = _requestActor(
userService, 'screenshot', minVersion='3.0.0'
) # First valid version with screenshot is 3.0
except NoActorComms:
png = None
return base64.b64decode(png or emptyPng)
def sendScript(userService: 'UserService', script: str, forUser: bool = False) -> None:
"""
If allowed, send script to user service
"""
try:
data: typing.MutableMapping[str, typing.Any] = {'script': script}
if forUser:
data['user'] = forUser
_requestActor(userService, 'script', data=data)
except NoActorComms:
pass
def requestLogoff(userService: 'UserService') -> None:
"""
Ask client to logoff user
"""
try:
_requestActor(userService, 'logout', data={})
except NoActorComms:
pass
def sendMessage(userService: 'UserService', message: str) -> None:
"""
Sends an screen message to client
"""
try:
_requestActor(userService, 'message', data={'message': message})
except NoActorComms:
pass
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
1b2a6c3181548e466eacfa3b040cbc883242e73b | 35f1a21affd266e0069bfc5a1c83218847f13802 | /pastie-5073437.py | 9cc5c027ce00562a66e8461c02cfa8768964c92c | [] | no_license | KarenWest/pythonClassProjects | ff1e1116788174a2affaa96bfcb0e97df3ee92da | 5aa496a71d36ffb9892ee6e377bd9f5d0d8e03a0 | refs/heads/master | 2016-09-16T15:20:26.882688 | 2014-02-21T20:07:57 | 2014-02-21T20:07:57 | 17,055,355 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #balance = 5000
#annualInterestRate = 0.18
#monthlyPaymentRate = 0.02
months = range(1, 13) # 1, 2, ... , 11, 12
owe = balance # It made sense to me to call this total amount oweing
totalPaid = 0
for month in months:
minPay = owe * monthlyPaymentRate # calculate our minimum payment
interest = (owe - minPay) * (annualInterestRate / 12) # same for interest
owe = owe - minPay + interest # calculate our new balance
totalPaid += minPay # Sum up how much we've paid so far
print('Month: %d' % month) # %d will be replaced by month
print('Minimum monthly payment: %.2f' % minPay) # %.2f replaced by minPay, with 2 decimal places
print('Remaining balance: %.2f' % owe)
print('Total paid %.2f' % totalPaid)
print('Remaining balance: %.2f' % owe)
| [
"KarenWest15@gmail.com"
] | KarenWest15@gmail.com |
51bacc265849e99d5638fe2aa84fb25204c57781 | a0dda8be5892a390836e19bf04ea1d098e92cf58 | /叶常春视频例题/chap05/5-2-9-生词清单.py | 6c9697103cde9015e7e96499929c29627c18643d | [] | no_license | wmm98/homework1 | d9eb67c7491affd8c7e77458ceadaf0357ea5e6b | cd1f7f78e8dbd03ad72c7a0fdc4a8dc8404f5fe2 | refs/heads/master | 2020-04-14T19:22:21.733111 | 2019-01-08T14:09:58 | 2019-01-08T14:09:58 | 164,055,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | # 例5-2-9 生词清单
new_words = []
for i in range(1, 101):
word = input("输入生词:")
if word == "*":
break # break语句的作用是跳出循环,执行循环后面的语句。
if word not in new_words:
new_words.append(word)
print("生词清单:", new_words) | [
"792545884@qq.com"
] | 792545884@qq.com |
b6accd03937aff683e9c67bcc79daa3e0d5517c3 | 5f80ca7e63eef57c044d937d60a6e3d6e8f138cc | /b2week4.py | adebb2539d6fac6ce435a543badb661b745bb0e4 | [] | no_license | sjerkovic/Bioinformatics-specialization | 49125231650a2db1423f3f2ef49130e99b754bd6 | 1d599934675c8663a61fdedd8944aea0e2cd8ea6 | refs/heads/main | 2023-04-15T20:31:33.249693 | 2021-05-02T22:11:42 | 2021-05-02T22:11:42 | 363,510,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | def spectral_convolution(spectrum):
spectrum_int = sorted([int(v) for v in spectrum.split()])
convolution = []
for i in spectrum_int:
for j in spectrum_int:
a = i-j
if a>0:
convolution.append(a)
return convolution
| [
"noreply@github.com"
] | noreply@github.com |
5f844ff0add74663d1b5f8196c9802f858ac8f5b | ea384acfec1ae21bc8583258ecaa187ded4b22d6 | /data/base/prototypes/entity/miningDrills.py | 9f855b6f52051c33e371df5debe3a40a633cb7d9 | [
"MIT"
] | permissive | cmk1988/Jactorio | da03e97d0fa8bfbf9428e45aa2e4772c0ea9542b | 4056b4c16614d566ec8d90b250621e03645bf4d2 | refs/heads/master | 2023-03-28T03:05:24.337666 | 2021-03-13T20:31:30 | 2021-03-13T20:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | import jactorioData as j
def addSprite(spritePath):
return (j.Sprite()
.load(spritePath)
.frames(8)
.sets(8)
)
def createDrill(name, icon, spriteN, spriteE, spriteS, spriteW):
(j.MiningDrill(name)
.rotatable(True)
.pickupTime(0.1)
.miningSpeed(1)
.item(
j.Item(name + "-item")
.sprite(
j.Sprite()
.load(icon)
)
)
.sprite(addSprite(spriteN))
.spriteE(addSprite(spriteE))
.spriteS(addSprite(spriteS))
.spriteW(addSprite(spriteW))
.tileWidth(3)
.tileHeight(3)
# resource output: up, right, down left
# <U>
# [0] [ ] [ ]
# <L> [ ] [ ] [ ] <R>
# [ ] [ ] [ ]
# <D>
.resourceOutput(j._OutputTile4Way(
(
(1, -1),
(3, 1),
(1, 3),
(-1, 1)
)
))
)
createDrill("electric-mining-drill",
"base/graphics/icon/electric-mining-drill.png",
"base/graphics/entity/electric-mining-drill/electric-mining-drill-N.png",
"base/graphics/entity/electric-mining-drill/electric-mining-drill-E.png",
"base/graphics/entity/electric-mining-drill/electric-mining-drill-S.png",
"base/graphics/entity/electric-mining-drill/electric-mining-drill-W.png")
| [
"jaihysc@gmail.com"
] | jaihysc@gmail.com |
abb8d377bfc14fc3f720ebee1401edf33b988493 | e0ea106f98cf3bff4a9473d4b225f56da60314d6 | /percy/__init__.py | a1e13d8a562a1a9380d0e06e996b210fa34e65a9 | [
"MIT"
] | permissive | getsentry/python-percy-client | e80dbf257a168cd08ed2af4cf0fbcc80f20ddaac | c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb | refs/heads/master | 2023-08-05T23:24:46.855782 | 2017-07-14T00:45:20 | 2017-07-14T00:45:20 | 97,176,225 | 1 | 2 | MIT | 2020-10-27T22:44:01 | 2017-07-14T00:32:31 | Python | UTF-8 | Python | false | false | 285 | py | # -*- coding: utf-8 -*-
__author__ = 'Perceptual Inc.'
__email__ = 'team@percy.io'
__version__ = '1.0.1'
from percy.client import *
from percy.config import *
from percy.environment import *
from percy.resource import *
from percy.resource_loader import *
from percy.runner import *
| [
"mike@fotinakis.com"
] | mike@fotinakis.com |
927c002da1456a2be2e42271cff26c52213d5fc3 | 85c594b9f5af241d25a4957ba57c65cc1b473008 | /pywrap/setup.py | 43dced996b3fb659e394fca25776f618590906ed | [] | no_license | jeffli678/autoit | 84dca4a78b892a0ab8a7b00c3682481c70d667d3 | a0e57b382abe3d4caed9d2371672fd003bed4c61 | refs/heads/master | 2021-05-27T10:58:32.741270 | 2012-10-10T16:55:30 | 2012-10-10T16:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from distutils.core import setup, Extension
import platform
osname = platform.system()
if osname == "Windows":
shared_libs = []
else:
shared_libs = []
module = Extension('pyautoit',
sources = ['pyautoit.cpp'],
libraries = shared_libs,
library_dirs = [],
extra_objects = ["../build/lib/libautoit.a"],
)
setup(name = 'pyautoit',
version = '0.2.0',
description = 'Python module wrapping libautoit',
author = 'kimzhang',
author_email = 'analyst004@gmail.com',
ext_modules = [module])
| [
"analyst004@gmail.com"
] | analyst004@gmail.com |
d155707dd0625574b9dc3d610d97d6cc26fbfe3f | 8c2d17c1dd5409d63bde7907cdee4adaf4e9443a | /day_3.py | 81eac927ebee6866133652e83eeba8b84259704f | [] | no_license | kmb5/AdventOfCode2019 | f4099686028f7ddd5895918c9c55a7080936cc8c | f10dce035d6134800b4c83e07e1aa2a40061f49e | refs/heads/master | 2020-12-01T21:19:46.357511 | 2019-12-30T09:41:20 | 2019-12-30T09:41:20 | 230,774,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | #import pygame
#from pygame.locals import QUIT
from PIL import Image, ImageDraw
def main():
filename = 'inputs/day_3_input.txt'
with open(filename) as f:
puzzle_input = f.read().splitlines()
solution_part_1 = part_1(puzzle_input)
print(f'Part 1 solution: {solution_part_1}')
def part_1(puzzle_input):
wire_1 = puzzle_input[0].split(',')
wire_2 = puzzle_input[1].split(',')
origin_point = (8500,16000)
# Arbitrary number so that the wires will
# fit entriely on the image we generate at the end.
# For sure there is a math way of calculating this but I could not figure out,
# it was just trial and error until the whole image fit.
path_1 = draw_path(wire_1, start_coordinates=origin_point)
path_2 = draw_path(wire_2, start_coordinates=origin_point)
result = list(set(path_1).intersection(path_2))
index_of_origin = result.index(origin_point)
result.pop(index_of_origin) # removing the intersection at origin point
manhattans = []
for i in result:
'''Calculate Manhattan taxicab distance between each point and the origin'''
distance = abs((origin_point[0] - i[0])) + abs((origin_point[1] - i[1]))
manhattans.append(distance)
smallest_dist = min(manhattans) # This is our solution
''' This is only needed to render an image with the paths,
it is not a requirement for solving the case!'''
img = Image.new('RGB', ((15000,19000)))
# Again an arbitrary image size so that everything fits
ImageDraw.Draw(img).point(path_1, fill='red')
ImageDraw.Draw(img).point(path_2, fill='yellow')
img.save('day_3_visualisation.jpg')
return smallest_dist
def draw_path(wire_path: list, start_coordinates: tuple) -> list:
path = []
path.append(start_coordinates)
# Initializing the path with the starting coordinates
for instruction in wire_path:
direction = instruction[0]
distance = int(instruction[1:])
prev_point_x = path[-1][0]
prev_point_y = path[-1][1]
if direction == 'L':
new_points = [(prev_point_x - x, prev_point_y) for x in range(1, distance + 1)]
elif direction == 'R':
new_points = [(prev_point_x + x, prev_point_y) for x in range(1, distance + 1)]
elif direction == 'U':
new_points = [(prev_point_x, prev_point_y + y) for y in range(1, distance + 1)]
elif direction == 'D':
new_points = [(prev_point_x, prev_point_y - y) for y in range(1, distance + 1)]
path.extend(new_points)
return path
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
c886c20cf7de550debdfb0a88d9edcbafb45a992 | 0726db2d56b29f02a884885718deeddbf86df628 | /lienp/visualize.py | af217e55f8ec1524e381eb022bcd47bbb2f01101 | [] | no_license | makora9143/EquivCNP | 515dfd95557d8d3a21d3fc0f295ce885a9deb913 | a78dea12ab672e796c86427823c9f1b2fdd8df8d | refs/heads/master | 2023-03-17T04:34:26.320055 | 2021-03-05T18:02:18 | 2021-03-05T18:02:18 | 254,292,834 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | import io
import PIL.Image
import matplotlib.pyplot as plt
import torch
from torchvision.transforms import ToTensor, ToPILImage
from torchvision.utils import make_grid
def mnist_plot_function(target_x, target_y, context_x, context_y):
img = torch.zeros((28, 28, 3))
img[:, :, 2] = torch.ones((28, 28))
idx = (context_x + 14).clamp(0, 27).long()
img[idx[:, 0], idx[:, 1]] = context_y
print(f'num context:{context_x.shape[0]}')
plt.figure(figsize=(8, 4))
plt.subplot(121)
plt.imshow(img.numpy())
plt.gray()
plt.subplot(122)
plt.imshow(target_y.reshape(28, 28).numpy())
plt.show()
def plot_and_save_image(ctxs, tgts, preds, epoch=None):
ctx_img = []
tgt_img = []
pred_img = []
for ctx, tgt, tgt_y_dist in zip(ctxs, tgts, preds):
ctx_coords, ctx_values = ctx
tgt_coords, tgt_values = tgt
img = torch.zeros((28, 28, 3))
img[:, :, 2] = torch.ones((28, 28))
idx = (ctx_coords[0] + 14).clamp(0, 27).long()
img[idx[:, 0], idx[:, 1]] = ctx_values[0]
ctx_img.append(img.unsqueeze(0))
tgt_img.append(tgt_values.reshape(1, 1, 28, 28).repeat(1, 3, 1, 1))
pred_img.append(tgt_y_dist.mean.reshape(1, 1, 28, 28).repeat(1, 3, 1, 1))
ctx_img = torch.cat(ctx_img, 0).permute(0, 3, 1, 2).unsqueeze(1).to(torch.device('cpu'))
tgt_img = torch.cat(tgt_img, 0).unsqueeze(1).to(torch.device('cpu'))
pred_img = torch.cat(pred_img, 0).unsqueeze(1).to(torch.device('cpu'))
img = torch.cat([ctx_img, tgt_img, pred_img], 1).reshape(-1, 3, 28, 28)
img = make_grid(img, nrow=6).permute(1, 2, 0).clamp(0, 1)
plt.imsave("epoch_{}.png".format(epoch if epoch is not None else "test"), img.numpy())
def plot_and_save_image2(ctxs, tgts, preds, img_shape, epoch=None):
ctx_img = []
tgt_img = []
pred_img = []
C, W, H = img_shape
for ctx_mask, tgt, tgt_y_dist in zip(ctxs, tgts, preds):
img = torch.zeros((W, H, 3))
img[:, :, 2] = torch.ones((W, H))
img[ctx_mask[0, 0] == 1] = tgt[0, 0][ctx_mask[0, 0] == 1].unsqueeze(-1)
ctx_img.append(img)
tgt_img.append(tgt.repeat(1, 3//C, 1, 1))
pred_img.append(tgt_y_dist.mean.reshape(1, W, H, C).repeat(1, 1, 1, 3//C))
ctx_img = torch.stack(ctx_img, 0).permute(0, 3, 1, 2).unsqueeze(1).to(torch.device('cpu'))
tgt_img = torch.cat(tgt_img, 0).unsqueeze(1).to(torch.device('cpu'))
pred_img = torch.cat(pred_img, 0).unsqueeze(1).to(torch.device('cpu')).permute(0, 1, 4, 2, 3)
img = torch.cat([ctx_img, tgt_img, pred_img], 1).reshape(-1, 3, W, H)
img = make_grid(img, nrow=6).permute(1, 2, 0).clamp(0, 1)
plt.imsave("epoch_{}.png".format(epoch if epoch is not None else "test"), img.numpy())
def plot_and_save_graph(ctxs, tgts, preds, gp_preds, epoch=None):
    """Plot 1-D regression fits (model vs. GP reference) and save a grid.

    For every (context, target, model prediction, GP prediction) tuple, draws
    the GP mean/confidence band in green, the model's in blue, the ground
    truth as a dashed gray line and the context points as black dots. Each
    figure is rasterised through an in-memory PNG buffer and the collection
    is saved as ``epoch_<epoch>.png`` (``epoch_test.png`` when *epoch* is
    None).
    """
    rendered = []
    for ctx, tgt, tgt_y_dist, gp_dist in zip(ctxs, tgts, preds, gp_preds):
        ctx_coords, ctx_values = ctx
        tgt_coords, tgt_values = tgt
        mean = tgt_y_dist.mean.cpu()
        lower, upper = tgt_y_dist.confidence_region()
        gp_mean = gp_dist.mean.cpu()
        gp_lower, gp_upper = gp_dist.confidence_region()
        # GP reference fit (green).
        plt.plot(tgt_coords.reshape(-1).cpu(), gp_mean.detach().cpu().reshape(-1), color='green')
        plt.fill_between(tgt_coords.cpu().reshape(-1), gp_lower.detach().cpu().reshape(-1), gp_upper.detach().cpu().reshape(-1), alpha=0.2, color='green')
        # Model fit (blue).
        plt.plot(tgt_coords.reshape(-1).cpu(), mean.detach().cpu().reshape(-1), color='blue')
        plt.fill_between(tgt_coords.cpu().reshape(-1), lower.detach().cpu().reshape(-1), upper.detach().cpu().reshape(-1), alpha=0.2, color='blue')
        # Ground truth and observed context points.
        plt.plot(tgt_coords.reshape(-1).cpu(), tgt_values.reshape(-1), '--', color='gray')
        plt.plot(ctx_coords.reshape(-1).cpu(), ctx_values.reshape(-1).cpu(), 'o', color='black')
        # Rasterise the current figure into a tensor via an in-memory PNG.
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        plt.clf()
        plt.close()
        figure_tensor = ToTensor()(PIL.Image.open(buf))
        buf.close()
        rendered.append(figure_tensor)
    out = ToPILImage()(make_grid(torch.stack(rendered, 0), nrow=2))
    out.save("epoch_{}.png".format(epoch if epoch is not None else "test"))
| [
"makoto.kawano@gmail.com"
] | makoto.kawano@gmail.com |
a1b2a9fb37f905611061cfcd13a857ef8d26ffe9 | e068f64dfbcc00ae4c84db0432b9fbcc8d2df0c7 | /orchestra/google/marketing_platform/utils/schema/erf/TargetUnion.py | 6f7fed3d24853e27ba56bee71d7ecb070e656b11 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | trakken/orchestra | 947d58bbe9a645c3554c295dd4512e86b67641a5 | 4803530a7df9c685893aca87250e3f736de23bc8 | refs/heads/master | 2022-12-09T04:05:50.871943 | 2020-03-02T16:09:49 | 2020-03-02T16:09:49 | 295,346,069 | 0 | 0 | Apache-2.0 | 2020-09-14T08:01:09 | 2020-09-14T08:01:08 | null | UTF-8 | Python | false | false | 1,201 | py | ###########################################################################
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# BigQuery schema for the ERF "TargetUnion" structure: a repeated record of
# targeting criteria combined as a union, plus a top-level exclusion flag.
TargetUnion_Schema = [
    {
        "name": "union",
        "type": "RECORD",
        "mode": "REPEATED",
        "fields": [
            # Numeric id of the targeted criterion.
            {"name": "criteria_id", "type": "INTEGER", "mode": "NULLABLE"},
            # Free-form parameter attached to the criterion.
            {"name": "parameter", "type": "STRING", "mode": "NULLABLE"},
            # Whether this single criterion is negated.
            {"name": "excluded", "type": "BOOLEAN", "mode": "NULLABLE"},
        ],
    },
    # Whether the union as a whole is an exclusion.
    {"name": "excluded", "type": "BOOLEAN", "mode": "NULLABLE"},
]
| [
"kingsleykelly@google.com"
] | kingsleykelly@google.com |
8d44b23e3c33f8af0e8609deae0bf22a79867a27 | 20d87b9d0bac919a7fd9c07af940b5813784b1f6 | /flask_sqlite_sqlalchemy/app.py | 8d3f610ac364d8124add2dd5a33a4f9698ada2e7 | [] | no_license | anacarolinacv/api-python-flask-example | 679e26bc5a43e64aab2457b95ba1013bad951056 | 96438e369084af6e8f6bcd02f437e331b6b67abe | refs/heads/main | 2023-06-20T00:01:56.811601 | 2021-07-15T01:15:49 | 2021-07-15T01:15:49 | 386,044,169 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | from flask import jsonify
from marshmallow import ValidationError
from na import ma
from db import db
from server.instance import server
# Module-level aliases for the Api and Flask app held by the shared
# ``server`` instance, so route modules and the main guard below can use
# them directly.
api = server.api
app = server.app
# NOTE(review): ``before_first_request`` was deprecated in Flask 2.2 and
# removed in 2.3 — confirm the pinned Flask version supports it.
@app.before_first_request
def create_tables():
    """Create all SQLAlchemy tables before the first request is served."""
    db.create_all()
if __name__ == '__main__':
    # Bind the SQLAlchemy and Marshmallow extensions to the app, then start
    # the development server. NOTE(review): these init_app calls run only
    # when executed directly (``python app.py``), not under a WSGI server —
    # confirm that is intended.
    db.init_app(app)
    ma.init_app(app)
    server.run()
| [
"ana.vasconcelos@ccc.ufcg.edu.br"
] | ana.vasconcelos@ccc.ufcg.edu.br |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.