blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eba383154f45d3e7af31022c4c2cb7368e4e1f19 | 75e03232591b263a50523d7cfef4041db36caf01 | /VMWsolutions/at2-vclient-032/cft/stress_random_loop.py | 9838ee1acd308fedd0e76ae9218942c2a0100af3 | [] | no_license | adamkittel/src | aaf157062d069998a8d18841895e7362cf868ff9 | 11e3927bd990b885eba595346694de2d2601d5c9 | refs/heads/master | 2021-01-11T16:13:14.592894 | 2017-01-25T18:29:09 | 2017-01-25T18:29:09 | 80,040,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,389 | py | """
This action will run random stress tests in a loop
When run as a script, the following options/env variables apply:
--mvip The managementVIP of the cluster
SFMVIP env var
--user The cluster admin username
SFUSER env var
--pass The cluster admin password
SFPASS env var
--emailTo List of addresses to send email to
--iterations how many times to loop over the stress tests, 0=forever
"""
import sys
import time
from optparse import OptionParser
import lib.libsf as libsf
from lib.libsf import mylog
import logging
import lib.sfdefaults as sfdefaults
from lib.action_base import ActionBase
import send_email
import random
import stress_netbounce_sequential
import stress_nodefail_sequential
import stress_reboot_master
import stress_reboot_random
import stress_reboot_sequential
import stress_volume_rebalance
import get_active_nodes
class StressRandomLoopAction(ActionBase):
    """Run randomly selected stress tests against a cluster in a loop.

    Each iteration picks one stress test module at random, runs it with a
    random iteration count (1-10), emails a failure report if it raises,
    then waits two minutes before the next pick.
    """

    class Events:
        """
        Events that this action defines
        """
        FAILURE = "FAILURE"

    def __init__(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if this class is ever subclassed.
        super(StressRandomLoopAction, self).__init__(StressRandomLoopAction.Events)

    def ValidateArgs(self, args):
        """Validate the arguments passed to Execute"""
        libsf.ValidateArgs({"mvip" : libsf.IsValidIpv4Address,
                            "username" : None,
                            "password" : None,
                            "iterationCount" : libsf.IsInteger,
                            "emailTo" : None},
            args)

    def Execute(self, mvip=sfdefaults.mvip, username=sfdefaults.username, password=sfdefaults.password, iterationCount=100, emailTo=None, debug=False):
        """Run random stress tests in a loop.

        Args:
            mvip:           management VIP of the cluster
            username:       cluster admin username
            password:       cluster admin password
            iterationCount: number of loop iterations; 0 means loop forever
            emailTo:        list of addresses to send email updates to
            debug:          show debug-level console logging

        Returns:
            True on completion, False if the active node list could not be fetched.
        """
        self.ValidateArgs(locals())
        if debug:
            mylog.console.setLevel(logging.DEBUG)

        if iterationCount == 0:
            mylog.warning("Looping Forever")

        stress_test = ["stress_netbounce_sequential", "stress_nodefail_sequential", "stress_reboot_master", "stress_reboot_random", "stress_reboot_sequential", "stress_volume_rebalance"]

        nodes_list = get_active_nodes.Get(mvip=mvip, username=username, password=password)
        if nodes_list is False:
            mylog.error("Could not get the list of active nodes")
            return False

        start_time = time.time()
        i = 0
        # BUG FIX: the original looped "for i in xrange(0, count)" and bumped
        # count inside the loop for the forever case; xrange() is fixed at
        # creation, so "forever" mode actually stopped after 10 iterations.
        # A while loop really runs until interrupted when iterationCount == 0.
        while iterationCount == 0 or i < iterationCount:
            test_name = random.choice(stress_test)
            random_iteration = random.randint(1, 10)
            if iterationCount == 0:
                mylog.banner("Starting " + test_name.replace("_", " ").title() + " on " + mvip + " with " + str(random_iteration) + " iterations" + "\nIteration " + str(i) + " of infinity")
            else:
                mylog.banner("Starting " + test_name.replace("_", " ").title() + " on " + mvip + " with " + str(random_iteration) + " iterations" + "\nIteration " + str(i) + " of " + str(iterationCount))
            try:
                if test_name == "stress_netbounce_sequential":
                    stress_netbounce_sequential.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                elif test_name == "stress_nodefail_sequential":
                    # Failing nodes needs enough nodes left for a quorum
                    if len(nodes_list) <= 3:
                        mylog.banner("Skipping Stress Nodefail Sequential because there are not enough nodes")
                    else:
                        stress_nodefail_sequential.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                elif test_name == "stress_reboot_master":
                    stress_reboot_master.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                elif test_name == "stress_reboot_random":
                    stress_reboot_random.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                elif test_name == "stress_reboot_sequential":
                    stress_reboot_sequential.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
                elif test_name == "stress_volume_rebalance":
                    stress_volume_rebalance.Execute(mvip=mvip, username=username, password=password, iteration=random_iteration, emailTo=emailTo)
            except Exception as e:
                # Report the failure but keep looping over the remaining tests
                mylog.error("Could not preform " + test_name.replace("_", " ").title())
                send_email.Execute(emailTo=emailTo, emailSubject="Test " + test_name.replace("_", " ").title() + " failed", emailBody=str(e))
            # Give the cluster time to settle between tests
            mylog.step("Waiting 2 minutes")
            time.sleep(120)
            i += 1

        end_time = time.time()
        delta_time = libsf.SecondsToElapsedStr(end_time - start_time)
        # max() guards against division by zero if no iteration ever ran
        ave_time_per_iteration = libsf.SecondsToElapsedStr((end_time - start_time) / max(i, 1))
        mylog.info("\tTotal Time: " + delta_time)
        mylog.info("\tNumber of Iterations: " + str(i))
        mylog.info("\tAverage Time Per Iteration: " + ave_time_per_iteration)
        emailBody = "The stress tests ran for " + delta_time + "\nTotal Iterations " + str(i) + "\nAverage Time Per Iteration " + ave_time_per_iteration
        send_email.Execute(emailTo=emailTo, emailSubject="The Testing Finished", emailBody=emailBody)
        mylog.passed("Passed " + str(i) + " iterations of random stress testing")
        return True
# Instantiate the class and add its attributes to the module.
# This allows it to be executed simply as module_name.Execute
libsf.PopulateActionModule(sys.modules[__name__])
if __name__ == '__main__':
    mylog.debug("Starting " + str(sys.argv))

    # Parse command line arguments
    parser = OptionParser(option_class=libsf.ListOption, description=libsf.GetFirstLine(sys.modules[__name__].__doc__))
    parser.add_option("-m", "--mvip", type="string", dest="mvip", default=sfdefaults.mvip, help="the management IP of the cluster")
    parser.add_option("-u", "--user", type="string", dest="username", default=sfdefaults.username, help="the admin account for the cluster")
    parser.add_option("-p", "--pass", type="string", dest="password", default=sfdefaults.password, help="the admin password for the cluster")
    parser.add_option("--iterations", type="int", dest="iterations", default=100, help="How many iterations to loop over. 0 = Forever")
    parser.add_option("--email_to", type="string", dest="email_to", default=None, help="The email account to send the results / updates to")
    parser.add_option("--debug", action="store_true", dest="debug", default=False, help="display more verbose messages")
    (options, extra_args) = parser.parse_args()

    try:
        timer = libsf.ScriptTimer()
        # Execute is injected at module scope by libsf.PopulateActionModule above
        if Execute(options.mvip, options.username, options.password, options.iterations, options.email_to, options.debug):
            sys.exit(0)
        else:
            sys.exit(1)
    except libsf.SfArgumentError as e:
        mylog.error("Invalid arguments - \n" + str(e))
        sys.exit(1)
    except SystemExit:
        # Let the sys.exit calls above pass through untouched
        raise
    except KeyboardInterrupt:
        mylog.warning("Aborted by user")
        # NOTE(review): Abort() is not defined in this module; this handler
        # would raise NameError if it ever ran -- confirm where Abort comes from
        Abort()
        exit(1)
    except:
        # Bare except: last-resort logging of anything unexpected
        mylog.exception("Unhandled exception")
        exit(1)
    exit(0)
| [
"adam.kittel@solidfire.com"
] | adam.kittel@solidfire.com |
d26c8d733fd5edcbcf3cdce1accbdc56c474b637 | d245c87a5082027f4b390210e0beae15ce19f321 | /python/crc16.py | d52d40b2e38a314dc0c4278bec53300ed81f03fc | [] | no_license | pazderski/spair-stm32-firmware | 9a85e83f1bbdb3fc55a440f039fc18d98e723815 | 26c3a226a4b7dec3e735ab4712f1ad36b97f4a8b | refs/heads/master | 2021-01-10T05:26:55.284937 | 2020-03-02T22:33:11 | 2020-03-02T22:33:11 | 36,146,152 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | #* File: CRC16.PY
#* CRC-16 (reverse) table lookup for Modbus or DF1
#*
# Seed values: Modbus starts its CRC at 0xFFFF, Allen-Bradley DF1 at 0x0000
INITIAL_MODBUS = 0xFFFF
INITIAL_DF1 = 0x0000

# 256-entry byte-wise lookup table for the bit-reflected CRC-16 polynomial
# 0xA001 (CRC-16/ARC family, used by Modbus RTU and DF1); one table lookup
# processes one input byte.
table = (
    0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
    0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
    0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
    0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
    0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
    0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
    0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
    0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
    0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
    0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
    0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
    0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
    0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
    0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
    0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
    0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
    0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
    0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
    0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
    0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
    0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
    0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
    0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
    0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
    0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
    0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
    0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
    0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
    0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
    0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
    0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
    0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 )
def calcByte(ch, crc):
    """Given a new byte and the previous CRC, calculate a new CRC-16.

    `ch` may be a 1-character string (its ordinal is used) or an int 0-255.
    Returns the updated CRC masked to 16 bits.
    """
    # isinstance() replaces the original non-idiomatic type(ch) == type("c")
    # comparison; it also accepts str subclasses.
    if isinstance(ch, str):
        by = ord(ch)
    else:
        by = ch
    crc = (crc >> 8) ^ table[(crc ^ by) & 0xFF]
    return crc & 0xFFFF
def calcString(st, crc):
    """Given a binary string and a starting CRC, calculate the final CRC-16.

    Accepts either a str of characters or an iterable of ints (e.g. bytes).
    The original XORed the raw character into the CRC, which raises
    TypeError for any str input (iterating a str yields 1-char strings,
    not ints); converting via ord() matches calcByte's behavior.
    """
    for ch in st:
        by = ord(ch) if isinstance(ch, str) else ch
        crc = (crc >> 8) ^ table[(crc ^ by) & 0xFF]
    return crc
# end file
| [
"dariusz.pazderski@put.poznan.pl"
] | dariusz.pazderski@put.poznan.pl |
76e4e94088cac8fe23976295b68806f853d5a0ef | cc8b87bef3cb2d93928d7882c1aa2cb6d29bd819 | /Python/cali_usa.py | 81f2f8cd3bcd4b918b3200fd8d66dde07995edb4 | [] | no_license | angelicaba23/MisionTic2022 | e61cf66ebe069d1b72995ebc466d85224486ad1c | e547343dd7f7dcbaf49958a99f0722f53c4df3a3 | refs/heads/master | 2023-08-01T03:42:36.252984 | 2021-09-17T02:29:06 | 2021-09-17T02:29:06 | 379,116,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | """
------------MinTic-----------------
-------------UPB-------------------
-------Angélica Barranco-----------
"""
# invoice counter (assigned but never used in this script -- leftover?)
num_factura = 0
menu = "si"
# Outer loop: one pass per course; repeats while the user answers "si"
while menu == "si":
    print("\nBienvenido al sistema de calificacion")
    # Accumulated score totals per letter grade (nota_*) and how many
    # students received each letter (c_*)
    nota_a = 0
    nota_b = 0
    nota_c = 0
    nota_d = 0
    nota_f = 0
    c_f = 0
    c_d = 0
    c_c = 0
    c_b = 0
    c_a = 0
    # NOTE(review): menu2 is assigned but never read -- dead variable
    menu2= "si"
    curso = input("\nIngrese el curso: ")
    estudiante = int(input("\nIngrese el numero de estudiantes: "))
    for i in range(estudiante):
        # 200 is an out-of-range sentinel so the validation loop runs at
        # least once; re-prompt until the grade is within 0-100
        grado = 200
        while grado not in range(101):
            grado = int(input(f"\nIngrese la calificacion en grado numerico [0-100] del estudiante {i+1}: "))
        else:
            pass
        # Map the numeric grade to a US letter grade and accumulate the
        # per-letter totals for the course averages below
        if grado < 60:
            letra = "F"
            nota_f = nota_f + grado
            c_f = c_f+ 1
        elif 60 <= grado < 70:
            letra = "D"
            nota_d+= grado
            c_d += 1
        elif 70 <= grado < 80:
            letra = "C"
            nota_c += grado
            c_c += 1
        elif 80 <= grado < 90:
            letra = "B"
            nota_b += grado
            c_b += 1
        elif grado >= 90:
            letra = "A"
            nota_a += grado
            c_a += 1
        else:
            letra=""
        print(f"\n\tEstudiante {i+1} : {letra}")
    # Per-letter averages; each guard avoids dividing by zero when no
    # student earned that letter
    if c_f != 0:
        promedio_f = nota_f / c_f
    else:
        promedio_f = 0
    if c_d != 0:
        promedio_d = nota_d / c_d
    else:
        promedio_d = 0
    if c_c != 0:
        promedio_c = nota_c / c_c
    else:
        promedio_c = 0
    if c_b != 0:
        promedio_b = nota_b / c_b
    else:
        promedio_b = 0
    if c_a != 0:
        promedio_a = nota_a / c_a
    else:
        promedio_a = 0
    # Whole-course average over all students (raises ZeroDivisionError if
    # the user entered 0 students -- presumably assumed >= 1)
    promedio = (nota_f+nota_a+nota_b+nota_c+nota_d)/estudiante
    print(f"\n\t--------- CURSO {curso} ----------")
    print(f"\n\t\t\t\t PROMEDIO")
    print(f"\n\t A \t\t\t {promedio_a}")
    print(f"\n\t B \t\t\t {promedio_b}")
    print(f"\n\t C \t\t\t {promedio_c}")
    print(f"\n\t D \t\t\t {promedio_d}")
    print(f"\n\t F \t\t\t {promedio_f}")
    print(f"\n\t CURSO \t\t\t {promedio}")
    menu = input("\n Desea agregar notas de un nuevo curso? si/no: ")
"angelicaba9923@gmail.com"
] | angelicaba9923@gmail.com |
0b130d34300f0d54fda9186249d00d2196464eda | d2ada8e9dea0a59476dbbdcfdebc3b8eed951271 | /CH02/bh_sshserver.py | 5046e3f12011c7357d50aa4e84956dbebd0307ea | [] | no_license | sadavoya/bhp | dccf211f4bd95f5eaf69e44c3bfee8f7d07af688 | 6fbf1be8ca0f83363234d9c95170bdd770716c28 | refs/heads/master | 2021-01-13T14:51:13.347114 | 2017-02-21T01:39:57 | 2017-02-21T01:39:57 | 76,486,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py | #!/usr/bin/env python
'''SSH'''
import socket
import threading
import paramiko
import sys
# using the demo keys in the paramiko demo files
host_key = paramiko.RSAKey(filename='test_rsa.key')
#print host_key.get_base64()
class Server(paramiko.ServerInterface):
    """Minimal SSH server interface: one session channel, one hardcoded login."""

    def __init__(self):
        self.event = threading.Event()

    def check_channel_request(self, kind, chanid):
        # Only plain "session" channels are permitted.
        if kind != 'session':
            return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        return paramiko.OPEN_SUCCEEDED

    def check_auth_password(self, username, password):
        # Single hardcoded credential pair for this demo server.
        if (username, password) == ('joker', 'joker'):
            return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED
def main():
    '''Bind to <argv[1]>:<argv[2]>, accept one client, serve an SSH command loop.'''
    server = sys.argv[1]
    ssh_port = int(sys.argv[2])
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the server rebind quickly after a restart
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((server, ssh_port))
        sock.listen(100)
        print '[+] Listening for connection...'
        # Blocks until the first client connects; only one client is served
        client, addr = sock.accept()
    except Exception, e:
        print '[-] Listen failed: ' + str(e)
        sys.exit(1)
    #print '[+] Got a connection to %s:%d!' % (addr[1], addr[2])
    try:
        # Wrap the raw TCP socket in an SSH transport using the module-level host key
        bh_session = paramiko.Transport(client)
        bh_session.add_server_key(host_key)
        server = Server()  # note: rebinds 'server' (was the bind address string)
        try:
            bh_session.start_server(server=server)
        except paramiko.SSHException, x:
            # NOTE(review): execution continues after a failed negotiation;
            # the accept() below would then fail or return None -- confirm intent
            print '[-] SSH negotiation failed.'
        # Wait up to 20 seconds for the client to open a channel
        chan = bh_session.accept(20)
        print '[+] Authenticated!'
        print chan.recv(1024)
        chan.send('Welcome to bh_ssh')
        while True:
            try:
                command = raw_input("Enter command: ").strip('\n')
                if command != 'exit':
                    chan.send(command)
                    print chan.recv(1024) + '\n'
                else:
                    chan.send('exit')
                    print 'exiting'
                    bh_session.close()
                    # Deliberately raised to break out to the outer handler below
                    raise Exception('exit')
            except KeyboardInterrupt:
                bh_session.close()
    except Exception, e:
        print '[-] Caught exception: ' + str(e)
        try:
            bh_session.close()
        except:
            pass
        sys.exit(1)

main()
"root@localhost.localdomain"
] | root@localhost.localdomain |
67ffd9c692b0207ce65232a7eec862d400e10e60 | 89be3dbe4973f65b17a98780f5b0edce2b3634b0 | /abel/tests/test_tools_center.py | 3bd695c474f386df2c5535e1cf57a56305d97a9f | [
"MIT"
] | permissive | Yarubaobei/PyAbel | 0b53daf496dbc0e88547d166267285d72306c702 | 8c881b59a36ed5bd8ad841e38dc942c23c0ff112 | refs/heads/master | 2020-05-19T05:40:47.176636 | 2019-05-04T05:24:20 | 2019-05-04T05:24:20 | 184,854,479 | 0 | 0 | MIT | 2019-05-04T05:06:37 | 2019-05-04T05:06:37 | null | UTF-8 | Python | false | false | 1,792 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from numpy.testing import assert_allclose
import abel
from scipy.ndimage.interpolation import shift
def test_center_image():
    """Exercise abel.tools.center find_center/center_image on a shifted image.

    Tolerances are atol=1 throughout because centering is only expected to
    be pixel-accurate here.
    """
    # BASEX sample image, Gaussians at 10, 15, 20, 70,85, 100, 145, 150, 155
    # image width, height n = 361, center = (180, 180)
    IM = abel.tools.analytical.SampleImage(n=361, name="dribinski").image

    # artificially displace center, now at (179, 182)
    IMx = shift(IM, (-1, 2))
    true_center = (179, 182)

    # find_center using 'slice' method
    center = abel.tools.center.find_center(IMx, center="slice")
    assert_allclose(center, true_center, atol=1)

    # find_center using 'com' (center of mass) method
    center = abel.tools.center.find_center(IMx, center="com")
    assert_allclose(center, true_center, atol=1)

    # check single axis - vertical
    # center shifted image IMx in the vertical direction only
    IMc = abel.tools.center.center_image(IMx, center="com", axes=1)
    # determine the center; only the row coordinate should be restored
    center = abel.tools.center.find_center(IMc, center="com")
    assert_allclose(center, (179, 180), atol=1)

    # check single axis - horizontal
    # center shifted image IMx in the horizontal direction only
    IMc = abel.tools.center.center_image(IMx, center="com", axes=0)
    center = abel.tools.center.find_center(IMc, center="com")
    assert_allclose(center, (180, 182), atol=1)

    # check even image size returns odd
    # drop off one column, to make an even column image
    IM = IM[:, :-1]
    m, n = IM.shape
    IMy = abel.tools.center.center_image(IM, center="slice", odd_size=True)
    assert_allclose(IMy.shape, (m, n-1))

if __name__ == "__main__":
    test_center_image()
| [
"Stephen.Gibson@anu.edu.au"
] | Stephen.Gibson@anu.edu.au |
9e304994375f0455b78a9d29d08791808a9cccc8 | 6b77fc3e8a2b919483723c02ab47681f6c487caa | /main.py | 30ccf696cee9f3e806a1491e43f53d4984ec88ab | [] | no_license | kazato110tm/gcp_python_app | 1d94482eda88e176364ceb6d1d6be39676780df5 | 1a5a2ea65ad82da8a7261d84908faa136fb067d3 | refs/heads/master | 2022-12-06T21:53:56.408582 | 2020-08-23T16:01:10 | 2020-08-23T16:01:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import logging
from flask import Flask, redirect, request, render_template
from google.appengine.ext import ndb
class Message(ndb.Model):
    """Datastore model for one guestbook message."""
    # user-supplied message text
    body = ndb.StringProperty()
    # timestamp set automatically on first put()
    created = ndb.DateTimeProperty(auto_now_add=True)
app = Flask(__name__)
@app.route('/')
def hello():
    """Render the index page listing every stored Message."""
    return render_template('hello.html', messages=Message.query().fetch())
@app.route('/add', methods=['POST'])
def add_message():
    """Persist the posted 'message' form field as a Message, then go home."""
    body = request.form.get('message', '')
    Message(body=body).put()
    return redirect('/')
@app.errorhandler(500)
def server_error(e):
    """Flask 500 handler: log the stacktrace, return a generic error message."""
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500
# [END app]
| [
"deguchi.k.110@gmail.com"
] | deguchi.k.110@gmail.com |
f1a2454b34e5977852b4f29981527465145cb558 | c86c07584e618c5a4936b13768ef417a319d4b06 | /show_user_permissions.py | 58928ceed0f506d981b1709985ce96329146f53c | [] | no_license | DivvyCloud/tools | 24c1404576e1d30625ae3be16b21c033c8e0bad2 | 53b3686b89e23fcfa44cd4fb95014929ee3cca27 | refs/heads/master | 2020-07-12T07:27:44.537320 | 2019-11-01T18:10:03 | 2019-11-01T18:10:03 | 204,754,278 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,059 | py | # Script to list all permissions a user has via groups and roles
##### SAMPLE OUTPUT
'''
[Alex-MBP scripts]$python show_user_permissions.py
User: ben.calpotura
Number of attached groups: 3
Roles for group: Marketing Team
[]
Roles for group: DivvySales
[ { 'add_cloud': False,
'all_permissions': False,
'badge_scopes': [],
'cloud_scopes': [],
'delete': False,
'description': 'Testing role',
'global_scope': True,
'groups': ['divvyusergroup:39', 'divvyusergroup:45'],
'manage': True,
'name': 'Global View-Provision-Manage',
'provision': True,
'resource_group_scopes': [],
'resource_id': 'divvyrole:1:25',
'view': True},
{ 'add_cloud': False,
'all_permissions': False,
'badge_scopes': [],
'cloud_scopes': [],
'delete': False,
'description': 'DivvyCloud Sales Team role',
'global_scope': True,
'groups': ['divvyusergroup:45'],
'manage': True,
'name': 'DivvySales',
'provision': True,
'resource_group_scopes': [],
'resource_id': 'divvyrole:1:34',
'view': True}]
Roles for group: Group Curation
[ { 'add_cloud': False,
'all_permissions': False,
'badge_scopes': [],
'cloud_scopes': [],
'delete': False,
'description': 'Curate into whitelist RG',
'global_scope': False,
'groups': ['divvyusergroup:52', 'divvyusergroup:64'],
'manage': True,
'name': 'Resource Group Curation',
'provision': False,
'resource_group_scopes': ['resourcegroup:64:'],
'resource_id': 'divvyrole:1:53',
'view': True}]
'''
import json
import requests
import getpass
import pprint
pp = pprint.PrettyPrinter(indent=4)

# Username/password to authenticate against the API
username = ""
password = "" # Leave this blank if you don't want it in plaintext and it'll prompt you to input it when running the script.

# User in DivvyCloud whose permissions will be listed
divvy_user = ""

# API URL
base_url = "https://sales-demo.divvycloud.com"

# Param validation: prompt interactively for anything not hardcoded above
if not username:
    username = input("Username: ")

if not password:
    passwd = getpass.getpass('Password:')
else:
    passwd = password

if not base_url:
    base_url = input("Base URL (EX: http://localhost:8001 or http://45.59.252.4:8001): ")

if not divvy_user:
    divvy_user = input("Username in DivvyCloud: ")

# Full login URL
login_url = base_url + '/v2/public/user/login'

# Log in and return the API session token
def get_auth_token():
    response = requests.post(
        url=login_url,
        data=json.dumps({"username": username, "password": passwd}),
        headers={
            'Content-Type': 'application/json;charset=UTF-8',
            'Accept': 'application/json'
        })
    return response.json()['session_id']

auth_token = get_auth_token()

# Common headers (including the session token) for all subsequent calls
headers = {
    'Content-Type': 'application/json;charset=UTF-8',
    'Accept': 'application/json',
    'X-Auth-Token': auth_token
}

# Fetch the list of all users
def get_users():
    data = {}
    response = requests.get(
        url=base_url + '/v2/public/users/list',
        data=json.dumps(data),
        headers=headers
    )
    return response.json()

# Fetch the group membership (with roles) for one user id
def get_group_info(user_id):
    data = {}
    response = requests.post(
        url=base_url + '/v2/prototype/user/divvyuser:' + user_id + ':/groups/list',
        data=json.dumps(data),
        headers=headers
    )
    return response.json()

# Find the requested user and print each group plus the roles it grants.
# NOTE(review): this loop reuses (shadows) the login `username` variable and
# does not break after a match -- harmless here, but worth confirming.
user_list = get_users()
#print(user_list)
for user_info in user_list['users']:
    username = user_info['username']
    if username == divvy_user:
        # List group info for the user
        group_info = get_group_info(str(user_info['user_id']))
        print("User: " + username)
        print("Number of attached groups: " + str(len(group_info['groups'])))
        for group in group_info['groups']:
            print("Roles for group: " + group['name'])
            #print(group['roles'])
            pp.pprint(group['roles'])
| [
"alex.corstorphine@divvycloud.com"
] | alex.corstorphine@divvycloud.com |
e89d6dc70ef70ba87520aa3295eb41f07cb4aaa9 | 2a3606551a4d850a7b4d6a4e08089c51108ef7be | /plugin.video.mrknow/resources/lib/crypto/keyedHash/pbkdf2.py | cf79523b747c13cbeb4fb110e54813a48c123a41 | [
"Apache-2.0"
] | permissive | rrosajp/filmkodi | a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | refs/heads/master | 2021-09-18T06:03:17.561062 | 2018-06-22T23:28:53 | 2018-06-22T23:28:53 | 234,768,781 | 1 | 0 | Apache-2.0 | 2021-06-03T20:33:07 | 2020-01-18T17:11:57 | null | WINDOWS-1252 | Python | false | false | 1,571 | py | # -*- coding: iso-8859-1 -*-
""" crypto.keyedHash.pbkdf2
Password Based Key Derivation Function 2
References: RFC2898, B. Kaliski, September 2000, PKCS #5
This function is used for IEEE 802.11/WPA passphrase to key hashing
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
"""
from ..keyedHash.hmacHash import HMAC_SHA1
from ..common import xor
from math import ceil
from struct import pack
def pbkdf2(password, salt, iterations, keySize, PRF=HMAC_SHA1):
    """ Create a key of keySize bytes from password and salt (RFC 2898 PBKDF2).

        password   - pass phrase (at most 63 characters)
        salt       - salt string, mixed into each PRF block
        iterations - PRF iteration count per output block
        keySize    - number of key bytes to produce
        PRF        - keyed pseudo-random function factory (default HMAC-SHA1)

        Raises ValueError for out-of-range password/keySize. The original
        raised bare strings, which has been a TypeError since string
        exceptions were removed in Python 2.6 -- the checks could never fire
        as intended.
    """
    if len(password) > 63:
        raise ValueError('Password too long for pbkdf2')
    #if len(password)<8 : raise ValueError('Password too short for pbkdf2')
    if keySize > 10000:   # spec actually allows up to (2**32 - 1) * digestSize
        raise ValueError('keySize too long for PBKDF2')
    prf = PRF(key=password)
    numBlocks = int(ceil(1. * keySize / prf.digest_size))   # ceiling division
    key = ''
    for block in range(1, numBlocks + 1):
        # F(P, salt, iterations, i):  U1 = PRF(salt || INT(i)),  F = U1,
        # then U_c = PRF(U_{c-1}) and F ^= U_c for c = 2..iterations.
        # (The original computed the identical U1 twice with two PRF calls.)
        F = U = prf(salt + pack('>i', block))   # block index as 4 big-endian bytes
        for count in range(2, iterations + 1):
            U = prf(U)
            F = xor(F, U)
        key = key + F
    return key[:keySize]
def dot11PassPhraseToPSK(passPhrase, ssid):
    """ The 802.11 TGi recommended pass-phrase-to-preshared-key mapping.

        This function simply uses pbkdf2 with iterations=4096 and keySize=32.
        Raises ValueError for pass phrases outside 8..63 characters; the
        original used assert, which is silently stripped under python -O,
        so the length check could disappear in optimized runs.
    """
    if not (7 < len(passPhrase) < 64):
        raise ValueError('Passphrase must be greater than 7 or less than 64 characters')
    return pbkdf2(passPhrase, ssid, iterations=4096, keySize=32)
| [
"mrknow@interia.pl"
] | mrknow@interia.pl |
6d5294bd220daf2939a8aa6af9e29395f72721da | 72c02a60f2a2894a148f5fb4c8ff62e0b0a7fc4e | /news_system/exception.py | 848ec9f9f4ef5f637e6ed55002df3f683d459caf | [] | no_license | Kamonnny/news_system | a798c258dc84efb9756889e2c9eace9b1216c5b3 | 476b4fc0eec8ecb7349cc5e44df990a629c600e4 | refs/heads/main | 2023-03-29T11:46:36.089419 | 2021-03-25T12:45:19 | 2021-03-25T12:45:19 | 314,820,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | class APIError(Exception):
""" 自定义错误类 """
def __init__(self, code: int = 400, msg: str = "ok", data: dict = None):
self.code = code
self.msg = msg
self.data = data or {}
| [
"EachinChung@gmail.com"
] | EachinChung@gmail.com |
91956ba4d19b41720a01993ac3acbd491ad295d4 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/PythonTesting-BeginnersGuide/code/tests/test_chapter5/test_pid.py | 72b93cd1bcd67c97b5266912ef867908e2d9e800 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,718 | py | from unittest import TestCase, main
from mocker import Mocker
import pid
class test_pid_constructor(TestCase):
    """PID constructor tests: one with mocked time.time(), one with explicit when."""

    def test_without_when(self):
        # Mock time.time() to return exactly 1.0 for the single call the
        # constructor is expected to make
        mocker = Mocker()
        mock_time = mocker.replace('time.time')
        mock_time()
        mocker.result(1.0)
        mocker.replay()
        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 0, initial = 12)
        mocker.restore()
        mocker.verify()
        self.assertEqual(controller.gains, (0.5, 0.5, 0.5))
        self.assertAlmostEqual(controller.setpoint[0], 0.0)
        self.assertEqual(len(controller.setpoint), 1)
        self.assertAlmostEqual(controller.previous_time, 1.0)
        # previous_error = setpoint - initial = 0 - 12
        self.assertAlmostEqual(controller.previous_error, -12.0)
        self.assertAlmostEqual(controller.integrated_error, 0)

    def test_with_when(self):
        # Passing when= means the constructor never consults time.time()
        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 1, initial = 12,
                             when = 43)
        self.assertEqual(controller.gains, (0.5, 0.5, 0.5))
        self.assertAlmostEqual(controller.setpoint[0], 1.0)
        self.assertEqual(len(controller.setpoint), 1)
        self.assertAlmostEqual(controller.previous_time, 43.0)
        # previous_error = setpoint - initial = 1 - 12
        self.assertAlmostEqual(controller.previous_error, -11.0)
        self.assertAlmostEqual(controller.integrated_error, 0)
class test_calculate_response(TestCase):
    """calculate_response tests: mocked clock vs explicit when= timestamps."""

    def test_without_when(self):
        # Expect five time.time() calls total (one in the constructor,
        # one per calculate_response), returning 1.0 .. 5.0 in order
        mocker = Mocker()
        mock_time = mocker.replace('time.time')
        mock_time()
        mocker.result(1.0)
        mock_time()
        mocker.result(2.0)
        mock_time()
        mocker.result(3.0)
        mock_time()
        mocker.result(4.0)
        mock_time()
        mocker.result(5.0)
        mocker.replay()
        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 0, initial = 12)
        self.assertEqual(controller.calculate_response(6), -3)
        self.assertEqual(controller.calculate_response(3), -4.5)
        self.assertEqual(controller.calculate_response(-1.5), -0.75)
        self.assertEqual(controller.calculate_response(-2.25), -1.125)
        mocker.restore()
        mocker.verify()

    def test_with_when(self):
        # Same measurement sequence, but timestamps are passed explicitly,
        # so time.time() is never called -- responses must match the mocked run
        controller = pid.PID(P = 0.5, I = 0.5, D = 0.5,
                             setpoint = 0, initial = 12,
                             when = 1)
        self.assertEqual(controller.calculate_response(6, 2), -3)
        self.assertEqual(controller.calculate_response(3, 3), -4.5)
        self.assertEqual(controller.calculate_response(-1.5, 4), -0.75)
        self.assertEqual(controller.calculate_response(-2.25, 5), -1.125)
if __name__ == '__main__':
    # unittest.main, imported at the top of the file
    main()
| [
"GreenJedi@protonmail.com"
] | GreenJedi@protonmail.com |
002330b328f74d714aa97ba917e8782c833ab8da | 92c7311a8c145b2d415901991a459bf7d2734929 | /src/web/Result.py | d6cac504f8c02f8e040af32d4e04cdcf39c4d2b1 | [] | no_license | liuyanglxh/py-web | 3aa1043b672a034d548bce7042c8e0cf8faa24b2 | 441ed2077faeabf38f1449762a6ce692bb6a1115 | refs/heads/master | 2022-11-20T15:37:39.612580 | 2020-05-29T10:41:32 | 2020-05-29T10:41:32 | 267,832,787 | 0 | 0 | null | 2022-11-17T15:08:32 | 2020-05-29T10:40:30 | Python | UTF-8 | Python | false | false | 349 | py | from flask import json
class Result(object):
    """Simple API response wrapper: a success flag, a payload, and a status code."""

    # Class-level defaults, kept for backward compatibility with any code
    # that reads them off the class itself.
    success = True
    data = None
    code = 200

    def __init__(self, success=True, data=None, code=200):
        # Defaults now match the class-level values above; previously all
        # three arguments were required despite the declared defaults.
        self.success = success
        self.data = data
        self.code = code

    def __repr__(self):
        # Debug-friendly representation (the json.dumps demo below relies
        # on __dict__, which this does not affect)
        return 'Result(success=%r, data=%r, code=%r)' % (
            self.success, self.data, self.code)
# Demo: serialize a Result by dumping its instance __dict__
# (flask.json wrapper; note the Python 2 print statement)
r = Result(True, "a", 200)
print json.dumps(r, default=lambda o: o.__dict__, sort_keys=True, indent=4, ensure_ascii=False)
| [
"yang.liu@mail.dealmoon.com"
] | yang.liu@mail.dealmoon.com |
0e00d7b9dfd12f62cf14341e65cd37786e0b1482 | f687b45b061a0a4ed849d5d56e265a3423c95f56 | /mime_gen_both.py | 9f8121e8b8ff5edba0344a98ab758923591037af | [] | no_license | wwwlwscom/python | 45e52529fffccf161a0cff8aaf2d19a149ac2056 | 5478329f068f9a4eff5c07eee8005318b41b6440 | refs/heads/master | 2021-01-20T10:06:17.251976 | 2015-10-20T20:03:34 | 2015-10-20T20:03:34 | 41,769,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | #!/usr/bin/env python
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Utils, Encoders
import mimetypes, sys
def genpart(data, contenttype):
    """Build a MIME part for data; base64-encode anything that isn't text/*."""
    maintype, subtype = contenttype.split('/')
    if maintype == 'text':
        retval = MIMEText(data, _subtype=subtype)
    else:
        # Non-text payloads are wrapped raw and then base64-encoded in place
        retval = MIMEBase(maintype, subtype)
        retval.set_payload(data)
        Encoders.encode_base64(retval)
    return retval
def attachment(filename):
    """Build a MIME attachment part for *filename*.

    The MIME type is guessed from the file name; encoded files (e.g.
    .gz) and unguessable ones fall back to application/octet-stream so
    genpart() base64-wraps the payload.  A Content-Disposition header
    marks the part as an attachment.
    """
    mimetype, mimeencoding = mimetypes.guess_type(filename)
    if mimeencoding or (mimetype is None):
        mimetype = 'application/octet-stream'
    # 'with' guarantees the descriptor is closed even if read() or
    # genpart() raises; the original leaked it on error.
    with open(filename, 'rb') as fd:
        retval = genpart(fd.read(), mimetype)
    retval.add_header('Content-Disposition', 'attachment', filename=filename)
    return retval
# Plain-text and HTML versions of the same message body.
message = """Hello,
This is a test message from Rock. I hope you enjoy it!
--Anonymous"""
messagehtml = """Hello,<P>
This is a <B>great</B>test message from Rock. I hope you enjoy it!<P>
--<I>Anonymous<I>"""
# Top-level container: standard headers, a multipart/alternative body,
# and one attachment per command-line argument.
msg = MIMEMultipart()
msg['To'] = 'recipient@example.com'
msg['From'] = 'Test Sender <sender@example.com>'
msg['Subject'] = 'Test Message, Rock'
msg['Date'] = Utils.formatdate(localtime = 1)
msg['Message-ID'] = Utils.make_msgid()
# 'alternative' lets the mail client choose between text and HTML.
body = MIMEMultipart('alternative')
body.attach(genpart(message, 'text/plain'))
body.attach(genpart(messagehtml, 'text/html'))
msg.attach(body)
for filename in sys.argv[1:]:
    msg.attach(attachment(filename))
print msg.as_string()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
5589f53032b5d4e17f0d40bc8a5c208b3fe36000 | 2fd46266ea2a0155d9de4e539970cf7fcdcdfc69 | /backhotel/migrations/0001_initial.py | c8c3e482f97eec20dc732a9c24929f5b19fa4b40 | [] | no_license | djcors/ultragroup | c31307cc7786838c0f016d06c4799b9c822026ef | 0c05014dd7702a99861b7b341883231b627c04d7 | refs/heads/master | 2023-01-13T23:17:49.642271 | 2019-11-13T03:29:58 | 2019-11-13T03:29:58 | 220,570,849 | 0 | 0 | null | 2023-01-07T11:34:22 | 2019-11-09T00:50:13 | JavaScript | UTF-8 | Python | false | false | 2,876 | py | # Generated by Django 2.2.7 on 2019-11-10 15:54
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema generated by `makemigrations`: AgencyModel (an
    auth.User subclass), HotelModel, and RoomModel.  Auto-generated —
    edit by hand with care."""
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # Agency as a multi-table-inheritance child of auth.User
        # (one-to-one parent link, shares the User manager).
        migrations.CreateModel(
            name='AgencyModel',
            fields=[
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'AgencyModel',
                'verbose_name_plural': 'AgencyModels',
            },
            bases=('auth.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Hotel belongs to one agency; cascade-deleted with it.
        migrations.CreateModel(
            name='HotelModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100, verbose_name='name')),
                ('active', models.BooleanField(default=True)),
                ('code', models.CharField(max_length=30, verbose_name='code')),
                ('category', models.PositiveSmallIntegerField(default=3, verbose_name='category')),
                ('agency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='agency_hoteles', to='backhotel.AgencyModel')),
            ],
            options={
                'verbose_name': 'HotelModel',
                'verbose_name_plural': 'HotelModels',
            },
        ),
        # Room belongs to one hotel; cascade-deleted with it.
        migrations.CreateModel(
            name='RoomModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100, verbose_name='name')),
                ('active', models.BooleanField(default=True)),
                ('code', models.CharField(max_length=30, verbose_name='code')),
                ('room_type', models.CharField(max_length=50, verbose_name='type')),
                ('base_price', models.PositiveIntegerField(verbose_name='price')),
                ('tax', models.PositiveIntegerField(verbose_name='% tax')),
                ('location', models.CharField(max_length=250, verbose_name='location')),
                ('hotel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hotel_rooms', to='backhotel.HotelModel')),
            ],
            options={
                'verbose_name': 'RoomModel',
                'verbose_name_plural': 'RoomModels',
            },
        ),
    ]
| [
"jonathan.cortes@inmersa.co"
] | jonathan.cortes@inmersa.co |
7c6fc5bd03e1385e6cf3dcf99c83d1f366d599fb | ce72b098359697eb28628848ec13f736f29366fa | /10-11_like_number_1.py | 1d4d7fef40c7c08b8efc1dad28a2b51c1358d64c | [] | no_license | TCmatj/learnpython | 1e9ab09deebda025ee4cdd1a465815fcd0594f48 | d92b38a760b29f26efba1f4770ab822aba454931 | refs/heads/master | 2023-01-31T18:19:56.211173 | 2020-12-14T03:06:35 | 2020-12-14T03:06:35 | 298,828,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # TC 2020/10/11/21:39
import json
filename = 'json\\like_number.json'
number = int(input("输入喜欢的数字:"))
with open(filename,'w') as fm:
json.dump(number,fm) | [
"2216685752@qq.com"
] | 2216685752@qq.com |
76b298420595288e18d020473760727475db4570 | 3e6d90546509004d836b4a74df93c9d1515529a0 | /Interpolazione.py | 6e7821a583fda9e05fe5c8f0f525372fc9047150 | [] | no_license | pelagos91/UniBa-Calcolo-Numerico-ICD | 8a05438837a17ffe13ddc684ef187b340ac8f21a | 969c3deeaf184c81df956756b7157b398944a28c | refs/heads/master | 2021-01-10T05:32:48.845826 | 2016-01-26T22:25:41 | 2016-01-26T22:25:41 | 50,463,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,269 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 09:25:54 2015
@author: pelag
"""
import numpy as np
import scipy.linalg as las
import matplotlib.pylab as plt
import scipy.interpolate as interpolate
def lagrange(xnodi,fnodi,x):
    """
    Evaluate the Lagrange interpolating polynomial and the Lebesgue
    function on a set of points.

    f, L = lagrange(xnodi, fnodi, x)

    Input:
        xnodi   1-D array of (distinct) interpolation nodes
        fnodi   function values at the nodes
        x       points where the interpolant is evaluated
    Output:
        f       values of the interpolating polynomial at x
        L       values of the Lebesgue function at x
    """
    n = xnodi.shape[0]
    m = x.shape[0]
    l = np.zeros([n, m])
    for i in range(n):
        # All nodes except xnodi[i].  This replaces the original
        # hand-built integer index matrix (with its np.floor on ints
        # and special-cased first/last rows), which computed the same
        # exclusion far less readably.
        others = np.concatenate((xnodi[:i], xnodi[i + 1:]))
        den = np.prod(xnodi[i] - others)
        for j in range(m):
            l[i, j] = np.prod(x[j] - others) / den
    # Interpolant: sum_i fnodi[i] * l_i(x)
    y = np.sum(np.dot(np.diag(fnodi), l), axis=0)
    # Lebesgue function: sum_i |l_i(x)|
    L = np.sum(abs(l), axis=0)
    return y, L
def potenze(xnodi,fnodi,xx):
    """
    Evaluate in xx the interpolating polynomial expressed in the power
    basis, obtained by solving the Vandermonde linear system.

    f, condA = potenze(xnodi, fnodi, xx)

    Input:
        xnodi   interpolation nodes
        fnodi   function values at the nodes
        xx      evaluation points
    Output:
        f       interpolant values at xx
        condA   infinity-norm condition number of the Vandermonde matrix
    """
    n = xnodi.shape[0]
    # Vandermonde matrix: column j holds xnodi**j.
    A = np.zeros([n, n])
    for col in range(n):
        A[:, col] = xnodi ** col
    coeffs = las.solve(A, fnodi)
    # np.polyval expects the highest-degree coefficient first.
    f = np.polyval(coeffs[::-1], xx)
    condA = np.linalg.cond(A, np.inf)
    return f, condA
def cheby(a,b,n):
    """Return the n+1 Chebyshev nodes mapped onto the interval [a, b]."""
    k = np.arange(0, n + 1)
    theta = (2 * k + 1) * np.pi / (2 * (n + 1))
    return (a + b + (b - a) * np.cos(theta)) / 2
def runge(x):
    """Runge's function 1/(1 + 25 x^2), the classic example of poor
    equispaced interpolation."""
    den = 1 + 25 * x ** 2
    return 1 / den
def plotinterp(ftype,a,b,n,tn,baseType):
    """
    Plot the degree-n interpolating polynomial of a test function on
    [a, b] and return its max error plus a conditioning indicator.

    ftype     0 -> f = cos(x); 1 -> Runge's function g
    tn        0 -> n+1 equispaced nodes; 1 -> n+1 Chebyshev nodes
    baseType  0 -> Lagrange basis (also plots the Lebesgue function
                   in figure 2 and returns its sup-norm);
              1 -> power basis (returns the Vandermonde condition
                   number instead)

    Returns (error, Lc) or (error, condA) depending on baseType.
    """
    if (tn==0):
        xnodi = np.linspace(a,b,n+1)
    else:
        xnodi = cheby(a,b,n)
    # Sample the chosen test function at the nodes and on a fine grid
    # (500 points) used both for plotting and for the error estimate.
    if (ftype==0):
        fname='f=cos(x)'
        f=np.cos
        fnodi = f(xnodi)
        xx = np.linspace(a,b,500)
        ye = f(xx)
    else:
        fname='g=1/(1+25*(x**2))'
        fnodi = gfunction(xnodi)
        xx = np.linspace(a,b,500)
        ye = gfunction(xx)
    if(baseType==0):
        fi, L = lagrange(xnodi,fnodi,xx)
        # Lebesgue constant approximated as the max of L on the grid.
        Lc = las.norm(L, np.inf)
    else:
        fi, condA = potenze(xnodi, fnodi, xx)
    # Max-norm interpolation error on the evaluation grid.
    error = np.max(np.abs(fi-ye))
    if(baseType==0):
        plt.figure(1)
        plt.cla()
        plt.title('Polinomio interpolante per la funzione %s con n= %i'%(fname, n))
        plt.plot(xx,fi,xnodi,fnodi,'o',xx,ye,'--')
        # Figure 2: the Lebesgue function (Lagrange basis only).
        plt.figure(2)
        plt.cla()
        plt.plot(xx,L)
        plt.show()
    else:
        plt.figure(1)
        plt.cla()
        plt.title('Polinomio interpolante per la funzione %s con n= %i'%(fname, n))
        plt.plot(xx,fi,xnodi,fnodi,'o',xx,ye,'--')
        plt.show()
    if(baseType==0):
        return error, Lc
    else:
        return error, condA
def splineFunction(xnodi,fnodi, xx, fType, sType):
    """Plot a spline interpolant of the sampled data and append its
    max error to the module-level ``error`` list.

    sType  0 -> linear spline, 1 -> cubic spline
    fType  0 -> label for f=cos(x), 1 -> label for Runge's g

    NOTE(review): this function relies on module globals — ``error``
    (accumulator list), ``yy`` (exact values on ``xx``), and ``i``
    (used as the figure number, but it is a leftover loop index from
    module scope, so successive calls may draw into the same figure).
    TODO confirm this is intentional.
    """
    if(sType==0):
        s1 = interpolate.interp1d(xnodi, fnodi, 'linear')
        sname='lineare'
    else:
        s1 = interpolate.interp1d(xnodi, fnodi, 'cubic')
        sname='cubica'
    if(fType==0):
        fname='f=cos(x)'
    else:
        fname='g=1/(1+25*(x**2))'
    ys = s1(xx)
    # Max-norm error against the global exact values ``yy``.
    error.append(np.max(np.abs(ys-yy)))
    plt.figure(i)
    plt.cla()
    plt.title('Spline %s per la funzione %s' %(sname,fname))
    plt.plot(xx,ys,xnodi,fnodi,'o',xx,yy,'--')
    plt.show()
def gfunction(x_variable):
    """Runge's function g(x) = 1/(1 + 25 x^2)."""
    denominator = 1 + 25 * (x_variable ** 2)
    return 1 / denominator
print("________________________________")
print("| POTENZE NODI EQUIDISTANTI |")
print("|______________________________|")
#prima funzione per n=4
a=0
b=2
n=4
numeroCondA = np.zeros([4,2])#Vettore in cui avviene lo store dei numeri di condizione della matrice A
numeroCondA[0]= plotinterp(0,a,b,n,0,1)
#prima funzione per n=16
n=16
numeroCondA[1] = plotinterp(0,a,b,n,0,1)
#seconda funzione per n=4
a = -1
b = 1
n=4
numeroCondA[2] = plotinterp(1,a,b,n,0,1)
#seconda funzione per n=16
n=16
numeroCondA[3] = plotinterp(1,a,b,n,0,1)
for i in range(0,4):
print numeroCondA[i]
print("________________________________")
print("| LAGRANGE NODI EQUIDISTANTI |")
print("|______________________________|")
erroreInterpE = np.zeros([4,2])#Vettore in cui avviene lo store degli errori per nodi equidistanti
#prima funzione per n=4
a = 0
b = 2
n=4
erroreInterpE[0] = plotinterp(0,a,b,n,0,0)
#prima funzione per n=16
n=16
erroreInterpE[1] = plotinterp(0,a,b,n,0,0)
#seconda funzione per n=4
a = -1
b = 1
n = 4
erroreInterpE[2] = plotinterp(1,a,b,n,0,0)
#seconda funzione per n=16
n=16
erroreInterpE[3] = plotinterp(1,a,b,n,0,0)
print("________________________________")
print("| POTENZE NODI DI CHEBYCHEV |")
print("|______________________________|")
#prima funzione per n=4
a=0
b=2
n=4
numeroCondAC = np.zeros([4,2])#Vettore in cui avviene lo store dei numeri di condizione della matrice A
numeroCondAC[0] = plotinterp(0,a,b,n,1,1)
#prima funzione per n=16
n=16
numeroCondAC[1] = plotinterp(0,a,b,n,1,1)
#seconda funzione per n=4
a = -1
b = 1
n=4
numeroCondAC[2] = plotinterp(1,a,b,n,1,1)
#seconda funzione per n=16
n=16
numeroCondAC[3] = plotinterp(1,a,b,n,1,1)
for i in range(0,4):
print numeroCondA[i]
print("_________________________________")
print("| LAGRANGE NODI DI CHEBYCHEV |")
print("|_______________________________|")
erroreInterpC = np.zeros([4,2])#Vettore in cui avviene lo store degli errori per nodi di Chebychev
#prima funzione per n=4
a = 0
b = 2
n=4
erroreInterpC[0] = plotinterp(0,a,b,n,1,0)
#prima funzione per n=16
n=16
erroreInterpC[1] = plotinterp(0,a,b,n,1,0)
#seconda funzione per n=4
a = -1
b = 1
n = 4
erroreInterpC[2] = plotinterp(1,a,b,n,1,0)
#seconda funzione per n=16
n=16
erroreInterpC[3] = plotinterp(1,a,b,n,1,0)
# interplazione con le funzioni spline
#funzione f
#4 nodi
f=np.cos
xx = np.linspace(a,b,200)
yy = f(xx)
error=[]
n=4
xnodi = np.linspace(a,b,n+1)
fnodi = f(xnodi)
splineFunction(xnodi, fnodi, xx, 0, 0)
splineFunction(xnodi, fnodi, xx, 0, 1)
#16 nodi
n=16
xnodi = np.linspace(a,b,n+1)
fnodi = f(xnodi)
splineFunction(xnodi, fnodi, xx, 0, 0)
splineFunction(xnodi, fnodi, xx, 0, 1)
#funzione g
#4 nodi
xx = np.linspace(a,b,200)
yy = gfunction(xx)
n=4
xnodi = np.linspace(a,b,n+1)
fnodi = gfunction(xnodi)
splineFunction(xnodi, fnodi, xx, 1, 0)
splineFunction(xnodi, fnodi, xx, 1, 1)
#16 nodi
n=16
xnodi = np.linspace(a,b,n+1)
fnodi = gfunction(xnodi)
splineFunction(xnodi, fnodi, xx, 1, 0)
splineFunction(xnodi, fnodi, xx, 1, 1)
print
print "ERRORE DELLA SPLINE"
for i in range(0,8):
if (i<4):
print 'Funzione f=cos(x)'
else:
print 'Funzione di Runge'
print error[i]
print("_________________________________")
print("| CONFRONTO ERRORI |")
print("| f = cos(x) |")
print("|_______________________________|")
print("ERRORE | NUMERO DI CONDIZIONE VANDERMONDE/COSTANTE DI LEBESGUE")
print("-----------------------------------------------------------------------------")
print("n = 4")
print("-----------------------------------------------------------------------------")
print numeroCondA[0],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[0],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[0],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[0],"Base di LAGRANGE e nodi di CHEBYCHEV"
print("-----------------------------------------------------------------------------")
print("n = 16")
print("-----------------------------------------------------------------------------")
print numeroCondA[1],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[1],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[1],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[1],"Base di LAGRANGE e nodi di CHEBYCHEV"
print("_________________________________")
print("| CONFRONTO ERRORI |")
print("| g=1/(1+25*(x**2)) |")
print("|_______________________________|")
print("ERRORE | NUMERO DI CONDIZIONE VANDERMONDE/COSTANTE DI LEBESGUE")
print("-----------------------------------------------------------------------------")
print("n = 4")
print("-----------------------------------------------------------------------------")
print numeroCondA[2],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[2],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[2],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[2],"Base di LAGRANGE e nodi di CHEBYCHEV"
print("-----------------------------------------------------------------------------")
print("n = 16")
print("-----------------------------------------------------------------------------")
print numeroCondA[3],"Base delle POTENZE e nodi EQUIDISTANTI"
print numeroCondAC[3],"Base delle POTENZE e nodi di CHEBYCHEV"
print erroreInterpE[3],"Base di LAGRANGE e nodi EQUIDISTANTI"
print erroreInterpC[3],"Base di LAGRANGE e nodi di CHEBYCHEV"
| [
"agopel@gmail.com"
] | agopel@gmail.com |
7da317e87cb08431320105068322690d71269402 | a1092fecf5057e45f1df4e738a14be210dadbc83 | /gen.py | 3d26eb5062cedb3108e425576485a5c6bc7d741c | [] | no_license | robert-giaquinto/baum-welch | ba45b3c80e839ae7fd5b8b5a00ee07dd9228b61a | b57fb2bd64ed3fdfed1552a6ea5afd9c7c120cfc | refs/heads/master | 2021-01-15T09:09:29.267399 | 2014-05-31T21:17:42 | 2014-05-31T21:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | import random
import numpy as np
N_SEQ = 10
START = 0
BEFORE = 1
AFTER = 2
END = 3
def gen_seq():
    """Generate one symbol sequence from the two-phase emission model.

    The walk emits 'S', then 'N'/'L'/'R' symbols while in the BEFORE
    state (L more likely than R), flips to AFTER with probability
    1/5000 per step (where the L/R odds are mirrored), and closes with
    'E'.  Note: an iteration that transitions BEFORE -> AFTER emits one
    symbol from each phase, matching the original control flow.
    """
    out = []
    state = START
    while state != END:
        if state == START:
            state = BEFORE
            out.append('S')
        if state == BEFORE:
            hit_n, hit_l, hit_r = np.random.multinomial(1, [0.96, 0.036, 0.004])
            if hit_n:
                out.append('N')
            elif hit_l:
                out.append('L')
            else:
                out.append('R')
            state += np.random.binomial(1, 1/5000.)
        if state == AFTER:
            hit_n, hit_l, hit_r = np.random.multinomial(1, [0.96, 0.004, 0.036])
            if hit_n:
                out.append('N')
            elif hit_l:
                out.append('L')
            else:
                out.append('R')
            state += np.random.binomial(1, 1/5000.)
    out.append('E')
    return out
if __name__ == '__main__':
    # NOTE(review): this seeds the stdlib `random` module, but gen_seq
    # draws from `np.random`, which is left unseeded — runs are not
    # reproducible.  Confirm whether np.random.seed(42) was intended.
    random.seed(42)
    for i in xrange(N_SEQ):
        seq = gen_seq()
        print ''.join(seq)
| [
"piotrek.kaleta@gmail.com"
] | piotrek.kaleta@gmail.com |
6ee7e72ba92ecde352fbe7130382ee1d2873e524 | d5f080543d3004f560c1ae636900080f1c7e8b31 | /configs/D2Det/D2Det_detection_r101_fpn_2x.py | 4e184d8220699043f302581714e52140c0c3b0ba | [
"MIT"
] | permissive | Randl/D2Det | dc7bd395b8c538e96f390d7ce5c396f87ee89bd8 | 5e35b218d9de824e73e0a49953af25a0c6984e74 | refs/heads/master | 2022-09-25T13:52:21.141590 | 2020-06-11T09:08:47 | 2020-06-11T09:08:47 | 271,498,684 | 0 | 0 | MIT | 2020-06-11T08:56:15 | 2020-06-11T08:56:15 | null | UTF-8 | Python | false | false | 5,685 | py | # model settings
model = dict(
type='D2Det',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='DeformRoIPoolingPack',
out_size=7,
sample_per_part=1,
out_channels=256,
no_trans=False,
group_size=1,
trans_std=0.1),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
with_reg=False,
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0)),
reg_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
D2Det_head=dict(
type='D2DetHead',
num_convs=8,
in_channels=256,
norm_cfg=dict(type='GN', num_groups=36),
MASK_ON=False))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_radius=1,
pos_weight=-1,
max_num_grid=192,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.03, nms=dict(type='nms', iou_thr=0.5), max_per_img=125))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 80,
step=[20, 23])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/D2Det_detection_r101_fpn_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"connor@tju.edu.cn"
] | connor@tju.edu.cn |
821821dc8c26febfe8eae588743128fda1bc6ed5 | cbdf42eaf2f3ee908999fb1317d3fe90fa255f42 | /kusinwolf/truth_tables.py | 78e29f83189b337acc29c22cdc795974950e4d69 | [] | no_license | tdyhacker/pythonexploration | 47c4e8a96d541428d4cf1d6c558a29e48f5fc22c | 4b360ef2b61fb565c7e2f82ff31e848776cd649a | refs/heads/master | 2021-01-10T02:51:34.245002 | 2010-11-28T02:31:36 | 2010-11-28T02:31:36 | 47,951,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py |
def printTable(truth_table):
    """Pretty-print the truth table: one column per name listed in
    truth_table['order'], using Python 2 trailing-comma prints so each
    row stays on a single line."""
    # Header row with the column names.
    for key in truth_table['order']:
        print key, "\t",
    print ""
    # Separator line, one dash group per column.
    for item in truth_table['order']:
        print "-------",
    print ""
    # One printed row per truth assignment (row count taken from the
    # first column's length).
    for row in range(len(truth_table[truth_table['order'][0]])):
        for col in range(len(truth_table['order'])):
            print truth_table[truth_table['order'][col]][row], "\t",
        print ""
def buildTruths(truth_table):
    """Populate truth_table with one True/False column per variable in
    truth_table['order'], plus a negated column '~<name>' for each.

    Columns follow the standard truth-table layout: the first variable
    alternates in the longest runs, the last variable flips every row.
    """
    num_vars = len(truth_table['order'])
    total_rows = pow(2, num_vars)
    for position, name in enumerate(truth_table['order']):
        # Length of each run of identical values for this column.
        run_length = pow(2, num_vars - (position + 1))
        column = []
        negated = []
        current = True
        while len(column) < total_rows:
            for _ in range(run_length):
                column.append(current)
                negated.append(not current)
            current = not current
        truth_table[name] = column
        truth_table["~%s" % name] = negated
def evaluatePart(left, word, right):
    """Evaluate one binary logical connective.

    Supported connectives: 'V' (or), '^' (and), '->' (implies),
    '<->' (iff).  Returns None for an unknown connective.
    """
    if word == "V":
        # Fixed: 'V' is disjunction (the module's own key says
        # "V = or"), but the original returned `left and right`.
        return left or right
    elif word == "^":
        # Fixed: '^' is conjunction; the original returned `left or right`.
        return left and right
    elif word == "->":
        # Material implication: p -> q  ==  (not p) or q
        return not left or right
    elif word == "<->":
        # Biconditional: true exactly when both sides agree.
        return not (left ^ right)
def evaluateEquation(truth_table):
    """Evaluate truth_table['equation'] row by row and store the result
    column under the equation string itself.

    The equation must be three space-separated tokens:
    '<column> <connective> <column>' (e.g. "~P -> Q"); both column
    names must already exist in the table (buildTruths provides the
    negated '~x' columns).
    """
    equation = truth_table['equation']
    tokens = equation.split(" ")
    row_count = pow(2, len(truth_table['order']))
    results = []
    for row in range(row_count):
        results.append(
            evaluatePart(truth_table[tokens[0]][row], tokens[1],
                         truth_table[tokens[2]][row]))
    truth_table[equation] = results
# Demo: build a three-variable table, evaluate "~P -> Q", and print it.
truth_table = {}
truth_table['order'] = ['P', 'Q', 'W',]
truth_table['equation'] = "~P -> Q"
# V = or
# ^ = and
buildTruths(truth_table)
evaluateEquation(truth_table)
truth_table['order'].append(truth_table['equation']) # Cheat for printing out :P
printTable(truth_table)
| [
"kusinwolf@c446d5ca-c62c-11dd-92e3-e3085bfe42ca"
] | kusinwolf@c446d5ca-c62c-11dd-92e3-e3085bfe42ca |
20c0f10f618f37ebff15f67061d06d10a35ab6ef | 59dd90d261756b0de462b693d596f5f06f71270b | /samples/openapi3/client/petstore/python-experimental/petstore_api/api/fake_api_endpoints/group_parameters.py | bab146a1f469249eb19f39009467267a361a1734 | [
"Apache-2.0"
] | permissive | wsalembi/openapi-generator | cf76c5241e28956fc44a073d17a1ee14fd9aef85 | 035736f5c413bbdc8e70f840cc2e8ff32da9a5a8 | refs/heads/master | 2023-03-17T02:22:04.106748 | 2022-02-16T07:33:23 | 2022-02-16T07:33:23 | 227,096,278 | 0 | 0 | Apache-2.0 | 2023-03-13T17:02:23 | 2019-12-10T10:56:56 | Java | UTF-8 | Python | false | false | 6,531 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
# query params
RequiredStringGroupSchema = IntSchema
RequiredInt64GroupSchema = Int64Schema
StringGroupSchema = IntSchema
Int64GroupSchema = Int64Schema
RequestRequiredQueryParams = typing.TypedDict(
'RequestRequiredQueryParams',
{
'required_string_group': RequiredStringGroupSchema,
'required_int64_group': RequiredInt64GroupSchema,
}
)
RequestOptionalQueryParams = typing.TypedDict(
'RequestOptionalQueryParams',
{
'string_group': StringGroupSchema,
'int64_group': Int64GroupSchema,
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_required_string_group = api_client.QueryParameter(
name="required_string_group",
style=api_client.ParameterStyle.FORM,
schema=RequiredStringGroupSchema,
required=True,
explode=True,
)
request_query_required_int64_group = api_client.QueryParameter(
name="required_int64_group",
style=api_client.ParameterStyle.FORM,
schema=RequiredInt64GroupSchema,
required=True,
explode=True,
)
request_query_string_group = api_client.QueryParameter(
name="string_group",
style=api_client.ParameterStyle.FORM,
schema=StringGroupSchema,
explode=True,
)
request_query_int64_group = api_client.QueryParameter(
name="int64_group",
style=api_client.ParameterStyle.FORM,
schema=Int64GroupSchema,
explode=True,
)
# header params
RequiredBooleanGroupSchema = BoolSchema
BooleanGroupSchema = BoolSchema
RequestRequiredHeaderParams = typing.TypedDict(
'RequestRequiredHeaderParams',
{
'required_boolean_group': RequiredBooleanGroupSchema,
}
)
RequestOptionalHeaderParams = typing.TypedDict(
'RequestOptionalHeaderParams',
{
'boolean_group': BooleanGroupSchema,
},
total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
pass
request_header_required_boolean_group = api_client.HeaderParameter(
name="required_boolean_group",
style=api_client.ParameterStyle.SIMPLE,
schema=RequiredBooleanGroupSchema,
required=True,
)
request_header_boolean_group = api_client.HeaderParameter(
name="boolean_group",
style=api_client.ParameterStyle.SIMPLE,
schema=BooleanGroupSchema,
)
_path = '/fake'
_method = 'DELETE'
_auth = [
'bearer_test',
]
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: Unset = unset
headers: Unset = unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
)
_status_code_to_response = {
'400': _response_for_400,
}
class GroupParameters(api_client.Api):
def group_parameters(
self: api_client.Api,
query_params: RequestQueryParams = frozendict(),
header_params: RequestHeaderParams = frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
api_client.ApiResponseWithoutDeserialization
]:
"""
Fake endpoint to test group parameters (optional)
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs(RequestQueryParams, query_params)
self._verify_typed_dict_inputs(RequestHeaderParams, header_params)
_query_params = []
for parameter in (
request_query_required_string_group,
request_query_required_int64_group,
request_query_string_group,
request_query_int64_group,
):
parameter_data = query_params.get(parameter.name, unset)
if parameter_data is unset:
continue
serialized_data = parameter.serialize(parameter_data)
_query_params.extend(serialized_data)
_headers = HTTPHeaderDict()
for parameter in (
request_header_required_boolean_group,
request_header_boolean_group,
):
parameter_data = header_params.get(parameter.name, unset)
if parameter_data is unset:
continue
serialized_data = parameter.serialize(parameter_data)
_headers.extend(serialized_data)
# TODO add cookie handling
response = self.api_client.call_api(
resource_path=_path,
method=_method,
query_params=tuple(_query_params),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
| [
"noreply@github.com"
] | wsalembi.noreply@github.com |
1d5db696b220dc961fb989dfd34319751e4378f2 | 66ff35c1389e468e809c4080262f79a91373ce29 | /RotorS_ws/build/rotors_control/catkin_generated/pkg.installspace.context.pc.py | 43cfa00f3c30211991b0271b5a9ef0c38c9db3c3 | [] | no_license | TJHDL/Tilted-Hexarotor-Omnicopter | e544a82b505ecd148f8bde10fced42605397bec0 | f8f47b4d101b0c2665b4c78180908eeac6758627 | refs/heads/main | 2023-05-01T14:13:53.670321 | 2021-05-10T12:06:46 | 2021-05-10T12:06:46 | 366,026,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;mav_msgs;nav_msgs;roscpp;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llee_position_controller;-lroll_pitch_yawrate_thrust_controller".split(';') if "-llee_position_controller;-lroll_pitch_yawrate_thrust_controller" != "" else []
PROJECT_NAME = "rotors_control"
PROJECT_SPACE_DIR = "/home/hdl/GraduateDesign/Catkin_workspace_assemble/RotorS_ws/install"
PROJECT_VERSION = "2.2.3"
| [
"hdltjdx@163.com"
] | hdltjdx@163.com |
79397e0d2322942df03ee78140a794d44521cfca | 2b01258d3a1dca79d548f490731b0474cab5202f | /app/model/file.py | adb9d84802b812ecb99f5c5f112817f1a79f7abd | [
"MIT"
] | permissive | icrdr/1-mu-server | 9714b60ea285c843dca6f0cda74566c70449fe76 | 8212b79a9949e334c185b99b1be41add5e577937 | refs/heads/master | 2022-11-23T08:11:44.258413 | 2020-10-16T14:03:36 | 2020-10-16T14:03:36 | 194,759,358 | 0 | 0 | MIT | 2022-11-22T04:07:08 | 2019-07-02T00:18:35 | Python | UTF-8 | Python | false | false | 4,551 | py | from .. import db, app
from datetime import datetime
import os
import shortuuid
from psd_tools import PSDImage
from PIL import Image
from .post import Tag
from ..utility import word2List
# Association table backing the many-to-many File <-> Tag relationship
# (see File.tags below).
FILE_TAG = db.Table(
    'file_tags',
    db.Column('tag_id', db.Integer,
              db.ForeignKey('tags.id')),
    db.Column('file_id', db.Integer,
              db.ForeignKey('files.id')),
)
class File(db.Model):
    """Uploaded file record: disk location, metadata, previews, tags."""
    __tablename__ = 'files'
    id = db.Column(db.Integer, primary_key=True)
    # one-many: File.uploader-User.files
    uploader_user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    author = db.Column(db.String(64))
    # Original base name (extension stripped) and lowercase extension.
    name = db.Column(db.String(64))
    format = db.Column(db.String(16))
    # Path relative to UPLOAD_FOLDER, e.g. 'YYYY/MM/DD/<uuid>.<ext>'.
    url = db.Column(db.String(512), unique=True)
    # Source URL if the file was fetched rather than uploaded — TODO confirm.
    from_url = db.Column(db.String(512))
    upload_date = db.Column(db.DateTime, default=datetime.utcnow)
    # one-many: Preview.file-File.previews
    previews = db.relationship(
        'Preview', backref=db.backref('file', lazy=True))
    description = db.Column(db.String(512))
    public = db.Column(db.Boolean, nullable=False, default=False)
    # Many-to-many via the FILE_TAG association table above.
    tags = db.relationship(
        'Tag', secondary=FILE_TAG,
        lazy='subquery', backref=db.backref('files', lazy=True))
    @staticmethod
    def create_file(uploader_id, file, description, tags, public):
        """Persist an uploaded file: save it under a dated directory
        with a random name, create the File row (plus Tag rows for any
        new tags), and — for common image formats — generate JPEG
        thumbnails with matching Preview rows.  Returns the File.

        NOTE(review): the extension is taken verbatim from the client
        filename (secure_filename is commented out) — confirm upstream
        validation exists.
        """
        # filename = utils.secure_filename(file.filename)
        # 'format' shadows the builtin; kept for column-name symmetry.
        format = file.filename.split(".")[-1].lower()
        rawname = file.filename[:-len(format)-1]
        # Files are sharded into YYYY/MM/DD directories by upload date.
        date = datetime.utcnow().strftime("%Y%m%d")
        year = date[:4]
        month = date[4:6]
        day = date[6:8]
        # Random short-UUID name avoids collisions and hides originals.
        random_name = str(shortuuid.uuid())
        filename = random_name +'.'+ format
        path = os.path.join(app.config['UPLOAD_FOLDER'], year, month, day)
        if not os.path.exists(path):
            os.makedirs(path)
        # Disk write happens before the DB commit; a later DB failure
        # would orphan the file on disk — TODO confirm acceptable.
        file.save(os.path.join(path, filename))
        # url is stored relative to UPLOAD_FOLDER with forward slashes.
        new_file = File(
            uploader_user_id = uploader_id,
            name = rawname,
            format = format,
            url = str(os.path.join(year, month, day , filename)).replace('\\', '/')
        )
        if description:
            new_file.description = description
        if public:
            new_file.public = True
        if tags:
            # Each raw tag string may expand to several tag words.
            all_tag_list = []
            for tag in tags:
                tag_list = word2List(tag)
                all_tag_list += tag_list
            for tag in all_tag_list:
                # Reuse an existing Tag row or create a new one.
                _tag = Tag.query.filter_by(name=tag).first()
                if not _tag:
                    _tag = Tag(name=tag)
                    db.session.add(_tag)
                new_file.tags.append(_tag)
        db.session.add(new_file)
        db.session.commit()
        if format in ['png','jpg','psd','jpeg','gif','bmp','tga','tiff','tif']:
            # Best-effort thumbnail generation: any failure is printed
            # and swallowed so the upload itself still succeeds.
            # NOTE(review): the PIL image is never closed explicitly.
            try:
                im_path = os.path.join(path, filename)
                if format == 'psd':
                    # PSD needs compositing before it can be resized.
                    psd = PSDImage.open(im_path)
                    im = psd.compose()
                else:
                    im = Image.open(im_path)
                im = im.convert('RGB')
                # Successive in-place thumbnails, largest size first
                # (assumes THUMBNAIL_SIZE is sorted descending — TODO
                # confirm).
                for size in app.config['THUMBNAIL_SIZE']:
                    im.thumbnail((size, size))
                    im.save(os.path.join(path, random_name) + "_%s.jpg"%str(size), "JPEG")
                    new_preview = Preview(
                        bind_file_id = new_file.id,
                        url = str(os.path.join(year, month, day , random_name+"_%s.jpg"%str(size))).replace('\\', '/'),
                        size = size
                    )
                    db.session.add(new_preview)
                    db.session.commit()
            except Exception as e:
                print(e)
        return new_file
@staticmethod
def clear_missing_file():
files_list = File.query.all()
for file in files_list:
if not os.path.exists(os.path.join(app.config['UPLOAD_FOLDER'], file.url)):
for preview in file.previews:
db.session.delete(preview)
db.session.delete(file)
db.session.commit()
def __repr__(self):
return '<File %r>' % self.name
class Preview(db.Model):
    """A generated thumbnail (one per configured size) belonging to a File."""
    __tablename__ = 'previews'
    id = db.Column(db.Integer, primary_key=True)
    # one-many: Preview.file-File.previews
    bind_file_id = db.Column(db.Integer, db.ForeignKey('files.id'))
    url = db.Column(db.String(512), unique=True)
    size = db.Column(db.Integer)
    def __repr__(self):
        # Bug fix: Preview defines no `nickname` attribute, so repr always
        # raised AttributeError; identify previews by their unique url.
        return '<Preview %r>' % self.url
"icrdr2010@gmail.com"
] | icrdr2010@gmail.com |
40da92312bb0d21b9e98e3332e5523a47d977ab2 | b36a2ca0e71bd272afd4c831e09b498cedfec889 | /0x07-python-test_driven_development/5-text_indentation.py | 0053243d7e0d860174c7314f26c14ad56febdc8c | [] | no_license | dondropo/holbertonschool-higher_level_programming | 3a132c1dd8d31b5e9f170a6fab83aa1550449965 | 6d0bf581830c4ecfb17d926fdc89b357a5664574 | refs/heads/master | 2023-03-02T02:20:09.866147 | 2021-01-31T22:37:01 | 2021-01-31T22:37:01 | 259,390,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | #!/usr/bin/python3
def text_indentation(text):
    """Print *text* with two newlines after each '.', '?' and ':'.

    Spaces immediately following one of those delimiters are dropped, so
    every new segment starts flush at the beginning of its line.

    Fixes in this revision: the duplicated type check is gone, the check
    uses isinstance instead of `type(...) is str`, the fragile
    `text[idx + 1] is " "` identity comparison is removed, and a trailing
    delimiter no longer raises IndexError.

    Args:
        text (str): the text to print.

    Raises:
        TypeError: if text is not a string.
    """
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    pieces = []
    skip_spaces = False
    for ch in text:
        if skip_spaces and ch == ' ':
            continue
        skip_spaces = False
        pieces.append(ch)
        if ch in '.?:':
            pieces.append('\n\n')
            skip_spaces = True
    print(''.join(pieces), end='')
| [
"alejandroruscamoreno@gmail.com"
] | alejandroruscamoreno@gmail.com |
627dc9d2396b751179bf4503d940b93c9c792dcf | b4ea78b8b33e2dee808290e8f87038108b12cf7b | /Python-learning/画图/others/test6.py | eec99faddab877ca9a2c0f07386452d0d66a70e3 | [] | no_license | liang2713020/Learning | d275ddfb8032d49f42143dc71bfd52fdeacb8932 | fbfdc12ce2877af4be020082885519334523c8ab | refs/heads/master | 2021-01-22T19:55:06.788211 | 2015-07-26T13:54:19 | 2015-07-26T13:54:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py |
# Plot sine and cosine over one period with pylab's MATLAB-style API.
# NOTE(review): wildcard `from pylab import *` pollutes the namespace;
# the explicit `matplotlib.pyplot as plt` interface is preferred today.
from pylab import *
figure(figsize=(8,5), dpi=80)
subplot(111)
X = np.linspace(-np.pi, np.pi, 256,endpoint=True)
C,S = np.cos(X), np.sin(X)
plot(X, C, color="blue", linewidth=2.5, linestyle="-")
plot(X, S, color="red", linewidth=2.5, linestyle="-")
# Fix the axes limits and pick evenly spaced ticks on both axes.
xlim(-4.0,4.0)
xticks(np.linspace(-4,4,9,endpoint=True))
ylim(-1.0,1.0)
yticks(np.linspace(-1,1,5,endpoint=True))
#savefig("../figures/exercice_3.png",dpi=72)
show()
| [
"568191222@qq.com"
] | 568191222@qq.com |
0e0b558e0962614dfcb87a6d486c3d9fdd1a129a | 7327dda3e2c72026bfe0de5185645fb24d0e3fe0 | /week2/iterative-sorting.py | b18c3fbefd43c624e00aa455a3487c7eacb86247 | [] | no_license | CarnunMP/CS-morning-challenges | 782b1774344361c69929ab1f0006f99ea7fe5abc | b1bb02d4130d3a4e0f6aa6cd28673f92982ea054 | refs/heads/master | 2021-01-03T06:19:33.004744 | 2020-02-25T21:55:07 | 2020-02-25T21:55:07 | 239,958,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,949 | py | ### Objective challenge:
### 1. Try writing a Python function to perform a linear search on a set of data.
### 2. Try writing a Python function to perform a binary search on a set of data.
### 3. Can you rewrite the above function so that it uses recursion?
# Sorted fixture (0..19) shared by the search demos below.
test_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
# 1)
def linear_search(arr, target):
    """Scan `arr` front to back for `target`.

    Returns {'index': position or None, 'steps': probes performed},
    where `steps` counts one probe per element examined (1-based).
    """
    step_count = 1
    position = 0
    while position < len(arr):
        if arr[position] == target:
            return {'index': position, 'steps': step_count}
        step_count += 1
        position += 1
    return {'index': None, 'steps': step_count}
print(linear_search(test_data, 10)) # Expect: {'index': 10, 'steps': 11}
# 2)
def binary_search(arr, target):
    """Binary search on the sorted list `arr`.

    Returns {'index': position or None, 'steps': iterations performed}.

    Bug fix: the original looped `while left_index != right_index`, which
    exits without examining the final candidate once the bounds converge —
    the last element of the array, and any one-element array, were
    reported as missing.  Looping while the window is non-empty
    (`left <= right`) and shrinking with `middle - 1` / `middle + 1`
    fixes that and also handles an empty list.
    """
    left_index = 0
    right_index = len(arr) - 1
    steps = 1
    while left_index <= right_index:
        middle_index = left_index + (right_index - left_index) // 2
        if arr[middle_index] == target:
            return {
                'index': middle_index,
                'steps': steps
            }
        if arr[middle_index] > target:
            right_index = middle_index - 1
        else:
            left_index = middle_index + 1
        steps += 1
    return {
        'index': None,
        'steps': steps
    }
print(binary_search(test_data, 10))
# 3)
def recursive_binary_search(arr, target, left_index_offset=0, steps=1):
    """Recursive binary search returning the absolute index of `target`.

    `left_index_offset` tracks how far this slice sits from the start of
    the original list; `steps` counts recursion depth (1-based).

    Fix: the original wrapped the whole body in a bare `except:` to catch
    the IndexError raised on an empty slice — which also silently
    swallowed every other error.  An explicit empty-list base case keeps
    the same results without hiding real failures.
    """
    if not arr:
        return {'index': None, 'steps': steps}
    middle_index = (len(arr) - 1) // 2
    if arr[middle_index] == target:
        return {'index': left_index_offset + middle_index, 'steps': steps}
    if arr[middle_index] > target:
        return recursive_binary_search(arr[:middle_index], target, left_index_offset, steps + 1)
    return recursive_binary_search(arr[middle_index + 1:], target, left_index_offset + middle_index + 1, steps + 1)
print(recursive_binary_search(test_data, 10))
print(recursive_binary_search(test_data, 20))
### Objective challenge:
### 1. What will the array [25, 67, 4, 33, 19, 40] look like after each pass of the Selection Sort algorithm?
### 2. What will the same array look like after each pass of the Insertion Sort algorithm?
# 1) 0th: [25, 67, 4, 33, 19, 40]
# 1st: [4, 67, 25, 33, 19, 40]
# 2nd: [4, 19, 25, 33, 67, 40]
# 3rd: [4, 19, 25, 33, 67, 40]
# 4th: [4, 19, 25, 33, 67, 40]
# 5th: [4, 19, 25, 33, 40, 67]
# 2) 0th: [25, 67, 4, 33, 19, 40]
# 1st: [25, 67, 4, 33, 19, 40]
# 2nd: [4, 25, 67, 33, 19, 40]
# 3rd: [4, 25, 33, 67, 19, 40]
# 4th: [4, 19, 25, 33, 67, 40]
# 5th: [4, 19, 25, 33, 40, 67]
| [
"carnun@hotmail.co.uk"
] | carnun@hotmail.co.uk |
3925710c3420811e6ea8744fbbc871f548568fad | fc8a86e30c286910eef086821397b34093954fed | /day04/note/json1.py | e4125259f10447fc494d84f76907d062ad6d5450 | [
"Apache-2.0"
] | permissive | wjianwei126/learnpython | 0e1a0504b70852eb3d9c53d8827981ce5d609446 | 797ec7afc94e6c467d7e1f037219b3aaf1327cf0 | refs/heads/master | 2020-04-09T04:18:32.059840 | 2015-01-22T12:42:00 | 2015-01-22T12:42:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #!/usr/bin/evn python
#coding:utf-8
import json
# Round-trip demo: serialize a dict to a JSON string, then parse it back.
# Fix: the bare Python-2 `print x` statements are a SyntaxError under
# Python 3; the single-argument parenthesized form below prints the same
# output on both interpreter lines.
a = {'k1':'v1','k2':'v2'}
a_json = json.dumps(a)
print(a_json)
print(type(a_json))
a_new = json.loads(a_json)
print(a_new)
print type(a_new) | [
"congmmy@gmail.com"
] | congmmy@gmail.com |
9663b289594d9946c2f865789b20f2e9add71bac | 9b52be243a9fc05f18fb66044e2d23a8c83f3a0d | /Session 1/intro.py | 9457a97a764f74a1394978dd81c48aee0d3b1383 | [] | no_license | trangnguyen21/nguyenvumytrang-fundamentals-c4ejs01 | 5663a4fd064a8fa05bf3ca730fa08e21753fe982 | 03731c706e498cc72e76ba2423d1c8a186efdff2 | refs/heads/master | 2022-12-09T22:12:06.708492 | 2019-12-29T02:45:32 | 2019-12-29T02:45:32 | 218,953,505 | 0 | 1 | null | 2022-12-05T02:22:04 | 2019-11-01T09:18:56 | CSS | UTF-8 | Python | false | false | 61 | py | a = float(input('height'))
b = float(input('hypotense'))
| [
"admin@CSGLANBK-281.sg.cmc.com.vn"
] | admin@CSGLANBK-281.sg.cmc.com.vn |
da12b13c74af1380f00c4a72cbbbc0e05debc10d | 0d68ecb5f8ad4577163550ffd48737ab1c677b38 | /src/blockit/utils/io.py | 5b7c9a0f6d90a5f199e92da246bd9384d94e578f | [
"MIT"
] | permissive | jgarte/blockit | 8372c35ea9d6ed14ab67b48de753e7dfc02cfc84 | e0311444701ac1a1d0fbec623f6ebc72f1b37e6b | refs/heads/main | 2023-05-31T04:59:43.541995 | 2021-06-21T14:48:30 | 2021-06-21T14:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | """File I/O util functions."""
from pathlib import Path
from blockit.txn.txn_block import TransactionBlock
def get_project_root_path() -> Path:
"""Get project root path.
Returns:
Path: Absolute path of the project root
"""
return Path(__file__).parents[3].absolute()
def write_block(txn_block: TransactionBlock, path: Path = None) -> None:
"""Save transaction block.
Args:
txn_block (TransactionBlock): Transaction block to save
path (Path): Path to save file
"""
txn_ids = []
for txn in txn_block.transactions:
txn_ids.append(txn.txid)
if path is None:
save_path = get_project_root_path() / "block.txt"
else:
save_path = path
with open(save_path, "w") as f:
for txn_id in txn_ids:
f.write(f"{txn_id}\n")
| [
"ank@leoank.me"
] | ank@leoank.me |
5e3d8619d84b4b1e96647c74f4f542131e866de3 | 0dfa9dc572ea50ae81de8052f4d2ac79bb31a243 | /test_incorrect_ip_address_managers.py | 6feda65aa49efdb3e75d6789fac86e0fa77e7db8 | [] | no_license | dwjhaines/selenium | 6b52c9c1d02d6088695c49fc8a4dfc2d3323206f | 44e0e1285be92013e6d8d2cece7419431ac3f4e3 | refs/heads/master | 2020-04-17T19:45:58.070917 | 2016-09-14T14:53:40 | 2016-09-14T14:53:40 | 66,008,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | ###############################################################################################
# #
# test_incorrect_ip_address_managers.py #
# #
# Tests that up to five managers can log in when the only license has an incorrect IP #
# address. #
# #
###############################################################################################
import time
import um_utils
import db_utils
from selenium import webdriver
import pyodbc
# End-to-end check: with a license whose IP address is wrong, at most five
# managers may still log in.  Requires a live database and browsers via
# um_utils/db_utils; Python-2 print statements throughout.
if __name__ == "__main__":
    # List of managers i.e. users with manager rights
    managers = ['maria.a', 'maria.b', 'maria.c', 'maria.d', 'maria.e', 'maria.f', 'maria.g']
    # Empty list to be filled with user objects
    users = []
    testFailed = 0
    # Set up connection to database
    connection = db_utils.connectToDb()
    cur = connection.cursor()
    # Delete all existing licenses
    db_utils.deleteAllLicenses(connection, cur)
    maxUsers = 0
    # NOTE(review): maxManagers is fixed at 5 here and does not track the
    # licensed maxUsers value read below — confirm this is intentional.
    maxManagers = maxUsers + 5
    # Install license with and incorrect IP address
    maxUsers = db_utils.addUserLicenseIncorrectIPAddress (connection, cur)
    print 'License installed with invalid IP address'
    # Get the number of users already logged in
    count = db_utils.getNumberOfActiveUsers(connection, cur)
    print 'Max users allowed: %d' % maxUsers
    print 'Max managers allowed: %d' % maxManagers
    print 'Number of users already logged in: %d' % count
    print 'Opening browsers........'
    for manager in managers:
        # For each manager, create a user object and add object to users list
        users.append(um_utils.user(manager, 'quantel@'))
    # Keep trying to log in each of the editors. Once the max number of users have been logged in, no further logins should be allowed.
    for user in users:
        result = um_utils.login(user)
        if (result == 0 or result == 1):
            user.loggedin = True
        count = db_utils.getNumberOfActiveUsers(connection, cur)
        print '\tNumber of active users (max: %d): %d' % (maxManagers, count)
        if (count > maxManagers):
            testFailed = 1
            print 'Test Failed: Max number of users exceded.'
    print 'Sleeping for 10 secs.................'
    time.sleep( 10 )
    # Log out any users that were logged in and close all the browsers
    for user in users:
        if (user.loggedin == True):
            um_utils.logout(user)
            user.loggedin = False
            time.sleep( 1 )
        um_utils.closeBrowser(user)
    # Delete incorrect license and reinstall license for five users
    db_utils.deleteAllLicenses(connection, cur)
    maxUsers = db_utils.addFiveUserLicense(connection, cur)
    print 'License installed for %d users' % maxUsers
    # Close connection to database
    db_utils.closeConnection(connection, cur)
    # Print test result
    if (testFailed == 1):
        print '************ Test Failed ************'
    else:
        print '************ Test Passed ************'
"David.Haines@s-a-m.com"
] | David.Haines@s-a-m.com |
7d812592e10d2a0d003e3156aef68f26c0796648 | 601adbb343313e7cce71b9b8d06620f541f349e5 | /tests/test_ci/test_runners/test_BaseRunner.py | 4545078bf38683e3c939099329a8ad2f0d27d15f | [] | no_license | jgsogo/conan-sword-and-sorcery | f3ff2c9b739410a7fb6eb97c49470d585fd1ab4c | 143f05d8b469a3afc9c807ec87fbe2dcbe63dab3 | refs/heads/master | 2021-04-06T06:23:40.584031 | 2018-08-15T16:50:43 | 2018-08-15T16:50:43 | 124,441,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,120 | py | # -*- coding: utf-8 -*-
import os
import unittest
try:
from unittest import mock
except ImportError:
import mock
from conan_sword_and_sorcery.ci.runners import AppveyorRunner
from conan_sword_and_sorcery.ci.runners.base_runner import SUCCESS, FAIL, DRY_RUN, BaseRunner
from conan_sword_and_sorcery.parsers.settings import get_settings
from conan_sword_and_sorcery.utils.environ import context_env
from conan_sword_and_sorcery.parsers.profile import profile_for
from tests.utils import TestCaseEnvClean
class JobGeneratorClass4Testing:
    """Stub job generator: accepts any constructor arguments, does nothing."""
    def __init__(self, *args, **kwargs):
        pass
class BaseRunner4Testing(BaseRunner):
    """Concrete BaseRunner for tests, wired to the stub job generator above."""
    job_generator_class = JobGeneratorClass4Testing
class TestBaseRunnerStableBranch(TestCaseEnvClean):
    """Exercises runner behavior: job enumeration, abstract-method guards,
    dry-run/success/failure paths of run(), and upload gating on
    CONAN_UPLOAD_ONLY_WHEN_STABLE vs the current branch name."""
    def setUp(self):
        self.settings = get_settings()
        # Dummy (but valid) conanfile
        me = os.path.dirname(__file__)
        self.conanfile = os.path.join(me, '..', '..', 'files', 'single', 'conanfile01.py')
    def test_enumerate_jobs(self):
        runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with context_env(CONAN_VISUAL_VERSIONS="12", CONAN_VISUAL_RUNTIMES="MT"):
            self.assertTrue(len(list(runner.enumerate_jobs())) != 0)
    def test_is_pull_request(self):
        # BaseRunner leaves CI-specific queries abstract.
        runner = BaseRunner4Testing(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with self.assertRaises(NotImplementedError):
            runner.is_pull_request()
    def test_get_branch_name(self):
        runner = BaseRunner4Testing(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with self.assertRaises(NotImplementedError):
            runner.get_branch_name()
    def test_dry_run(self):
        runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows", dry_run=True)
        with context_env(CONAN_GCC_VERSIONS="6", CONAN_ARCHS='x86', CONAN_BUILD_PACKAGES='pckg1'):
            compiler, options = list(runner.enumerate_jobs())[0]
            with profile_for(compiler=compiler) as profile_file:
                runner.set_compiler(compiler)
                runner.set_profile(profile_file)
                r = runner.run(options={'shared': True}, username='test', channel='testing')
                self.assertEqual(r, DRY_RUN)
    def test_run_fail(self):
        # cmd returning non-zero must surface as FAIL.
        runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with context_env(CONAN_GCC_VERSIONS="6", CONAN_ARCHS='x86', CONAN_BUILD_PACKAGES='pckg1'):
            compiler, options = list(runner.enumerate_jobs())[0]
            with profile_for(compiler=compiler) as profile_file:
                runner.set_compiler(compiler)
                runner.set_profile(profile_file)
                with mock.patch('conan_sword_and_sorcery.ci.runners.base_runner.cmd', return_value=1) as mocked_cmd:
                    r = runner.run(options={'shared': True}, username='test', channel='testing')
                    self.assertEqual(r, FAIL)
    def test_run_success(self):
        runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with context_env(CONAN_GCC_VERSIONS="6", CONAN_ARCHS='x86', CONAN_BUILD_PACKAGES='pckg1'):
            compiler, options = list(runner.enumerate_jobs())[0]
            with profile_for(compiler=compiler) as profile_file:
                runner.set_compiler(compiler)
                runner.set_profile(profile_file)
                with mock.patch('conan_sword_and_sorcery.ci.runners.base_runner.cmd', return_value=0) as mocked_cmd:
                    r = runner.run(options={'shared': True}, username='test', channel='testing')
                    self.assertEqual(r, SUCCESS)
                    args, kwargs = mocked_cmd.call_args
                    self.assertEqual(len(args), 0)  # All arguments are passed with name
                    self.assertEqual(kwargs['exception'], None)
                    command = kwargs.get('command')
                    self.assertIn('--build=pckg1', command)
                    self.assertIn('--build=outdated', command)
                    self.assertIn('--build={}'.format(runner.recipe.name), command)
                    self.assertIn('--profile {}'.format(profile_file), command)
                    self.assertIn('-o {}:shared=True'.format(runner.recipe.name), command)
    def test_is_upload_requested(self):
        runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="True", APPVEYOR_REPO_BRANCH='non-stable-branch'):
            self.assertFalse(runner.is_stable_branch())
            self.assertFalse(runner.is_upload_requested())
        with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="False", APPVEYOR_REPO_BRANCH='non-stable-branch'):
            self.assertFalse(runner.is_stable_branch())
            self.assertTrue(runner.is_upload_requested())
        with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="False", APPVEYOR_REPO_BRANCH='stable/v1.2.3'):
            self.assertTrue(runner.is_stable_branch())
            self.assertTrue(runner.is_upload_requested())
        with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="True", APPVEYOR_REPO_BRANCH='stable/v1.2.3'):
            self.assertTrue(runner.is_stable_branch())
            self.assertTrue(runner.is_upload_requested())
    def test_upload(self):
        runner = AppveyorRunner(conanfile=self.conanfile, settings=self.settings, osys="Windows")
        with mock.patch('conan_sword_and_sorcery.ci.runners.base_runner.upload', return_value=0) as mocked_upload:
            with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="True", APPVEYOR_REPO_BRANCH='non-stable-branch'):
                runner.upload(username='test', channel='testing')
            with context_env(CONAN_UPLOAD_ONLY_WHEN_STABLE="False", APPVEYOR_REPO_BRANCH='non-stable-branch'):
                runner.upload(username='test', channel='testing')
                args, kwargs = mocked_upload.call_args
                self.assertEqual(kwargs['username'], 'test')
| [
"jgsogo@gmail.com"
] | jgsogo@gmail.com |
1b5264d22279cc7d5f53699e4a0c0adf326e2398 | 2836975ce5ee74397fb674bdfd04a164d00baafb | /main/migrations/0015_auto_20201124_1105.py | 54d7a4575c36c78dab5ae3e805029eaf089a4b61 | [] | no_license | tz01x/rental | 57aedf6677ead989a089999b4802a6975d62ce0c | 103491c76c62b71901d3f758f9b9af59d2270fe4 | refs/heads/master | 2023-08-22T14:45:18.125694 | 2021-09-30T01:58:46 | 2021-09-30T01:58:46 | 332,413,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | # Generated by Django 3.1.1 on 2020-11-24 05:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0014_auto_20201122_1658'),
]
operations = [
migrations.AddField(
model_name='property',
name='latlong',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='property',
name='thana',
field=models.CharField(blank=True, max_length=400, null=True, verbose_name='Thana'),
),
migrations.AlterField(
model_name='property',
name='area',
field=models.CharField(blank=True, max_length=400, null=True, verbose_name='District'),
),
]
| [
"abdur963rahman@gmil.com"
] | abdur963rahman@gmil.com |
1b20c5a22901e1d346f020449eeffb7621afe266 | 5f51fdeb5efc6cbcc0736957d2f16eddf9214671 | /python/mind_palace/product_ranker/prepare_data/integerize_clickstream.py | e724ec724f5bfb745e44fd81b6c4d2eb7a35091a | [] | no_license | thejusvm/learn-cascading | aa438e74f26b94a880ad04bb425092f5145612e3 | 1e0fd76f7f746e4c177661e40c5abd4fe081643f | refs/heads/master | 2021-09-14T17:12:29.879467 | 2018-03-01T15:32:05 | 2018-03-01T15:32:05 | 103,110,403 | 0 | 0 | null | 2017-09-11T08:32:29 | 2017-09-11T08:32:29 | null | UTF-8 | Python | false | false | 6,607 | py | import cPickle as pickle
import glob
import json
import numpy as np
import os
import pandas as pd
import time
from contextlib import closing
from functools import partial
from multiprocessing import Pool
import mind_palace.product_ranker.constants as CONST
from mind_palace.product_ranker.commons import init_attribute_dicts, generate_key
"""
Given a file containing the click through data with product attributes,
this file integerizes the data with different integer dictionary for each attribute.
It uses DictIntegerizer class to assign a unique integer for every unique value of the attribute.
TODO : this code currently instantiates a new DictIntegerizer for each attribute,
it needs to support taking a dict in the form of a pickled file and integerizing using it.
"""
def logBreak() :
    # Visual separator between log sections.
    print "------------------------------------------"
def integerize(attributes, attribute_dicts, products_attributes):
    """Map one product's attribute values to integer ids.

    Returns a list with one integer per entry of `attributes`, looked up
    in the corresponding dict from `attribute_dicts`.  Attributes missing
    from `products_attributes`, and values unknown to the dict, map to
    the MISSING_DATA_TEXT id.

    Improvements: dict `.get()` replaces the membership-test-then-index
    pattern, and the loop-invariant `.index()` lookup for the missing-id
    is hoisted out of the loop.
    """
    missing_index = CONST.DEFAULT_DICT_KEYS.index(CONST.MISSING_DATA_TEXT)
    return [
        attribute_dicts[attribute].only_get(
            products_attributes.get(attribute, CONST.MISSING_DATA_TEXT),
            missing_val=missing_index,
        )
        for attribute in attributes
    ]
def get_exploded_columns(keys, field_name):
    """Return one column name per key, of the form "<field_name>_<key>".

    Fix: `map()` returns a lazy iterator on Python 3 (this file targets
    Python 2, where it returned a list); a list comprehension gives a
    list under both interpreters.
    """
    return [field_name + "_" + key for key in keys]
def add_to_row(row, attributes, attribute_vals, key_prefix):
    """Store one value per attribute into `row` under "<prefix>_<attribute>".

    When `attribute_vals` is empty, every attribute gets an empty list.
    """
    vals_present = len(attribute_vals) != 0
    for position, attribute in enumerate(attributes):
        if vals_present:
            value = attribute_vals[position]
        else:
            value = []
        row[generate_key(key_prefix, attribute)] = value
def cross_attribute_prefix(attributes, key_prefixes):
    """Return generated keys for every (attribute, prefix) pair,
    attributes in the outer position, prefixes in the inner."""
    return [
        generate_key(key_prefix, attribute)
        for attribute in attributes
        for key_prefix in key_prefixes
    ]
def integerize_single_val_column(df, column_name, new_column_prefix, attributes, attribute_dicts) :
    # Each cell of `column_name` holds one JSON product object.  Map it to
    # a list of one integer id per attribute, then split that list into
    # one JSON-encoded column per attribute named "<prefix>_<attribute>".
    integerize_single = lambda x: integerize(attributes, attribute_dicts, json.loads(x))
    integerized_cols = df[column_name].apply(integerize_single)
    for i in range(len(attributes)) :
        attribute = attributes[i]
        df[generate_key(new_column_prefix, attribute)] = integerized_cols.apply(lambda x : json.dumps(x[i]))
def integerize_multi_val_column(df, column_name, new_column_prefix, attributes, attribute_dicts) :
    # Each cell holds a JSON list of product objects.  Integerize each
    # product and transpose, so row i of the array is the id list for
    # attribute i; empty cells become empty JSON lists.
    integerize_multiple = lambda y: np.array(map(lambda x: integerize(attributes, attribute_dicts, x), json.loads(y))).T
    integerized_cols = df[column_name].apply(integerize_multiple)
    for i in range(len(attributes)) :
        attribute = attributes[i]
        df[generate_key(new_column_prefix, attribute)] = integerized_cols.apply(lambda x : json.dumps(x[i].tolist() if len(x) > 0 else []))
def process_row(df, attributes, attribute_dicts):
    # Integerize every product column in place: the positive product is a
    # single JSON object per row, the remaining columns hold JSON lists.
    integerize_single_val_column(df, "positiveProducts", CONST.POSITIVE_COL_PREFIX, attributes, attribute_dicts)
    integerize_multi_val_column(df, "negativeProducts", CONST.NEGATIVE_COL_PREFIX, attributes, attribute_dicts)
    # NOTE(review): CONST.CLICK_COL_PRERFIX looks misspelled ("PRERFIX");
    # renaming must start in the constants module, so it is left as-is here.
    integerize_multi_val_column(df, "pastClickedProducts", CONST.CLICK_COL_PRERFIX, attributes, attribute_dicts)
    integerize_multi_val_column(df, "pastBoughtProducts", CONST.BOUGHT_COL_PREFIX, attributes, attribute_dicts)
def process_file(data_path,
                 attributes,
                 attribute_dicts):
    # Load one TSV part-file, keep only rows found via search/organic,
    # integerize the product columns in place, and return just the
    # timestamp plus the integerized attribute columns.
    df = pd.read_csv(data_path, sep="\t")
    # df = df[df["findingMethod"].apply(lambda x: str(x).lower() == "search")]
    df = df[df["findingMethod"].apply(lambda x: str(x).lower() == "search" or str(x).lower() == "organic")]
    start = time.clock()
    process_row(df, attributes, attribute_dicts)
    attribute_keys = cross_attribute_prefix(attributes, CONST.OUTPUTS_PER_ATTRIBUTE)
    necessaryKeys = ["timestamp"]
    necessaryKeys += attribute_keys
    data = df[necessaryKeys]
    print "time taken by data preprocess : " + str(time.clock() - start)
    return data
def get_attributedict_path(data_path):
    """Location of the pickled attribute-dictionary file under `data_path`."""
    return "{0}/productdict.pickle".format(data_path)
def get_train_path(data_path):
    """Location of the training split TSV under `data_path`."""
    return "{0}/train.tsv".format(data_path)
def get_test_path(data_path):
    """Location of the test split TSV under `data_path`."""
    return "{0}/test.tsv".format(data_path)
def get_attributedict(data_path):
    """Unpickle and return the attribute dictionaries stored at `data_path`."""
    with open(data_path, 'rb') as pickle_file:
        return pickle.load(pickle_file)
def prepare_data(raw_data_path,
                 processed_data_path,
                 attributes,
                 attribute_dicts):
    # Fan the raw part-files out to a 20-way process pool; each worker
    # integerizes one input file into processed_data_path/part-<i>.
    # Returns the attribute dicts for the caller to persist.
    filenames = glob.glob(raw_data_path)
    out_files = [processed_data_path + "/part-" + str(counter) for counter in range(len(filenames))]
    io_files = zip(filenames, out_files)
    with closing(Pool(processes=20)) as pool:
        pool.map(partial(integerize_file, attributes, attribute_dicts), io_files)
    return attribute_dicts
def integerize_file(attributes, attribute_dicts, io_file):
    # Pool-worker body: (input path, output path) pair in, integerized TSV
    # out, with timing logged around each phase.
    # NOTE(review): the local `pd` below shadows the `pandas as pd` alias.
    in_file, out_file = io_file
    logBreak()
    start = time.clock()
    print "start file processing : " + in_file
    pd = process_file(in_file, attributes, attribute_dicts)
    print "end file processing : " + in_file + ", in " + str(time.clock() - start)
    print out_file
    start = time.clock()
    pd.to_csv(out_file, sep="\t", index=False)
    print "dumped content of " + in_file + " to " + out_file + " in " + str(time.clock() - start)
    logBreak()
def integerize_clickstream(attributes, attribute_dicts, raw_data_path, output_path):
    """Integerize every raw clickstream part-file into `output_path`.

    Bug fix: the original dropped `prepare_data`'s return value, so this
    function always returned None and the `__main__` block pickled None
    instead of the attribute dicts.  Propagate the return value;
    callers that ignore it are unaffected.
    """
    return prepare_data(raw_data_path, output_path, attributes, attribute_dicts)
# Driver: integerize one hard-coded dataset and pickle the attribute dicts.
if __name__ == '__main__' :
    raw_data_path = "/Users/thejus/workspace/learn-cascading/data/sessionExplodeWithAttributes-201708.MOB.smaller" + "/part-*"
    processed_data_path = "/Users/thejus/workspace/learn-cascading/data/sessionExplodeWithAttributes-201708.MOB.smaller.search.1"
    os.makedirs(processed_data_path)
    attributes = ["productId", "brand", "vertical"]
    attribute_dicts = init_attribute_dicts(attributes, CONST.DEFAULT_DICT_KEYS)
    dicts = integerize_clickstream(attributes, attribute_dicts, raw_data_path, processed_data_path)
    product_dict_file = get_attributedict_path(processed_data_path)
    start = time.clock()
    # NOTE(review): this pickles integerize_clickstream's return value —
    # verify it forwards the populated attribute dicts (a call that
    # returns None would silently pickle None here).
    with open(product_dict_file, 'w+b') as handle:
        pickle.dump(dicts, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print "pickled attribute dicts into " + product_dict_file + " in " + str(time.clock() - start)
    logBreak()
"thejus@flipkart.com"
] | thejus@flipkart.com |
c658a9eea9faa8760fe14642bb464cfabf73be90 | 2bdffd6d18cc16801064373409821130f9035ed4 | /experiments/sudoku/solve2_nn.py | 54b53ec5f2281796daec0d673a510b6cb05cce96 | [] | no_license | benthayer/Tensorflow | a433c05ba20db937d4fd6830e30d03d1741e1b2d | 48572c93702799b690c0b061fadc4cffb1a34283 | refs/heads/master | 2021-06-14T14:53:23.337419 | 2017-03-20T22:45:01 | 2017-03-20T22:45:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | import numpy as np
import tensorflow as tf
from experiments.sudoku.gen2 import convert_to_normal, get_training_and_test_sets
def weight_variable(shape):
    """Return a trainable weight tensor drawn from a truncated normal (std 0.1)."""
    initial_values = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial_values)
def bias_variable(shape):
    """Return a trainable bias tensor initialized to the constant 0.1."""
    initial_values = tf.constant(0.1, shape=shape)
    return tf.Variable(initial_values)
# Single softmax-layer network over 4**3 = 64 inputs — presumably a 4x4
# board with a 4-way one-hot encoding per cell (confirm against gen2's
# encoding); the output uses the same layout.
data = tf.placeholder(tf.float32, shape=[None, 4**3])
W1 = weight_variable([4**3, 4**3])
b1 = bias_variable([4**3])
h1 = tf.nn.softmax(tf.matmul(data, W1) + b1)
y = h1
# Ground-truth solutions, same encoding as the input.
y_ = tf.placeholder(tf.float32, [None, 4**3])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
training_puzzles, training_solutions, test_puzzles, test_solutions = get_training_and_test_sets()
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# NOTE(review): `k` below is unused.
k = 1000
# Full-batch gradient descent over the whole training set.
for i in range(10000):
    sess.run(train_step, feed_dict={data: training_puzzles, y_: training_solutions})
    if i % 100 == 0:
        print("Batch {} complete".format(i))
# Per-cell accuracy: compare argmax over axis 2 of the (batch, 4, 4, 4)
# reshape of prediction vs. truth.
correct_prediction = tf.equal(
    tf.argmax(
        tf.reshape(y, (-1, 4, 4, 4)),
        2),
    tf.argmax(
        tf.reshape(y_, (-1, 4, 4, 4)),
        2))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy = sess.run(accuracy, feed_dict={data: test_puzzles, y_: test_solutions})
W = sess.run(W1)
b = sess.run(b1)
# Print up to four incorrectly solved boards for manual inspection.
incorrect = 0
for i in range(len(test_puzzles)):
    guessed_board = sess.run(y, feed_dict={data: [test_puzzles[i]]})
    if not np.array_equal(
            convert_to_normal(guessed_board.reshape((4, 4, 4))),
            convert_to_normal(test_solutions[i].reshape(4, 4, 4))):
        incorrect += 1
        if incorrect > 4:
            break
        print()
        print("Board:\n", convert_to_normal(test_puzzles[i].reshape((4, 4, 4)), ones=True))
        print("Guess:\n", convert_to_normal(guessed_board.reshape((4, 4, 4))))
        print("Answer:\n", convert_to_normal(test_solutions[i].reshape((4, 4, 4))))
print("Accuracy = ", accuracy)
| [
"benthayer2365@gmail.com"
] | benthayer2365@gmail.com |
16a3403ab8a7c97642874c0b8f630e03fc070931 | 2546d448f03a57152a701180077fcc904b1b944a | /schedule/urls.py | 8077ba2843cfce809db5893d0f5c814810d77fe0 | [] | no_license | NathanDai5287/Sharetrade | 61f52913591a404766654921c054663d83414a55 | 62a453364c0d97cf0b114e5286bfd0dc8fef44a5 | refs/heads/master | 2023-06-26T20:52:09.932366 | 2021-08-03T04:38:04 | 2021-08-03T04:38:04 | 387,053,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | """schedule URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
# Route table: the built-in admin site, then everything else delegated to
# the users app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include("users.urls"))
]
| [
"nathandai2000@gmail.com"
] | nathandai2000@gmail.com |
602ecb7bb83ddd5c367c45eeaec4531e135d6824 | f87dc2227f9539ce9f87b8eb417d28f487ea2eac | /이진탐색/부품찾기.py | b3627efacbce4f210bf7ebc9dc2784e06dd4977a | [] | no_license | jjangsungwon/python-for-coding-test | fb1e019a2e68e426bb4f6770bffdc6289a647b4a | 8d9bf8de5de2a9724f75b35ea04dd9bcc40dec86 | refs/heads/master | 2022-12-16T02:53:55.967070 | 2020-08-26T08:41:14 | 2020-08-26T08:41:14 | 285,842,867 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | def binary_search(target, start, end):
if start > end:
return None
while start <= end:
mid = (start + end) // 2
if array[mid] == target: # 일치
return "yes"
elif array[mid] > target: # 중간값이 찾고자 하는 값보다 클 때
end = mid - 1
else:
start = mid + 1
return None # 일치하는 값이 없을 때
if __name__ == "__main__":
# 입력
N = int(input())
array = list(map(int, input().split()))
M = int(input())
find = list(map(int, input().split()))
# 이진 탐색을 하기 위해서 정렬
array.sort()
# find에서 값을 하나씩 읽는다.
for data in find:
# 이진 탐색
result = binary_search(data, 0, N - 1)
if result is not None:
print('yes', end=" ")
else:
print('no', end=" ")
| [
"dnjs2113@gmail.com"
] | dnjs2113@gmail.com |
6907542e7974952c900a54a7451dffc120b1d850 | 719990ee24f8dbfc11024bb5f1ec22cd3b8b4c62 | /scrape.py | ead90be26583ec15b8e8d7b048ed45fdfff202d1 | [] | no_license | raymond-devries/usara-nationals | 790eed3d34a2f2ac2e74c141ae493c51d6eb50c3 | 1c9f82d686de730bde0296f40bf9d92a0ec78bbb | refs/heads/master | 2023-08-12T04:28:51.849196 | 2021-09-19T18:32:43 | 2021-09-19T18:32:43 | 405,495,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from selenium import webdriver
import json
from selenium.webdriver.firefox.options import Options
def main():
    """Scrape tracker data from the USARA Nationals page and save it as JSON.

    Launches a headless Firefox, evaluates the page's ``getData(5)`` JS
    helper, and writes the result to ``raw_data.json``.
    """
    options = Options()
    options.headless = True
    driver = webdriver.Firefox(options=options)
    try:
        driver.get("https://adventureenablers.s3.amazonaws.com/Tracking/2021USARANationals/SI/index.html")
        data = driver.execute_script("return getData(5)")
    finally:
        # Always shut the browser down, even if navigation or the JS call
        # fails, so no orphaned Firefox process is left behind (the original
        # leaked the driver on any exception before quit()).
        driver.quit()
    with open("raw_data.json", "w") as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
if __name__ == '__main__':
    main()
| [
"raymond.l.devries@gmail.com"
] | raymond.l.devries@gmail.com |
d8ca730c49e849faef22bb61d6e7c1ea1853c890 | 694d57c3e512ce916269411b51adef23532420cd | /python/chapter-1/lab4-exec1.2.py | 00e1dd5356363c18fb8e1045f63f53286f0a515a | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | def gcb_recur(a, b):
smaller_para = min(a, b)
larger_para = max(a, b)
remainder = larger_para % smaller_para
if smaller_para % remainder == 0:
return remainder
return gcb_recur(smaller_para, remainder)
print(gcb_recur(50, 35))
def gcb_itera(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` iteratively.

    Uses the Euclidean algorithm. Fixes a crash in the original: when one
    number evenly divides the other, the first remainder is 0 and
    ``smaller_para % remainder`` raised ZeroDivisionError.
    """
    smaller_para = min(a, b)
    larger_para = max(a, b)
    remainder = larger_para % smaller_para
    if remainder == 0:
        # One argument divides the other, so the smaller one is the GCD.
        return smaller_para
    while not smaller_para % remainder == 0:
        smaller_para, remainder = remainder, smaller_para % remainder
    return remainder
print(gcb_itera(50, 35))
| [
"admin@admins-MacBook-Air.local"
] | admin@admins-MacBook-Air.local |
3b98e43e2f3dc2377b74432e9fe99c572da37f2a | 4904acd900496b4883c2f5b4aa6b45d1ef6654c0 | /graphgallery/gallery/nodeclas/tensorflow/__init__.py | 1cf21d123e086ed846bcb034e8d4271c9735498d | [
"MIT"
] | permissive | blindSpoter01/GraphGallery | aee039edd759be9272d123463b0ad73a57e561c7 | e41caeb32a07da95364f15b85cad527a67763255 | refs/heads/master | 2023-06-17T11:42:27.169751 | 2021-07-15T03:07:39 | 2021-07-15T03:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from .gcn import GCN
from .gat import GAT
from .clustergcn import ClusterGCN
from .sgc import SGC
from .gwnn import GWNN
from .robustgcn import RobustGCN
from .graphsage import GraphSAGE
from .fastgcn import FastGCN
from .chebynet import ChebyNet
from .densegcn import DenseGCN
from .lgcn import LGCN
from .BVAT.obvat import OBVAT
from .BVAT.sbvat import SBVAT
from .gmnn import GMNN
from .dagnn import DAGNN
from .mlp import MLP
from .tagcn import TAGCN
from .appnp import APPNP, PPNP
from .ssgc import SSGC
from .agnn import AGNN
from .arma import ARMA
# experimental model
from .experimental.edgeconv import EdgeGCN
from .experimental.s_obvat import SimplifiedOBVAT
from .experimental.gcn_mix import GCN_MIX
from .experimental.gcna import GCNA
from .experimental.sat import SAT
| [
"cnljt@outlook.com"
] | cnljt@outlook.com |
aa7b59318cba778a709f76ed4f709ab1a5fa40e7 | cc6d9fb4a7c7235ff5985ef17f4a554f19a0263d | /apps/transactions/templatetags/filters.py | 9706794c65b1abd95312c80471d048127a3ae137 | [] | no_license | timohermans/rabo-overview | 6c210a73a68b17620ee8df0985b9b4e28200081c | 0baea9631ee504b63046459718ea1a255992a18d | refs/heads/main | 2023-08-05T19:54:30.751983 | 2021-09-11T18:17:53 | 2021-09-11T18:17:53 | 393,132,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from datetime import date
from typing import Any, Iterator, List
from dateutil.relativedelta import relativedelta
from django import template
from apps.transactions.models import Account, Transaction
register = template.Library()
@register.filter
def previous_month(source: date) -> date:
    """Return the first day of the month preceding *source*'s month."""
    one_month = relativedelta(months=1)
    return date(source.year, source.month, 1) - one_month
@register.filter
def next_month(source: date) -> date:
    """Return the first day of the month following *source*'s month."""
    one_month = relativedelta(months=1)
    return date(source.year, source.month, 1) + one_month
@register.filter
def to_date_string(source: date) -> str:
    """Render *source* as an ISO-8601 string, suitable for month hrefs."""
    iso_representation = source.isoformat()
    return iso_representation
@register.filter
def receivers(accounts: List[Account]) -> List[Account]:
    """Return only the accounts owned by the user (the "receivers")."""
    return list(filter(lambda account: account.is_user_owner is True, accounts))
@register.filter
def short_account_number(account_number: str) -> str:
    """Abbreviate a long IBAN to its first two and last four characters."""
    prefix = account_number[:2]
    suffix = account_number[-4:]
    return prefix + "..." + suffix
@register.filter
def of_receiver(
    transactions: List[Transaction], receiver: Account
) -> Iterator[Transaction]:
    """Lazily yield the transactions whose receiver is *receiver*."""
    for transaction in transactions:
        if transaction.receiver == receiver:
            yield transaction
@register.filter
def get(o: object, key: str) -> Any:
    """Look up attribute *key* on *o* (template syntax lacks getattr)."""
    attribute_value = getattr(o, key)
    return attribute_value
| [
"timo.hermans@kabisa.nl"
] | timo.hermans@kabisa.nl |
4b4b5fbf0fcd97b37a90c5dc2ac660d862ce075b | d5a947bf9819e039f7238e61233c3bfab505deeb | /resume/models.py | d9a053342ec016c9bf06a472b188404a8bdfe82a | [] | no_license | P-Tanifor/JobSite | 09e9c30f3682f16fa125dce587b03e4e97e59e28 | afc5b0acd3a6e81fe96f7486a661705fa86b933e | refs/heads/main | 2023-09-03T12:43:00.162298 | 2021-10-29T11:32:45 | 2021-10-29T11:32:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django.db import models
from django.contrib.auth.models import User
import django
# Create your models here.
class Resume(models.Model):
    """A user's resume: a short free-text description tied to its author."""
    # Free-text body of the resume; capped at 1024 characters.
    description = models.CharField(max_length=1024)
    # Owning user; deleting the user cascades and removes their resumes.
    author = models.ForeignKey(django.contrib.auth.models.User, on_delete=models.CASCADE)
| [
"ptanifor@gmail.com"
] | ptanifor@gmail.com |
7d6e7442b32fe58141787e6063cf7b0ae35a74b7 | d49fbd7874b70a93cbc551afed1b87e3e47617a8 | /django/example/repositories/__init__.py | 1efb28043ae95783f5bde83b3415bcedaf028594 | [] | no_license | gitter-badger/tutorials-4 | bbdbb673e978118f9fec3212baa13f6f99226be0 | 3ce1cdb7c6d26f6df4d6bb94e82f83e8cab9389b | refs/heads/master | 2020-04-04T20:52:28.181616 | 2018-10-28T22:05:17 | 2018-10-28T22:05:17 | 156,264,177 | 0 | 0 | null | 2018-11-05T18:32:17 | 2018-11-05T18:32:16 | null | UTF-8 | Python | false | false | 528 | py | from .category import load_categories, load_category # noqa
from .entry import load_entries # noqa
from .notification import create_notification, load_notifications # noqa
from .price import ( # noqa
cheapest_price_by_category,
load_price,
prices_for_category,
)
from .profile import ( # noqa
add_balance,
create_profile,
del_balance,
load_profile,
save_profile,
)
from .subscription import create_subscription, load_subscription # noqa
from .user import create_user, save_password # noqa
| [
"proofit404@gmail.com"
] | proofit404@gmail.com |
6aef4706708cb0d55ce4d56b5e7fcbfcba763ea4 | 5a1d08aac9ed0c730e4f97b0e766c6763cfaab1f | /gb_chat/common/ui_keyboard_interrupt_helper.py | f393750b04ea94f8e2bfb5f048664114872d7b25 | [
"Apache-2.0"
] | permissive | Cerzon/gb_chat | 0b2965e046bcf4d832fb398361271d8eae19e50f | b4f8a6bf62b0971a135fbb2083456193f7a816cb | refs/heads/main | 2023-04-24T12:26:44.142068 | 2021-05-03T14:52:01 | 2021-05-03T14:52:01 | 360,984,777 | 0 | 0 | Apache-2.0 | 2021-05-03T14:52:02 | 2021-04-23T19:22:17 | Python | UTF-8 | Python | false | false | 685 | py | """
This solution is taken from https://coldfix.de/2016/11/08/pyqt-boilerplate/#keyboardinterrupt-ctrl-c
"""
import signal
from typing import Callable
from PyQt5.QtCore import QCoreApplication, QTimer
def _interrupt_handler(app: QCoreApplication) -> None:
    """SIGINT handler: ask the Qt application to exit its event loop."""
    app.quit()
def _safe_timer(timeout: int, fun: Callable[[], None]) -> None:
    """Call *fun* every *timeout* ms, rescheduling even if *fun* raises."""
    def timer_event() -> None:
        try:
            fun()
        finally:
            # Re-arm the single-shot timer so the callback keeps firing.
            QTimer.singleShot(timeout, timer_event)
    QTimer.singleShot(timeout, timer_event)
def setup_interrupt_handling(app: QCoreApplication) -> None:
    """Make Ctrl+C quit *app*.

    Python signal handlers only run while the interpreter is executing
    Python bytecode; the periodic no-op timer briefly re-enters Python so
    a pending SIGINT gets delivered even while Qt's C++ event loop runs.
    """
    signal.signal(signal.SIGINT, lambda *args: _interrupt_handler(app))
    # Wake the interpreter every 50 ms so the signal handler can run.
    _safe_timer(50, lambda: None)
| [
"derlih@gmail.com"
] | derlih@gmail.com |
16a35cd3db0fd05415676003f38fabb5303dec8d | 6f9170a35fa5d758bec62c9a9be21fae11b6783f | /desafio027.py | 06d929be683134e41c754ae05dd54637ce1078f0 | [] | no_license | alineat/python-exercicios | 11e227ceadbff0e997e2ed427f54a721a5e76e9e | 9557dea8d057ded63e2abbed458d00660d9e8c4e | refs/heads/master | 2020-07-03T15:09:40.575431 | 2019-08-12T14:37:00 | 2019-08-12T14:37:00 | 201,947,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # Faça um programa que leia o nome completo de uma pessoa, mostrando em seguida o primeiro e o último nome separadamente
# Read a full name and print the first and last name separately.
nome = str(input('Nome completo: ')).strip()
dividido = nome.split()
# The original label said "Segundo nome" (second name) while actually
# printing the LAST name; the label is fixed to match the exercise
# statement (first and LAST name). Negative indexing replaces
# ``dividido[len(dividido)-1]``.
print('Primeiro nome: {}.\nÚltimo nome: {}.'.format(dividido[0], dividido[-1]))
"aline_atsuta@hotmail.com"
] | aline_atsuta@hotmail.com |
1338076a2a3f108f9a4dc2d5342bb1e00f1c6a08 | bae29c2fb8eedd320bc881c2a22b70298ab0f38d | /icoder/settings.py | 967f1581a2d331e5cd6bb00382882744e5558c5f | [] | no_license | SourabhRishabhMishra/icoder | 1527604df1f93f04bc58c4471555381837da296d | 6f4e279c1e31e99e91fbd7c091c9e3088cc1d2e5 | refs/heads/master | 2022-12-07T05:47:02.222021 | 2020-08-19T01:20:32 | 2020-08-19T01:20:32 | 288,088,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,353 | py | """
Django settings for icoder project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4nf#h&87fk-6=prj*#-3tns#4jl#qls79q79ntbw62n42esed^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home.apps.HomeConfig',
'blog.apps.BlogConfig',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'icoder.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'icoder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,"static"),
]
MESSAGE_TAGS = {
messages.ERROR:'danger'
} | [
"sourabhm384@gmail.com"
] | sourabhm384@gmail.com |
0a19884a78e49b4f205f1efa1f54e90fe911ff31 | a1e2d31682b80aca10bbcd25db550419e04e71e5 | /semesters/apps.py | 1b7401041ba6e4bb8cb6e06093f6fd800242272b | [] | no_license | letzzBuild/ElectiveAPI | 712b1043c533eb00657f0cb481a7fcdbc47bb376 | e89d3af596ae2898e4480f8b380d46e13cd6338d | refs/heads/main | 2023-08-14T16:37:45.390654 | 2021-09-26T07:57:13 | 2021-09-26T07:57:13 | 379,562,536 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class SemestersConfig(AppConfig):
    """Django app configuration for the ``semesters`` app."""
    name = 'semesters'
| [
"letzzBuild@gmail.com"
] | letzzBuild@gmail.com |
697a66de3c22d6e8c4704790081528d98a614067 | adc531efc839ec0fc8e67504e5429ad7696c57cc | /API_Article/migrations/0037_auto_20210430_2214.py | 9066067fd23b7e1551788489d86436814bad5e8b | [] | no_license | huynguyen-py/GraduateBackendAPI | 1521db57947804d4b2342060632a4ecf637993cd | bdfb25ae96fd1165ce431be48c03d80b73d32de8 | refs/heads/main | 2023-05-07T00:04:24.866230 | 2021-06-02T05:03:06 | 2021-06-02T05:03:06 | 372,700,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Generated by Django 3.1.7 on 2021-04-30 15:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: loosen the comment body field and alter its date default."""
    dependencies = [
        ('API_Article', '0036_auto_20210316_0944'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='content_cmt',
            # Body may now be blank/NULL and falls back to 'Body_comment'.
            field=models.TextField(blank=True, default='Body_comment', null=True),
        ),
        migrations.AlterField(
            model_name='comment',
            name='create_date_cmt',
            # NOTE(review): this is a FIXED timestamp baked in when
            # makemigrations ran, not "now" at insert time — presumably
            # auto_now_add or a timezone.now callable was intended;
            # confirm against the Comment model definition.
            field=models.DateTimeField(default=datetime.datetime(2021, 4, 30, 22, 13, 42, 6285)),
        ),
    ]
| [
"iamhuynguyen1002@gmail.com"
] | iamhuynguyen1002@gmail.com |
26d76ad4d4f1ddd75f25e843de51546595a08f4d | 3356eb3fbf1ba5a8e5b0a851f07e8df5c852fdf8 | /tasks/takeoff.py | c4c7e6afe91521b29b8fec997819f25673715950 | [] | no_license | spb07/RL-Quadcopter-2 | 640118dcc932780e9c23d2adc36ab49d5e640f80 | 1061f3df2de6e116d281730583aa74acb472509b | refs/heads/master | 2020-03-18T22:32:02.527492 | 2018-05-29T20:49:28 | 2018-05-29T20:49:28 | 135,350,682 | 0 | 0 | null | 2018-05-29T20:41:13 | 2018-05-29T20:41:12 | null | UTF-8 | Python | false | false | 7,157 | py | import numpy as np
from physics_sim import PhysicsSim
class Task():
"""Task (environment) that defines the goal and provides feedback to the agent. Goal is to takeoff to a given height and hover once takeoff height is achieved. Ideally, only vertical movement with no movement in other planes and no rotation"""
def __init__(self, init_pose=None, init_velocities=None,
init_angle_velocities=None, runtime=5., target_pos=None):
"""Initialize a Task object.
Params
======
init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles
init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions
init_angle_velocities: initial radians/second for each of the three Euler angles
runtime: time limit for each episode
target_pos: target/goal (x,y,z) position for the agent
"""
# Simulation
self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime)
self.action_repeat = 3
self.state_size = self.action_repeat * 2 # multiplier is equal to space size
self.action_low = 0
self.action_high = 900
self.action_size = 1
#self.init_velocities = init_velocities
#self.target_pos = target_pos
# Goal
self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])
#self.target_v = np.array([0., 0.])
#self.target_angular_v = np.array([0., 0., 0.])
def get_reward(self):
"""Uses current pose of sim to return reward."""
'''
if (abs(self.sim.pose[2] - self.target_pos[2]))<0.3: #within 30cm of target height
prize= 1
else:
if (self.sim.pose[2] > (2* self.target_pos[2])): # penalty for overshooting target height
prize = -1
else:
if ((self.sim.pose[2] - self.target_pos[2])/self.sim.v[2])< 0: # Reward for going in right direction
prize=0.2
else: # penalty for drifting away from target height
prize=-0.2
'''
#Position based reward
pos = (self.sim.pose[2]/self.target_pos[2]) #relative position of quadcopter to target height
if pos > 3: #overshot target height by 3 times
prize =-1
else:
prize= np.sin(pos * (np.pi/2.)) #reward increases smoothly to 1 till target height and then decrease smootly to -1 when current height is 3 times target height, with an additional reward/penalty based on whether quad is going in right direction
# Direction of travel reward
if ((self.sim.pose[2] - self.target_pos[2])/self.sim.v[2])< 0: # Reward for going in right direction
direc = 0.3
else: # penalty for drifting away from target height
direc = -0.3
# Reward determination
if self.sim.pose[2] <self.sim.init_pose[2]: #penalty for not going above initial position
reward = -1
else:
if (abs(self.sim.v[2])>self.target_pos[2]/2): # penalty for excessive speed
reward = -1
else:
if self.sim.done:
if self.sim.time < self.sim.runtime: #penalty for hitting boundary before runtime
reward = -1
else: # episode ran for full runtime
finish = 50/(1+(abs(self.sim.pose[2] - self.target_pos[2]))) #special reward for finishing episode, with maximum reward when finish position is at target height
reward = prize + direc + finish
else: # continuous reward during episode
reward = prize + direc
'''
if (abs(self.sim.pose[2] - self.target_pos[2]))<0.3: #within 30cm of target height
prize= 5
else:
if (self.sim.pose[2] > (2* self.target_pos[2])): # penalty for overshooting target height
prize = -5
else:
if ((self.sim.pose[2] - self.target_pos[2])/self.sim.v[2])< 0: # Reward for going in right direction
prize=1
else: # penalty for drifting away from target height
prize=-1
if self.sim.pose[2] <self.sim.init_pose[2]: #penalty for not going above initial position
reward = -5
else:
if self.sim.done:
if self.sim.time < self.sim.runtime: #penalty for hitting boundary before runtime
reward = -2
else: # episode ran for full runtime
reward = prize
else: # continuous reward during episode
reward = prize
'''
#reward = 1.- np.tanh(abs(self.sim.pose[2] - self.target_pos[2])) #only reward reaching the height
#reward = 1.-.3*(abs(self.sim.pose[2] - self.target_pos[2])).sum()
#reward = self.sim.pose[2] #quad went to zero height from starting height of 10
#reward = 1.-.3*(abs(self.sim.pose[2] - self.target_pos[2])).sum() #only reward reaching the height
#reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()
#reward = np.tanh(1 - 0.003*(abs(self.sim.pose[:3] - self.target_pos))).sum()
#reward = np.tanh(3.-.9*(abs(self.sim.pose[:3] - self.target_pos)).sum()-.2*(abs(self.sim.v[:2] -self.target_v)).sum()-.2*(abs(self.sim.angular_v[:3] -self.target_angular_v)).sum())
#print("\n Time= = {:7.3f} Z= {:7.3f} , VZ = {:7.3f} ,Accel= {:7.3f}, ,Prize= {:7.4f}, Direc= {:7.4f}, Reward= {:7.4f} ".format( self.sim.time, self.sim.pose[2],self.sim.v[2],self.sim.linear_accel[2],prize, direc, reward ), end="")
return reward
def step(self, rotor_speeds):
"""Uses action to obtain next state, reward, done."""
reward = 0
pose_all = []
for _ in range(self.action_repeat):
done = self.sim.next_timestep(np.concatenate([rotor_speeds] * (4))) # updates pose, v and angular_v. Returns True if env bounds breached or time up
reward += self.get_reward()
#pose_all.append(self.sim.pose)
pose_all.append(np.concatenate(([self.sim.pose[2]],[self.sim.v[2]]),axis =0))
next_state = np.concatenate(pose_all)
return next_state, reward, done
def reset(self):
"""Reset the sim to start a new episode."""
self.takeoff= False
self.sim.reset()
#state = np.concatenate([self.sim.pose] * self.action_repeat) # state definition
#print("Input init velocity reset mod: ", self.sim.init_velocities)
#print("Input init position reset mod: ", self.sim.init_pose)
#print("Target pos reset mod: ", self.target_pos)
#print("Reset velocity in reset mod: ", self.sim.v)
state = np.concatenate(([self.sim.pose[2]],[self.sim.v[2]])*self.action_repeat,axis =0)
#state = np.concatenate([self.sim.pose[2] * self.action_repeat) #restrict to height only
return state
| [
"rnb14@ic.ac.uk"
] | rnb14@ic.ac.uk |
ccea4c2d3b3dedfd336b6570dafd6f1cbb2e431c | bf79aba1e47566d06fd9a7096a1d2dbbaf228748 | /detect.py | 26b305d7faa0cf22c154c6cb4cc9c78bbbeb2413 | [] | no_license | SubinMs/smartPrice | b998e315d8ffe610a75e7164f08fdf78000fb954 | d21b9d991fa513bb08ac097a36e905ba2563cc1c | refs/heads/master | 2020-09-14T16:28:47.955172 | 2019-11-21T17:16:46 | 2019-11-21T17:16:46 | 223,184,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | import io, os
from numpy import random
from google.cloud import vision
from Pillow_Utility import draw_borders, Image
import pandas as pd
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r"GoogleCloudDemo_ServiceAcct_Token.json"
client = vision.ImageAnnotatorClient()
img_list = os.listdir('./images')
#file_name = 'image_name.jpg'
file_name = img_list[0]
image_path = os.path.join('./images', file_name)
save_path = os.path.join('./test_images/')
static_path = os.path.join('./static/result_img/')
with io.open(image_path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
response = client.object_localization(image=image)
localized_object_annotations = response.localized_object_annotations
pillow_image = Image.open(image_path)
df = pd.DataFrame(columns=['name', 'score'])
img_size = list(pillow_image.size)
width = img_size[0]
height = img_size[1]
ob = 0
for obj in localized_object_annotations:
df = df.append(
dict(
name=obj.name,
score=obj.score
),
ignore_index=True)
if (obj.name=='Mobile phone') :
vr = dict(ld_x=obj.bounding_poly.normalized_vertices[0].x * width,ld_y=obj.bounding_poly.normalized_vertices[0].y * height,
ru_x=obj.bounding_poly.normalized_vertices[2].x * width,ru_y=obj.bounding_poly.normalized_vertices[2].y * height)
leftDown_x = int(vr['ld_x'])
leftDown_y = int(vr['ld_y'])
rightup_x = int(vr['ru_x'])
rightup_y = int(vr['ru_y'])
ob = ob + 1
con = str(ob)
im = Image.open('images/'+file_name)
crp = im.crop((leftDown_x,leftDown_y,rightup_x,rightup_y))
crp.show()
crp.save(save_path+'img_'+con+'.jpg',format='JPEG')
crp.save(static_path+'img_'+con+'.jpg',format='JPEG')
#end if
r, g, b = random.randint(150, 255), random.randint(
150, 255), random.randint(150, 255)
draw_borders(pillow_image, obj.bounding_poly, (r, g, b),
pillow_image.size, obj.name, obj.score)
#end for
#os.remove(image_path)
| [
"noreply@github.com"
] | SubinMs.noreply@github.com |
4f238047a913854c18e3f54d8ee509ac319bf7c1 | 7873042aa7b983a7c1075ddcf637135eea66adcd | /movie/views.py | 802a69dbda660cfc60cfc2fa73a7d6ded3e48c56 | [] | no_license | connieGao0819/MovieHunter | f6a1a717e0bf441b1b825dd2461d72cfcb1276e9 | ad80b34a0221462bc2850991f14149b46a72dcc3 | refs/heads/master | 2020-03-06T17:58:03.548201 | 2018-03-26T19:41:09 | 2018-03-26T19:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,017 | py | from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from movie.models import *
from django.http import HttpResponse
import json
from movie import index
index.index_dir()
print(index.permuterm_index.dict())
def add_seen(request, movie_id):
    """AJAX toggle of the current user's "seen" flag for a movie.

    Returns HttpResponse('1') when the movie was just marked as seen,
    '0' when an existing mark was removed. Non-AJAX requests fall
    through and implicitly return None.
    """
    if request.is_ajax():
        history = Seen.objects.filter(movieid_id=movie_id, username=request.user.get_username())
        if len(history) == 0:
            # Bump popularity by 3 by deleting and re-creating the row.
            # NOTE(review): assumes a Popularity row already exists for this
            # movie (detail() below creates one on view) — otherwise .get()
            # raises; confirm every reachable movie has been viewed first.
            movie = Popularity.objects.get(movieid_id=movie_id)
            weight = movie.weight
            movie.delete()
            new_record = Popularity(movieid_id=movie_id, weight=weight + 3)
            new_record.save()
            new_record = Seen(movieid_id=movie_id, username=request.user.get_username())
            new_record.save()
            return HttpResponse('1')
        else:
            # Toggle off: drop the "seen" record (the weight bump is kept).
            history.delete()
            return HttpResponse('0')
def add_expect(request, movie_id):
if request.is_ajax():
history = Expect.objects.filter(movieid_id=movie_id, username=request.user.get_username())
if len(history) == 0:
movie = Popularity.objects.get(movieid_id=movie_id)
weight = movie.weight
movie.delete()
new_record = Popularity(movieid_id=movie_id, weight=weight + 3)
new_record.save()
new_record = Expect(movieid_id=movie_id, username=request.user.get_username())
new_record.save()
return HttpResponse('2')
else:
history.delete()
return HttpResponse('0')
@csrf_protect
def detail(request, model, id):
items = []
try:
if model.get_name() == 'movie' and id != 'None':
try:
d = Popularity.objects.get(movieid_id=id)
weight = d.weight
d.delete()
new_record = Popularity(movieid_id=id, weight=weight + 1)
new_record.save()
except:
new_record = Popularity(movieid_id=id, weight=1)
new_record.save()
label = 'actor'
object = model.objects.get(movieid=id)
records = Act.objects.filter(movieid_id=id)
if request.user.get_username() != '':
seen_list = [str(x).split('|')[1] for x in
Seen.objects.filter(username=request.user.get_username())]
expect_list = [str(y).split('|')[1] for y in
Expect.objects.filter(username=request.user.get_username())]
if id in seen_list:
object.flag = 1
if id in expect_list:
object.flag = 2
for query in records:
for actor in Actor.objects.filter(actorid=query.actorid_id):
items.append(actor)
if model.get_name() == 'actor':
label = 'movie'
object = model.objects.get(actorid=id)
records = Act.objects.filter(actorid_id=id)
for query in records:
for movie in Movie.objects.filter(movieid=query.movieid_id):
items.append(movie)
except:
return render(request, '404.html')
return render(request, '{}_list.html'.format(label), {'items': items, 'number': len(items), 'object': object})
def whole_list(request, model, page):
if page:
page = int(page)
else:
return render(request, '404.html')
objects = model.objects.all()
total_page = len(objects) // 10
if (len(objects) / 10 - len(objects) // 10) > 0:
total_page += 1
if page > total_page:
return render(request, '404.html')
pages = [x + 1 for x in range(total_page)]
end = 10 * page if page != total_page else len(objects)
result = objects[10 * (page - 1):end]
data = {'items': result, 'number': len(objects), 'pages': pages, 'current_page': page, 'next_page': page + 1,
'last_page': page - 1, 'page_number': total_page}
if page == 1:
del data['last_page']
if page == total_page:
del data['next_page']
return render(request, '{}_list.html'.format(model.get_name()), data)
def search(request, pattern):
    """Render combined movie/actor substring-search results for *pattern*."""
    # URL-encoded spaces arrive literally in the path; decode them first.
    pattern = pattern.replace("%20", " ")
    matched_movies = Movie.objects.filter(title__contains=pattern)
    matched_actors = Actor.objects.filter(name__contains=pattern)
    context = {
        'items1': matched_movies,
        'search1': pattern,
        'number1': len(matched_movies),
        'items2': matched_actors,
        'search2': pattern,
        'number2': len(matched_actors),
    }
    return render(request, 'searchresult.html', context)
def search_suggest(request, str):
movie_list, actor_list = [], []
# movie
movies = Movie.objects.filter(title__istartswith=str).order_by('-rate')
if len(movies) > 3:
for i in range(3):
movie_list.append({'movieid': movies[i].movieid, 'poster': movies[i].poster, 'title': movies[i].title})
else:
movies = Movie.objects.filter(title__contains=str).order_by('-rate')
num = 3 - len(movie_list) if len(movies) > 3 - len(movie_list) else len(movies)
for i in range(num):
movie_list.append({'movieid': movies[i].movieid, 'poster': movies[i].poster, 'title': movies[i].title})
# actor
actors = Actor.objects.filter(name__istartswith=str)
if len(actors) > 3:
for i in range(3):
actor_list.append({'actorid': actors[i].actorid, 'photo': actors[i].photo, 'name': actors[i].name})
else:
actors = Actor.objects.filter(name__contains=str)
num = 3 - len(actor_list) if len(actors) > 3 - len(actor_list) else len(actors)
for i in range(num):
actor_list.append({'actorid': actors[i].actorid, 'photo': actors[i].photo, 'name': actors[i].name})
# result in a dictionary
result = {'movie': movie_list, 'actor': actor_list}
return HttpResponse(json.dumps(result, ensure_ascii=False))
@csrf_protect
def seen(request, movie_id):
if request.POST:
try:
d = Seen.objects.get(username=request.user.get_username(), movieid_id=movie_id)
d.delete()
except:
return render(request, '404.html')
records = Seen.objects.filter(username=request.user.get_username())
movies = []
for record in records:
movie_id = str(record).split('|')[1]
movies.append(Movie.objects.get(movieid=movie_id))
return render(request, 'seen.html', {'items': movies, 'number': len(movies)})
@csrf_protect
def expect(request, movie_id):
    """List the current user's "expected" movies; POST removes one first.

    Mirrors the sibling ``seen`` view, which already carries
    ``@csrf_protect``; the decorator is added here for consistency since
    this view also mutates state on POST.
    """
    if request.POST:
        try:
            d = Expect.objects.get(username=request.user.get_username(), movieid_id=movie_id)
            d.delete()
        except Expect.DoesNotExist:
            # Narrowed from a bare ``except:`` so programming errors are
            # no longer silently rendered as a 404 page.
            return render(request, '404.html')
    records = Expect.objects.filter(username=request.user.get_username())
    movies = []
    for record in records:
        # str(record) appears to encode "<username>|<movieid>"; pull the
        # movie id out. NOTE(review): relies on the model's __str__ format
        # — confirm against movie.models.
        movie_id = str(record).split('|')[1]
        movies.append(Movie.objects.get(movieid=movie_id))
    return render(request, 'expect.html', {'items': movies, 'number': len(movies)})
| [
"jgao4@wpi.edu"
] | jgao4@wpi.edu |
9de7481bfb9c7ec3e011a8ebfbeec40ca8cd62b0 | a7f7981a91c3213c011abd33778278c58fa4aa55 | /python-udemy/Practica1/Practica01_02.py | 55a5c9e09ea6471133973896d9386740c0939148 | [] | no_license | nicolassnider/tkinter_flask_django | 9f631c01b0d2b14758deb4ba9c74f0ca59b12d97 | 14355f130570a6e2dccd81804edfe35ee418a099 | refs/heads/master | 2023-01-23T18:32:14.775758 | 2020-12-05T00:44:43 | 2020-12-05T00:44:43 | 318,354,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | '''
Problema 02: Hallar el cociente y residuo (resto) de dos números
enteros.
Análisis: Para la solución de este problema, se requiere que
ingrese dos números entero por teclado y el sistema realice el
cálculo respectivo para hallar el cociente y residuo.
'''
# The exercise statement (docstring above) asks for two INTEGERS, but the
# original read floats, producing results such as "cociente = 2.0"; read
# ints so the quotient and remainder are integral as the problem requires.
num1 = int(input("num1:\n"))
num2 = int(input("num2:\n"))
cociente = num1 // num2
residuo = num1 % num2
print(f"cociente = {cociente}")
print(f"residuo = {residuo}")
| [
"nicolas.snider@soulit.io"
] | nicolas.snider@soulit.io |
7efb9951bfdf815059c2e6a6b72a96e332f6a971 | 602afe5a905c1f66892312b91fc381d966196f1a | /utilities/request_parsers.py | aeadf503229360bc0911ab99d3a6bab21f0b095e | [] | no_license | Big-Ideas-Lab/nutrics | 394299905af1fbd88ded4197032a2ce03aa8445c | 174baecf041096552a69b4c5f68895186673e4cd | refs/heads/master | 2022-08-27T06:48:01.326349 | 2020-05-08T17:25:54 | 2020-05-08T17:25:54 | 243,624,416 | 0 | 0 | null | 2022-06-22T02:45:47 | 2020-02-27T21:53:05 | Python | UTF-8 | Python | false | false | 2,799 | py |
'''
There was too much clutter in the resources files, so I pulled out defining of requests parsers.

Each parser validates the arguments of one endpoint group; the ``help``
text is what flask_restful returns to the client when a required argument
is missing or invalid.
'''
from flask_restful import reqparse

# Parser for incoming user registration data.
u_parser = reqparse.RequestParser()
u_parser.add_argument('username', help='Username cannot be blank.', required=True)
u_parser.add_argument('email', help='Please include a valid email address.', required=True)
u_parser.add_argument('password', help='Please enter a valid password.', required=True)
u_parser.add_argument('age', help='Please enter an age.', required=True)
# BUGFIX: help text was copy-pasted from 'age' ('Please enter an age.').
u_parser.add_argument('gender_identity', help='Please enter a gender identity.', required=True)
u_parser.add_argument('activity_level', help='We need your activity level for nutritious recommendations.', required=True)

# Parser for incoming geolocal data.
r_parser = reqparse.RequestParser()
r_parser.add_argument('latitude', help='Latitude parameter is required.', required=True)
r_parser.add_argument('longitude', help='Longitude parameter is required.', required=True)
r_parser.add_argument('distance', help='Distance parameter is required.', required=True)

# Preference parser (add/remove a user food preference).
p_parser = reqparse.RequestParser()
p_parser.add_argument('preference', help='This field cannot be blank', required=True)
p_parser.add_argument('preference_action', help='This field cannot be blank', required=True)

# Admin parser: all fields optional because the required ones depend on 'action'.
a_parser = reqparse.RequestParser()
a_parser.add_argument('action', help='This field cannot be blank', required=False)
a_parser.add_argument('new_admin', help='This field only needs to be filled when adding new admin.', required=False)
a_parser.add_argument('item_name', help='This field needs to be added when updating food table', required=False)
a_parser.add_argument('latitude', help='This field needs to be added when updating food table', required=False)
a_parser.add_argument('longitude', help='This field needs to be added when updating food table', required=False)

# Email-confirmation link parser.
e_parser = reqparse.RequestParser()
e_parser.add_argument('token', help='include the token.', required=True)

# Food item parser.
f_parser = reqparse.RequestParser()
# BUGFIX: help text was copy-pasted from the token parser ('include the token.').
f_parser.add_argument('item_name', help='include the item name.', required=True)
f_parser.add_argument('latitude', help='include the latitude.', required=True)
f_parser.add_argument('longitude', help='include the longitude.', required=True)
f_parser.add_argument('restaurant_name', help='include the restaurant name.', required=True)
f_parser.add_argument('item_description', help='include the item description.', required=True)
f_parser.add_argument('price', help='include the price.', required=True)
f_parser.add_argument('nutrition', help='include the nutritional content.', required=True)
| [
"joshuadarcy@joshuas-mbp.lan"
] | joshuadarcy@joshuas-mbp.lan |
a1bb1aaf10d01f0cf95dcf59433fd0ff850d609e | e15e56ddca0d1aa989725ad2766f9cf36bcbde23 | /bin/rundevserver | a84db05f4423dc055598d11370934509319f8123 | [
"Apache-2.0"
] | permissive | ylamgarchal/dci-feeder | f7c26ed78aa61ee2e90cf4d047909b357f013fab | 27c0236c0986ee96fac8209bf69b57e71e38eaf5 | refs/heads/master | 2022-02-11T07:05:17.586018 | 2019-11-25T16:29:37 | 2019-11-25T16:50:07 | 217,178,791 | 0 | 0 | Apache-2.0 | 2022-01-06T22:39:23 | 2019-10-24T00:36:35 | Python | UTF-8 | Python | false | false | 813 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dcifeeder import app
from dcifeeder import settings as s
if __name__ == '__main__':
    # Development entry point: build the Flask app and serve it on all
    # interfaces, taking the debug flag from the project settings.
    dev_app = app.create_app()
    dev_app.run(host='0.0.0.0', threaded=True, debug=s.API_DEBUG)
| [
"yassine.lamgarchal@redhat.com"
] | yassine.lamgarchal@redhat.com | |
e805cdb88bd2de7f4bce40ee710b792a3c6c17be | 106aa71c49f176415c7c140f066bde4e3a2df797 | /Archive/Mads_Wind/utility.py | a67c83df9e2c038f38bc54fbb709dfaca7a60d8b | [
"MIT"
] | permissive | madsankern/DynamicProgramming | df461dae3bcc3dbde18e79fdded0974daa0e293c | 0812b844068c33b2529d4b11940f9c89582bc374 | refs/heads/main | 2023-05-31T00:18:45.820845 | 2021-06-09T16:51:45 | 2021-06-09T16:51:45 | 341,465,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import numpy as np
# Without housing
def u(c,par):
    """CRRA utility of consumption ``c``; log utility in the limit eta == 1.

    ``par`` must expose the relative-risk-aversion coefficient ``par.eta``.
    """
    if par.eta == 1.0:
        return np.log(c)
    return (c**(1-par.eta) - 1.0) / (1.0 - par.eta)
# With housing
def u_h(c,h,par):
    """CRRA utility of consumption ``c`` plus linear housing utility.

    Housing enters additively as ``par.kappa * h``; consumption utility is
    logarithmic when ``par.eta == 1`` and CRRA otherwise.
    """
    housing = par.kappa*h
    if par.eta == 1.0:
        return np.log(c) + housing
    return (c**(1-par.eta) - 1.0) / (1.0 - par.eta) + housing
# Marginal utility
def marg_u(c,par):
    """Marginal utility u'(c) = c**(-eta) for CRRA preferences."""
    exponent = -par.eta
    return c ** exponent
# Inverse marginal utility
def inv_marg_u(u,par):
    """Inverse marginal utility: the consumption level whose marginal
    utility equals ``u``, i.e. u**(-1/eta)."""
    exponent = -1.0 / par.eta
    return u ** exponent
| [
"Wind.Mads@bcg.com"
] | Wind.Mads@bcg.com |
0776fc01013ec265fc2da612b9ee90542488e9df | 04d50ae4c98c7832123b8af91de8e3990c2347f9 | /Trnsys/ProjectScripts/Decathlon/Post.py | f9a1c5804176710108653f75423192462523a451 | [] | no_license | bmj-archive/Old_Python | 79d1edb7088e1acb22260414469fbd793d83a44a | 929a19b3c0702f82c61d21450033d7416d411ccb | refs/heads/master | 2022-02-25T17:20:33.931716 | 2019-11-05T15:25:18 | 2019-11-05T15:25:18 | 74,760,848 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,539 | py | from exergyframes import exergy_frame as xrg
from exergyframes import meta_table as metaTab
import logging
import os
from config import *
import datetime
import UtilityPathsAndDirs as utilPath
import re
import numpy as np
def _create():
# Input
projectDir = FREELANCE_DIR + r"\DecathlonSim"
descriptionsFilePath = projectDir + r"\INPUT\Descriptions_r00.xlsx"
zoneNamesFilePath = projectDir + r"\INPUT\ZoneNames.xlsx"
#balDir = FREELANCE_DIR + r"\086_SmartCampus1\TRNSYS"
# Output
fileName = "ZerothRun"
csvOutDir = projectDir + r"\Analysis\\"
matfileOutDir = projectDir + r"\Analysis\\"
now = datetime.datetime.now()
nowStr = "{}-{}-{} {}-{}-{} ".format(now.year,
now.month,now.day, now.hour,now.minute,now.second)
csvFileFullPath = os.path.join(csvOutDir,nowStr + fileName + ".csvIGNORED")
matFileFullPath = os.path.join(matfileOutDir, nowStr + fileName + ".mat")
#===========================================================================
# Loop each variant
#===========================================================================
# Get the var dirs
#variantDirs = projectDir
#fullVariantPaths = [os.path.join(projectDir,d) for d in variantDirs]
# fullVariantPaths = [d for d in fullVariantPaths if os.path.isdir(d)]
#fullOutPaths = [os.path.join(d,"OUT") for d in fullVariantPaths]
#variantPathPairs = zip(variantDirs,fullOutPaths)
variantPathPairs = [["Main",projectDir]]
#===========================================================================
# # Get OUT files ----------------------------------------------------------
#===========================================================================
superFrameList = list()
for pair in variantPathPairs:
print pair
thisDir = pair[1]
inputFiles = utilPath.getFilesByExtRecurse(thisDir, "out")
frameList = list()
#for filePath in inputFiles[20:25]:
for filePath in inputFiles:
# Skip unless 3 elements in file name!
pureFileName = os.path.splitext(os.path.split(filePath)[1])[0]
splitFileName = re.split("_",pureFileName)
if len(splitFileName)==3:
thisFrame = xrg.load_single_out_file(filePath)
else:
logging.info("(Skipping '{}')".format(os.path.split(pureFileName)[1]))
frameList.append(thisFrame)
superFrameList += frameList
#superFrameList.append(frameList)
#print superFrameList
#xrg.displayFrame(thisFrame)
logging.info("Found '{}' OUT frames over all variants)".format(len(superFrameList)))
#===========================================================================
# # Get BAL files ----------------------------------------------------------
#===========================================================================
for pair in variantPathPairs:
#for pair in [variantPathPairs[0]]:
print pair
thisDir = pair[1]
inputFiles = utilPath.getFilesByExtRecurse(thisDir, "bal")
inputFiles = [item for item in inputFiles if not re.search("SUMMARY", item )]
frameList = list()
#for filePath in inputFiles[20:25]:
for filePath in inputFiles:
# Skip unless 3 elements in file name!
pureFileName = os.path.splitext(os.path.split(filePath)[1])[0]
splitFileName = re.split("_",pureFileName)
#if len(splitFileName)==3:
thisFrame = xrg.load_single_bal_file(filePath)
#else:
# logging.info("(Skipping '{}')".format(os.path.split(pureFileName)[1]))
frameList.append(thisFrame)
superFrameList += frameList
#superFrameList.append(frameList)
#print superFrameList
logging.info("Found '{}' BAL files over all variants)".format(len(superFrameList)))
#===========================================================================
# Merge frames
#===========================================================================
frameName = "dataFrame"
finalFrame = xrg.mergeFrames(frameName, superFrameList,True)
finalFrame = xrg.add_simple_time(finalFrame)
#finalFrame._convert_to_ndarray()
#xrg.displayFrame(finalFrame)
#===========================================================================
# # Add descriptions -------------------------------------------------------
#===========================================================================
descriptions = metaTab.getDescriptionsOut(descriptionsFilePath)
for desc in descriptions:
searchSys = desc[0][0]
searchPointType = desc[0][1]
searchNum = desc[0][2]
#print desc
searchIdx = (xrg.idx("system",searchSys) &
xrg.idx("pointType",searchPointType) &
xrg.idx("number",searchNum))
#print searchIdx, type(searchIdx)
descValue = desc[1]
# IN PLACE
xrg.renameHeader(finalFrame,searchIdx,"description",descValue,True)
#===========================================================================
# # Convert kJ/hr to W -----------------------------------------------------
#===========================================================================
def convertKJHtokW(array):
array = array / 3600
return array
thisMask = xrg.idx("units",r"kJ/hr")
xrg.inPlaceFunction(finalFrame,thisMask,convertKJHtokW)
xrg.renameHeader(finalFrame,thisMask,"units","kW")
#----------------------------------------------------------------- Save data
#xrg.displayFrame(finalFrame)
finalFrame.saveToCSV(csvFileFullPath)
finalFrame.saveToMat(matFileFullPath)
def _decathLoad():
logging.debug("Load".format())
loadMatPath = FREELANCE_DIR + r"\DecathlonSim\Analysis\\2012-10-31 13-28-14 ZerothRun.mat"
thisFrame = xrg.load_from_mat(loadMatPath)
print thisFrame.headersArray
if __name__ == "__main__":
    # Configure logging from the project-wide config file.
    # NOTE(review): relies on ``logging.config`` being importable here —
    # presumably pulled in elsewhere (e.g. via ``from config import *``);
    # confirm, as this file only does ``import logging``.
    logging.config.fileConfig(ABSOLUTE_LOGGING_PATH)
    myLogger = logging.getLogger()
    myLogger.setLevel("DEBUG")
    logging.debug("Started _main".format())
    # Build and save the merged results; switch to _decathLoad() below to
    # re-open a previously saved .mat file instead.
    _create()
    #_decathLoad()
    logging.debug("Finished _main".format())
| [
"Admin@6CORE"
] | Admin@6CORE |
5c09d311aad75b9bd3cd0f7997527b7e8fa604b9 | 457a1baf3a9afc365d53e955db5ccbef6a9f636b | /morphenepython/blockchain.py | 78f87d70667fda7d3020bdbb096dc6dc9ad2baea | [
"MIT"
] | permissive | morphene/morphene-python | 5bf9cb3cbc4a081297e26269c398192e12433072 | 7c6144c7337330490229ce69b8c3fb5dc2e3d08e | refs/heads/master | 2021-07-03T07:43:16.118829 | 2019-05-30T19:38:49 | 2019-05-30T19:38:49 | 189,285,305 | 0 | 0 | NOASSERTION | 2020-10-27T21:47:05 | 2019-05-29T19:22:41 | Python | UTF-8 | Python | false | false | 42,412 | py | # This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import python_2_unicode_compatible
from builtins import str
from builtins import range
from builtins import object
import sys
import time
import hashlib
import json
import math
from threading import Thread, Event
from time import sleep
import logging
from datetime import datetime, timedelta
from .utils import formatTimeString, addTzInfo
from .block import Block
from morphenepythonapi.node import Nodes
from morphenepythonapi.morphenenoderpc import MorpheneNodeRPC
from .exceptions import BatchedCallsNotSupported, BlockDoesNotExistsException, BlockWaitTimeExceeded, OfflineHasNoRPCException
from morphenepythonapi.exceptions import NumRetriesReached
from morphenepythongraphenebase.py23 import py23_bytes
from morphenepython.instance import shared_morphene_instance
from .amount import Amount
import morphenepython as mph
log = logging.getLogger(__name__)
if sys.version_info < (3, 0):
from Queue import Queue
else:
from queue import Queue
FUTURES_MODULE = None
if not FUTURES_MODULE:
try:
from concurrent.futures import ThreadPoolExecutor, wait, as_completed
FUTURES_MODULE = "futures"
# FUTURES_MODULE = None
except ImportError:
FUTURES_MODULE = None
# default exception handler. if you want to take some action on failed tasks
# maybe add the task back into the queue, then make your own handler and pass it in
def default_handler(name, exception, *args, **kwargs):
    """Default task-failure handler for the thread pool.

    Logs the worker name, the raised exception and the task's arguments,
    then carries on (failed tasks are dropped, not retried).

    :param str name: name of the worker thread that ran the task
    :param Exception exception: the exception the task raised
    """
    # BUGFIX: Logger.warn is a deprecated alias; use warning instead.
    log.warning('%s raised %s with args %s and kwargs %s' % (name, str(exception), repr(args), repr(kwargs)))
class Worker(Thread):
    """Daemon thread executing tasks from a given tasks queue.

    Each task is a ``(func, args, kwargs)`` tuple; a non-None return value
    of ``func`` is pushed onto ``results``.  The thread starts itself on
    construction and stops once ``abort`` is set.
    """
    def __init__(self, name, queue, results, abort, idle, exception_handler):
        """
        :param str name: thread name (also passed to the exception handler)
        :param Queue queue: task queue of (func, args, kwargs) tuples
        :param Queue results: queue collecting non-None return values
        :param Event abort: set externally to request shutdown
        :param Event idle: set by the worker while it has nothing to do
        :param exception_handler: callable(name, exc, args, kwargs) invoked
            when a task raises
        """
        Thread.__init__(self)
        self.name = name
        self.queue = queue
        self.results = results
        self.abort = abort
        self.idle = idle
        self.exception_handler = exception_handler
        self.daemon = True
        self.start()

    def run(self):
        """Thread work loop calling the function with the params"""
        # keep running until told to abort
        while not self.abort.is_set():
            try:
                # get a task and raise immediately if none available
                func, args, kwargs = self.queue.get(False)
                self.idle.clear()
            except Exception:
                # BUGFIX: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt; queue.Empty is what is
                # expected here.
                # NOTE(review): this branch busy-polls without sleeping,
                # burning CPU while the queue is empty.
                self.idle.set()
                continue
            try:
                # the function may raise
                result = func(*args, **kwargs)
                if(result is not None):
                    self.results.put(result)
            except Exception as e:
                # so we move on and handle it in whatever way the caller wanted
                self.exception_handler(self.name, e, args, kwargs)
            finally:
                # task complete no matter what happened
                self.queue.task_done()
# class for thread pool
class Pool:
    """Pool of threads consuming tasks from a queue.

    Tasks are enqueued as ``(func, args, kwargs)`` and handed to ``Worker``
    threads; non-None results accumulate in ``resultQueue`` and are drained
    with :meth:`results`.
    """
    def __init__(self, thread_count, batch_mode=True, exception_handler=default_handler):
        """
        :param int thread_count: number of worker threads started by :meth:`run`
        :param bool batch_mode: when True, the task queue is bounded to
            ``thread_count`` so ``enqueue`` blocks when no thread is free
        :param exception_handler: callable(name, exc, args, kwargs) invoked
            when a task raises
        """
        # batch mode means block when adding tasks if no threads available to process
        self.queue = Queue(thread_count if batch_mode else 0)
        self.resultQueue = Queue(0)
        self.thread_count = thread_count
        self.exception_handler = exception_handler
        self.aborts = []   # one abort Event per worker
        self.idles = []    # one idle Event per worker
        self.threads = []

    def __del__(self):
        """Tell my threads to quit"""
        self.abort()

    def run(self, block=False):
        """Start the threads, or restart them if you've aborted"""
        # either wait for them to finish or return false if some arent
        if block:
            while self.alive():
                sleep(1)
        elif self.alive():
            return False
        # go start them
        self.aborts = []
        self.idles = []
        self.threads = []
        for n in range(self.thread_count):
            abort = Event()
            idle = Event()
            self.aborts.append(abort)
            self.idles.append(idle)
            self.threads.append(Worker('thread-%d' % n, self.queue, self.resultQueue, abort, idle, self.exception_handler))
        return True

    def enqueue(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.queue.put((func, args, kargs))

    def join(self):
        """Wait for completion of all the tasks in the queue"""
        self.queue.join()

    def abort(self, block=False):
        """Tell each worker that its done working"""
        # tell the threads to stop after they are done with what they are currently doing
        for a in self.aborts:
            a.set()
        # wait for them to finish if requested
        while block and self.alive():
            sleep(1)

    def alive(self):
        """Returns True if any threads are currently running"""
        return True in [t.is_alive() for t in self.threads]

    def idle(self):
        """Returns True if all threads are waiting for work"""
        return False not in [i.is_set() for i in self.idles]

    def done(self):
        """Returns True if not tasks are left to be completed"""
        return self.queue.empty()

    def results(self, sleep_time=0):
        """Get the set of results that have been processed, repeatedly call until done"""
        sleep(sleep_time)
        results = []
        try:
            while True:
                # get a result, raises queue.Empty immediately if none available
                results.append(self.resultQueue.get(False))
                self.resultQueue.task_done()
        except Exception:
            # BUGFIX: was a bare ``except:``; queue.Empty signals exhaustion.
            # (Also removed an unreachable trailing ``return results``.)
            return results
@python_2_unicode_compatible
class Blockchain(object):
""" This class allows to access the blockchain and read data
from it
:param MorpheneClient morphene_instance: MorpheneClient instance
:param str mode: (default) Irreversible block (``irreversible``) or
actual head block (``head``)
:param int max_block_wait_repetition: maximum wait repetition for next block
where each repetition is block_interval long (default is 3)
This class let's you deal with blockchain related data and methods.
Read blockchain related data:
.. testsetup::
from morphenepython.blockchain import Blockchain
chain = Blockchain()
Read current block and blockchain info
.. testcode::
print(chain.get_current_block())
print(chain.morphene.info())
Monitor for new blocks. When ``stop`` is not set, monitoring will never stop.
.. testcode::
blocks = []
current_num = chain.get_current_block_num()
for block in chain.blocks(start=current_num - 99, stop=current_num):
blocks.append(block)
len(blocks)
.. testoutput::
100
or each operation individually:
.. testcode::
ops = []
current_num = chain.get_current_block_num()
for operation in chain.ops(start=current_num - 99, stop=current_num):
ops.append(operation)
"""
def __init__(
self,
morphene_instance=None,
mode="irreversible",
max_block_wait_repetition=None,
data_refresh_time_seconds=900,
):
self.morphene = morphene_instance or shared_morphene_instance()
if mode == "irreversible":
self.mode = 'last_irreversible_block_num'
elif mode == "head":
self.mode = "head_block_number"
else:
raise ValueError("invalid value for 'mode'!")
if max_block_wait_repetition:
self.max_block_wait_repetition = max_block_wait_repetition
else:
self.max_block_wait_repetition = 3
self.block_interval = self.morphene.get_block_interval()
def is_irreversible_mode(self):
return self.mode == 'last_irreversible_block_num'
def get_transaction(self, transaction_id):
""" Returns a transaction from the blockchain
:param str transaction_id: transaction_id
"""
if not self.morphene.is_connected():
raise OfflineHasNoRPCException("No RPC available in offline mode!")
self.morphene.rpc.set_next_node_on_empty_reply(False)
ret = self.morphene.rpc.get_transaction(transaction_id, api="database")
return ret
def get_transaction_hex(self, transaction):
""" Returns a hexdump of the serialized binary form of a transaction.
:param dict transaction: transaction
"""
if not self.morphene.is_connected():
raise OfflineHasNoRPCException("No RPC available in offline mode!")
self.morphene.rpc.set_next_node_on_empty_reply(False)
ret = self.morphene.rpc.get_transaction_hex(transaction, api="database")
return ret
def get_current_block_num(self):
""" This call returns the current block number
.. note:: The block number returned depends on the ``mode`` used
when instantiating from this class.
"""
props = self.morphene.get_dynamic_global_properties(False)
if props is None:
raise ValueError("Could not receive dynamic_global_properties!")
if self.mode not in props:
raise ValueError(self.mode + " is not in " + str(props))
return int(props.get(self.mode))
def get_current_block(self, only_ops=False, only_virtual_ops=False):
""" This call returns the current block
:param bool only_ops: Returns block with operations only, when set to True (default: False)
:param bool only_virtual_ops: Includes only virtual operations (default: False)
.. note:: The block number returned depends on the ``mode`` used
when instantiating from this class.
"""
return Block(
self.get_current_block_num(),
only_ops=only_ops,
only_virtual_ops=only_virtual_ops,
morphene_instance=self.morphene
)
    def get_estimated_block_num(self, date, estimateForwards=False, accurate=True):
        """ This call estimates the block number based on a given date

            :param datetime date: block time for which a block number is estimated
            :param bool estimateForwards: when True, extrapolate forwards from an
                early reference block (number 10) instead of backwards from the
                current head block
            :param bool accurate: when True, iteratively refine the linear
                estimate by fetching blocks until the block time is within one
                block interval of ``date``

            .. note:: The block number returned depends on the ``mode`` used
                when instantiating from this class.

        """
        last_block = self.get_current_block()
        date = addTzInfo(date)
        # First pass: purely linear estimate assuming one block every
        # ``block_interval`` seconds.
        if estimateForwards:
            block_offset = 10
            first_block = Block(block_offset, morphene_instance=self.morphene)
            time_diff = date - first_block.time()
            block_number = math.floor(time_diff.total_seconds() / self.block_interval + block_offset)
        else:
            time_diff = last_block.time() - date
            block_number = math.floor(last_block.identifier - time_diff.total_seconds() / self.block_interval)
        if block_number < 1:
            block_number = 1
        if accurate:
            # Second pass: walk the estimate towards ``date`` until the block
            # time differs by less than one block interval (missed block slots
            # make the linear estimate drift).
            if block_number > last_block.identifier:
                block_number = last_block.identifier
            # Seeded above one interval so the loop body runs at least once.
            block_time_diff = timedelta(seconds=10)
            while block_time_diff.total_seconds() > self.block_interval or block_time_diff.total_seconds() < -self.block_interval:
                block = Block(block_number, morphene_instance=self.morphene)
                block_time_diff = date - block.time()
                # Signed number of whole intervals still to move; force at
                # least one step in the right direction so the loop cannot stall.
                delta = block_time_diff.total_seconds() // self.block_interval
                if delta == 0 and block_time_diff.total_seconds() < 0:
                    delta = -1
                elif delta == 0 and block_time_diff.total_seconds() > 0:
                    delta = 1
                block_number += delta
                if block_number < 1:
                    break
                if block_number > last_block.identifier:
                    break
        return int(block_number)
def block_time(self, block_num):
""" Returns a datetime of the block with the given block
number.
:param int block_num: Block number
"""
return Block(
block_num,
morphene_instance=self.morphene
).time()
def block_timestamp(self, block_num):
""" Returns the timestamp of the block with the given block
number as integer.
:param int block_num: Block number
"""
block_time = Block(
block_num,
morphene_instance=self.morphene
).time()
return int(time.mktime(block_time.timetuple()))
    def blocks(self, start=None, stop=None, max_batch_size=None, threading=False, thread_num=8, only_ops=False, only_virtual_ops=False):
        """ Yields blocks starting from ``start``.

            :param int start: Starting block
            :param int stop: Stop at this block
            :param int max_batch_size: When not None, batch calls of are used.
                Cannot be combined with threading
            :param bool threading: Enables threading. Cannot be combined with batch calls
            :param int thread_num: Defines the number of threads, when `threading` is set.
            :param bool only_ops: Only yield operations (default: False).
                Cannot be combined with ``only_virtual_ops=True``.
            :param bool only_virtual_ops: Only yield virtual operations (default: False)

            .. note:: If you want instant confirmation, you need to instantiate
                class:`morphenepython.blockchain.Blockchain` with
                ``mode="head"``, otherwise, the call will wait until
                confirmed in an irreversible block.

        """
        # Let's find out how often blocks are generated!
        current_block = self.get_current_block()
        current_block_num = current_block.block_num
        if not start:
            start = current_block_num
        # Becomes True after the first catch-up pass; later passes use the
        # sequential path to follow the chain tip.
        head_block_reached = False
        # Threaded mode: prefer concurrent.futures when available, otherwise
        # fall back to the local Pool/Worker implementation.
        if threading and FUTURES_MODULE is not None:
            pool = ThreadPoolExecutor(max_workers=thread_num)
        elif threading:
            pool = Pool(thread_num, batch_mode=True)
        if threading:
            # One RPC client per thread so workers never share a connection;
            # the extra clients reuse the already-known working node list.
            morphene_instance = [self.morphene]
            nodelist = self.morphene.rpc.nodes.export_working_nodes()
            for i in range(thread_num - 1):
                morphene_instance.append(mph.MorpheneClient(node=nodelist,
                                                            num_retries=self.morphene.rpc.num_retries,
                                                            num_retries_call=self.morphene.rpc.num_retries_call,
                                                            timeout=self.morphene.rpc.timeout))
        # We are going to loop indefinitely
        latest_block = 0
        while True:
            if stop:
                head_block = stop
            else:
                current_block_num = self.get_current_block_num()
                head_block = current_block_num
            # --- Threaded path: fetch up to thread_num blocks in parallel ---
            if threading and not head_block_reached:
                latest_block = start - 1
                result_block_nums = []
                for blocknum in range(start, head_block + 1, thread_num):
                    # futures = []
                    i = 0
                    if FUTURES_MODULE is not None:
                        futures = []
                    block_num_list = []
                    # freeze = self.morphene.rpc.nodes.freeze_current_node
                    num_retries = self.morphene.rpc.nodes.num_retries
                    # self.morphene.rpc.nodes.freeze_current_node = True
                    self.morphene.rpc.nodes.num_retries = thread_num
                    error_cnt = self.morphene.rpc.nodes.node.error_cnt
                    while i < thread_num and blocknum + i <= head_block:
                        block_num_list.append(blocknum + i)
                        results = []
                        if FUTURES_MODULE is not None:
                            futures.append(pool.submit(Block, blocknum + i, only_ops=only_ops, only_virtual_ops=only_virtual_ops, morphene_instance=morphene_instance[i]))
                        else:
                            pool.enqueue(Block, blocknum + i, only_ops=only_ops, only_virtual_ops=only_virtual_ops, morphene_instance=morphene_instance[i])
                        i += 1
                    if FUTURES_MODULE is not None:
                        try:
                            results = [r.result() for r in as_completed(futures)]
                        except Exception as e:
                            log.error(str(e))
                    else:
                        pool.run(True)
                        pool.join()
                        for result in pool.results():
                            results.append(result)
                        pool.abort()
                    self.morphene.rpc.nodes.num_retries = num_retries
                    # self.morphene.rpc.nodes.freeze_current_node = freeze
                    # Collapse the error counters bumped by the worker clients
                    # into at most one additional error on the shared node.
                    new_error_cnt = self.morphene.rpc.nodes.node.error_cnt
                    self.morphene.rpc.nodes.node.error_cnt = error_cnt
                    if new_error_cnt > error_cnt:
                        self.morphene.rpc.nodes.node.error_cnt += 1
                        # self.morphene.rpc.next()
                    # Deduplicate and drop incomplete results.
                    checked_results = []
                    for b in results:
                        if b.block_num is not None and int(b.block_num) not in result_block_nums:
                            b["id"] = b.block_num
                            b.identifier = b.block_num
                            checked_results.append(b)
                            result_block_nums.append(int(b.block_num))
                    # Re-fetch any block the workers failed to deliver.
                    missing_block_num = list(set(block_num_list).difference(set(result_block_nums)))
                    while len(missing_block_num) > 0:
                        for blocknum in missing_block_num:
                            try:
                                block = Block(blocknum, only_ops=only_ops, only_virtual_ops=only_virtual_ops, morphene_instance=self.morphene)
                                checked_results.append(block)
                                result_block_nums.append(int(block.block_num))
                            except Exception as e:
                                log.error(str(e))
                        missing_block_num = list(set(block_num_list).difference(set(result_block_nums)))
                    from operator import itemgetter
                    # Yield in block order regardless of completion order.
                    blocks = sorted(checked_results, key=itemgetter('id'))
                    for b in blocks:
                        if latest_block < int(b.block_num):
                            latest_block = int(b.block_num)
                        yield b
                # Fill any remaining gap up to the head block sequentially.
                if latest_block <= head_block:
                    for blocknum in range(latest_block + 1, head_block + 1):
                        if blocknum not in result_block_nums:
                            block = Block(blocknum, only_ops=only_ops, only_virtual_ops=only_virtual_ops, morphene_instance=self.morphene)
                            result_block_nums.append(blocknum)
                            yield block
            # --- Batched path: queue max_batch_size calls per RPC round-trip ---
            elif max_batch_size is not None and (head_block - start) >= max_batch_size and not head_block_reached:
                if not self.morphene.is_connected():
                    raise OfflineHasNoRPCException("No RPC available in offline mode!")
                self.morphene.rpc.set_next_node_on_empty_reply(False)
                latest_block = start - 1
                batches = max_batch_size
                for blocknumblock in range(start, head_block + 1, batches):
                    # Get full block
                    if (head_block - blocknumblock) < batches:
                        batches = head_block - blocknumblock + 1
                    # Queue up the batch; the final non-queued call flushes it.
                    for blocknum in range(blocknumblock, blocknumblock + batches - 1):
                        if only_virtual_ops:
                            self.morphene.rpc.get_ops_in_block(blocknum, only_virtual_ops, add_to_queue=True)
                        else:
                            self.morphene.rpc.get_block(blocknum, add_to_queue=True)
                        latest_block = blocknum
                    if batches >= 1:
                        latest_block += 1
                    if latest_block <= head_block:
                        if only_virtual_ops:
                            block_batch = self.morphene.rpc.get_ops_in_block(blocknum, only_virtual_ops, add_to_queue=False)
                        else:
                            block_batch = self.morphene.rpc.get_block(latest_block, add_to_queue=False)
                        if not bool(block_batch):
                            raise BatchedCallsNotSupported()
                        blocknum = latest_block - len(block_batch) + 1
                        if not isinstance(block_batch, list):
                            block_batch = [block_batch]
                        for block in block_batch:
                            if not bool(block):
                                continue
                            block = Block(block, only_ops=only_ops, only_virtual_ops=only_virtual_ops, morphene_instance=self.morphene)
                            block["id"] = block.block_num
                            block.identifier = block.block_num
                            yield block
                            blocknum = block.block_num
            # --- Sequential path: one block at a time (also used to follow
            # the chain tip after the initial catch-up) ---
            else:
                # Blocks from start until head block
                if start is None:
                    start = head_block - 1
                for blocknum in range(start, head_block + 1):
                    # Get full block
                    block = self.wait_for_and_get_block(blocknum, only_ops=only_ops, only_virtual_ops=only_virtual_ops, block_number_check_cnt=5, last_current_block_num=current_block_num)
                    yield block
            # Set new start
            start = head_block + 1
            head_block_reached = True
            if stop and start > stop:
                return
            # Sleep for one block
            time.sleep(self.block_interval)
    def wait_for_and_get_block(self, block_number, blocks_waiting_for=None, only_ops=False, only_virtual_ops=False, block_number_check_cnt=-1, last_current_block_num=None):
        """ Get the desired block from the chain, if the current head block is smaller (for both head and irreversible)
            then we wait, but a maximum of blocks_waiting_for * max_block_wait_repetition time before failure.

            :param int block_number: desired block number
            :param int blocks_waiting_for: difference between block_number and current head and defines
                how many blocks we are willing to wait, positive int (default: None)
            :param bool only_ops: Returns blocks with operations only, when set to True (default: False)
            :param bool only_virtual_ops: Includes only virtual operations (default: False)
            :param int block_number_check_cnt: limit the number of retries when greater than -1
            :param int last_current_block_num: can be used to reduce the number of get_current_block_num() api calls
            :raises BlockWaitTimeExceeded: when the wait budget is exhausted
        """
        if last_current_block_num is None:
            last_current_block_num = self.get_current_block_num()
        elif last_current_block_num - block_number < 50:
            # The cached head is close to (or behind) the target block, so
            # refresh it; far-ahead cached values are kept to save API calls.
            last_current_block_num = self.get_current_block_num()
        if not blocks_waiting_for:
            blocks_waiting_for = max(
                1, block_number - last_current_block_num)
        repetition = 0
        # can't return the block before the chain has reached it (support future block_num)
        while last_current_block_num < block_number:
            repetition += 1
            time.sleep(self.block_interval)
            if last_current_block_num - block_number < 50:
                last_current_block_num = self.get_current_block_num()
            if repetition > blocks_waiting_for * self.max_block_wait_repetition:
                raise BlockWaitTimeExceeded("Already waited %d s" % (blocks_waiting_for * self.max_block_wait_repetition * self.block_interval))
        # block has to be returned properly
        repetition = 0
        cnt = 0
        # Retry until the fetched block actually carries the requested number
        # (a node may briefly return an empty or stale block), up to
        # block_number_check_cnt attempts when that limit is >= 0.
        block = None
        while (block is None or block.block_num is None or int(block.block_num) != block_number) and (block_number_check_cnt < 0 or cnt < block_number_check_cnt):
            try:
                block = Block(block_number, only_ops=only_ops, only_virtual_ops=only_virtual_ops, morphene_instance=self.morphene)
                cnt += 1
            except BlockDoesNotExistsException:
                block = None
                if repetition > blocks_waiting_for * self.max_block_wait_repetition:
                    raise BlockWaitTimeExceeded("Already waited %d s" % (blocks_waiting_for * self.max_block_wait_repetition * self.block_interval))
                repetition += 1
                time.sleep(self.block_interval)
        return block
def ops(self, start=None, stop=None, only_virtual_ops=False, **kwargs):
""" Blockchain.ops() is deprecated. Please use Blockchain.stream() instead.
"""
raise DeprecationWarning('Blockchain.ops() is deprecated. Please use Blockchain.stream() instead.')
def ops_statistics(self, start, stop=None, add_to_ops_stat=None, with_virtual_ops=True, verbose=False):
""" Generates statistics for all operations (including virtual operations) starting from
``start``.
:param int start: Starting block
:param int stop: Stop at this block, if set to None, the current_block_num is taken
:param dict add_to_ops_stat: if set, the result is added to add_to_ops_stat
:param bool verbose: if True, the current block number and timestamp is printed
This call returns a dict with all possible operations and their occurrence.
"""
if add_to_ops_stat is None:
import morphenepythonbase.operationids
ops_stat = morphenepythonbase.operationids.operations.copy()
for key in ops_stat:
ops_stat[key] = 0
else:
ops_stat = add_to_ops_stat.copy()
current_block = self.get_current_block_num()
if start > current_block:
return
if stop is None:
stop = current_block
for block in self.blocks(start=start, stop=stop, only_ops=False, only_virtual_ops=False):
if verbose:
print(block["identifier"] + " " + block["timestamp"])
ops_stat = block.ops_statistics(add_to_ops_stat=ops_stat)
if with_virtual_ops:
for block in self.blocks(start=start, stop=stop, only_ops=True, only_virtual_ops=True):
if verbose:
print(block["identifier"] + " " + block["timestamp"])
ops_stat = block.ops_statistics(add_to_ops_stat=ops_stat)
return ops_stat
    def stream(self, opNames=[], raw_ops=False, *args, **kwargs):
        """ Yield specific operations (e.g. transfers) only
            :param array opNames: List of operations to filter for
            :param bool raw_ops: When set to True, it returns the unmodified operations (default: False)
            :param int start: Start at this block
            :param int stop: Stop at this block
            :param int max_batch_size: When not None, batch calls of are used.
                Cannot be combined with threading
            :param bool threading: Enables threading. Cannot be combined with batch calls
            :param int thread_num: Defines the number of threads, when `threading` is set.
            :param bool only_ops: Only yield operations (default: False)
                Cannot be combined with ``only_virtual_ops=True``
            :param bool only_virtual_ops: Only yield virtual operations (default: False)
            The dict output is formated such that ``type`` carries the
            operation type. Timestamp and block_num are taken from the
            block the operation was stored in and the other keys depend
            on the actual operation.
            .. note:: If you want instant confirmation, you need to instantiate
                      class:`morphenepython.blockchain.Blockchain` with
                      ``mode="head"``, otherwise, the call will wait until
                      confirmed in an irreversible block.
            .. note:: ``opNames=[]`` is a mutable default argument; it is safe
                      here only because the list is never mutated in the body.
            output when `raw_ops=False` is set:
            .. code-block:: js
                {
                    'type': 'transfer',
                    'from': 'initwitness',
                    'to': 'luckyguy',
                    'amount': '1000000.000 MORPH',
                    'memo': 'get rich',
                    '_id': '6d4c5f2d4d8ef1918acaee4a8dce34f9da384786',
                    'timestamp': datetime.datetime(2019, 6, 1, 16, 20, 0, tzinfo=<UTC>),
                    'block_num': 420, 'trx_num': 2, 'trx_id': 'cf11b2ac8493c71063ec121b2e8517ab1e0e6bea'
                }
            output when `raw_ops=True` is set:
            .. code-block:: js
                {
                    'block_num': 22277588,
                    'op':
                    [
                        'transfer',
                        {
                            'from': 'initwitness', 'to': 'luckyguy',
                            'amount': '1000000.000 MORPH',
                            'memo': 'get rich'
                        }
                    ],
                    'timestamp': datetime.datetime(2019, 6, 1, 16, 20, 0, tzinfo=<UTC>)
                }
        """
        for block in self.blocks(**kwargs):
            # Normal blocks carry a "transactions" list; only_ops/virtual-op
            # replies are a bare event container, so wrap them to unify the loop.
            if "transactions" in block:
                trx = block["transactions"]
            else:
                trx = [block]
            block_num = 0
            trx_id = ""
            _id = ""
            timestamp = ""
            for trx_nr in range(len(trx)):
                if "operations" not in trx[trx_nr]:
                    continue
                for event in trx[trx_nr]["operations"]:
                    # Four wire formats are normalized to (op_type, op):
                    # 1) legacy pair: ["transfer", {...}]
                    if isinstance(event, list):
                        op_type, op = event
                        # trx_id = block["transaction_ids"][trx_nr]
                        block_num = block.get("id")
                        _id = self.hash_op(event)
                        timestamp = block.get("timestamp")
                    # 2) appbase dict: {"type": "transfer_operation", "value": {...}}
                    elif isinstance(event, dict) and "type" in event and "value" in event:
                        op_type = event["type"]
                        # Strip the "_operation" suffix used by appbase nodes.
                        if len(op_type) > 10 and op_type[len(op_type) - 10:] == "_operation":
                            op_type = op_type[:-10]
                        op = event["value"]
                        # trx_id = block["transaction_ids"][trx_nr]
                        block_num = block.get("id")
                        _id = self.hash_op(event)
                        timestamp = block.get("timestamp")
                    # 3) history-style wrapper whose "op" is an appbase dict
                    elif "op" in event and isinstance(event["op"], dict) and "type" in event["op"] and "value" in event["op"]:
                        op_type = event["op"]["type"]
                        if len(op_type) > 10 and op_type[len(op_type) - 10:] == "_operation":
                            op_type = op_type[:-10]
                        op = event["op"]["value"]
                        trx_id = event.get("trx_id")
                        block_num = event.get("block")
                        _id = self.hash_op(event["op"])
                        timestamp = event.get("timestamp")
                    # 4) history-style wrapper whose "op" is a legacy pair
                    else:
                        op_type, op = event["op"]
                        trx_id = event.get("trx_id")
                        block_num = event.get("block")
                        _id = self.hash_op(event["op"])
                        timestamp = event.get("timestamp")
                    # NOTE(review): Python precedence parses this as
                    # `not opNames or (op_type in opNames and block_num > 0)`,
                    # i.e. the block_num guard is skipped when no filter is set.
                    # Looks unintended — confirm against upstream before changing.
                    if not bool(opNames) or op_type in opNames and block_num > 0:
                        if raw_ops:
                            yield {"block_num": block_num,
                                   "trx_num": trx_nr,
                                   "op": [op_type, op],
                                   "timestamp": timestamp}
                        else:
                            # Flatten the op payload and attach block metadata.
                            updated_op = {"type": op_type}
                            updated_op.update(op.copy())
                            updated_op.update({"_id": _id,
                                               "timestamp": timestamp,
                                               "block_num": block_num,
                                               "trx_num": trx_nr,
                                               "trx_id": trx_id})
                            yield updated_op
def awaitTxConfirmation(self, transaction, limit=10):
""" Returns the transaction as seen by the blockchain after being
included into a block
:param dict transaction: transaction to wait for
:param int limit: (optional) number of blocks to wait for the transaction (default: 10)
.. note:: If you want instant confirmation, you need to instantiate
class:`morphenepython.blockchain.Blockchain` with
``mode="head"``, otherwise, the call will wait until
confirmed in an irreversible block.
.. note:: This method returns once the blockchain has included a
transaction with the **same signature**. Even though the
signature is not usually used to identify a transaction,
it still cannot be forfeited and is derived from the
transaction contented and thus identifies a transaction
uniquely.
"""
counter = 0
for block in self.blocks():
counter += 1
for tx in block["transactions"]:
if sorted(
tx["signatures"]
) == sorted(transaction["signatures"]):
return tx
if counter > limit:
raise Exception(
"The operation has not been added after %d blocks!" % (limit))
@staticmethod
def hash_op(event):
""" This method generates a hash of blockchain operation. """
if isinstance(event, dict) and "type" in event and "value" in event:
op_type = event["type"]
if len(op_type) > 10 and op_type[len(op_type) - 10:] == "_operation":
op_type = op_type[:-10]
op = event["value"]
event = [op_type, op]
data = json.dumps(event, sort_keys=True)
return hashlib.sha1(py23_bytes(data, 'utf-8')).hexdigest()
    def get_all_accounts(self, start='', stop='', steps=1e3, limit=-1, **kwargs):
        """ Yields account names between start and stop.
            :param str start: Start at this account name
            :param str stop: Stop at this account name
            :param int steps: Obtain ``steps`` ret with a single call from RPC
            :param int limit: stop after yielding this many names (<= 0 means no cap)
            :raises OfflineHasNoRPCException: when not connected to a node
        """
        cnt = 1
        if not self.morphene.is_connected():
            raise OfflineHasNoRPCException("No RPC available in offline mode!")
        lastname = start
        while True:
            # Each RPC page starts at (and includes) the last name already seen.
            ret = self.morphene.rpc.lookup_accounts(lastname, steps)
            for account in ret:
                # Replies may be plain names or dicts depending on the node API.
                if isinstance(account, dict):
                    account_name = account["name"]
                else:
                    account_name = account
                # Skip the page-overlap duplicate (first entry repeats lastname).
                if account_name != lastname:
                    yield account_name
                    cnt += 1
                if account_name == stop or (limit > 0 and cnt > limit):
                    return
                # A page of identical names means no forward progress: stop.
                if lastname == account_name:
                    return
                lastname = account_name
            # A short page means the account list is exhausted.
            if len(ret) < steps:
                return
def get_account_count(self):
""" Returns the number of accounts"""
self.morphene.rpc.set_next_node_on_empty_reply(False)
ret = self.morphene.rpc.get_account_count()
return ret
    def get_account_reputations(self, start='', stop='', steps=1e3, limit=-1, **kwargs):
        """ Yields account reputation between start and stop.
            :param str start: Start at this account name
            :param str stop: Stop at this account name
            :param int steps: Obtain ``steps`` ret with a single call from RPC
            :param int limit: stop after yielding this many entries (<= 0 means no cap)
            :raises OfflineHasNoRPCException: when not connected to a node
        """
        cnt = 1
        if not self.morphene.is_connected():
            raise OfflineHasNoRPCException("No RPC available in offline mode!")
        lastname = start
        self.morphene.rpc.set_next_node_on_empty_reply(False)
        while True:
            # Page through the follow-API reputation list from the last seen name.
            ret = self.morphene.rpc.get_account_reputations(lastname, steps, api="follow")
            for account in ret:
                # Entries may be dicts ({"account": ..., "reputation": ...}) or names.
                if isinstance(account, dict):
                    account_name = account["account"]
                else:
                    account_name = account
                # Skip the page-overlap duplicate; yield the full entry, not the name.
                if account_name != lastname:
                    yield account
                    cnt += 1
                if account_name == stop or (limit > 0 and cnt > limit):
                    return
                # No forward progress within the page: stop.
                if lastname == account_name:
                    return
                lastname = account_name
            # A short page means the list is exhausted.
            if len(ret) < steps:
                return
def get_similar_account_names(self, name, limit=5):
""" Returns limit similar accounts with name as list
:param str name: account name to search similars for
:param int limit: limits the number of accounts, which will be returned
:returns: Similar account names as list
:rtype: list
.. code-block:: python
>>> from morphenepython.blockchain import Blockchain
>>> blockchain = Blockchain()
>>> ret = blockchain.get_similar_account_names("test", limit=5)
>>> len(ret) == 5
True
"""
if not self.morphene.is_connected():
return None
self.morphene.rpc.set_next_node_on_empty_reply(False)
return self.morphene.rpc.lookup_accounts(name, limit)
def find_rc_accounts(self, name):
""" Returns the RC parameters of one or more accounts.
:param str name: account name to search rc params for (can also be a list of accounts)
:returns: RC params
:rtype: list
.. code-block:: python
>>> from morphenepython.blockchain import Blockchain
>>> blockchain = Blockchain()
>>> ret = blockchain.find_rc_accounts(["test"])
>>> len(ret) == 1
True
"""
if not self.morphene.is_connected():
return None
self.morphene.rpc.set_next_node_on_empty_reply(False)
if isinstance(name, list):
account = self.morphene.rpc.find_rc_accounts({'accounts': name}, api="rc")
if bool(account):
return account["rc_accounts"]
else:
account = self.morphene.rpc.find_rc_accounts({'accounts': [name]}, api="rc")
if bool(account):
return account["rc_accounts"][0]
def list_change_recovery_account_requests(
self, start="", limit=1000, order="by_account"):
""" List pending `change_recovery_account` requests.
:param str/list start: Start the listing from this entry.
Leave empty to start from the beginning. If `order` is set
to `by_account`, `start` has to be an account name. If
`order` is set to `by_effective_date`, `start` has to be a
list of [effective_on, account_to_recover],
e.g. `start=['2018-12-18T01:46:24', 'bott']`.
:param int limit: maximum number of results to return (default
and maximum: 1000).
:param str order: valid values are "by_account" (default) or
"by_effective_date".
:returns: list of `change_recovery_account` requests.
:rtype: list
.. code-block:: python
>>> from morphenepython.blockchain import Blockchain
>>> blockchain = Blockchain()
>>> ret = blockchain.list_change_recovery_account_requests(limit=1)
"""
if not self.morphene.is_connected():
return None
self.morphene.rpc.set_next_node_on_empty_reply(False)
requests = self.morphene.rpc.list_change_recovery_account_requests(
{'start': start, 'limit': limit, 'order': order}, api="database")
if bool(requests):
return requests['requests']
def find_change_recovery_account_requests(self, accounts):
""" Find pending `change_recovery_account` requests for one or more
specific accounts.
:param str/list accounts: account name or list of account
names to find `change_recovery_account` requests for.
:returns: list of `change_recovery_account` requests for the
given account(s).
:rtype: list
.. code-block:: python
>>> from morphenepython.blockchain import Blockchain
>>> blockchain = Blockchain()
>>> ret = blockchain.find_change_recovery_account_requests('bott')
"""
if not self.morphene.is_connected():
return None
self.morphene.rpc.set_next_node_on_empty_reply(False)
if isinstance(accounts, str):
accounts = [accounts]
requests = self.morphene.rpc.find_change_recovery_account_requests(
{'accounts': accounts}, api="database")
if bool(requests):
return requests['requests']
| [
"andrewc@pobox.com"
] | andrewc@pobox.com |
da7e9cf99e5e8e2d628496cb45d1bce02e1fe524 | 436acccf18f21fe3fa7d2588fa25184c180e930d | /main.py | a08b882602da70e994455f6ef206d8db9fb1a592 | [] | no_license | Kevin-Escobedo/Jeopardy-Bot | ee097c6e375149b1c3e31f9c2c2087846138602b | 06040a2abf53ae6b0178d397a209fdc0fbdd4f50 | refs/heads/main | 2023-04-09T18:57:35.243238 | 2021-04-21T19:32:26 | 2021-04-21T19:32:26 | 358,014,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,482 | py | import requests
import json
import tweepy
import datetime
import time
import twitterCredentials as tc #File containing api key, secret key, tokens
import jeopardyDatabase
#TO-DO: Refactor
def makeTitle(s: str) -> str:
    '''Capitalizes each word in s'''
    # str.title() would also uppercase the letter after an apostrophe
    # ("world's" -> "World'S"), so capitalize word by word instead.
    return " ".join(word.capitalize() for word in s.split())
def getJeopardyQuestion() -> dict:
    '''Gets a question from the jService API'''
    # One random clue per request; index 0 is the only element returned.
    response = requests.get("https://jservice.io/api/random")
    payload = json.loads(response.text)[0]
    return {
        "answer": payload["answer"],
        "question": payload["question"],
        "value": payload["value"],
        # Category titles come back lowercase; title-case them for the tweet.
        "category": makeTitle(payload["category"]["title"]),
    }
def getValidQuestion(tries: int = 10) -> dict:
    '''Keeps trying to pull a Jeopardy question with no None values'''
    for _ in range(tries):
        candidate = getJeopardyQuestion()
        # A usable question has every field populated.
        if all(candidate.values()):
            return candidate
        time.sleep(5)  # Wait 5 seconds before calling the jService API again
    return None  # Failed after exhausting all tries
if __name__ == "__main__":
    # Question/answer pairs are persisted so the next hourly run can reveal
    # the previous answer as a reply to the original tweet.
    jd = jeopardyDatabase.JeopardyDatabase()
    jd.createTable()
    auth = tweepy.OAuthHandler(tc.API_KEY, tc.API_SECRET_KEY)
    auth.set_access_token(tc.ACCESS_TOKEN, tc.ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    try:
        api.verify_credentials()
        # Reveal the answer to the question tweeted roughly one hour ago.
        lastHour = datetime.datetime.now() - datetime.timedelta(hours = 1)
        lastQuestion = jd.getHourQuestion(lastHour)
        if lastQuestion is not None:
            # Reply to the stored tweet id (index 1); the answer is at index 5.
            api.update_status("Correct Response: {}".format(lastQuestion[5]), lastQuestion[1])
        jq = getValidQuestion()
        # getValidQuestion() returns None when every fetch attempt failed;
        # skip posting rather than crash on a None subscript.
        if jq is not None:
            message = "{} for ${}:\n{}".format(jq["category"], jq["value"], jq["question"])
            api.update_status(message)
            # BUGFIX: keyword was misspelled "screename", which tweepy does not
            # recognize; the correct parameter is screen_name.
            tweetID = api.user_timeline(screen_name = tc.BOT_HANDLE, count = 1)[0].id
            jd.insertQuestion(tweetID, jq["category"], jq["value"], jq["question"], jq["answer"])
    except tweepy.error.TweepError:
        print("Authentication Error")
    jd.close()
| [
"escobedo001@gmail.com"
] | escobedo001@gmail.com |
8d8f12dad8abc695708a624d836491390fd1a623 | c5281bec992956018ee8c4e9f9561eab0698ceeb | /tests/test_utils.py | 1349491e3a2f9bf3324ef1fa7f2c62741365de59 | [
"Apache-2.0"
] | permissive | swagger-atlas/atlas | db2b2e3ee09f9afa51d42c4156d73292922410ac | 64a0a6e3107da9f7cf894880823badfa84e11f25 | refs/heads/master | 2023-01-12T03:48:21.665390 | 2019-09-20T17:24:19 | 2019-09-20T17:24:19 | 180,743,015 | 3 | 1 | Apache-2.0 | 2023-01-03T19:30:16 | 2019-04-11T07:56:48 | Python | UTF-8 | Python | false | false | 4,441 | py | from unittest import mock
import pytest
from atlas.modules import utils, exceptions, constants
class TestGetRefPathArray:
    """Tests for utils.get_ref_path_array (swagger $ref string -> path list)."""
    def test_local_reference(self):
        # A document-local '#/...' reference splits into its path segments.
        assert utils.get_ref_path_array("#/definition/Sample") == ["definition", "Sample"]
    def test_external_reference(self):
        # References into external documents are rejected.
        with pytest.raises(exceptions.ImproperSwaggerException):
            utils.get_ref_path_array("document.json#/sample")
class TestGetRefName:
    """Tests for utils.get_ref_name (last segment of a $ref path)."""
    @mock.patch('atlas.modules.utils.get_ref_path_array')
    def test_get_ref_name(self, patched_ref_array):
        # get_ref_name should return the last element of the resolved path
        # and delegate the parsing to get_ref_path_array.
        patched_ref_array.return_value = ["def", "abc"]
        assert utils.get_ref_name("#/def/abc") == "abc"
        patched_ref_array.assert_called_with("#/def/abc")
@mock.patch('atlas.modules.utils.get_ref_path_array')
class TestResolveReference:
    """Tests for utils.resolve_reference (walk specs dict along a $ref path)."""
    def test_no_reference(self, patched_ref_array):
        # An empty path resolves to the specs themselves.
        patched_ref_array.return_value = []
        specs = {"a": 1}
        assert utils.resolve_reference(specs, "definition") == specs
        patched_ref_array.assert_called_with("definition")
    def test_valid_reference(self, patched_ref_array):
        # A one-segment path returns the nested value.
        patched_ref_array.return_value = ["a"]
        specs = {"a": {"b": 1}}
        assert utils.resolve_reference(specs, "definition") == {"b": 1}
        patched_ref_array.assert_called_with("definition")
    def test_valid_reference_with_recursion(self, patched_ref_array):
        # Multi-segment paths are followed recursively.
        patched_ref_array.return_value = ["a", "b"]
        specs = {"a": {"b": 1}}
        assert utils.resolve_reference(specs, "definition") == 1
        patched_ref_array.assert_called_with("definition")
    def test_invalid_reference(self, patched_ref_array):
        # A path segment missing from the specs raises.
        patched_ref_array.return_value = ["a", "c"]
        specs = {"a": {"b": 1}}
        with pytest.raises(exceptions.ImproperSwaggerException):
            utils.resolve_reference(specs, "definition")
class TestConvertToSnakeCase:
    """Tests for utils.convert_to_snake_case over the common casing styles."""
    def test_with_camel_case(self):
        assert utils.convert_to_snake_case("camelCase") == "camel_case"
    def test_with_pascal_case(self):
        assert utils.convert_to_snake_case("CamelCase") == "camel_case"
    def test_with_normal_string(self):
        # Already-lowercase input passes through unchanged.
        assert utils.convert_to_snake_case("magic") == "magic"
    def test_with_hybrid_string(self):
        # Mixed snake/camel input: only the camel hump gets a new underscore.
        assert utils.convert_to_snake_case("abc_caseLetter") == "abc_case_letter"
class TestGetProjectPath:
    """Tests for utils.get_project_path (current working directory lookup)."""
    @mock.patch('atlas.modules.utils.os')
    def test_get_project_path(self, patched_os):
        # get_project_path should simply report os.getcwd().
        patched_os.getcwd.return_value = "path"
        assert utils.get_project_path() == "path"
class TestOperationIDName:
    """Tests for utils.operation_id_name: URL + HTTP method -> operation id.

    Path parameters ({id}) become positional PARAM_n placeholders; the suffix
    encodes the CRUD action, with GET split into list (no trailing param)
    vs read (trailing param).
    """
    def test_delete_method(self):
        assert utils.operation_id_name("x/{id}/y/{id}", constants.DELETE) == "x_PARAM_1_y_PARAM_2_delete"
    def test_create_method(self):
        assert utils.operation_id_name("x/{id}/y", constants.POST) == "x_PARAM_1_y_create"
    def test_list_method(self):
        assert utils.operation_id_name("x/{id}/y", constants.GET) == "x_PARAM_1_y_list"
    def test_read_method(self):
        assert utils.operation_id_name("x/{id}/y/{id}", constants.GET) == "x_PARAM_1_y_PARAM_2_read"
    def test_update_method(self):
        assert utils.operation_id_name("x/{id}/y/{id}", constants.PUT) == "x_PARAM_1_y_PARAM_2_update"
    def test_patch_method(self):
        assert utils.operation_id_name("x/{id}/y/{id}", constants.PATCH) == "x_PARAM_1_y_PARAM_2_partial_update"
class TestExtractResourceNameFromParam:
    """Tests for utils.extract_resource_name_from_param.

    The resource is inferred either from a '<resource>_id' suffix on the
    parameter name, or (for path params named like an identifier) from the
    URL segment preceding the parameter, singularized.
    """
    def test_with_suffix(self):
        assert utils.extract_resource_name_from_param("pet_id", "") == "pet"
    def test_without_suffix_with_query_params(self):
        # Query params never fall back to URL-based inference.
        assert utils.extract_resource_name_from_param("id", "x/{id}/y/{y_id}/z/{abc}", constants.QUERY_PARAM) is None
    def test_without_suffix_with_path_params_not_in_settings_identifier(self):
        # Param names not recognized as identifiers yield nothing.
        assert utils.extract_resource_name_from_param("abc", "x/{id}/y/{y_id}/z/{abc}", constants.PATH_PARAM) is None
    def test_without_suffix_with_path_params(self):
        # The URL segment before {id} names the resource.
        assert utils.extract_resource_name_from_param("id", "x/{id}/y/{y_id}/z/{abc}", constants.PATH_PARAM) == "x"
    def test_without_suffix_with_first_resource(self):
        # {id} as the very first segment has no preceding resource.
        assert utils.extract_resource_name_from_param("id", "{id}/y/{y_id}/z/{abc}", constants.PATH_PARAM) is None
    def test_without_suffix_with_singular(self):
        # Plural URL segments are singularized ("pets" -> "pet").
        assert utils.extract_resource_name_from_param("id", "pets/{id}/y/{y_id}/z/{abc}", constants.PATH_PARAM) == "pet"
| [
"kush.jain@joshtechnologygroup.com"
] | kush.jain@joshtechnologygroup.com |
a81f92e9f7166f53fdeb2368141fc3350fe0594f | d4c0b1b7ad466448944d9b6ccadbd64125861c35 | /payment/functions/save_order.py | a7d68484777c78447a3fd7be50081c35d61835b6 | [] | no_license | ncpi34/jourdan | 82dc1874f41cdab60bb5b293153ab45d89c675ab | 76094283b02990e60d23a76d26093e5f09391571 | refs/heads/master | 2023-08-05T04:43:00.272063 | 2021-09-10T09:07:21 | 2021-09-10T09:07:21 | 404,347,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py | import os
from typing import IO
from account.backends import User
from cart.cart import Cart
from website.models import Article, FavoritesClient
from order.models import OrderItems, Order
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from django.conf import settings
import logging
db_logger = logging.getLogger('db')
def save_order(cart: Cart, order: Order, user: User):
    """
    Save order

    Persist every cart line as an OrderItems row attached to ``order`` and
    bump the user's per-article favorites counter. Cart lines referencing an
    unknown article are skipped (and logged), not fatal.

    Args:
        cart: iterable of cart line dicts (article_id, quantity, prices, ...)
        order: the already-created Order the items belong to
        user: the buyer, used to track favorite articles

    Returns: True (always)
    """
    db_logger.info("DEBUT payment/functions/save_order")
    for item in cart:
        article = Article.objects.filter(id=int(item['article_id']))
        if article.exists():
            db_logger.info("article existe")
            item_order: OrderItems = OrderItems(
                order=order,
                quantity=item['quantity'],
                article_code=item['article_code'],
                price_with_taxes=item['price_with_taxes'],
                name=item['name'],
                price_type=item['price_type']
            )
            db_logger.info(f"item_order => {item_order}")
            item_order.save()
            # add favorites products for user
            favorites, created = FavoritesClient.objects.get_or_create(
                user=user,
                article=article[0]
            )
            # add quantity to favorites
            qty = 1
            try:
                qty = int(item['quantity'])
            except (KeyError, TypeError, ValueError):
                # BUGFIX: was a bare "except:" that also swallowed system
                # exceptions; only malformed/missing quantities fall back to 1.
                pass
            favorites.quantity += qty
            favorites.save()
            db_logger.info(f"favorites => {favorites}")
        else:
            db_logger.info(f"article n'existe pas => {article}")
    db_logger.info("FIN payment/functions/save_order")
    return True
def send_mail_to_user(request, order: Order, user: User, pdf: IO):
    """
    Send the order-confirmation mail (with the invoice PDF attached)

    Args:
        request: current HTTP request, used only to resolve the site host
        order: db object
        user: db object
        pdf: file path of the invoice to attach

    Returns: void
    """
    domain = request.get_host()
    body = render_to_string('mail/order_email.html', {
        'user': user,
        'order': order,
        'domain': domain
    })
    # The delivery mailbox always gets a copy; the client only when known.
    recipients = [settings.DELIVERY_MAIL]
    if user.email is not None:
        recipients.append(user.email)
    mail = EmailMessage(
        'Commande effectuée sur le site internet',
        body,
        settings.EMAIL_HOST_USER,
        recipients
    )
    mail.attach_file(pdf)
    mail.send(fail_silently=True)
def remove_file(path: str):
    """
    Remove file if it exists.

    The previous exists()/remove() pair was racy (TOCTOU): a file deleted by
    another process between the check and the removal raised. A single guarded
    os.remove() call is atomic from this process's point of view.

    Args:
        path: str

    Returns: void
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        # Already gone: nothing to do.
        pass
| [
"ledain.alexis@gmail.com"
] | ledain.alexis@gmail.com |
bc7adc55472ca0f7d4495f4427cf4e5f885f0825 | eaf2ff7b5ba40595c68f58427d89a90eb7696d84 | /main-4.py | c10a18c041f0cfc27793fe9c4ae1ecbef9c5dcb9 | [] | no_license | CodeRaker/learningMatplotlib | f5081eaccf05f9dbf75986ba154118c587779b6b | 499c7b8682d85639ff4d8e9b65adfbf959529c8b | refs/heads/master | 2020-08-05T06:06:35.352829 | 2019-10-02T20:06:28 | 2019-10-02T20:06:28 | 212,423,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | # https://medium.com/@andykashyap/top-5-tricks-to-make-plots-look-better-9f6e687c1e08
import matplotlib.pyplot as plt
import seaborn as sns
# Apply seaborn's default theme to all subsequent matplotlib plots.
sns.set()
# Toy data: two parallel series plotted against each other.
deaths = [1,2,3,4,5,6,7]
causes = [1,2,3,4,5,3,4]
#plt.style.use("classic")
plt.plot(deaths, causes)
# NOTE(review): passing the string 'ABCDEF' makes matplotlib treat each
# character as a separate legend label — presumably intentional for the demo.
plt.legend('ABCDEF', ncol=2, loc='upper left');
plt.show() | [
"peterglad1985@hotmail.com"
] | peterglad1985@hotmail.com |
9be024aee9f1a12e949aa5184b9342662b5bb608 | edd3da6431675ce3b048b3557f1b410192491558 | /pd_transforming_data.py | 90ec53c3d745b55a3edab79171c06d8906cff808 | [] | no_license | abdlkdrgndz/data-mining | f845a5821f0800a5fd75807766593c97e5221b9f | 9882f60f75acfc55e5dc9cb2248c92e093b461e6 | refs/heads/master | 2023-02-14T21:24:39.921072 | 2021-01-10T01:19:59 | 2021-01-10T01:19:59 | 328,273,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import pandas
# Sample records: three people with a name, an age and a creation date.
myDatas = {
    "name": ['Kadir', 'Kerim', 'Mehmet'],
    "age": [12,15,18],
    "created_at": ["12.10.1988","11.10.1988","13.10.1988"]
}
df = pandas.DataFrame(myDatas)
# First approach: derive a doubled-age column with a list comprehension.
df['age_two'] = [2 * person_age for person_age in df.age]
# Second approach: the same kind of derivation via Series.apply
# (the "transforming data" method).
def transfer(age):
    # Triple the given age.
    return 3 * age
df['age_three'] = df.age.apply(transfer)
print(df)
| [
"abdulkadir.gunduz@modanisa.com"
] | abdulkadir.gunduz@modanisa.com |
02b8e5dee5b57fda9c9099b42c6e685df0976663 | 0cc8c7cfeea7aa44436e4b09769033e7dbe75b93 | /scripts/vgg_face.py | 4b1205caf2566e59682e4c88a2436bacb628393b | [] | no_license | carlylou/IndividualProject-AffWild | 75ab5bd34796f8b5d763f6e41d491cf51f6db192 | f0c861fbc83c0d11c74b8ac3b31d90cb768fe3b4 | refs/heads/master | 2020-04-05T20:45:34.020609 | 2018-11-12T10:25:45 | 2018-11-12T10:25:45 | 157,194,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,316 | py | # import h5py
import tensorflow as tf
# import numpy as np
# import cv2
# import os
# import pdb
import copy
class VGGFace(object):
    """VGG-Face network (TF1 graph-mode) mirroring the original Torch model.

    ``self.layers`` is a declarative list of layer specs; :meth:`setup` walks
    it and builds the graph, recording every intermediate tensor in
    ``self.vars`` as ``(scope_name, tensor, layer_spec)`` tuples.
    Layer-spec formats:
      ('conv', torch_idx, kh, kw, c_in, c_out)   3x3 conv + bias + ReLU
      ('pool', size, size, stride, stride)       2x2 max-pool
      ('linear'/'linear2'/'linear3', torch_idx, n_out, relu_flag)  FC heads
    """
    def __init__(self, batch_size):
        """Record the static batch size and declare the layer table.

        :param batch_size: fixed batch size used to reshape the FC input
        """
        self.params = None
        self.batch_size = batch_size
        # (scope_name, tensor, layer_spec) tuples, in graph order.
        self.vars = []
        self.layers = []
        self.names = [] #[line.strip() for line in file(os.path.join(os.path.dirname(os.path.realpath("__file__")), 'vggface/names.txt'))]
        # Scope names appended during setup(); used to build the restore list.
        self.restore_names = []
        # Comments give the index/op of the original Torch model each spec mirrors.
        # (1): nn.SpatialConvolutionMM(3 -> 64, 3x3, 1,1, 1,1)
        self.layers.append(('conv','1',3,3,3,64))
        # (3): nn.SpatialConvolutionMM(64 -> 64, 3x3, 1,1, 1,1)
        self.layers.append(('conv','3',3,3,64,64))
        # (5): nn.SpatialMaxPooling(2,2,2,2)
        self.layers.append(('pool',2,2,2,2))
        # (6): nn.SpatialConvolutionMM(64 -> 128, 3x3, 1,1, 1,1)
        self.layers.append(('conv','6',3,3,64,128))
        # (8): nn.SpatialConvolutionMM(128 -> 128, 3x3, 1,1, 1,1)
        self.layers.append(('conv','8',3,3,128,128))
        # (10): nn.SpatialMaxPooling(2,2,2,2)
        self.layers.append(('pool',2,2,2,2))
        # (11): nn.SpatialConvolutionMM(128 -> 256, 3x3, 1,1, 1,1)
        self.layers.append(('conv','11',3,3,128,256))
        # (13): nn.SpatialConvolutionMM(256 -> 256, 3x3, 1,1, 1,1)
        self.layers.append(('conv','13',3,3,256,256))
        # (15): nn.SpatialConvolutionMM(256 -> 256, 3x3, 1,1, 1,1)
        self.layers.append(('conv','15',3,3,256,256))
        # (17): nn.SpatialMaxPooling(2,2,2,2)
        self.layers.append(('pool',2,2,2,2))
        # (18): nn.SpatialConvolutionMM(256 -> 512, 3x3, 1,1, 1,1)
        self.layers.append(('conv','18',3,3,256,512))
        # (20): nn.SpatialConvolutionMM(512 -> 512, 3x3, 1,1, 1,1)
        self.layers.append(('conv','20',3,3,512,512))
        # (22): nn.SpatialConvolutionMM(512 -> 512, 3x3, 1,1, 1,1)
        self.layers.append(('conv','22',3,3,512,512))
        # (24): nn.SpatialMaxPooling(2,2,2,2)
        self.layers.append(('pool',2,2,2,2))
        # (25): nn.SpatialConvolutionMM(512 -> 512, 3x3, 1,1, 1,1)
        self.layers.append(('conv','25',3,3,512,512))
        # (27): nn.SpatialConvolutionMM(512 -> 512, 3x3, 1,1, 1,1)
        self.layers.append(('conv','27',3,3,512,512))
        # (29): nn.SpatialConvolutionMM(512 -> 512, 3x3, 1,1, 1,1)
        self.layers.append(('conv','29',3,3,512,512))
        # (31): nn.SpatialMaxPooling(2,2,2,2)
        self.layers.append(('pool',2,2,2,2))
        # (32): nn.View
        # (33): nn.Linear(25088 -> 4096)
        self.layers.append(('linear','33',4096,True))
        # (34): nn.ReLU
        # (35): nn.Dropout(0.500000)
        # (36): nn.Linear(4096 -> 4096)
        self.layers.append(('linear2','36',4096,True))
        # (37): nn.ReLU
        # (38): nn.Dropout(0.500000)
        # (39): nn.Linear(4096 -> 2622)
        self.layers.append(('linear3','39',2622,False))
    def get_unique_name_(self, prefix):
        """Return '<prefix>_<n>' where n counts prior vars with that prefix."""
        id = sum(t.startswith(prefix) for t,_,_ in self.vars)+1
        return '%s_%d'%(prefix, id)
    def add_(self, name, var,layer):
        """Record a produced tensor so it becomes the next layer's input."""
        self.vars.append((name, var,layer))
    def get_output(self):
        """Return the most recently added tensor (current network output)."""
        return self.vars[-1][1]
    def make_var(self, name, shape,trainable):
        """Create/fetch a variable in the current variable scope."""
        return tf.get_variable(name, shape,trainable=trainable)
    # return scope names
    def get_restore_vars(self):
        """Scope names whose weights should be restored from a checkpoint.

        Excludes the three FC heads (trained from scratch) and the names that
        a 'conv_1' prefix match would spuriously pull in ('conv_10'..'conv_13').
        """
        restore_vars = copy.deepcopy(self.restore_names)
        # when match conv_1, get variables to restore will also return 'conv_10', 'conv_11', 'conv_12', 'conv_13'
        remove = ['linear_1', 'linear2_1', 'linear3_1', 'conv_10', 'conv_11', 'conv_12', 'conv_13']
        for item in remove:
            restore_vars.remove(item)
        return restore_vars
    def get_face_fc0(self):
        """First FC activation (fc6-style 4096-d feature, post-dropout)."""
        return self.vars[-4][1]
    def get_face_fc1(self):
        """Second FC activation (fc7-style 4096-d feature)."""
        return self.vars[-3][1]
    def setup(self, image_batch, trainable=False):
        """Build the graph for ``image_batch`` by walking ``self.layers``.

        :param image_batch: input image tensor (NHWC)
        :param trainable: whether the conv weights are trainable
            (the FC heads are always trainable)
        """
        self.vars.append(('input', image_batch, ['input']))
        for layer in self.layers:
            name = self.get_unique_name_(layer[0])
            self.restore_names.append(name)
            if layer[0] == 'conv':
                # 'SAME'-padded 3x3 conv + bias + ReLU under its own scope.
                with tf.variable_scope(name) as scope:
                    h, w, c_i, c_o = layer[2], layer[3], layer[4], layer[5]
                    kernel = self.make_var('weights', shape=[h, w, c_i, c_o], trainable=trainable)
                    conv = tf.nn.conv2d(self.get_output(), kernel, [1] * 4, padding='SAME')
                    biases = self.make_var('biases', [c_o], trainable=trainable)
                    bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
                    relu = tf.nn.relu(bias, name=scope.name)
                    self.add_(name, relu, layer)
            elif layer[0] == 'pool':
                size, size, stride, stride = layer[1], layer[2], layer[3], layer[4]
                pool = tf.nn.max_pool(self.get_output(),
                                      ksize=[1, size, size, 1],
                                      strides=[1, stride, stride, 1],
                                      padding='SAME',
                                      name=name)
                self.add_(name, pool, layer)
            elif layer[0] == 'linear':
                # fc6: flatten conv output, dense + ReLU, then dropout(0.5).
                num_out = layer[2]
                relu = layer[3]
                with tf.variable_scope(name) as scope:
                    input = self.get_output()
                    input_shape = input.get_shape()
                    if input_shape.ndims == 4:
                        # Flatten NHWC conv features to (batch, dim).
                        dim = 1
                        for d in input_shape[1:].as_list():
                            dim *= d
                        feed_in = tf.reshape(input, [self.batch_size, dim])
                    else:
                        feed_in, dim = (input, int(input_shape[-1]))
                    weights = self.make_var('weights', shape=[dim, num_out], trainable=True)
                    biases = self.make_var('biases', [num_out], trainable=True)
                    op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
                    fc = op(feed_in, weights, biases, name=scope.name)
                    ########
                    # NOTE(review): dropout is applied unconditionally, i.e.
                    # also at inference time — confirm this is intended.
                    drop = tf.nn.dropout(fc, 0.5)
                    ########
                    self.add_(name, drop, layer)
            elif layer[0] == 'linear2':
                # fc7: dense + ReLU, no dropout.
                num_out = layer[2]
                relu = layer[3]
                with tf.variable_scope(name) as scope:
                    input = self.get_output()
                    input_shape = input.get_shape()
                    if input_shape.ndims == 4:
                        dim = 1
                        for d in input_shape[1:].as_list():
                            dim *= d
                        feed_in = tf.reshape(input, [self.batch_size, dim])
                    else:
                        feed_in, dim = (input, int(input_shape[-1]))
                    weights = self.make_var('weights', shape=[dim, num_out], trainable=True)
                    biases = self.make_var('biases', [num_out], trainable=True)
                    op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
                    fc = op(feed_in, weights, biases, name=scope.name)
                    ########
                    # drop = tf.nn.dropout(fc,0.5)
                    ########
                    self.add_(name, fc, layer)
            elif layer[0] == 'linear3':
                # fc8: final classifier logits (no ReLU, relu_flag is False).
                num_out = layer[2]
                relu = layer[3]
                with tf.variable_scope(name) as scope:
                    input = self.get_output()
                    input_shape = input.get_shape()
                    if input_shape.ndims == 4:
                        dim = 1
                        for d in input_shape[1:].as_list():
                            dim *= d
                        feed_in = tf.reshape(input, [self.batch_size, dim])
                    else:
                        feed_in, dim = (input, int(input_shape[-1]))
                    weights = self.make_var('weights', shape=[dim, num_out], trainable=True)
                    biases = self.make_var('biases', [num_out], trainable=True)
                    op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
                    fc = op(feed_in, weights, biases, name=scope.name)
                    self.add_(name, fc, layer)
| [
"564458176@qq.com"
] | 564458176@qq.com |
115d00ffaebc79e81a2a2015da2ba91356e18b6e | 1235389bc1ebb52e4d045b00c888234224852a4f | /f_camera_photonics/component_capture.py | c1d78c9e49ba636090990d70619e96bf360c5eee | [] | no_license | jtchiles/camera_photonics | bc2fe6cafa42e8a0d51c8ba99b3ea22bcfcd839b | f186d4cb9a53b9d3544c0677f0ce733cb4313da8 | refs/heads/master | 2021-06-11T19:02:16.223909 | 2020-01-10T19:22:00 | 2020-01-10T19:22:00 | 140,327,371 | 0 | 0 | null | 2018-07-09T18:35:15 | 2018-07-09T18:35:15 | null | UTF-8 | Python | false | false | 981 | py | # Trying to grab images out of cv2 with the USB cam
import cv2
import os
from contextlib import contextmanager
import numpy as np
@contextmanager
def open_camera(camera_port=0):
    """Yield a cv2.VideoCapture opened on *camera_port*, dropping it after use."""
    capture = cv2.VideoCapture(camera_port)
    yield capture
    del capture
## Low level conditioning
# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 1
def get_frames(nframes=1):
    """Capture ``nframes`` frames from the default camera, after discarding
    ``ramp_frames`` warm-up frames for auto-exposure to settle.

    Returns a list of frames as returned by ``VideoCapture.read()``.
    """
    with open_camera() as camera:
        for i in range(ramp_frames):
            camera.read()
        frame_list = []
        for i in range(nframes):
            # NOTE(review): the read() success flag is ignored, so a failed
            # capture appends None — confirm callers tolerate that.
            _, img = camera.read()
            frame_list.append(img)
        return frame_list
def single_shot():
    """Capture and return a single frame from the default camera."""
    (frame,) = get_frames(1)
    return frame
def video_mean(nframes=2):
    """Capture *nframes* consecutive frames and return their pixel-wise mean."""
    frames = get_frames(nframes)
    return np.asarray(frames).mean(axis=0)
if __name__ == '__main__':
    # Smoke test: take one picture and display it.
    print('Called')
    # Imported lazily so merely importing this module has no package side effects.
    from f_camera_photonics import cvshow
    print('Taking pic')
    img = single_shot()
    print('Displaying')
    cvshow(img)
print('Complete') | [
"alexander.tait@nist.gov"
] | alexander.tait@nist.gov |
ff3e75465d6bc74082977d0011083bd7cb9d2fa1 | 8dc745854d73e362aa60747b3ab1b5a0dd975902 | /demo/funs/varying_args.py | 95be35fceef5366dfe3c457b3dac4f7b9e356ad3 | [] | no_license | srikanthpragada/PYTHON_27_AUG_2020 | 08a5898fe1a0ae110b74897ce6cce6595bdfce45 | af2aebbb0d83c5e8f381cdda844ab66d2362019c | refs/heads/master | 2022-12-30T10:12:56.688671 | 2020-10-09T14:20:43 | 2020-10-09T14:20:43 | 291,730,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | def wish(*names, message="Hi"):
for n in names:
print(message, n)
wish("Bill", "Steve", message="Hello")
wish("Bill", "Steve", "Mike")
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
ef45fbb0a58276e6b49fab00d2fbbc90dbba4fd6 | 20fa81fb1ba9c6e77be0f6e115ff643c4a608146 | /creational/abstract_factory.py | 9ae16b8d1eebf4d8c9ef1b2551d206b197873fe7 | [
"MIT"
] | permissive | GustavoBoaz/projeto_Patterns_Python | c6bd344a308c0f29c21a435c03d582226f434ba1 | b46c6dd6e355fce8f769b76c432ac8a00f236438 | refs/heads/master | 2022-09-06T19:15:57.183938 | 2019-11-07T19:15:57 | 2019-11-07T19:15:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,011 | py | """
Abstract Factory é um padrão de design criacional que permite produzir
famílias de objetos relacionados sem especificar suas classes concretas.
Como Implementar:
1. Mapeie uma matriz de tipos de produtos distintos versus variantes
desses produtos.
2. Declare interfaces abstratas do produto para todos os tipos de produtos.
Em seguida, faça com que todas as classes de produtos concretas
implementem essas interfaces.
3. Declare a interface abstrata de fábrica com um conjunto de métodos de
criação para todos os produtos abstratos.
4. Implemente um conjunto de classes de fábrica de concreto, uma para cada
variante de produto.
5. Crie o código de inicialização de fábrica em algum lugar do aplicativo.
Ele deve instanciar uma das classes de fábrica de concreto, dependendo da
configuração do aplicativo ou do ambiente atual. Passe esse objeto de
fábrica para todas as classes que constroem produtos.
6. Examine o código e encontre todas as chamadas diretas para os construtores
de produtos. Substitua-os por chamadas para o método de criação apropriado
no objeto de fábrica.
"""
from abc import ABC, abstractmethod
#===========================================Definição de classes abstratas
class ProductA(ABC):
    """Abstract interface for the "A" product family.

    Concrete variants (ProductA1, ProductA2 below) implement build_productA.
    """
    @abstractmethod
    def build_productA(self) -> str:
        """Build the product and return a human-readable description string."""
        pass
class ProductB(ABC):
    """Abstract interface for the "B" product family.

    Concrete variants (ProductB1, ProductB2 below) implement build_productB.
    """
    @abstractmethod
    def build_productB(self) -> str:
        """Build the product and return a human-readable description string."""
        pass
class AbstractFactory(ABC):
    """Abstract factory: one creation method per product family.

    Concrete factories (Factory1, Factory2 below) return matching variants
    of ProductA and ProductB, so client code never names concrete classes.
    """
    @abstractmethod
    def create_productA(self) -> ProductA:
        """Create and return a ProductA variant."""
        pass
    @abstractmethod
    def create_productB(self) -> ProductB:
        """Create and return a ProductB variant."""
        pass
#=========================================Definição dos Produtos concretos
class ProductA1(ProductA):
    """Variant 1 of the "A" product family."""
    def build_productA(self) -> str:
        message = "Concrete ProductA1 Build!"
        return message
class ProductB1(ProductB):
    """Variant 1 of the "B" product family."""
    def build_productB(self) -> str:
        message = "Concrete ProductB1 Build!"
        return message
class ProductA2(ProductA):
    """Variant 2 of the "A" product family."""
    def build_productA(self) -> str:
        message = "Concrete ProductA2 Build!"
        return message
class ProductB2(ProductB):
    """Variant 2 of the "B" product family."""
    def build_productB(self) -> str:
        message = "Concrete ProductB2 Build!"
        return message
#=========================================Definição dos Fabricas concretas
class Factory1(AbstractFactory):
    """Concrete factory producing the "1" product variants."""
    def create_productA(self) -> ProductA:
        product = ProductA1()
        return product
    def create_productB(self) -> ProductB:
        product = ProductB1()
        return product
class Factory2(AbstractFactory):
    """Concrete factory producing the "2" product variants."""
    def create_productA(self) -> ProductA:
        product = ProductA2()
        return product
    def create_productB(self) -> ProductB:
        product = ProductB2()
        return product
#======================================================Definição do Cliente
def af_client(abstract_factory: AbstractFactory) -> None:
    """Interactive client loop: repeatedly asks which product to build.

    "a"/"A" builds product A, "b"/"B" builds product B, "c"/"C" exits.
    Works against any AbstractFactory implementation, so this client is
    independent of the concrete product classes.
    """
    while True:
        try:
            # Accept upper- or lower-case answers (the prompt shows capitals,
            # but the original only matched lowercase).
            option = input("Criador produto [A][B] | Exit[C]: ").strip().lower()
            if option == "a":
                print(abstract_factory.create_productA().build_productA())
            elif option == "b":
                print(abstract_factory.create_productB().build_productB())
            elif option == "c":
                break
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still terminate the program.
            print("Option false")
            continue
def main_af():
    # Entry point: choose which concrete factory to hand to af_client.
    #   1 -> Factory1 (ProductA1/ProductB1)
    #   2 -> Factory2 (ProductA2/ProductB2)
    #   0 -> quit
    # Non-numeric input makes int() raise ValueError, which is reported.
    while True:
        try:
            option = int(input("Fabrica option [1][2] | Exit[0]: "))
            if(option == 1):
                af_client(Factory1())
            elif(option == 2):
                af_client(Factory2())
            elif(option == 0):
                break
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt here.
            print("Option false")
continue | [
"gustavo.boaz@hotmail.com"
] | gustavo.boaz@hotmail.com |
563e7be8d8c11aab5ab1f381e5abf2ed03a6b4d2 | 7ff37f8df377e30f09e5947c6097a7db54e8fab5 | /WebApp/model/model.py | 7d7f6b8f10cf10548fd969cc1c7914afe0deb767 | [] | no_license | jamesnelly/EmergingTechnologies-project | baa4980505f87bc6fff2771a5edfd279f45550f1 | b10a9048cc8bb9b147e118488f531cb3acade4f0 | refs/heads/master | 2020-08-06T13:11:57.695949 | 2019-12-13T18:49:26 | 2019-12-13T18:49:26 | 212,987,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #adapted from https://www.youtube.com/watch?v=n5a0WBIQitI
# Load the MNIST digit dataset (train/test splits; downloads on first use).
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
import tensorflow as tf
from keras import models
from keras import layers
import keras as kr
import numpy as np
import matplotlib.pyplot as plt
# Create the sequential model (no layers added yet at this point).
mod = kr.models.Sequential()
| [
"g00346996@gmit.ie"
] | g00346996@gmit.ie |
aae0e6098f5fffd6f5df5e9109899e0ddfcf5d9b | 5de3f612df0dbda712b39403dbafb0617e597651 | /devel/lib/python2.7/dist-packages/pal_control_msgs/__init__.py | e21ec5fdbefcfd4eaadd1be96174a29e086c69d8 | [] | no_license | AdriiTrujillo/tiago_public_ws | 1bd62d51c2eb694d07db83738f7bebd582d8126c | 6eaeabd1ec177df837b81fd9f42887318128766b | refs/heads/main | 2023-04-03T13:09:09.749190 | 2021-04-01T10:05:43 | 2021-04-01T10:05:43 | 350,026,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | /home/adrii/tiago_public_ws/devel/.private/pal_control_msgs/lib/python2.7/dist-packages/pal_control_msgs/__init__.py | [
"adrii.trujillo@gmail.com"
] | adrii.trujillo@gmail.com |
a56fa3ef8236ac18d30826f76d2f59ca41e55070 | 1985271f6d8486de3ab503a6e8574e2c70a30b1b | /feature_engineering/plans/get_fe_diff_div.py | c31e4495a92018588dcef57762136a55c327beb6 | [] | no_license | shuangyumo/kdd-cup-2019-8th-solution | 15ca666e2f9af1e2c8ad4d7295ba500a40b17ca9 | f13fd8e1d8309de00476bd884b39716ffe4c3ced | refs/heads/master | 2022-03-11T11:32:35.802745 | 2019-10-05T16:45:08 | 2019-10-05T16:45:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def get_fe_diff_div(df):
    """Derive pairwise difference/ratio features between the top recommendation
    (index 0) and each of the next ``top_m - 1`` recommendations.

    Expects columns ``recom_eta_<i>``, ``recom_distance_<i>`` and
    ``recom_price_<i>`` for i in 0..top_m-1.  Returns a new DataFrame with the
    derived columns; ``df`` itself is not modified.  A constant 0.01 is added
    to every denominator to avoid division by zero.
    """
    feats = pd.DataFrame([])
    top_m = 2
    for i in range(1, top_m):
        eta_0, eta_i = df['recom_eta_0'], df[f'recom_eta_{i}']
        dist_0, dist_i = df['recom_distance_0'], df[f'recom_distance_{i}']
        price_0, price_i = df['recom_price_0'], df[f'recom_price_{i}']
        # differences between the top recommendation and recommendation i
        feats[f'diff_eta_0_{i}'] = eta_0 - eta_i
        feats[f'diff_distance_0_{i}'] = dist_0 - dist_i
        feats[f'diff_price_0_{i}'] = price_0 - price_i
        # ratios of the top recommendation to recommendation i
        feats[f'div_eta_0_{i}'] = eta_0 / (eta_i + 0.01)
        feats[f'div_distance_0_{i}'] = dist_0 / (dist_i + 0.01)
        feats[f'div_price_0_{i}'] = price_0 / (price_i + 0.01)
        # within-recommendation-i ratios
        feats[f'div_price_eta_{i}_{i}'] = price_i / (eta_i + 0.01)
        feats[f'diff_price_distance_{i}_{i}'] = dist_i / (0.01 + price_i)
        feats[f'diff_distance_eta_{i}_{i}'] = dist_i / (0.01 + eta_i)
    return feats
| [
"noreply@github.com"
] | shuangyumo.noreply@github.com |
963a2d233e978b78dca560f8230b63663653446b | 6da9add72c81a230f2c63dcc73420a28304523ce | /clickx3/utils/constants/phone_number_prefix.py | 36d1124efa30f4ca380cba84c8e6865a2082d270 | [] | no_license | YeKelvin/clickx3-toolkit | e449eccae0d6ce2f69ffbf1380a1e410f562fac8 | 09336bf2c5c898625fbc90ddcf31c9794ca11da0 | refs/heads/master | 2023-06-04T15:09:06.005076 | 2021-06-28T12:07:08 | 2021-06-28T12:07:08 | 251,491,571 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : phone_number_prefix.py
# @Time : 2019/8/30 15:22
# @Author : Kelvin.Ye
from itertools import chain
# China Mobile (CMCC) mobile-number prefixes.
CMCC_CODE = [
    '134', '135', '136', '137', '138', '139',
    '147',
    '150', '151', '152', '157', '158', '159',
    '170', '172', '178',
    '182', '183', '184', '187', '188',
]
# China Unicom (CUCC) prefixes.
CUCC_CODE = ['130', '131', '132', '145', '155', '156', '170', '171', '175', '176', '185', '186']
# China Telecom prefixes.
TELECOM_CODE = ['133', '149', '153', '158', '170', '173', '177', '178', '180', '181', '182', '189', '199']
# All carrier prefixes combined (prefixes shared by carriers appear more than once).
MOBILENO_PREFIX = list(chain(CMCC_CODE, CUCC_CODE, TELECOM_CODE))
| [
"testmankelvin@163.com"
] | testmankelvin@163.com |
486e48f837ce645846b31ff5ce9ea96f338a5c11 | 391437a03dc30a21ef7cc35d1b51f888da720617 | /test/travis_test_wall_trace.py | b1f54ae1ad06095c25b8fd98e155321f9372cca3 | [] | no_license | takasku/pimouse_run_corridor | 9e60eecd797488901790402b67ca2153d5e8557d | d40b966af1c55b15430e49005e90c68634252c81 | refs/heads/master | 2020-04-25T15:25:44.182451 | 2019-03-06T09:28:44 | 2019-03-06T09:28:44 | 172,856,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | #!/usr/bin/env python
#encoding: utf8
import unittest, rostest
import rosnode, rospy
import time
class WallTraceTest(unittest.TestCase):
    """Integration test driven through the Raspberry Pi Mouse device files:
    fake light-sensor values are written to /dev/rtlightsensor0 and the
    motor commands the node produces are read back from /dev/rtmotor_raw_*."""
    def set_and_get(self,lf,ls,rs,rf):
        # Write one sensor line; the device line order is rf rs ls lf
        # (reversed relative to this method's parameter order).
        with open("/dev/rtlightsensor0","w") as f:
            f.write("%d %d %d %d\n" % (rf,rs,ls,lf))
        # Give the node time to react.  NOTE(review): 0.3 s is empirical.
        time.sleep(0.3)
        # Read the resulting raw motor commands (left, right).
        with open("/dev/rtmotor_raw_l0","r") as lf,\
             open("/dev/rtmotor_raw_r0","r") as rf:
            left = int(lf.readline().rstrip())
            right = int(rf.readline().rstrip())
        return left, right
    def test_io(self):
        # A large front reading must stop both motors.
        left, right = self.set_and_get(400,100,100,0)
        self.assertTrue(left == 0 and right == 0,"cannot stop")
        # A large side reading alone must not stop the robot.
        left, right = self.set_and_get(0,5,1000,0)
        self.assertTrue(left == right != 0,"stop wrongly by side sensors")
        # Side readings above/below the target must make the robot curve.
        left, right = self.set_and_get(0,10,0,0)
        self.assertTrue(left < right ,"do not curve to left")
        left, right = self.set_and_get(0,200,0,0)
        self.assertTrue(left > right ,"do not curve to right")
        # An on-target side reading must drive both wheels equally.
        left, right = self.set_and_get(0,5,0,0)
        self.assertTrue(0 < left == right ,"curve wrongly")
if __name__ == '__main__':
    # Wait for the node under test / simulated devices to come up before
    # injecting sensor values.  NOTE(review): the 3 s delay is empirical.
    time.sleep(3)
    rospy.init_node('travis_test_wall_trace')
    rostest.rosrun('pimouse_run_corridor','travis_test_wall_trace',WallTraceTest)
| [
"fjkks5is@engs.tamagawa.ac.jp"
] | fjkks5is@engs.tamagawa.ac.jp |
8c167de60486df2e7f98815c805c1cf0e63930fd | 8e340be7072bb9cb8b8bbe427e259fba51d54192 | /MyroName.py | 751ef00b50a8e69be96929e4ea45e71c0db29b6d | [] | no_license | KBrownASC/allstarcode | a80141c514cac662a9293655cdec3c1656dc53c1 | e22a0c2fa19e1e29ec6cfe273a9cb82ec6ec3865 | refs/heads/master | 2021-01-20T22:19:48.951183 | 2016-08-09T16:00:14 | 2016-08-09T16:00:14 | 63,092,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from Myro import *
# Connect Myro to the simulator instead of a physical robot.
init("sim")
#Functions
def drawK(size):
    # Draw the letter "K" with the robot's pen: a vertical stroke scaled by
    # `size` (up 2*size, back down 1*size), then the two diagonal arms.
    # NOTE(review): turn/forward semantics come from the Myro API — confirm
    # duration/speed units against the Myro documentation.
    turnBy(90,"deg")
    forward(2,size)
    backward(1,size)
    turnBy(-35,"deg")
    forward(1,size)
    backward(1,size)
    turnBy(-95,"deg")
    forward(1,size+.2)
    turnBy(30,"deg")
def drawB(size):
    # Draw the letter "B": a vertical stroke, then two bumps made by running
    # the wheels at different speeds (motors) so the robot arcs.
    # NOTE(review): `size` is never used — all stroke lengths are hard-coded;
    # confirm whether scaling was intended (cf. drawK, which does use it).
    turnBy(90,"deg")
    forward(2,1)
    motors(30,-3,1)
    turnBy(270,"deg")
    motors(30,-3,1)
# Main script: draw with the pen down.  The "K" drawing is currently
# commented out, so only the "B" is drawn.
penDown()
#drawK(1)
penUp()
penDown()
drawB(8)
| [
"Keroneobrownjr@gmail.com"
] | Keroneobrownjr@gmail.com |
406c110b30acb23f4d2b89fa97603e853e4b9c26 | 5d263af3a57e0eaa1dfc55df964e61ed74208bb2 | /feature_extraction/extract_features.py | 811abb12454c69f6c67627835c5d8386ede54ef6 | [] | no_license | chenyr0021/multimodal-human-action-recognotion | 1c5374c93050f56eb00f87d00aea400f0158bafb | bf69abb2355de83b53f652416f29bd832ced5afc | refs/heads/main | 2023-02-04T03:22:42.611616 | 2020-12-25T06:35:39 | 2020-12-25T06:35:39 | 318,051,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-load_model', type=str)
parser.add_argument('-root', type=str)
parser.add_argument('-gpu', type=str)
parser.add_argument('-save_dir', type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]='0,1,2,3'
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import datasets, transforms
import videotransforms
import numpy as np
from pytorch_i3d import InceptionI3d
from salads_dataset import Salads50_without_label
def run(root, load_model, save_dir, batch_size=1):
    """Extract I3D features for every video in the dataset and save one
    .npy file per video.

    root       -- dataset directory passed to Salads50_without_label
    load_model -- path to pretrained I3D weights (a torch state dict)
    save_dir   -- directory where <video name>.npy feature files are written
    batch_size -- videos per batch; the loop below assumes 1 (it uses
                  name[0] and writes a single file per iteration)
    Requires CUDA; the model is wrapped in DataParallel over GPUs 0-3.
    """
    # setup dataset
    test_transforms = transforms.Compose([transforms.RandomCrop((224, 224)), transforms.ToTensor()])
    dataset = Salads50_without_label(root, test_transforms)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0)
    print('load model...')
    i3d = InceptionI3d(400, in_channels=3)
    i3d.load_state_dict(torch.load(load_model))
    i3d.cuda()
    i3d = nn.DataParallel(i3d, device_ids=[0,1,2,3])
    i3d.eval()  # Set model to evaluate mode
    # Iterate over data.
    print('processing data...')
    for inputs, name in dataloader:
        # get the inputs
        # if os.path.exists(os.path.join(save_dir, name[0]+'.npy')):
        #     # print(os.path.join(save_dir, name[0]+'.npy'), ' already exist.')
        #     # continue
        b,c,t,h,w = inputs.shape
        print(name[0], inputs.shape)
        features = []
        # Slide a 21-frame temporal window over the clip, one feature per position.
        for start in range(t-20):
            ip = Variable(torch.from_numpy(inputs.numpy()[:,:,start:start+21]).cuda())
            out = i3d.module.extract_features(ip).cpu()
            features.append(out.squeeze(0).detach().numpy())
        # Concatenate along axis 1 (presumably the temporal axis — confirm
        # against InceptionI3d.extract_features) and save under the video name.
        np_feature = np.concatenate(features, axis=1)
        print(np_feature.shape)
        np.save(os.path.join(save_dir, name[0]), np_feature)
        print('save %s finished.' % os.path.join(save_dir, name[0]))
if __name__ == '__main__':
    # need to add argparse
    # NOTE(review): the -load_model/-root/-gpu/-save_dir options parsed at the
    # top of the file are ignored here; all paths are hard-coded.
    run(root='/home/backup/data_cyr/assemble_ori', load_model='./models/rgb_imagenet.pt', save_dir='/home/backup/data_cyr/assemble/features_video')
| [
"chenyiran0021@163.com"
] | chenyiran0021@163.com |
add4b75288e365aec578df927975b3ca9f0318ec | b77dc17ee7ebad73e1028381739e01f708fb6c8b | /ppygui/doc/tut3.py | 44c2785da27ae01aa643b6d0c2e979d5a2da0b79 | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | typerlc/ankice-deps | 6c97bee1a926fc539b2f2e8ec345244e6188c6f1 | 4267af31c56ff2f51be65cad345fc7100ec53e78 | refs/heads/master | 2016-09-01T21:43:41.904988 | 2009-06-24T15:15:12 | 2009-06-24T15:15:12 | 235,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import ppygui as gui
# import the gui namespace
class MainFrame(gui.CeFrame):
    """Application main frame: a text entry, a "Copy" button and a label,
    stacked vertically."""
    def __init__(self):
        gui.CeFrame.__init__(self, title="Hello World")
        # Child controls.
        self.text_entry = gui.Edit(self)
        self.button = gui.Button(self, "Copy")
        self.label = gui.Label(self)
        # Stack the controls in a vertical box and make it the frame sizer.
        box = gui.VBox()
        for widget in (self.text_entry, self.button, self.label):
            box.add(widget)
        self.sizer = box
if __name__ == '__main__':
    # Create an application bound to our main frame instance, then start the
    # GUI event loop.
    app = gui.Application(MainFrame())
    app.run()
| [
"richardc@pippin.(none)"
] | richardc@pippin.(none) |
9362ea26f839e1ffad4ad0b283e97271312b1665 | b2f63110ed9b2be2d51ab88dea551a50eb0ffe7b | /easy/string_mask.py | 8bb9e48380b86b9c243d9038a2278b671d80cb09 | [] | no_license | Nevermind7/codeeval | 07f321c855b850e72f3b18352d7ce4f55b0138da | be9cb36fd8fbac86d3fc1d33095c201e0be8ba9a | refs/heads/master | 2021-01-19T06:43:50.113601 | 2016-07-26T08:29:54 | 2016-07-26T08:29:54 | 63,690,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | import sys
# For each "word mask" line of the input file named on the command line,
# uppercase the characters of `word` wherever the mask has a '1', and print
# the result.  A `with` block replaces the original manual open()/close(),
# which leaked the file handle if any line failed to parse.
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        word, code = test.strip().split()
        encoded = ''.join(ch.upper() if bit == '1' else ch
                          for ch, bit in zip(word, code))
        print(encoded)
| [
"esser@anvo-systems-dresden.com"
] | esser@anvo-systems-dresden.com |
e9eb2c81dd1c2ed4a7921ec50c1f4ca9e1c1f484 | ab83ce38d59c37c8a55a4e5bd1f49bc2c2538777 | /__env__py3Rest/bin/python-config | b3e1a396ad5ca53df75755e6f67b4d2e6ecc4af8 | [] | no_license | Archu-S-M/Py3Rest | 3df5959d30d96358af97f8434e72c4b4af897889 | d455f5dec45577d4625ca5f5977f9248834fbd26 | refs/heads/master | 2021-01-19T00:21:05.019136 | 2017-04-04T18:26:00 | 2017-04-04T18:26:00 | 87,152,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | #!/var/www/html/Py3Rest/__env__py3Rest/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"archusm007@gmail.com"
] | archusm007@gmail.com | |
04cdfacc94cba4b6547b23c48613e764fff8eea7 | c04766334a0c9bec3583c707ac177aedc3247fbb | /example/report/test/SeoulCityDead.py | 3c3e2530b731292973656ffb451b1a24fb1bb2bb | [] | no_license | realwater20/city-seoul | 6abe870447cedcfc29315ebc2f28e6d878dd4cd5 | 8f889a2667de554c83e76492f08c47838198caee | refs/heads/master | 2023-04-07T23:12:30.598955 | 2021-04-21T06:00:05 | 2021-04-21T06:00:05 | 360,049,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,982 | py | # -*- coding: utf-8 -*-
# 서울시 월별 연간 사망자 수 집계
import numpy as np
import matplotlib.pyplot as plt
from operator import eq
import csv
def analyzeDie():
    # Plot Seoul's monthly death counts as grouped bars, one colored series
    # per year (2010-2016).
    # Read the CSV: rows of (date, count), where the first 4 characters of
    # `date` are the year.
    pieces = []
    datafile = '.\\csv\\SeoulDeadReport.csv'
    with open(datafile, 'rt') as f :
        data = csv.reader(f, delimiter = ',')
        for d in data:
            pieces.append(d)
    # Regroup the flat rows into one list of monthly counts per year.
    bf_date = ''
    dieCol = 0
    dieRow = 0
    dieArray = [[0 for col in range(0)] for row in range(7)]  # 7 empty year buckets
    for date, dieCnt in pieces:
        if eq(bf_date, '') == True :
            bf_date = date[:4]
        elif eq(bf_date, date[:4]) == False : # year changed: move to the next bucket
            bf_date = date[:4]
            dieCol += 1
            dieRow = 0
        # Rows are years, columns are months.
        # NOTE(review): dieRow is never incremented, so insert(0, ...) always
        # prepends — months end up in reverse order within each year; confirm.
        dieArray[dieCol].insert(dieRow, dieCnt)
    month = ['1','2','3','4','5','6','7','8','9','10','11','12']
    year = ['2010', '2011', '2012', '2013', '2014', '2015', '2016']
    color = ['b','g','r','c','m','y','k']
    n_groups = 12 # number of x-axis groups (months)
    index = np.arange(n_groups)
    bar_width = 0.1 # width of a single bar
    opacity = 0.4
    error_config = {'ecolor': '0.3'}
    width_g = 0
    cnt = 0
    # One bar series per year, shifted horizontally so the years sit side by side.
    for yearDieArray in dieArray:
        plt.bar(index+width_g-0.2, yearDieArray, bar_width,
                alpha=opacity,
                color=color[cnt],
                error_kw=error_config,
                label=year[cnt],
                align='edge')
        width_g = width_g + bar_width
        cnt = cnt + 1
    plt.xlabel('Year') # x-axis label
    plt.ylabel('Count') # y-axis label
    plt.title('Analyze Die Graph') # main title
    plt.xticks(index + bar_width, month)
    plt.legend()
    plt.tight_layout()
    plt.show()
if __name__== '__main__':
analyzeDie() | [
"realwater@staby.co.kr"
] | realwater@staby.co.kr |
d953ca400bd19d3dfe6e933f1f88124d428bbb14 | 4686605ad3c277a0776a653c6b3528db5313ec9c | /PY/PY1.py | 1b819831f1a87ac0b10df6ccab810f61671ef527 | [] | no_license | zhukongh123/- | 4f03c1f20fc5c369aa14d10cdb302bac08b850b9 | 6dfdd84b66fe82bd4b2113c6270d98ea1705401b | refs/heads/master | 2020-11-26T19:10:58.052438 | 2019-12-20T06:52:37 | 2019-12-20T06:52:37 | 229,181,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | d = {"name":"小明","sex":"男","age":18}
d.clear()  # empty the dict in place
print(d)   # prints "{}"
| [
"364144274@qq.com"
] | 364144274@qq.com |
0404f601868205f4d85cf25071622a33e7bd409e | 85f96ed9ab5510ec1120a0687c6de5c4a8774a9f | /RestAPI/config.py | 4c6208b12f3b361ae2c9f9c7f0cbe7df16fd8a18 | [] | no_license | ronistone/toilter-APP | cc40e7e65ad68845f9d1a58b9f955dd29a3a1e13 | da211df826045a5cf4b463ebd82fddce3949ee25 | refs/heads/master | 2020-07-11T12:34:06.718155 | 2017-06-14T00:41:36 | 2017-06-14T00:41:36 | 94,269,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | DEBUG = True
# Flask / SQLAlchemy settings for the REST API (development profile).
DEVELOPMENT = True
SQLALCHEMY_DATABASE_URI = 'postgres:///restapi'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'a1b2c3d4e5f6g7h8j9k10l11'  # NOTE(review): secret committed to source control
BUNDLE_ERRORS = True # related to Flask-RESTful errors, see docs
ERROR_404_HELP = False
| [
"ronistonejunior@gmail.com"
] | ronistonejunior@gmail.com |
e25fb293e8841b87c8979b159fe4daadf9eed51e | 8ed215ee731bc8c55eabdc66ee028a43771510bc | /tasks-deploy/rsa/check.py | 5bac71f193f848b84031ce3a62e0ff96d6fb6acd | [
"MIT"
] | permissive | irdkwmnsb/lkshl-ctf | c6c0b0ae58653d3d7c427073221043d2adea212c | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | refs/heads/master | 2020-03-23T22:22:23.499985 | 2019-02-22T13:29:51 | 2019-02-22T13:29:51 | 142,172,055 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,868 | py | def check(attempt, context):
    # Each participant owns one flag, chosen by participant id modulo the
    # flag-list length; an exact match is a correct solve.
    if attempt.answer == flags[attempt.participant.id % len(flags)]:
        return Checked(True)
    # A valid flag belonging to someone else counts as plagiarism; the
    # owner's index in `flags` is recorded.
    if attempt.answer in flags:
        return CheckedPlagiarist(False, flags.index(attempt.answer))
    return Checked(False)
flags = ['LKL{RSA_is_s0metimes_insecur3_3Udjwqg6}', 'LKL{RSA_is_s0metimes_insecur3_UibEbfRa}', 'LKL{RSA_is_s0metimes_insecur3_wGqZy5DF}', 'LKL{RSA_is_s0metimes_insecur3_2LYyyNWF}', 'LKL{RSA_is_s0metimes_insecur3_l9d809Zg}', 'LKL{RSA_is_s0metimes_insecur3_BneTxPca}', 'LKL{RSA_is_s0metimes_insecur3_NfEFCIRX}', 'LKL{RSA_is_s0metimes_insecur3_4WAEvVxt}', 'LKL{RSA_is_s0metimes_insecur3_wQ800lk0}', 'LKL{RSA_is_s0metimes_insecur3_HedQD1vE}', 'LKL{RSA_is_s0metimes_insecur3_pKXxALJn}', 'LKL{RSA_is_s0metimes_insecur3_YZhZvmqN}', 'LKL{RSA_is_s0metimes_insecur3_v1iaaHxu}', 'LKL{RSA_is_s0metimes_insecur3_fm0xHYvf}', 'LKL{RSA_is_s0metimes_insecur3_wKGk99KZ}', 'LKL{RSA_is_s0metimes_insecur3_AycXpexc}', 'LKL{RSA_is_s0metimes_insecur3_H27gGhFt}', 'LKL{RSA_is_s0metimes_insecur3_ipXKDpyl}', 'LKL{RSA_is_s0metimes_insecur3_bDVeeCSu}', 'LKL{RSA_is_s0metimes_insecur3_IOIowsHu}', 'LKL{RSA_is_s0metimes_insecur3_X1J51z2g}', 'LKL{RSA_is_s0metimes_insecur3_qwcBeb7f}', 'LKL{RSA_is_s0metimes_insecur3_BYvIBQl3}', 'LKL{RSA_is_s0metimes_insecur3_lWRmz5AJ}', 'LKL{RSA_is_s0metimes_insecur3_EI4quULK}', 'LKL{RSA_is_s0metimes_insecur3_sILihSt0}', 'LKL{RSA_is_s0metimes_insecur3_Jf1mS2A4}', 'LKL{RSA_is_s0metimes_insecur3_rEpoUHFc}', 'LKL{RSA_is_s0metimes_insecur3_3aOzjiDi}', 'LKL{RSA_is_s0metimes_insecur3_2X4LGivB}', 'LKL{RSA_is_s0metimes_insecur3_E3XpMQ4Z}', 'LKL{RSA_is_s0metimes_insecur3_JkmfbPhc}', 'LKL{RSA_is_s0metimes_insecur3_gSjumGpD}', 'LKL{RSA_is_s0metimes_insecur3_MBvtPPKA}', 'LKL{RSA_is_s0metimes_insecur3_WWn9Txw8}', 'LKL{RSA_is_s0metimes_insecur3_12kavBoH}', 'LKL{RSA_is_s0metimes_insecur3_vkw0O9rB}', 'LKL{RSA_is_s0metimes_insecur3_Remqp7Tc}', 'LKL{RSA_is_s0metimes_insecur3_cJpQlr6K}', 'LKL{RSA_is_s0metimes_insecur3_CnXN72KW}', 'LKL{RSA_is_s0metimes_insecur3_w8Fdsu7b}', 'LKL{RSA_is_s0metimes_insecur3_zwetRh2m}', 'LKL{RSA_is_s0metimes_insecur3_2XDisW1d}', 'LKL{RSA_is_s0metimes_insecur3_nI12YHMk}', 'LKL{RSA_is_s0metimes_insecur3_Zc7yKWN7}', 'LKL{RSA_is_s0metimes_insecur3_UM0NCS7b}', 
'LKL{RSA_is_s0metimes_insecur3_FvLHJZwH}', 'LKL{RSA_is_s0metimes_insecur3_jBkK1mgy}', 'LKL{RSA_is_s0metimes_insecur3_ah7tGRm3}', 'LKL{RSA_is_s0metimes_insecur3_V9x3rTk7}', 'LKL{RSA_is_s0metimes_insecur3_72Zr73Q0}', 'LKL{RSA_is_s0metimes_insecur3_MGXTz8Xk}', 'LKL{RSA_is_s0metimes_insecur3_GKCnGHrk}', 'LKL{RSA_is_s0metimes_insecur3_Ar9ok9d7}', 'LKL{RSA_is_s0metimes_insecur3_whpfREVI}', 'LKL{RSA_is_s0metimes_insecur3_UDBDalbH}', 'LKL{RSA_is_s0metimes_insecur3_U1FH7Cf1}', 'LKL{RSA_is_s0metimes_insecur3_KIaqedik}', 'LKL{RSA_is_s0metimes_insecur3_dqPmGn0z}', 'LKL{RSA_is_s0metimes_insecur3_bEusmfrG}', 'LKL{RSA_is_s0metimes_insecur3_wjgfHTeI}', 'LKL{RSA_is_s0metimes_insecur3_CLTG1Vhx}', 'LKL{RSA_is_s0metimes_insecur3_MRX7svAE}', 'LKL{RSA_is_s0metimes_insecur3_6TBCIJY6}', 'LKL{RSA_is_s0metimes_insecur3_kVxzzxLQ}', 'LKL{RSA_is_s0metimes_insecur3_Vkv2woLM}', 'LKL{RSA_is_s0metimes_insecur3_Bo8VUtVU}', 'LKL{RSA_is_s0metimes_insecur3_6GrvaoC1}', 'LKL{RSA_is_s0metimes_insecur3_YibIEvsP}', 'LKL{RSA_is_s0metimes_insecur3_ba9YkBff}', 'LKL{RSA_is_s0metimes_insecur3_x2B0KLjH}', 'LKL{RSA_is_s0metimes_insecur3_JiWBzSRv}', 'LKL{RSA_is_s0metimes_insecur3_QyLDwokQ}', 'LKL{RSA_is_s0metimes_insecur3_nZZ8tb0Z}', 'LKL{RSA_is_s0metimes_insecur3_CnHFcLbS}', 'LKL{RSA_is_s0metimes_insecur3_izNJOHO2}', 'LKL{RSA_is_s0metimes_insecur3_9ukX4Uxy}', 'LKL{RSA_is_s0metimes_insecur3_n0YiGB82}', 'LKL{RSA_is_s0metimes_insecur3_T5VYsfc5}', 'LKL{RSA_is_s0metimes_insecur3_UQ6KvIZB}', 'LKL{RSA_is_s0metimes_insecur3_mEIdKYee}', 'LKL{RSA_is_s0metimes_insecur3_I3rpSyie}', 'LKL{RSA_is_s0metimes_insecur3_Zi0ClOtB}', 'LKL{RSA_is_s0metimes_insecur3_JAVcK2UU}', 'LKL{RSA_is_s0metimes_insecur3_1Tx3Crkx}', 'LKL{RSA_is_s0metimes_insecur3_2FbkNKnk}', 'LKL{RSA_is_s0metimes_insecur3_YRhonqdT}', 'LKL{RSA_is_s0metimes_insecur3_gQkoA50I}', 'LKL{RSA_is_s0metimes_insecur3_axRX4qyw}', 'LKL{RSA_is_s0metimes_insecur3_IFCOj1V7}', 'LKL{RSA_is_s0metimes_insecur3_k4gHI5D8}', 'LKL{RSA_is_s0metimes_insecur3_zFThpVTM}', 
'LKL{RSA_is_s0metimes_insecur3_iYDJPaN7}', 'LKL{RSA_is_s0metimes_insecur3_awzaYVZK}', 'LKL{RSA_is_s0metimes_insecur3_aSYyVYud}', 'LKL{RSA_is_s0metimes_insecur3_CEzWlUdO}', 'LKL{RSA_is_s0metimes_insecur3_PSHlcp35}', 'LKL{RSA_is_s0metimes_insecur3_c2NhDpw8}', 'LKL{RSA_is_s0metimes_insecur3_0l3UwHlF}', 'LKL{RSA_is_s0metimes_insecur3_WQeRwaPM}', 'LKL{RSA_is_s0metimes_insecur3_4N7mzVAG}', 'LKL{RSA_is_s0metimes_insecur3_9nkGZpXA}', 'LKL{RSA_is_s0metimes_insecur3_FWB38tRG}', 'LKL{RSA_is_s0metimes_insecur3_TvZshh5M}', 'LKL{RSA_is_s0metimes_insecur3_odkN2hAr}', 'LKL{RSA_is_s0metimes_insecur3_diN6caou}', 'LKL{RSA_is_s0metimes_insecur3_rIrFBQB9}', 'LKL{RSA_is_s0metimes_insecur3_A2bAzEpF}', 'LKL{RSA_is_s0metimes_insecur3_39Uo9bYj}', 'LKL{RSA_is_s0metimes_insecur3_klWefkMl}', 'LKL{RSA_is_s0metimes_insecur3_iWWOVbZZ}', 'LKL{RSA_is_s0metimes_insecur3_ETJzDjaj}', 'LKL{RSA_is_s0metimes_insecur3_xSNZYFhJ}', 'LKL{RSA_is_s0metimes_insecur3_k9Xse4cs}', 'LKL{RSA_is_s0metimes_insecur3_EXZC95Kh}', 'LKL{RSA_is_s0metimes_insecur3_pmodkyrx}', 'LKL{RSA_is_s0metimes_insecur3_gwTzucl7}', 'LKL{RSA_is_s0metimes_insecur3_Hx1bvm1Z}', 'LKL{RSA_is_s0metimes_insecur3_7v8eLOwZ}', 'LKL{RSA_is_s0metimes_insecur3_DxbDPG5X}', 'LKL{RSA_is_s0metimes_insecur3_lobjFfcF}', 'LKL{RSA_is_s0metimes_insecur3_LLLmbRNO}', 'LKL{RSA_is_s0metimes_insecur3_kI6EKTOF}', 'LKL{RSA_is_s0metimes_insecur3_5HSnyTLH}', 'LKL{RSA_is_s0metimes_insecur3_M4ofvfwP}', 'LKL{RSA_is_s0metimes_insecur3_coLWPtfu}', 'LKL{RSA_is_s0metimes_insecur3_qxkvUSRP}', 'LKL{RSA_is_s0metimes_insecur3_2MmsVqUg}', 'LKL{RSA_is_s0metimes_insecur3_Yc52WnBP}', 'LKL{RSA_is_s0metimes_insecur3_yGt1uPiG}', 'LKL{RSA_is_s0metimes_insecur3_qFjrX5Ji}', 'LKL{RSA_is_s0metimes_insecur3_gSebOWUT}', 'LKL{RSA_is_s0metimes_insecur3_XARUHTcG}', 'LKL{RSA_is_s0metimes_insecur3_51QDUC7l}', 'LKL{RSA_is_s0metimes_insecur3_i6p6iiUH}', 'LKL{RSA_is_s0metimes_insecur3_kzUSlkav}', 'LKL{RSA_is_s0metimes_insecur3_2RBFT2GT}', 'LKL{RSA_is_s0metimes_insecur3_ByOtjihb}', 
'LKL{RSA_is_s0metimes_insecur3_cLKBCVZ2}', 'LKL{RSA_is_s0metimes_insecur3_Trq7k1wI}', 'LKL{RSA_is_s0metimes_insecur3_Q60qbGcZ}', 'LKL{RSA_is_s0metimes_insecur3_Fp37ejF6}', 'LKL{RSA_is_s0metimes_insecur3_tLBJ6Gix}', 'LKL{RSA_is_s0metimes_insecur3_U7tBKrpB}', 'LKL{RSA_is_s0metimes_insecur3_XDAt8LAu}', 'LKL{RSA_is_s0metimes_insecur3_m60Nw97g}', 'LKL{RSA_is_s0metimes_insecur3_krYk40zo}', 'LKL{RSA_is_s0metimes_insecur3_V3WWrrlx}', 'LKL{RSA_is_s0metimes_insecur3_KsybMcjy}', 'LKL{RSA_is_s0metimes_insecur3_yVWR00Sp}', 'LKL{RSA_is_s0metimes_insecur3_Rt1IFAr8}', 'LKL{RSA_is_s0metimes_insecur3_aHkXSnfe}', 'LKL{RSA_is_s0metimes_insecur3_zEp1mZc1}', 'LKL{RSA_is_s0metimes_insecur3_zv0ffkZ2}', 'LKL{RSA_is_s0metimes_insecur3_ueVY4ipK}', 'LKL{RSA_is_s0metimes_insecur3_ocDnu8u6}', 'LKL{RSA_is_s0metimes_insecur3_pPnTgD60}', 'LKL{RSA_is_s0metimes_insecur3_2rnwVTJ4}', 'LKL{RSA_is_s0metimes_insecur3_20ZEcGl8}', 'LKL{RSA_is_s0metimes_insecur3_fL9Ympb5}', 'LKL{RSA_is_s0metimes_insecur3_3GwYLaqg}', 'LKL{RSA_is_s0metimes_insecur3_qiXClm4E}', 'LKL{RSA_is_s0metimes_insecur3_d2en2vz6}', 'LKL{RSA_is_s0metimes_insecur3_SOLo31WB}', 'LKL{RSA_is_s0metimes_insecur3_OB9dtc4j}', 'LKL{RSA_is_s0metimes_insecur3_98FGOfT9}', 'LKL{RSA_is_s0metimes_insecur3_xM10cADQ}', 'LKL{RSA_is_s0metimes_insecur3_hpMKiswj}', 'LKL{RSA_is_s0metimes_insecur3_FTjpdffi}', 'LKL{RSA_is_s0metimes_insecur3_1iEMCbA4}', 'LKL{RSA_is_s0metimes_insecur3_yEH5gk0l}', 'LKL{RSA_is_s0metimes_insecur3_LhYemwow}', 'LKL{RSA_is_s0metimes_insecur3_PJBY7kTD}', 'LKL{RSA_is_s0metimes_insecur3_Y2RZ1YTf}', 'LKL{RSA_is_s0metimes_insecur3_FQPmnfg5}', 'LKL{RSA_is_s0metimes_insecur3_hNBb63ry}', 'LKL{RSA_is_s0metimes_insecur3_RJ8slmjb}', 'LKL{RSA_is_s0metimes_insecur3_xSodLxm0}', 'LKL{RSA_is_s0metimes_insecur3_HDxXhB9X}', 'LKL{RSA_is_s0metimes_insecur3_vPOiIRZA}', 'LKL{RSA_is_s0metimes_insecur3_mYdW9rli}', 'LKL{RSA_is_s0metimes_insecur3_B1gHPXjt}', 'LKL{RSA_is_s0metimes_insecur3_om7BTmLD}', 'LKL{RSA_is_s0metimes_insecur3_6z9ZUc5z}', 
'LKL{RSA_is_s0metimes_insecur3_RvxykO1G}', 'LKL{RSA_is_s0metimes_insecur3_k0Le2xyX}', 'LKL{RSA_is_s0metimes_insecur3_0GRj9QWU}', 'LKL{RSA_is_s0metimes_insecur3_23Kx2a9O}', 'LKL{RSA_is_s0metimes_insecur3_PSAiCs7Z}', 'LKL{RSA_is_s0metimes_insecur3_v6aG3j0B}', 'LKL{RSA_is_s0metimes_insecur3_xXxmsOuX}', 'LKL{RSA_is_s0metimes_insecur3_92Pe84C8}', 'LKL{RSA_is_s0metimes_insecur3_Dx0qMgaA}', 'LKL{RSA_is_s0metimes_insecur3_OaUGvuMU}', 'LKL{RSA_is_s0metimes_insecur3_c2zHPwlu}', 'LKL{RSA_is_s0metimes_insecur3_UJIh7nj1}', 'LKL{RSA_is_s0metimes_insecur3_fexW2IIJ}', 'LKL{RSA_is_s0metimes_insecur3_FxVr8Y7Q}', 'LKL{RSA_is_s0metimes_insecur3_Zgvph30I}', 'LKL{RSA_is_s0metimes_insecur3_8aezHJSp}'] | [
"supermax74.02@gmail.com"
] | supermax74.02@gmail.com |
333b3e57b03c06635723ab136380a76d369174b0 | edfcd96f0010ea068a4c046bdcf7067ff92d3f9b | /Modules/datetime/1.py | 3dcb2607e4524fae4299e4d4cb1d07b43e896777 | [] | no_license | afsanehshu/python-project | a99ff558f375c1f5e17ea6ffc13af9216ec4733f | 48905cfd24df6d1f48460d421ed774f19403cf53 | refs/heads/main | 2023-08-03T01:53:32.812949 | 2021-09-22T19:36:25 | 2021-09-22T19:36:25 | 409,303,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | import datetime
# Capture the current local date and time and display it.
datetime_object = datetime.datetime.now()
print(datetime_object)
| [
"afsanehshu@gmail.com"
] | afsanehshu@gmail.com |
4c198afdf441b3b85b7630151015f6fc947c91ca | 5d423684f7db6dd3f528e0ccc27ab41d6dfca9bd | /seniors/admin.py | ec0914b79716d1cc384dc3ee739cb1def581bc0e | [] | no_license | tnq/grogosite | e7459080188252c169c5bb71fbd183f06a2fe293 | c528826967aba6240a48f344a9a579c442695ddb | refs/heads/master | 2021-01-02T08:56:20.147735 | 2018-05-07T22:49:11 | 2018-05-07T23:04:11 | 1,848,585 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,137 | py | # -*- coding: utf-8 -*-
import codecs
import csv
from collections import defaultdict
from StringIO import StringIO
from zipfile import ZipFile
from django.contrib import admin
from django.core.paginator import Paginator
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from seniors.models import Senior, Activity
import re
class ActivityInline(admin.TabularInline):
model = Activity
extra = 1
majors = {
"Mechanical Engineering": "2",
"Physics": "8",
"Electrical Engineering": "6-1",
"Computer Science": "6-3",
"Chemical Engineering": "10",
"Management": "15",
"Political Science": "17",
"Brain Cognitive Sciences": "9",
"Civil Engineering": "1",
"Chemistry": "5",
"Biology": "7",
"Music": "21M",
"Aerospace Engineering": "16",
"History": "21H",
"Writing": "21W",
"Nuclear Engineering": "22",
"Philosophy": "24"
}
## {{{ http://code.activestate.com/recipes/577305/ (r1)
states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
## end of http://code.activestate.com/recipes/577305/ }}}
state_abbrs = {}
for abbr in states.keys():
state_abbrs[states[abbr]] = abbr
lg_expansions = [x.split("\t", 2) for x in
"""ADPhi Alpha Delta Phi
AEP Alpha Epsilon Pi
AXO Alpha Chi Omega
B-Entry MacGregor B-Entry
Annex, McCormick McCormick Annex
Baker House Baker
Beast East Campus 2E
""".splitlines()]
greek_letters = {
"ALPHA" : u"\u0391",
"BETA" : u"\u0392",
"GAMMA" : u"\u0393",
"DELTA" : u"\u0394",
"EPSILON" : u"\u0395",
"ZETA" : u"\u0396",
"ETA" : u"\u0397",
"THETA" : u"\u0398",
"IOTA" : u"\u0399",
"KAPPA" : u"\u039A",
"LAMBDA" : u"\u039B",
"MU" : u"\u039C",
"NU" : u"\u039D",
"XI" : u"\u039E",
"OMICRON" : u"\u039F",
"PI" : u"\u03A0",
"RHO" : u"\u03A1",
"SIGMA" : u"\u03A3",
"TAU" : u"\u03A4",
"UPSILON" : u"\u03A5",
"PHI" : u"\u03A6",
"CHI" : u"\u03A7",
"PSI" : u"\u03A8",
"OMEGA" : u"\u03A9",
}
def format_lg(lg):
fragments = lg.split()
output = ""
in_greek = False
for i, word in enumerate(fragments):
if word.upper() in greek_letters.keys():
if not in_greek:
output += "<CharStyle:Senior Info Greek>"
in_greek = True
output += greek_letters[word.upper()]
else:
if in_greek:
output += "<CharStyle:> "
in_greek = False
output += word + " "
if in_greek:
output += "<CharStyle:>"
return output.strip()
def format_major(major):
major = major.upper().strip()
major = major.replace("AND", "")
major = major.replace("COURSE", "")
major = major.replace(" - ", " / ")
for one, two in majors.iteritems():
major = major.replace(one.upper(), two)
major = re.sub(ur'^([0-9A-Z–-]+)[^0-9A-Z–-]+([0-9A-Z–-]+)$', r'\1 / \2', major)
major = re.sub(r'([0-9]+)-([A-Z]+)', r'\1\2', major)
major = major.replace("-", u"\u2013")
major = major.strip()
return major
def format_state(state):
state = state.strip()
if state.upper() in states.keys():
state = states[state.upper()]
if state in state_abbrs.keys():
state = state_abbrs[state]
return state
def format_name(name):
name = re.sub(r' ([A-Z]) ', r' \1. ', name)
return name.strip()
def format_years(years):
years = re.sub(r',\s*', r' ', years)
return years.strip()
def format_quote(quote):
quote = re.sub(r'^"(.*)"$', r'\1', quote)
quote = re.sub(r"^'(.*)'$", r"\1", quote)
return quote
def format_author(author):
author = re.sub(r'^"(.*)"$', r'\1', author)
author = re.sub(r"^'(.*)'$", r"\1", author)
author = re.sub(r'^-', r'', author)
author = re.sub(r'^([^,]*?),\s*([^0-9,][^,]*?)$', r'\1 (\2)', author)
author = re.sub(r'\("(.+)"\)', r'(\1)', author)
return author.strip()
def fix_seniors(tnq_year, func, attr=None, get=None, set=None):
if not get:
get = lambda senior: getattr(senior, attr)
if not set:
set = lambda senior, value: setattr(senior, attr, value)
queryset = Senior.objects.filter(tnq_year=2012).order_by("sort_letter")
pages = Paginator(queryset, 30)
def do_senior(senior):
try:
val = get(senior)
if val:
new_val = func(val)
if new_val != val:
print "%s\t%s\t%s" % (val, new_val, senior.name)
return [(senior, new_val)]
except IndexError:
pass
return []
for i in range(pages.num_pages):
seniors = list(pages.page(i+1).object_list)
todo = []
for senior in seniors:
todo.extend(do_senior(senior))
if not todo:
continue
if raw_input("Okay [yN]? ").lower() == "y":
for senior, new_val in todo:
set(senior, new_val)
senior.save()
else:
for senior in seniors:
change = do_senior(senior)
if change:
new_val = change[0][1]
if raw_input("Okay [yN]? ").lower() == "y":
set(senior, new_val)
senior.save()
def _sort_seniors(queryset):
import PyICU
collator = PyICU.Collator.createInstance(PyICU.Locale("es_ES"))
queryset = queryset.exclude(image_path=None)
sorted_seniors = list(queryset)
sort_first_name = lambda _: _.name.split()[0].strip()
sort_last_name = lambda _: [w for w in _.name.split() if w[0].lower() == _.sort_letter.lower()][-1].lower().strip()
sorted_seniors.sort(key=lambda _: sort_last_name(_)+" "+sort_first_name(_), cmp=collator.compare)
return sorted_seniors
class SeniorAdmin(admin.ModelAdmin):
inlines = [ ActivityInline, ]
search_fields = ('name', 'kerberos',)
list_display = ('name', 'kerberos', 'sort_letter',)
list_filter = ('tnq_year',)
fieldsets = [
('Biographical Information', {'fields':['name', 'sort_letter', 'name_comments', 'home_town', 'home_state_or_country', 'image_path',]}),
('MIT Information', {'fields':['tnq_year', 'kerberos', 'major', 'minor', 'lg']}),
('Quote', {'fields':['quote', 'quote_author']}),
]
actions = ['export_as_csv', 'export_as_tagged_text', ]
def export_as_tagged_text(modeladmin, request, queryset):
"""
Export senior information as a series of Adobe Tagged Text files inside
a wrapper zip file, suitable for import into an Indesign document.
"""
response = HttpResponse(mimetype='application/zip')
response['Content-Disposition'] = 'attachment; filename=seniors.zip'
zip = ZipFile(response, 'w')
SENIORS_PER_PAGE = 8
SENIORS_PER_ROW = 4
BULLET = u" · "
SLASHES = u" // "
DASH = u" – "
SPECIAL_PAGES = defaultdict(lambda: SENIORS_PER_PAGE)
SPECIAL_PAGES.update({11:4,
28:4,
49:4,
68:4})
sorted_seniors = _sort_seniors(queryset)
pages = []
unpaginated_seniors = list(sorted_seniors)
page = 0
while unpaginated_seniors:
on_page = SPECIAL_PAGES[page]
this_page, unpaginated_seniors = unpaginated_seniors[:on_page], unpaginated_seniors[on_page:]
pages.append(this_page)
page += 1
def sanitize(str):
return str.replace(r"<", r"\<").replace(r">", r"\>")
def format_senior(senior):
if not senior:
return "<ParaStyle:Senior Info Text>"
else:
senior_string = u"<ParaStyle:Senior Info Text>"
senior_string += senior.kerberos
senior_string += BULLET
senior_string += senior.major
if senior.minor:
senior_string += ", "+senior.minor
senior_string += SLASHES
senior_string += senior.home_town + ", " + format_state(senior.home_state_or_country)
if senior.lg.strip():
senior_string += BULLET
senior_string += format_lg(senior.lg)
activities = Activity.objects.filter(senior = senior)
if activities:
senior_string += SLASHES
for i, activity in enumerate(activities):
if i:
senior_string += BULLET
senior_string += activity.title
senior_string += " <cPosition:Superscript>"
senior_string += activity.years
senior_string += "<cPosition:>"
if activity.offices:
senior_string += " (" + activity.offices + ")"
if senior.quote:
senior_string += SLASHES
senior_string += u'\u201C' + format_quote(sanitize(senior.quote)) + u'\u201D'
if senior.quote_author:
senior_string += DASH
senior_string += sanitize(senior.quote_author)
return senior_string
for i in range(len(pages)):
seniors = pages[i]
if len(seniors) < SENIORS_PER_PAGE:
half_num = int(len(seniors)/2.0 + 0.5)
if i % 2 == 0: #On a left-hand page
seniors = [None]*(SENIORS_PER_ROW-half_num) \
+ seniors[:half_num]\
+[None]*(SENIORS_PER_PAGE-len(seniors)-(SENIORS_PER_ROW-half_num))\
+ seniors[half_num:]
else:
seniors = seniors[:half_num]\
+[None]*(SENIORS_PER_ROW-half_num)\
+seniors[half_num:]\
+[None]*(SENIORS_PER_PAGE-len(seniors)-(SENIORS_PER_ROW-half_num))
images = ""
page_string = u"""<UNICODE-MAC>
<Version:7><FeatureSet:InDesign-Roman>"""
for senior in seniors:
if senior:
page_string += "<ParaStyle:Senior Name>%s<cNextXChars:Box>\n" % format_name(senior.name)
images += senior.image_path+"\n"
else:
page_string += "<ParaStyle:Senior Name><cNextXChars:Box>\n"
images += "\n"
for j in range(SENIORS_PER_ROW):
page_string += format_senior(seniors[j])
page_string += "\n"
page_string += format_senior(seniors[j+SENIORS_PER_ROW])
page_string += "<cNextXChars:Column>\n"
zip.writestr("page%02d.txt" % i, codecs.BOM_UTF16_LE + page_string.encode("utf_16_le"))
zip.writestr("images%02d.txt" % i, images)
zip.close()
return response
export_as_tagged_text.short_description = "Export selected seniors to Adobe Tagged Text"
def export_as_csv(modeladmin, request, queryset):
"""
Export senior information in CSV format.
"""
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=seniors.csv'
sorted_seniors = _sort_seniors(queryset)
writer = csv.writer(response,)
writer.writerow(['name', 'firstname', 'lastname', 'comments',
'kerberos', 'major', 'minor', 'hometown',
'homeState', 'lg', 'quote', 'author',
'activity1', 'years1', 'offices1',
'activity2', 'years2', 'offices2',
'activity3', 'years3', 'offices3',
'activity4', 'years4', 'offices4',
'activity5', 'years5', 'offices5', ])
for senior in sorted_seniors:
this_row = [format_name(senior.name).encode('utf8'),
senior.name.strip().split(" ")[0].encode('utf8'),
senior.name.strip().split(" ")[-1].encode('utf8'),
senior.name_comments.encode('utf8'),
senior.kerberos.encode('utf8'),
format_major(senior.major).encode('utf8'),
senior.minor.encode('utf8'),
senior.home_town.encode('utf8'),
senior.home_state_or_country.encode('utf8'),
senior.lg.encode('utf8'),
senior.quote.encode('utf8'),
senior.quote_author.encode('utf8')]
activities = Activity.objects.filter(senior = senior)
for activity in activities:
this_row.append(activity.title.encode('utf8'))
this_row.append(format_years(activity.years).encode('utf8'))
this_row.append(activity.offices.encode('utf8'))
writer.writerow(this_row)
return response
export_as_csv.short_description = "Export selected seniors to CSV"
class ActivityAdmin(admin.ModelAdmin):
list_display = ('title', 'senior')
admin.site.register(Senior, SeniorAdmin)
admin.site.register(Activity, ActivityAdmin)
| [
"nwiltsie@mit.edu"
] | nwiltsie@mit.edu |
de173d2bb760fbf4bd04e8b5784cb2d50c4a74b0 | f4af33b9a46effbd6cbcd84eedbc8992d3f3a5ce | /unit4/sps_function.py | 413e089cb9d718d71765a9b5c49c594758e39c3d | [] | no_license | ajdt/udacity_cs212 | d7fac354e8cc4ee54674cf40baf605f47f758bbf | bc9225ba7e04b7d219fed387a045dfec09c9bbcf | refs/heads/master | 2020-11-30T01:44:36.911165 | 2016-09-17T17:22:27 | 2016-09-17T17:22:27 | 68,466,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,778 | py | # -----------------
# User Instructions
#
# Write a function, shortest_path_search, that generalizes the search algorithm
# that we have been using. This function should have three inputs, a start state,
# a successors function, and an is_goal function.
#
# You can use the solution to mc_problem as a template for constructing your
# shortest_path_search. You can also see the example is_goal and successors
# functions for a simple test problem below.
def shortest_path_search(start, successors, is_goal):
"""Find the shortest path from start state to a state
such that is_goal(state) is true."""
if is_goal(start):
return [start]
frontier, explored = [ [start] ], set()
while frontier:
path = frontier.pop(0) # pop the last path
last_state = path[-1]
if is_goal(last_state): # check for goal here
return path
for (state, action) in successors(last_state).items():
if state not in explored:
explored.add(state)
frontier.append( path + [action, state] )
frontier.sort(key=len)
return Fail
def mc_problem1(start=(3, 3, 1, 0, 0, 0), goal=None):
"""Solve the missionaries and cannibals problem.
State is 6 ints: (M1, C1, B1, M2, C2, B2) on the start (1) and other (2) sides.
Find a path that goes from the initial state to the goal state (which, if
not specified, is the state with no people or boats on the start side."""
if goal is None:
goal = (0, 0, 0) + start[:3]
if start == goal:
return [start]
explored = set() # set of states we have visited
frontier = [ [start] ] # ordered list of paths we have blazed
while frontier:
path = frontier.pop(0)
s = path[-1]
for (state, action) in csuccessors(s).items():
if state not in explored:
explored.add(state)
path2 = path + [action, state]
if state == goal:
return path2
else:
frontier.append(path2)
return Fail
Fail = []
def csuccessors(state):
"""Find successors (including those that result in dining) to this
state. But a state where the cannibals can dine has no successors."""
M1, C1, B1, M2, C2, B2 = state
## Check for state with no successors
if C1 > M1 > 0 or C2 > M2 > 0:
return {}
items = []
if B1 > 0:
items += [(sub(state, delta), a + '->')
for delta, a in deltas.items()]
if B2 > 0:
items += [(add(state, delta), '<-' + a)
for delta, a in deltas.items()]
return dict(items)
def add(X, Y):
"add two vectors, X and Y."
return tuple(x+y for x,y in zip(X, Y))
def sub(X, Y):
"subtract vector Y from X."
return tuple(x-y for x,y in zip(X, Y))
deltas = {(2, 0, 1, -2, 0, -1): 'MM',
(0, 2, 1, 0, -2, -1): 'CC',
(1, 1, 1, -1, -1, -1): 'MC',
(1, 0, 1, -1, 0, -1): 'M',
(0, 1, 1, 0, -1, -1): 'C'}
Fail = []
# --------------
# Example problem
#
# Let's say the states in an optimization problem are given by integers.
# From a state, i, the only possible successors are i+1 and i-1. Given
# a starting integer, find the shortest path to the integer 8.
#
# This is an overly simple example of when we can use the
# shortest_path_search function. We just need to define the appropriate
# is_goal and successors functions.
def is_goal(state):
if state == 8:
return True
else:
return False
def successors(state):
successors = {state + 1: '->',
state - 1: '<-'}
return successors
#test
assert shortest_path_search(5, successors, is_goal) == [5, '->', 6, '->', 7, '->', 8]
| [
"ajdt@uw.edu"
] | ajdt@uw.edu |
560ff9f3f493317e04240dcf5f75f3fb3c0b41e7 | 500bca3e22bd0c30c79b74918e9847742b3c428e | /sdk/python/endpoints/online/mlflow/sklearn-diabetes/src/score.py | 4e2c269f5cb447804f693d12932e283e9219e83f | [
"MIT"
] | permissive | Azure/azureml-examples | 2304c862fd2e36e6640ecc4d09f69c5ed93b48ab | e5f7b247d4753f115a8f7da30cbe25294f71f9d7 | refs/heads/main | 2023-08-31T00:10:14.107509 | 2023-08-30T17:29:22 | 2023-08-30T17:29:22 | 289,334,021 | 1,219 | 1,074 | MIT | 2023-09-14T16:00:55 | 2020-08-21T18:04:26 | Jupyter Notebook | UTF-8 | Python | false | false | 979 | py | import logging
import os
import json
import mlflow
from io import StringIO
from mlflow.pyfunc.scoring_server import infer_and_parse_json_input, predictions_to_json
def init():
global model
global input_schema
# "model" is the path of the mlflow artifacts when the model was registered. For automl
# models, this is generally "mlflow-model".
model_path = os.path.join(os.getenv("AZUREML_MODEL_DIR"), "model")
model = mlflow.pyfunc.load_model(model_path)
input_schema = model.metadata.get_input_schema()
def run(raw_data):
json_data = json.loads(raw_data)
if "input_data" not in json_data.keys():
raise Exception("Request must contain a top level key named 'input_data'")
serving_input = json.dumps(json_data["input_data"])
data = infer_and_parse_json_input(serving_input, input_schema)
predictions = model.predict(data)
result = StringIO()
predictions_to_json(predictions, result)
return result.getvalue()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
3ec15a885991693045ca69757489420fd2440bc1 | ee22ec2076a79e8de3011377fe205bc87163ab9f | /src/algo-p5/0828/q27/player.py | 1631f0c162cd5940c1385e5e74a4e95c3ea58bec | [] | no_license | n18018/programming-term2 | 039a95c67372a38a34e2aa8c5975045a9fc731be | 86c455269eed312def529604e1ac3b00f476226c | refs/heads/master | 2020-03-22T08:59:29.545280 | 2018-08-29T07:57:37 | 2018-08-29T07:57:37 | 139,806,131 | 0 | 0 | null | 2018-07-05T06:42:11 | 2018-07-05T06:42:11 | null | UTF-8 | Python | false | false | 12,069 | py | import field_map
import sys
import random
from enemy import Enemy
class Player:
def __init__(self, name):
"""
コンストラクタ
Parameters
----------
name : str
プレイヤーの名前
Returns
-------
自分自身のインスタンス
"""
self.name = name
self.cur_pos = 0
self.hp = 100
self.max_hp = 100
self.min_damage = 4
self.max_damage = 7
self.freq = 10
self.plant_nums = 10
self.exp = 0
self.level = 1
def choose_action_in_field(self):
"""
フィールド中での操作を選択する
Parameters
----------
なし
Returns
-------
なし
"""
# 見やすさのために、空白行を表示
print()
# 「何をしますか?」を表示
print("何をしますか?")
# 「1:サイコロを振る、2:現在の状態を確認する、3:薬草を使う、9:ゲームを終了する>> 」を表示し、入力待ちにする
cmd_num = input("1:サイコロを振る、2:現在の状態を確認する、3:薬草を使う、9:ゲームを終了する>> ")
# cmd_numの値によって条件分岐
if cmd_num == "1":
# その場から動く
self.move()
elif cmd_num == "2":
# 状態を表示する
self.show_status()
elif cmd_num == "3":
# 薬草を使う
self.use_plants()
elif cmd_num == "9":
# ゲームを終了する
self.quit_game()
def move(self):
"""
動く(サイコロを振る行為を含む)
Parameters
----------
なし
Returns
-------
なし
"""
# サイコロを振る
dice_num = field_map.shake_dice()
# 出た目の数だけ前に進む
self.go_forward(dice_num)
def go_forward(self, cells):
"""
前に進む
Parameters
----------
cells : int
進むマス目の数
Returns
-------
なし
"""
# 引数のマス目だけ進む
self.cur_pos += cells
# 現在位置を表示
print("現在位置は" + str(self.cur_pos) + "です。")
# 止まったマス目のイベントを取得する
event_nm = field_map.get_event(self.cur_pos)
if event_nm == "BattleVsZako":
# ザコキャラ「スラスラ」と戦う
zako = Enemy("スラスラ")
self.battle(zako)
elif event_nm == "GoMoreForward":
# 2マスさらに前に進む
self.go_more_forward(2)
elif event_nm == "GoBack":
# 3マス戻る
self.go_back(3)
elif event_nm == "GoBackToStart":
# 振り出しに戻る
self.go_back_to_start()
elif event_nm == "HealingLake":
# event_nmが"HealingLake"の場合、新たに定義したself.healed_in_lake()を呼び出してください。
self.healed_in_lake()
elif event_nm == "PoisonSwamp":
# event_nmが"PoisonSwamp"の場合、新たに定義したself.poisoned_in_swamp()を呼び出してください。
self.poisoned_in_swamp()
def go_more_forward(self, cells):
"""
出た目の分さらに前に進む
Parameters
----------
cells : int
進むマス目の数
Returns
-------
なし
"""
print("イベント発生!" + str(cells) + "マスさらに進みます。")
# 引数で渡された目の分だけ前に進む
self.go_forward(cells)
def go_back(self, cells):
"""
出た目の分後ろに戻る
Parameters
----------
cells : int
戻るマス目の数
Returns
-------
なし
"""
print("イベント発生!" + str(cells) + "マス後ろに戻ります。")
# 引数で出た目の分だけ前に戻る(引数に-1を掛けることで戻る動作をしている)
self.go_forward((cells * -1))
def go_back_to_start(self):
"""
出た目の分後ろに戻る
Parameters
----------
なし
Returns
-------
なし
"""
print("イベント発生!振り出しに戻ってしまいます!")
# 引数で出た目の分だけ前に戻る(引数に-1を掛けることで戻る動作をしている)
self.go_forward((self.cur_pos * -1))
def show_status(self):
"""
現在の状態を表示する
Parameters
----------
なし
Returns
-------
なし
"""
# 状態を表示する
print(self.name + "の現在位置は" + str(self.cur_pos)
+ "、HPは" + str(self.hp) + "です。")
# 薬草の枚数も表示する。
print("薬草を" + str(self.plant_nums) + "枚持っています。")
def battle(self, enemy):
"""
敵とたたかう
Parameters
----------
enemy : Enemy
敵のオブジェクト
Returns
-------
なし
"""
# イベント発生メッセージ
print("イベント発生!" + enemy.name + "があらわれた!")
# 敵が倒されるまで戦い続ける
while enemy.hp > 0:
# 見やすさのために空行を表示
print()
# ガイドメッセージを表示
print("どうする?")
# 「1:攻撃する、3:薬草を使う、9:逃げる>> 」を表示し、入力待ちにする
cmd_num = input("1:攻撃する、3:薬草を使う、9:逃げる>> ")
if cmd_num == "1":
# プレイヤーが敵を攻撃。倒したらループを抜ける
if self.attack(enemy):
break
elif cmd_num == "3":
# 薬草を使う
self.use_plants()
elif cmd_num == "9":
# 逃げる
print(self.name + "は逃げ出した!")
return
# 敵がプレイヤーを攻撃。倒されたらゲームオーバー
if not enemy.attack(self):
print(self.name + "はしんでしまった!世界は闇に包まれてしまった...")
sys.exit()
# バトル終了
print(self.name + "は" + enemy.name + "を倒した!")
def attack(self, enemy):
"""
敵を攻撃する
Parameters
----------
enemy : Enemy
敵のオブジェクト
Returns
-------
bool
True:敵を倒した、False:敵がまだ生きている
"""
# ダメージを最小〜最大の範囲でランダムに取得
damage = random.randint(self.min_damage, self.max_damage)
is_critical = False # 「かいしんのいちげき」かどうか
# 1/(self.freq)の確率で「かいしんのいちげき」を出す
rand_num = random.randint(1, self.freq)
if rand_num % self.freq == 0:
is_critical = True
# 自分のターンのメッセージ表示
print(self.name + "のこうげき!")
# かいしんのいちげきの場合、ダメージを倍にする
if is_critical:
print("かいしんのいちげき!!")
damage *= 2
# 相手にダメージを与える
enemy.hp -= damage
if enemy.hp > 0:
print(enemy.name + "に" + str(damage) + "のダメージを与えた!"
+ enemy.name + "のHPは" + str(enemy.hp) + "です。")
return False
else:
print(enemy.name + "に" + str(damage) + "のダメージを与えた!"
+ enemy.name + "のHPは0です。")
return True
def use_plants(self):
"""
薬草を使う
Parameters
----------
なし
Returns
-------
なし
"""
# 薬草を持っていなければ、その旨表示して終了
if self.plant_nums <= 0:
print(self.name + "は薬草を持っていない")
return
# メッセージ表示
print(self.name + "は薬草を使った!")
# HPを30ポイント回復
self.hp += 30
# HPが最大を超えないように調整
if self.hp > self.max_hp:
self.hp = self.max_hp
# 持っている薬草を1枚減らす
self.plant_nums -= 1
# 回復したHPの状態を表示
print(self.name + "のHPが" + str(self.hp) + "に回復した!")
# healed_in_lakeメソッドを定義します。引数はselfのみです。
def healed_in_lake(self):
"""
湖でHPを回復される
Parameters
----------
なし
Returns
-------
なし
"""
# 「イベント発生!癒しの湖で身を清めます。」を表示してください。
print("イベント発生!癒しの湖で身を清めます。")
# HPを最大まで回復します。self.hpにself.max_hpを代入してください。
self.hp = self.max_hp
# 「(self.name)のHPが全回復した!現在のHPは(self.hp)です。」を表示してください。
print(self.name, "のHPは全回復した!現在のHPは", self.hp, "です。")
# poisoned_in_swampメソッドを定義します。引数はselfのみです。
def poisoned_in_swamp(self):
"""
沼で毒に冒される
Parameters
----------
なし
Returns
-------
なし
"""
# 「イベント発生!沼で毒に冒されました。」を表示してください。
print("イベント発生!沼で毒に冒されました。")
# 20のダメージを受けます。self.hpから20を引いて(self.hpに再代入して)ください。
self.hp = self.hp - 20
if self.hp > 0:
# self.hpが0より大きい場合、「(self.name)は20のダメージを受けた!現在のHPは(self.hp)です。」を表示してください。
print(self.name, "は20のダメージを受けた!現在のHPは", self.hp, "です。")
else:
# 上記以外の場合、「(self.name)は20のダメージを受けた!(self.name)はしんでしまった!世界は闇に包まれてしまった...」を表示してください。
print(self.name, "は20のダメージを受けた!", self.name,
"はしんでしまった!世界は闇に包まれてしまった...")
# ゲームオーバーなので終了です。1つ前のメッセージに続けて、sys.exit()を呼び出してください。
sys.exit()
def quit_game(self):
"""
ゲームを終了する
Parameters
----------
なし
Returns
-------
なし
"""
# 終了するかどうかの確認メッセージを表示
cmd_str = input("ゲームの状態はセーブされません。終了しますか?(y/n) ")
# Yが押されたら終了
if cmd_str.upper() == "Y":
sys.exit()
# 以下メイン処理
if __name__ == '__main__':
# プレイヤーのオブジェクトを作成
kevin = Player("ケビン")
# 敵のオブジェクトを作成
enemy = Enemy("スラスラ")
# ケビンとスラスラが戦う
kevin.battle(enemy)
# バトル後のケビンのステータスを表示
kevin.show_status()
| [
"n18018@std.it-college.ac.jp"
] | n18018@std.it-college.ac.jp |
f7d577610c90e9aa93d478ba5211244881ae4241 | 4711a51655cf8944039246d3cf28f5798af089fe | /coder/wsgi.py | 29f26bbb10285e85a1939e86c02aebaf634743b4 | [] | no_license | GauravTyagi67/myblog | 827756f8334b4e3df5b3fce2f598185d13551530 | 86288bb1b403206654752a1890f70cae4dc9d4c2 | refs/heads/main | 2023-04-20T02:08:23.752116 | 2021-05-11T03:08:12 | 2021-05-11T03:08:12 | 366,239,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for coder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coder.settings')
application = get_wsgi_application()
| [
"noreply@github.com"
] | GauravTyagi67.noreply@github.com |
6969c5a69023c51c4b9f057fc4d0ebc464317c30 | b4920771048ba1f7cc6ac266c3f3576290c00718 | /session1/HW/ex1.py | fc29c34c92c90a445624ba1c1a341c8b163b3e6c | [] | no_license | dungbk10t/phamtuandung-webmodule-c4e26 | 969779da1d4bd8c1940583f4a11d1cfbe064eea2 | af793ba2765c8c17852c6bebcaf8250543488490 | refs/heads/master | 2021-10-23T12:59:32.532871 | 2019-03-17T14:03:31 | 2019-03-17T14:03:31 | 173,112,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from flask import Flask,redirect
app = Flask(__name__)
@app.route('/about-me')
def about():
myseft = {
"Name": "Dung",
"Age": "21",
"Hobbies": "Travel",
"Work": "Student",
}
return str(myseft)
@app.route('/')
def school():
return redirect("https://techkids.vn/", code=302)
if __name__ == '__main__':
app.run(debug=True) | [
"38665090+dungbk10t@users.noreply.github.com"
] | 38665090+dungbk10t@users.noreply.github.com |
8a1ca419dff4adbd0c351ffc4b87553ec6abd288 | b134420ad05667ae191c3a2f3753ce5966594fb1 | /02_Info/hw02/src/docreader.py | 7746f9a82e692830eb169f912e43a82443d4b2a3 | [] | no_license | Fen99/TehnoSphere | aad17f9dca11561378d38ba292db1599e9bcfbec | 8a11c3d26f4eb6ad88c154e10e5411a5f625a17e | refs/heads/master | 2022-03-07T03:56:54.781807 | 2019-09-13T23:03:45 | 2019-09-13T23:03:45 | 106,061,225 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | #!/usr/bin/env python
import document_pb2
import struct
import gzip
class DocumentStreamReader:
def __init__(self, paths):
self.paths = paths
def open_single(self, path):
return gzip.open(path, 'rb') if path.endswith('.gz') else open(path, 'rb')
#Document format - <Len><Text>
#document fields: .url, .text
def __iter__(self):
for path in self.paths:
with self.open_single(path) as stream:
while True:
sb = stream.read(4)
if sb == '':
break
size = struct.unpack('i', sb)[0]
msg = stream.read(size)
doc = document_pb2.document()
doc.ParseFromString(msg)
yield doc
def GetDocs(filenames):
reader = DocumentStreamReader(filenames)
return reader
| [
"feda.petraykin@gmail.com"
] | feda.petraykin@gmail.com |
770c7164abe7da38b537a93ec34d8f614f0a94cc | ef35552267ac45345c60135845470260afbd6687 | /Artifacts/run_verus.py | 62d4e1cd025febe0970d0d9cd628cd8b3f810c46 | [
"MIT"
] | permissive | xianliangjiang/ALCC | 2bbe7e48aaf7ab273cfea4622855be12e261730f | fc9c627de8c381987fc775ce0872339fceb43ddf | refs/heads/main | 2023-05-16T21:11:06.738812 | 2021-06-10T11:43:23 | 2021-06-10T11:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | import os
TIME=300
DIR='Results'
NUM_RUNS=20
os.system('sudo sysctl -w net.ipv4.tcp_congestion_control=cubic')
# compile bftpd with alcc verus library
os.system('echo "compiling bftpd for alcc verus" && cd ../Applications/bftpd && cp Makefile_verus Makefile && make')
for trace in ['highwayGold', 'CityDrive', 'Corniche', 'rapidGold']:
for i in range(1,NUM_RUNS+1):
print (trace)
os.system('''gnome-terminal -- sh -c 'echo "Running bftpd server" && cd ../Applications/bftpd && pwd && sudo ./bftpd -D -c bftpd.conf' ''')
os.system('python run.py -tr {0} -t {1} --name {0}{2} --dir {3} --algo alcc_verus'.format(trace,TIME,i,DIR))
os.system('sudo killall bftpd')
os.system('python run.py -tr {0} -t {1} --name {0}{2} --dir {3} --algo verus'.format(trace,TIME,i,DIR))
| [
"yasir.zaki@nyu.edu"
] | yasir.zaki@nyu.edu |
834876b1059232ee24322d209800d83c0d91d521 | de7b80e949b8890e8beec5da711c33fa74a49f01 | /catnado/properties/choice_property.py | 679b7c73a5872660d58c99d697c6ee75e8c3c629 | [
"Apache-2.0"
] | permissive | tylertrussell/gae-catnado | 39a0d1a7931acbb09ab739d6536f1b475b367a5f | 91a73e9108bb724fb780cc8dcfca4da579313cb9 | refs/heads/master | 2020-03-17T20:24:25.942542 | 2018-07-25T07:02:42 | 2018-07-25T07:02:42 | 133,907,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | from google.appengine.ext import db
class ChoiceProperty(db.IntegerProperty):
"""A property for efficiently storing choices made from a finite set.
This works by mapping each choice to an integer. The choices must be hashable
(so that they can be efficiently mapped back to their corresponding index).
"""
def __init__(self, choices, make_choice_attrs=True, *args, **kwargs):
"""Constructor.
Args:
choices: A non-empty list of 2-tuples of the form (id, choice). id must be
the int to store in the database. choice may be any hashable value.
make_choice_attrs: If True, the uppercase version of each string choice is
set as an attribute whose value is the choice's int representation.
"""
super(ChoiceProperty, self).__init__(*args, **kwargs)
self.index_to_choice = dict(choices)
self.choice_to_index = dict((c, i) for i, c in self.index_to_choice.iteritems())
if make_choice_attrs:
for i, c in self.index_to_choice.iteritems():
if isinstance(c, basestring):
setattr(self, c.upper(), i)
def get_choices(self):
"""Get a list of values which may be assigned to this property."""
return self.choice_to_index.keys()
def c2i(self, choice):
"""Convert a choice to its datastore representation."""
return self.choice_to_index[choice]
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
index = super(ChoiceProperty, self).__get__(model_instance, model_class)
return self.index_to_choice[index]
def __set__(self, model_instance, value):
try:
index = self.c2i(value)
except KeyError:
raise db.BadValueError('Property %s must be one of the allowed choices: %s' %
(self.name, self.get_choices()))
super(ChoiceProperty, self).__set__(model_instance, index)
def get_value_for_datastore(self, model_instance):
"""Use underlying int value for datastore."""
return super(ChoiceProperty, self).__get__(model_instance, model_instance.__class__)
def make_value_from_datastore(self, value):
"""Convert int from datastore to choice."""
if value is None:
return None
return self.index_to_choice[value]
| [
"tigertrussell@gmail.com"
] | tigertrussell@gmail.com |
6cbf9974caf542980afdcb04dd20da0afa523385 | a835f4daa719e0060d5f0c9def9b51ff319ea17d | /MyEDmodules/HFraddamAnal/python/hfraddamanal_cfi.py | 493225a7c22025bf8d63b76188548455508b1635 | [] | no_license | pdudero/usercode | 8e2582df407aa81e1d674c5adb498e5268f54aa7 | e53c110632ef046e0944697611d727e1f8841510 | refs/heads/master | 2021-01-01T06:28:25.007997 | 2018-05-04T05:32:32 | 2018-05-04T05:32:32 | 11,696,730 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | import FWCore.ParameterSet.Config as cms
hfraddam = cms.EDAnalyzer('HFraddamAnal',
eventDataPset = cms.untracked.PSet(
fedRawDataLabel = cms.untracked.InputTag("source"),
tbTrigDataLabel = cms.untracked.InputTag("tbunpack"),
laserDigiLabel = cms.untracked.InputTag("hcalLaserReco"),
hfDigiLabel = cms.untracked.InputTag("hcalDigis"),
hcalibDigiLabel = cms.untracked.InputTag("hcalDigis"),
verbose = cms.untracked.bool(False)
),
TDCpars = cms.untracked.PSet(
TDCCutCenter = cms.untracked.double(1075),
TDCCutWindow = cms.untracked.double(25),
CorrectedTimeModCeiling = cms.untracked.int32(9999),
TimeModCeiling = cms.untracked.int32(9999)
),
ampCutsInfC = cms.bool(True),
minHit_GeVorfC = cms.double(0),
maxHit_GeVorfC = cms.double(9e99),
doPerChannel = cms.bool(True),
doTree = cms.untracked.bool(True),
hfraddamchannels = cms.vint32(-30,35,1, -30,71,1, -32,15,1, -32,51,1,
-34,35,1, -34,71,1, -36,15,1, -36,51,1,
-38,35,1, -38,71,1, -40,15,1, -40,51,1,
-41,35,1, -41,71,1,
-30,15,2, -30,51,2, -32,35,2, -32,71,2,
-34,15,2, -34,51,2, -36,35,2, -36,71,2,
-38,15,2, -38,51,2, -40,35,2, -40,71,2,
-41,15,2, -41,51,2,
30,21,1, 30,57,1, 32, 1,1, 32,37,1,
34,21,1, 34,57,1, 36, 1,1, 36,37,1,
38,21,1, 38,57,1, 40,35,1, 40,71,1,
41,19,1, 41,55,1,
30, 1,2, 30,37,2, 32,21,2, 32,57,2,
34, 1,2, 34,37,2, 36,21,2, 36,57,2,
38, 1,2, 38,37,2, 40,19,2, 40,55,2,
41,35,2, 41,71,2
),
tdcwindowsfile = cms.untracked.string("perchanwindows.txt"),
rundatesfile = cms.untracked.string("../data/rundates2012.txt"),
s2overs1meansfile = cms.untracked.string("../data/s2overs1meansperchan.txt"),
lumiprofilefile = cms.untracked.string("../data/2012-delivered-perday.csv"),
bottomfeeder = cms.untracked.int32(0xbadf00d)
)
| [
""
] | |
3d97109bf415ea9269f7025758774cb1e2f9c5ab | e5add4ba0dc980b2129830142d91956f762d9835 | /CovidResourceFinder/urls.py | bf5e8da72bbc53f8764ed3e35dba36556799fb7e | [] | no_license | VirangParekh/CovidResourceFinder | a23ddb0db9167625f2a605ec061d4f8a0bd583aa | 168bc145d1e92e8285f3a38bfd0eb0ea3effea93 | refs/heads/master | 2023-04-08T03:34:51.677830 | 2021-04-25T06:02:25 | 2021-04-25T06:02:25 | 360,268,178 | 0 | 0 | null | 2021-04-22T13:41:36 | 2021-04-21T18:27:36 | Python | UTF-8 | Python | false | false | 366 | py | from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Project-level URL routing: the Django admin plus the ResourceFinderApp routes,
# with uploaded media served from MEDIA_ROOT (development-style media serving).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('resource_finder/', include("ResourceFinderApp.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"44228173+VirangParekh@users.noreply.github.com"
] | 44228173+VirangParekh@users.noreply.github.com |
8baafd6e359d9fb1be1f926e4333393e9d332c08 | 6ceb5c8d4276165e61063edf4c4d7ddd4e23ad93 | /tests/pf/test_mag_MVI_Octree.py | be0979f77bb644d2fa5708a03a2dc24fdf135846 | [
"MIT"
] | permissive | fperez/simpeg | e3f552c654d1b57b8f6e407a8f9460799a300cba | 5babfbfb0e74a41f20dfa81eb872603fdc33b17a | refs/heads/master | 2020-09-15T19:39:35.547901 | 2020-01-10T00:25:57 | 2020-01-10T00:25:57 | 223,541,836 | 0 | 1 | MIT | 2019-11-23T06:21:14 | 2019-11-23T06:21:13 | null | UTF-8 | Python | false | false | 11,712 | py | from __future__ import print_function
import unittest
from SimPEG import (Directives, Maps,
InvProblem, Optimization, DataMisfit,
Inversion, Utils, Regularization, Mesh)
import SimPEG.PF as PF
import numpy as np
from scipy.interpolate import NearestNDInterpolator
from SimPEG.Utils import mkvc
class MVIProblemTest(unittest.TestCase):
    """End-to-end regression test for a Magnetic Vector Inversion (MVI) on an
    OcTree mesh: a Cartesian MVI warm-start followed by a sparse spherical
    inversion, checked against the known synthetic magnetization model."""
    def setUp(self):
        """Build the synthetic survey, OcTree mesh, true magnetization model
        and the two-stage inversion objects used by test_mag_inverse."""
        np.random.seed(0)
        H0 = (50000., 90., 0.)
        # The magnetization is set along a different
        # direction (induced + remanence)
        M = np.array([45., 90.])
        # Create grid of points for topography
        # Lets create a simple Gaussian topo
        # and set the active cells
        [xx, yy] = np.meshgrid(
            np.linspace(-200, 200, 50),
            np.linspace(-200, 200, 50)
        )
        b = 100
        A = 50
        zz = A*np.exp(-0.5*((xx/b)**2. + (yy/b)**2.))
        # We would usually load a topofile
        topo = np.c_[Utils.mkvc(xx), Utils.mkvc(yy), Utils.mkvc(zz)]
        # Create and array of observation points
        xr = np.linspace(-100., 100., 20)
        yr = np.linspace(-100., 100., 20)
        X, Y = np.meshgrid(xr, yr)
        Z = A*np.exp(-0.5*((X/b)**2. + (Y/b)**2.)) + 5
        # Create a MAGsurvey
        xyzLoc = np.c_[Utils.mkvc(X.T), Utils.mkvc(Y.T), Utils.mkvc(Z.T)]
        rxLoc = PF.BaseMag.RxObs(xyzLoc)
        srcField = PF.BaseMag.SrcField([rxLoc], param=H0)
        survey = PF.BaseMag.LinearSurvey(srcField)
        # Create a mesh
        h = [5, 5, 5]
        padDist = np.ones((3, 2)) * 100
        nCpad = [2, 4, 2]
        # Get extent of points
        limx = np.r_[topo[:, 0].max(), topo[:, 0].min()]
        limy = np.r_[topo[:, 1].max(), topo[:, 1].min()]
        limz = np.r_[topo[:, 2].max(), topo[:, 2].min()]
        # Get center of the mesh
        midX = np.mean(limx)
        midY = np.mean(limy)
        midZ = np.mean(limz)
        nCx = int(limx[0]-limx[1]) / h[0]
        nCy = int(limy[0]-limy[1]) / h[1]
        nCz = int(limz[0]-limz[1]+int(np.min(np.r_[nCx, nCy])/3)) / h[2]
        # Figure out full extent required from input
        extent = np.max(np.r_[nCx * h[0] + padDist[0, :].sum(),
                              nCy * h[1] + padDist[1, :].sum(),
                              nCz * h[2] + padDist[2, :].sum()])
        maxLevel = int(np.log2(extent/h[0]))+1
        # Number of cells at the small octree level
        nCx, nCy, nCz = 2**(maxLevel), 2**(maxLevel), 2**(maxLevel)
        # Define the mesh and origin
        # For now cubic cells
        mesh = Mesh.TreeMesh([np.ones(nCx)*h[0],
                              np.ones(nCx)*h[1],
                              np.ones(nCx)*h[2]])
        # Set origin
        mesh.x0 = np.r_[
            -nCx*h[0]/2.+midX,
            -nCy*h[1]/2.+midY,
            -nCz*h[2]/2.+midZ
        ]
        # Refine the mesh around topography
        # Get extent of points
        F = NearestNDInterpolator(topo[:, :2], topo[:, 2])
        zOffset = 0
        # Cycle through the first 3 octree levels
        for ii in range(3):
            dx = mesh.hx.min()*2**ii
            nCx = int((limx[0]-limx[1]) / dx)
            nCy = int((limy[0]-limy[1]) / dx)
            # Create a grid at the octree level in xy
            CCx, CCy = np.meshgrid(
                np.linspace(limx[1], limx[0], nCx),
                np.linspace(limy[1], limy[0], nCy)
            )
            z = F(mkvc(CCx), mkvc(CCy))
            # level means number of layers in current OcTree level
            for level in range(int(nCpad[ii])):
                mesh.insert_cells(
                    np.c_[
                        mkvc(CCx),
                        mkvc(CCy),
                        z-zOffset
                    ], np.ones_like(z)*maxLevel-ii,
                    finalize=False
                )
                zOffset += dx
        mesh.finalize()
        self.mesh = mesh
        # Define an active cells from topo
        actv = Utils.surface2ind_topo(mesh, topo)
        nC = int(actv.sum())
        model = np.zeros((mesh.nC, 3))
        # Convert the inclination declination to vector in Cartesian
        M_xyz = Utils.matutils.dip_azimuth2cartesian(M[0], M[1])
        # Get the indices of the magnetized block
        ind = Utils.ModelBuilder.getIndicesBlock(
            np.r_[-20, -20, -10], np.r_[20, 20, 25],
            mesh.gridCC,
        )[0]
        # Assign magnetization values
        model[ind, :] = np.kron(
            np.ones((ind.shape[0], 1)), M_xyz*0.05
        )
        # Remove air cells
        self.model = model[actv, :]
        # Create active map to go from reduce set to full
        self.actvMap = Maps.InjectActiveCells(mesh, actv, np.nan)
        # Create reduced identity map
        idenMap = Maps.IdentityMap(nP=nC*3)
        # Create the forward model operator
        prob = PF.Magnetics.MagneticIntegral(
            mesh, chiMap=idenMap, actInd=actv,
            modelType='vector'
        )
        # Pair the survey and problem
        survey.pair(prob)
        # Compute some data and add some random noise
        data = prob.fields(Utils.mkvc(self.model))
        std = 5 # nT
        data += np.random.randn(len(data))*std
        wd = np.ones(len(data))*std
        # Assign data and uncertainties to the survey
        survey.dobs = data
        survey.std = wd
        # Create an projection matrix for plotting later
        actvPlot = Maps.InjectActiveCells(mesh, actv, np.nan)
        # Create sensitivity weights from our linear forward operator
        rxLoc = survey.srcField.rxList[0].locs
        # This Mapping connects the regularizations for the three-component
        # vector model
        wires = Maps.Wires(('p', nC), ('s', nC), ('t', nC))
        # Create sensitivity weights from our linear forward operator
        # so that all cells get equal chance to contribute to the solution
        wr = np.sum(prob.G**2., axis=0)**0.5
        wr = (wr/np.max(wr))
        # Create three regularization for the different components
        # of magnetization
        reg_p = Regularization.Sparse(mesh, indActive=actv, mapping=wires.p)
        reg_p.mref = np.zeros(3*nC)
        reg_p.cell_weights = (wires.p * wr)
        reg_s = Regularization.Sparse(mesh, indActive=actv, mapping=wires.s)
        reg_s.mref = np.zeros(3*nC)
        reg_s.cell_weights = (wires.s * wr)
        reg_t = Regularization.Sparse(mesh, indActive=actv, mapping=wires.t)
        reg_t.mref = np.zeros(3*nC)
        reg_t.cell_weights = (wires.t * wr)
        reg = reg_p + reg_s + reg_t
        reg.mref = np.zeros(3*nC)
        # Data misfit function
        dmis = DataMisfit.l2_DataMisfit(survey)
        dmis.W = 1./survey.std
        # Add directives to the inversion
        opt = Optimization.ProjectedGNCG(maxIter=30, lower=-10, upper=10.,
                                         maxIterLS=20, maxIterCG=20, tolCG=1e-4)
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
        # A list of directive to control the inversion
        betaest = Directives.BetaEstimate_ByEig()
        # Here is where the norms are applied
        # Pick a threshold parameter empirically based on the distribution of
        # model parameters
        IRLS = Directives.Update_IRLS(
            f_min_change=1e-3, maxIRLSiter=0, beta_tol=5e-1
        )
        # Pre-conditioner
        update_Jacobi = Directives.UpdatePreconditioner()
        inv = Inversion.BaseInversion(invProb,
                                      directiveList=[IRLS, update_Jacobi, betaest])
        # Run the inversion
        m0 = np.ones(3*nC) * 1e-4 # Starting model
        mrec_MVIC = inv.run(m0)
        self.mstart = Utils.matutils.cartesian2spherical(mrec_MVIC.reshape((nC, 3), order='F'))
        beta = invProb.beta
        dmis.prob.coordinate_system = 'spherical'
        dmis.prob.model = self.mstart
        # Create a block diagonal regularization
        wires = Maps.Wires(('amp', nC), ('theta', nC), ('phi', nC))
        # Create a Combo Regularization
        # Regularize the amplitude of the vectors
        reg_a = Regularization.Sparse(mesh, indActive=actv,
                                      mapping=wires.amp)
        reg_a.norms = np.c_[0., 0., 0., 0.] # Sparse on the model and its gradients
        reg_a.mref = np.zeros(3*nC)
        # Regularize the vertical angle of the vectors
        reg_t = Regularization.Sparse(mesh, indActive=actv,
                                      mapping=wires.theta)
        reg_t.alpha_s = 0. # No reference angle
        reg_t.space = 'spherical'
        reg_t.norms = np.c_[2., 0., 0., 0.] # Only norm on gradients used
        # Regularize the horizontal angle of the vectors
        reg_p = Regularization.Sparse(mesh, indActive=actv,
                                      mapping=wires.phi)
        reg_p.alpha_s = 0. # No reference angle
        reg_p.space = 'spherical'
        reg_p.norms = np.c_[2., 0., 0., 0.] # Only norm on gradients used
        reg = reg_a + reg_t + reg_p
        reg.mref = np.zeros(3*nC)
        Lbound = np.kron(np.asarray([0, -np.inf, -np.inf]), np.ones(nC))
        Ubound = np.kron(np.asarray([10, np.inf, np.inf]), np.ones(nC))
        # Add directives to the inversion
        opt = Optimization.ProjectedGNCG(maxIter=20,
                                         lower=Lbound,
                                         upper=Ubound,
                                         maxIterLS=20,
                                         maxIterCG=30,
                                         tolCG=1e-3,
                                         stepOffBoundsFact=1e-3,
                                         )
        opt.approxHinv = None
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=beta)
        # Here is where the norms are applied
        IRLS = Directives.Update_IRLS(f_min_change=1e-4, maxIRLSiter=20,
                                      minGNiter=1, beta_tol=0.5,
                                      coolingRate=1, coolEps_q=True,
                                      betaSearch=False)
        # Special directive specific to the mag amplitude problem. The sensitivity
        # weights are update between each iteration.
        ProjSpherical = Directives.ProjectSphericalBounds()
        update_SensWeight = Directives.UpdateSensitivityWeights()
        update_Jacobi = Directives.UpdatePreconditioner()
        self.inv = Inversion.BaseInversion(
            invProb,
            directiveList=[
                ProjSpherical, IRLS, update_SensWeight, update_Jacobi
            ]
        )
    def test_mag_inverse(self):
        """Run the spherical MVI and require the recovered magnetization to be
        within 25% relative L2 error of the true synthetic model."""
        # Run the inversion
        mrec_MVI_S = self.inv.run(self.mstart)
        nC = int(mrec_MVI_S.shape[0]/3)
        vec_xyz = Utils.matutils.spherical2cartesian(
            mrec_MVI_S.reshape((nC, 3), order='F')).reshape((nC, 3), order='F')
        residual = np.linalg.norm(vec_xyz-self.model) / np.linalg.norm(self.model)
        # print(residual)
        # import matplotlib.pyplot as plt
        # mrec = np.sum(vec_xyz**2., axis=1)**0.5
        # plt.figure()
        # ax = plt.subplot(1, 2, 1)
        # midx = 65
        # self.mesh.plotSlice(self.actvMap*mrec, ax=ax, normal='Y', ind=midx,
        #                     grid=True, clim=(0, 0.03))
        # ax.set_xlim(self.mesh.gridCC[:, 0].min(), self.mesh.gridCC[:, 0].max())
        # ax.set_ylim(self.mesh.gridCC[:, 2].min(), self.mesh.gridCC[:, 2].max())
        # plt.show()
        self.assertTrue(residual < 0.25)
        # self.assertTrue(residual < 0.05)
if __name__ == '__main__':
    # Allow running this test module directly: python test_mag_MVI_Octree.py
    unittest.main()
| [
"fourndo@gmail.com"
] | fourndo@gmail.com |
e308f0aa83b793bc83ed23a3d964b239a72ed6de | d4a5f8144855b201071c4657e37a7ad6b5994aff | /users/models.py | 7ae3002a7cc0a15449464e78df4109b39fe0abb8 | [] | no_license | Muratcol/Higher-Level-Django-Project | d453761197756d5b345640570f5a7b00c7948319 | cd82cc6bdc01196ad9a602be4bcd11ee655e1e1f | refs/heads/master | 2022-04-26T14:39:05.641565 | 2020-04-25T14:28:46 | 2020-04-25T14:28:46 | 258,793,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | from django.db import models
from django.contrib.auth.models import User
from PIL import Image
# Create your models here.
class Profile(models.Model):
    """One-to-one profile extension of ``auth.User`` holding an avatar image."""
    user = models.OneToOneField(User, on_delete = models.CASCADE)
    image = models.ImageField(default = 'default.jpg', upload_to = 'profile_pics')
    def __str__(self):
        return f'{self.user.username} Profile'
    def save(self, *args, **kwargs):
        """Persist the profile, then shrink the avatar to at most 300x300 px."""
        super().save(*args, **kwargs)
        # Re-open the stored file from disk to inspect/resize it.
        # NOTE(review): ``self.image.path`` assumes a local-filesystem storage
        # backend — confirm this if a remote storage (e.g. S3) is ever used.
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            # thumbnail() resizes in place while preserving the aspect ratio.
            img.thumbnail(output_size)
            img.save(self.image.path)
| [
"muratcolyaran@yahoo.com.tr"
] | muratcolyaran@yahoo.com.tr |
833150ec357d3ab8a3ffb1d0b530443494e22440 | 6423626dcb7c6d2d261e9c87095736bcff888359 | /mainApp/views.py | 0f89809047fb03adb631c5f00f6269b3e53f4dd1 | [] | no_license | andrew-cmdltt/blog | d39031f7e1c8c5402fb201676c4b360c6b2ad3eb | 96e819ad1da056739c4ed854bbb7426d27f80c39 | refs/heads/master | 2022-11-05T13:32:32.720195 | 2020-06-22T07:07:37 | 2020-06-22T07:07:37 | 274,064,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.views.generic.base import View
from django.views.generic.edit import FormView
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
from posts.models import Post
from django.http import HttpResponse
class PostController():
    """Namespace grouping the function-based views for listing, adding and
    searching posts.  The methods deliberately take ``request`` (no ``self``)
    and are meant to be referenced as ``PostController.index`` etc. in the
    URLconf."""
    def index(request):
        """Render the current user's posts, or a message page when anonymous."""
        if not request.user.is_authenticated:
            return render(request, 'mainApp/message.html', {"message": "You are not authorized"})
        posts = Post.objects.filter(owner_id=request.user.pk)
        return render(request, "mainApp/index.html", {"posts": posts})
    def addPost(request):
        """Show the "add post" form on a GET request.

        NOTE(review): non-GET requests fall through and return ``None``,
        which Django rejects — confirm how POST submissions are handled.
        """
        # Bug fix: Django's HttpRequest exposes the HTTP verb as
        # ``request.method`` (lowercase); ``request.Method`` raised
        # AttributeError on every call.
        if request.method == 'GET':
            return render(request, "posts/add.html")
    def searchPosts(request):
        """Render posts whose title contains the ``title`` GET parameter."""
        posts = Post.objects.filter(title__contains=request.GET['title'])
        return render(request, "mainApp/index.html", {"posts": posts})
class RegisterFormView(FormView):
    """Sign-up view: renders Django's UserCreationForm and creates the user."""
    form_class = UserCreationForm
    success_url = "/login/"
    template_name = "mainApp/register.html"
    def form_valid(self, form):
        # Persist the new user before delegating the redirect to FormView.
        form.save()
        return super(RegisterFormView, self).form_valid(form)
class LoginFormView(FormView):
    """Login view built on Django's AuthenticationForm."""
    form_class = AuthenticationForm
    template_name = "mainApp/login.html"
    success_url = "/"
    def form_valid(self, form):
        # AuthenticationForm has already validated the credentials; fetch the
        # authenticated user and open a session for it.
        self.user = form.get_user()
        login(self.request, self.user)
        return super(LoginFormView, self).form_valid(form)
class LogoutView(View):
    """End the current session and send the user back to the login page."""
    def get(self, request):
        logout(request)
        return HttpResponseRedirect("/login")
| [
"menwhohas2279@gmail.com"
] | menwhohas2279@gmail.com |
26cdf1910bc9e8ac0aefa7ee992aabf7376ac5ae | a2f0abc7e1ec380f387d51dcefd1c5c2e0f6ba21 | /auth/httpclient.py | 4eeac4e2eb21f2faa3c06f0a709419106a2ec479 | [] | no_license | sanand0/sms-greetings | 0771537fd73a0769cb0e5eff2c9db56d401f6889 | ab9d7757104f8b1d1129df598f78e4b507d07bc3 | refs/heads/master | 2021-01-15T11:48:30.030591 | 2010-03-19T14:12:48 | 2010-03-19T14:12:48 | 32,197,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | """
web.py httpclient
~~~~~~~~~~~~~~~~
HTTP client to support `tornado.auth` on web.py.
:copyright: 2010 by tipfy.org and s-anand.net
:license: Apache License Version 2.0. See LICENSE.txt for more details.
"""
import functools
import logging
import httplib2
from webpyauth import RequestRedirect
browser = httplib2.Http()
class HttpResponseError(object):
    """A dummy response used when urlfetch raises an exception."""
    # Mimics the attributes that tornado.auth reads off a response object.
    code = 404
    body = '404 Not Found'
    error = 'Error 404'
class AsyncHTTPClient(object):
"""An blocking HTTP client that uses urllib."""
def fetch(self, url, callback, **kwargs):
if callback is None:
return None
try:
status, content = browser.request(url, **kwargs)
code = status.status
setattr(status, 'error', (code < 200 or code >= 300) and code or None)
setattr(status, 'body', content)
try:
return callback(status)
except RequestRedirect, e:
raise e
except Exception, e:
logging.error("Exception during callback", exc_info=True)
except RequestRedirect, e:
raise e
except Exception, e:
result = HttpResponseError()
| [
"subramanian_anand@localhost"
] | subramanian_anand@localhost |
0dec940c8d9ee73e47f55d49a771aebb21beec6d | 55d560fe6678a3edc9232ef14de8fafd7b7ece12 | /tools/build/test/rescan_header.py | 36a007eb406fa403704cb5091d42f2606d7901ce | [
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | stardog-union/boost | ec3abeeef1b45389228df031bf25b470d3d123c5 | caa4a540db892caa92e5346e0094c63dea51cbfb | refs/heads/stardog/develop | 2021-06-25T02:15:10.697006 | 2020-11-17T19:50:35 | 2020-11-17T19:50:35 | 148,681,713 | 0 | 0 | BSL-1.0 | 2020-11-17T19:50:36 | 2018-09-13T18:38:54 | C++ | UTF-8 | Python | false | false | 5,653 | py | #!/usr/bin/python
# Copyright 2012 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# Test a header loop that depends on (but does not contain) a generated header.
# header1.h and header2.h include each other; header2.h also includes the
# generated header3.h, so the scanner must reach header3.h through the cycle.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.h", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#include "header3.h"
#endif
""")
t.write("header3.in", "/* empty file */\n")
t.write("jamroot.jam", """\
import common ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp : <implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2"])
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test a linear sequence of generated headers.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.in", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.in", "/* empty file */\n")
t.write("jamroot.jam", """\
import common ;
make header1.h : header1.in : @common.copy ;
make header2.h : header2.in : @common.copy ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp :
<implicit-dependency>header1.h
<implicit-dependency>header2.h
<implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header1.h")
t.expect_addition("bin/header2.h")
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test a loop in generated headers.
t.write("test.cpp", '#include "header1.h"\n')
t.write("header1.in", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#endif
""")
t.write("jamroot.jam", """\
import common ;
actions copy {
sleep 1
cp $(>) $(<)
}
make header1.h : header1.in : @common.copy ;
make header2.h : header2.in : @common.copy ;
make header3.h : header3.in : @common.copy ;
obj test : test.cpp :
<implicit-dependency>header1.h
<implicit-dependency>header2.h
<implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header1.h")
t.expect_addition("bin/header2.h")
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test.obj")
t.expect_nothing_more()
t.rm(".")
# Test that all the dependencies of a loop are updated before any of the
# dependents.
t.write("test1.cpp", '#include "header1.h"\n')
t.write("test2.cpp", """\
#include "header2.h"
int main() {}
""")
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.h", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header1.h"
#include "header3.h"
#endif
""")
t.write("header3.in", "\n")
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("jamroot.jam", """\
import common ;
import os ;
if [ os.name ] = NT
{
SLEEP = call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
rule copy { common.copy $(<) : $(>) ; }
actions copy { $(SLEEP) 1 }
make header3.h : header3.in : @copy ;
exe test : test2.cpp test1.cpp : <implicit-dependency>header3.h ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header3.h")
t.expect_addition("bin/$toolset/debug*/test1.obj")
t.expect_addition("bin/$toolset/debug*/test2.obj")
t.expect_addition("bin/$toolset/debug*/test.exe")
t.expect_nothing_more()
t.touch("header3.in")
t.run_build_system(["-j2", "test"])
t.expect_touch("bin/header3.h")
t.expect_touch("bin/$toolset/debug*/test1.obj")
t.expect_touch("bin/$toolset/debug*/test2.obj")
t.expect_touch("bin/$toolset/debug*/test.exe")
t.expect_nothing_more()
t.rm(".")
# Test a loop that includes a generated header
t.write("test1.cpp", '#include "header1.h"\n')
t.write("test2.cpp", """\
#include "header2.h"
int main() {}
""")
t.write("header1.h", """\
#ifndef HEADER1_H
#define HEADER1_H
#include "header2.h"
#endif
""")
t.write("header2.in", """\
#ifndef HEADER2_H
#define HEADER2_H
#include "header3.h"
#endif
""")
t.write("header3.h", """\
#ifndef HEADER3_H
#define HEADER3_H
#include "header1.h"
#endif
""")
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("jamroot.jam", """\
import common ;
import os ;
if [ os.name ] = NT
{
SLEEP = call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
rule copy { common.copy $(<) : $(>) ; }
actions copy { $(SLEEP) 1 }
make header2.h : header2.in : @copy ;
exe test : test2.cpp test1.cpp : <implicit-dependency>header2.h <include>. ;
""")
t.run_build_system(["-j2", "test"])
t.expect_addition("bin/header2.h")
t.expect_addition("bin/$toolset/debug*/test1.obj")
t.expect_addition("bin/$toolset/debug*/test2.obj")
t.expect_addition("bin/$toolset/debug*/test.exe")
t.expect_nothing_more()
t.cleanup()
| [
"james.pack@stardog.com"
] | james.pack@stardog.com |
a6f10eb4cac4d9ebb9ce1cafd18a0bd881d177e0 | 9f3151a966b1e682dd24d806d4705145c8b10c47 | /0x11-python-network_1/6-post_email.py | ba2b1dc4d81b3f013abb2827eab4d3cb8942dca6 | [] | no_license | DiegoSusviela/holbertonschool-higher_level_programming | 5ace3a63a1f79fc7d0e870b8308dc10964b7bb21 | 4cd3e677bd0ffd6b7fc034a53b8c357893b87a60 | refs/heads/main | 2023-08-14T05:12:20.926985 | 2021-09-27T15:47:15 | 2021-09-27T15:47:15 | 361,817,218 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | #!/usr/bin/python3
"""alibaba y los 40 ladrones"""
import requests
from sys import argv
if __name__ == "__main__":
payload = {'email': argv[2]}
r = requests.post(argv[1], data=payload)
print(r.text)
| [
"dieguitosus@hotmail.com"
] | dieguitosus@hotmail.com |
f5a8b97a66c04bb5a50c0064ce19657b48d5b3ef | 596b6f769a19bd597ca235263b4518be3227b0f7 | /ExeDemo/Exe14.py | 148b0eb6792aa548ad6088c8760e6848c115f327 | [] | no_license | himanim-ept/Python-Execerise-1 | 0ac5022e6cd5f6fe6887b1154258c3c62d393150 | 1663cba8bae6b1e6d186e87c7058c70023e65ca7 | refs/heads/master | 2023-03-15T06:18:51.546786 | 2021-03-10T11:02:19 | 2021-03-10T11:02:19 | 336,448,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #Write a Python program to count the number occurrence of a specific character in a string.
txt = "Hello, My name is Himani"
x = txt.count("e") # count particular charachter
print(x) | [
"himanim@emiprotechnologies.com"
] | himanim@emiprotechnologies.com |
87329ac75e0a03161d9c4ec7e50671e1a8c5b0d0 | 22299195d67f887d8de9f8764e8a85680cd3416c | /class7 (Color Filtering - OpenCV with Python for Image and Video Analysis 7)/main.py | e4430a318df1dc716db227d2a786414f7b6eb3ff | [] | no_license | EnggQasim/PythonOpenCV | 71268cb9bfa603b9aec1e239756f515f9693f74c | 2f1cd61df0fd520dbdc0e41a52ebfc4da410c771 | refs/heads/master | 2021-01-01T15:29:14.768477 | 2017-07-18T18:11:19 | 2017-07-18T18:11:19 | 97,629,494 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import cv2
import numpy as np
# Open camera device 1 (index 0 is usually the built-in camera).
cap = cv2.VideoCapture(1)
# Show the live frame, a red-color mask and the masked result until Esc is pressed.
while True:
    _, frame = cap.read()
    # Hue-based thresholds in HSV are more robust for color filtering than BGR.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #hsv hue sat value
    lower_red = np.array([150,150,50])
    upper_red = np.array([180, 255, 150])
    # Binary mask: 255 where the HSV pixel falls inside [lower_red, upper_red].
    mask = cv2.inRange(hsv, lower_red, upper_red)
    # Keep only the masked (red) pixels of the original frame.
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('Frame', frame)
    cv2.imshow('Mask', mask)
    cv2.imshow('Result', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27: # Esc key
        break
cv2.destroyAllWindows()
# NOTE(review): the next line of the original file reads ``cv2.release()`` —
# the cv2 module has no release(); it should be ``cap.release()`` to free the
# camera device (as it stands, it raises AttributeError on exit).
cv2.release() | [
"m.qasim077@gmail.com"
] | m.qasim077@gmail.com |
09485a4a913d81b199e0e4f85f59f811f3947951 | 867bb24022e8908e66b9dbe52bcac81cc16e86db | /myshop/Employee/apps.py | 907e9dc183c55890576e53643f99d014415ddbe7 | [] | no_license | Gonza12345/Diplom_shop | 86120886b0bf77cb871d3de2f64075592bed09c8 | 0527561d9746d6e5f73c62b74814135af7aa52e8 | refs/heads/master | 2020-05-31T02:28:52.147478 | 2019-06-03T19:34:39 | 2019-06-03T19:34:39 | 190,066,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class EmployeeConfig(AppConfig):
    """AppConfig for the Employee application.

    NOTE(review): ``name`` is set to 'orders' although this file lives in the
    Employee app — looks copy-pasted from another app; confirm Django actually
    loads this app under that label.
    """
    name = 'orders'
| [
"uad134679@gmail.com"
] | uad134679@gmail.com |
51fe296f9a06966e6e243a907c4209236b1137e9 | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/ebooks/migrations/0007_auto_20190429_1732.py | 3b567492131adb79f3e21d1f851220e0b4b14f01 | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 1,340 | py | # Generated by Django 2.1.7 on 2019-04-29 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-language (en/it) description, slug and title fields to Chapter."""
    dependencies = [
        ('ebooks', '0006_auto_20190429_1727'),
    ]
    operations = [
        migrations.AddField(
            model_name='chapter',
            name='description_en',
            field=models.TextField(null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='chapter',
            name='description_it',
            field=models.TextField(null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='chapter',
            name='slug_en',
            field=models.SlugField(null=True, unique=True, verbose_name='slug'),
        ),
        migrations.AddField(
            model_name='chapter',
            name='slug_it',
            field=models.SlugField(null=True, unique=True, verbose_name='slug'),
        ),
        migrations.AddField(
            model_name='chapter',
            name='title_en',
            field=models.CharField(max_length=50, null=True, verbose_name='title'),
        ),
        migrations.AddField(
            model_name='chapter',
            name='title_it',
            field=models.CharField(max_length=50, null=True, verbose_name='title'),
        ),
    ]
| [
"flavius476@gmail.com"
] | flavius476@gmail.com |
a4c78496e3e6c0ca7c8343f03b0e455be84de413 | 585fcfd09bcc37ad73c6f301cb8b16261a93df7e | /projects/pyDOE-master/pyDOE/build_regression_matrix.py | 5ea2c2f53342a023823a115a04a403407c9ccc3d | [
"MIT",
"BSD-3-Clause"
] | permissive | louisXW/Surrogate-Model | e9e8de3ab892eed2f8ed424e09b770e67126c1f3 | 65ec8a89c1b7a19d4c04c62e2c988340c96c69f8 | refs/heads/master | 2021-07-21T09:37:41.045898 | 2017-10-30T11:49:35 | 2017-10-30T11:49:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | """
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
import numpy as np
def grep(haystack, needle):
    """Yield the start index of each non-overlapping occurrence of *needle*
    in the string *haystack* (an empty generator when there is none)."""
    position = haystack.find(needle)
    while position != -1:
        yield position
        # Resume the search just past the match so occurrences never overlap.
        position = haystack.find(needle, position + len(needle))
def build_regression_matrix(H, model, build=None):
    """
    Build a regression matrix using a DOE matrix and a list of monomials.
    Parameters
    ----------
    H : 2d-array
        Design (DOE) matrix, one experiment per row (or a column vector).
    model : str
        Space-separated monomial tokens, e.g. 'x0 x1 x0x1'.
    build : bool-array
        Optional mask selecting which monomials to keep (default: all).
    Returns
    -------
    R : 2d-array
        The monomials evaluated at each experiment.
    """
    ListOfTokens = model.split(' ')
    if H.shape[1] == 1:
        size_index = len(str(H.shape[0]))
    else:
        size_index = len(str(H.shape[1]))
    if build is None:
        build = [True] * len(ListOfTokens)
    # Test if the vector has the wrong direction (lines instead of columns)
    if H.shape[0] == 1:
        H = H.T
    # Collect the list of monomials
    # NOTE(review): grep() is a generator function, so Monom_Index collects
    # generator objects; the np.sort/np.unique calls below therefore do not
    # operate on match positions, and Monom_Index is never used afterwards —
    # this looks like dead/broken code carried over from the Scilab original.
    Monom_Index = []
    for i in range(len(ListOfTokens)):
        if build[i]:
            Monom_Index += [grep(ListOfTokens, 'x' + str(0) * (size_index - \
                                                               len(str(i))) + str(i))]
    Monom_Index = -np.sort(-Monom_Index)
    Monom_Index = np.unique(Monom_Index)
    # NOTE(review): ListOfTokens is a plain list, so ListOfTokens.shape[0]
    # below raises AttributeError — presumably len(ListOfTokens) was intended.
    # The replacement text 'H(' ... ')' / 'H[i,' ... ')' is also not valid
    # Python indexing for the eval() calls at the bottom; confirm the intended
    # 'H[...]' forms before relying on this function.
    if H.shape[1] == 1:
        nb_var = H.shape[0] # vector "mode": the number of vars is equal to the number of lines of H
        VectorMode = True
        for i in range(nb_var):
            for j in range(ListOfTokens.shape[0]):
                ListOfTokens[j] = ListOfTokens[j].replace(
                    'x' + str(0) * (size_index - len(str(i))) + str(i),
                    'H(' + str(i) + ')')
    else:
        nb_var = H.shape[0] # matrix "mode": the number of vars is equal to the number of columns of H
        VectorMode = False
        for i in range(nb_var):
            for j in range(ListOfTokens.shape[0]):
                ListOfTokens[j] = ListOfTokens[j].replace(
                    'x' + str(0) * (size_index - len(str(i))) + str(i),
                    'H[i,' + str(i) + ')')
    # Now build the regression matrix
    if VectorMode:
        R = np.zeros((len(ListOfTokens), 1))
        for j in range(len(ListOfTokens)):
            R[j, 0] = eval(ListOfTokens[j])
    else:
        R = np.zeros((H.shape[0], len(ListOfTokens)))
        for i in range(H.shape[0]):
            for j in range(len(ListOfTokens)):
                R[i, j] = eval(ListOfTokens[j])
    return R
| [
"quanpan302@hotmail.com"
] | quanpan302@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.