| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f2dee36385c4a7f1048a4323ce74bddb78d1e287
|
eaf4e734f5238ede8867f86e298f7e13c5b063fc
|
/RequestLibrary/request/Sample03.py
|
6203e8264257b553f69010310be314202893d4ad
|
[] |
no_license
|
asifurrouf/RestApiAutomation
|
20ca5c19d86996fdc838fb8432f6fab42b1a355e
|
57a1685b8197999d308025a4f719c0d341c74ea2
|
refs/heads/master
| 2020-06-28T21:42:50.653762
| 2017-05-19T06:09:56
| 2017-05-19T06:09:56
| 74,468,130
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
import responses
import requests
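# responses intercepts calls made through the requests library, so no real HTTP request goes out.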
@responses.activate
def test_my_api():
responses.add(responses.GET, 'http://twitter.com/api/1/foobar',
json={"error": "not found"}, status=404)
resp = requests.get('http://twitter.com/api/1/foobar')
assert resp.json() == {"error": "not found"}
assert len(responses.calls) == 1
assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar'
assert responses.calls[0].response.text == '{"error": "not found"}'
|
[
"rouf.asifur@gmail.com"
] |
rouf.asifur@gmail.com
|
c98f9a9726047f4b9c13bbe9267c04b7238554f5
|
2e15dcd969888a4202a10df87912398b3491faa5
|
/eoc/graph_scene.py
|
4722aa694123bb86824fff262d42e6e0a237fca3
|
[] |
no_license
|
moses1994/manim
|
4f1ebe228b90462db97bb517b6b17f87e3ed0b3b
|
982ddb4c14e454293c813bc4d1d5d9a0385ae5ad
|
refs/heads/master
| 2021-01-13T03:16:06.288277
| 2016-12-26T15:10:38
| 2016-12-26T15:10:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,804
|
py
|
from helpers import *
from scene import Scene
# from topics.geometry import
from mobject.tex_mobject import TexMobject, TextMobject
from mobject.vectorized_mobject import VGroup, VectorizedPoint
from animation.simple_animations import Write, ShowCreation
from topics.number_line import NumberLine
from topics.functions import ParametricFunction
from topics.geometry import Rectangle
class GraphScene(Scene):
CONFIG = {
"x_min" : -1,
"x_max" : 10,
"x_axis_width" : 9,
"x_tick_frequency" : 1,
"x_leftmost_tick" : None, #Change if different from x_min
"x_labeled_nums" : range(1, 10),
"x_axis_label" : "x",
"y_min" : -1,
"y_max" : 10,
"y_axis_height" : 6,
"y_tick_frequency" : 1,
"y_bottom_tick" : None, #Change if different from y_min
"y_labeled_nums" : range(1, 10),
"y_axis_label" : "y",
"axes_color" : GREY,
"graph_origin" : 2.5*DOWN + 4*LEFT,
"y_axis_numbers_nudge" : 0.4*UP+0.5*LEFT,
}
def setup_axes(self, animate = True):
x_num_range = float(self.x_max - self.x_min)
x_axis = NumberLine(
x_min = self.x_min,
x_max = self.x_max,
space_unit_to_num = self.x_axis_width/x_num_range,
tick_frequency = self.x_tick_frequency,
leftmost_tick = self.x_leftmost_tick or self.x_min,
numbers_with_elongated_ticks = self.x_labeled_nums,
color = self.axes_color
)
x_axis.shift(self.graph_origin - x_axis.number_to_point(0))
if self.x_labeled_nums:
x_axis.add_numbers(*self.x_labeled_nums)
x_label = TextMobject(self.x_axis_label)
x_label.next_to(x_axis, RIGHT+UP, buff = SMALL_BUFF)
x_label.shift_onto_screen()
x_axis.add(x_label)
self.x_axis_label_mob = x_label
y_num_range = float(self.y_max - self.y_min)
y_axis = NumberLine(
x_min = self.y_min,
x_max = self.y_max,
space_unit_to_num = self.y_axis_height/y_num_range,
tick_frequency = self.y_tick_frequency,
leftmost_tick = self.y_bottom_tick or self.y_min,
numbers_with_elongated_ticks = self.y_labeled_nums,
color = self.axes_color
)
y_axis.shift(self.graph_origin-y_axis.number_to_point(0))
y_axis.rotate(np.pi/2, about_point = y_axis.number_to_point(0))
if self.y_labeled_nums:
y_axis.add_numbers(*self.y_labeled_nums)
y_axis.numbers.shift(self.y_axis_numbers_nudge)
y_label = TextMobject(self.y_axis_label)
y_label.next_to(y_axis.get_top(), RIGHT, buff = 2*MED_BUFF)
y_label.shift_onto_screen()
y_axis.add(y_label)
self.y_axis_label_mob = y_label
if animate:
self.play(Write(VGroup(x_axis, y_axis)))
else:
            self.add(x_axis, y_axis)
self.x_axis, self.y_axis = x_axis, y_axis
def coords_to_point(self, x, y):
assert(hasattr(self, "x_axis") and hasattr(self, "y_axis"))
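        # Take the x-component from the x-axis point and the y-component from the y-axis point.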
result = self.x_axis.number_to_point(x)[0]*RIGHT
result += self.y_axis.number_to_point(y)[1]*UP
return result
def graph_function(self, func,
color = BLUE,
animate = False,
is_main_graph = True,
):
def parameterized_graph(alpha):
x = interpolate(self.x_min, self.x_max, alpha)
return self.coords_to_point(x, func(x))
graph = ParametricFunction(parameterized_graph, color = color)
if is_main_graph:
self.graph = graph
self.func = func
if animate:
self.play(ShowCreation(graph))
self.add(graph)
return graph
def input_to_graph_point(self, x):
assert(hasattr(self, "graph"))
alpha = (x - self.x_min)/(self.x_max - self.x_min)
return self.graph.point_from_proportion(alpha)
def angle_of_tangent(self, x, dx = 0.01):
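        # Approximate the tangent direction with a finite difference between two nearby graph points.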
assert(hasattr(self, "graph"))
        vect = self.input_to_graph_point(x + dx) - self.input_to_graph_point(x)
return angle_of_vector(vect)
def label_graph(self, graph, label = "f(x)",
proportion = 0.7,
direction = LEFT,
buff = 2*MED_BUFF,
animate = True
):
label = TexMobject(label)
label.highlight(graph.get_color())
label.next_to(
graph.point_from_proportion(proportion),
direction,
buff = buff
)
if animate:
self.play(Write(label))
self.add(label)
return label
def get_riemann_rectangles(self,
x_min = None,
x_max = None,
dx = 0.1,
stroke_width = 1,
start_color = BLUE,
end_color = GREEN):
assert(hasattr(self, "func"))
x_min = x_min if x_min is not None else self.x_min
x_max = x_max if x_max is not None else self.x_max
rectangles = VGroup()
for x in np.arange(x_min, x_max, dx):
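            # Each rectangle spans [x, x + dx] and reaches the curve at the right endpoint (a right Riemann sum).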
points = VGroup(*map(VectorizedPoint, [
self.coords_to_point(x, 0),
self.coords_to_point(x+dx, self.func(x+dx)),
]))
rect = Rectangle()
rect.replace(points, stretch = True)
rect.set_fill(opacity = 1)
rectangles.add(rect)
rectangles.gradient_highlight(start_color, end_color)
rectangles.set_stroke(BLACK, width = stroke_width)
return rectangles
|
[
"grantsanderson7@gmail.com"
] |
grantsanderson7@gmail.com
|
baf0ea5868c97078b0ac00e5849c440553700302
|
b9a131dd85fe5f2d2f5b16c97b1f859ede5a4914
|
/Curso_em_Vídeo/utilizando while/Desafio02.py
|
e910749cf987c6cb33cc710b0347cbf2a360b4e0
|
[] |
no_license
|
juancassioo/python-sistemas
|
131f218bf8fa1bebf1bc6e5fbe3222571ca7a42f
|
378596d1c630357b1b1958a3b4e3e7f6f96dd5d1
|
refs/heads/main
| 2023-07-04T20:27:22.859839
| 2021-08-09T01:10:37
| 2021-08-09T01:10:37
| 394,105,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from random import randint
computador = randint(1,10)
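# randint(1, 10) is inclusive on both ends.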
print('Try to guess my number')
acertou = False
palpites = 0
while not acertou:
    jogador = int(input('Enter a number: '))
    palpites += 1
    if jogador == computador:
        acertou = True
    elif jogador < computador:
        print('Higher...')
    else:
        print('Lower...')
print('The number was {}'.format(computador))
print('You got it in {} guesses'.format(palpites))
|
[
"noreply@github.com"
] |
juancassioo.noreply@github.com
|
0b3ff650e9dfb71ebdf0317b59d8e2385c2daa4b
|
795b0d3fa770b9e136bf5b0790cf45740025a2ed
|
/AI/transformerTester.py
|
031b469bee275bacd7f638fc85b5739de480c2d5
|
[] |
no_license
|
Baes20/AIstuff
|
8ff812c537d94d0793b33c21d6ec6adbe1d09008
|
307032ac4c147cab951b579a2eabd82a5acf8abe
|
refs/heads/master
| 2020-04-19T01:39:19.814505
| 2019-09-19T01:12:26
| 2019-09-19T01:12:26
| 167,877,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
from FXTMdataset import MarketDataGenerator
from Attention3 import DeepPredictor
import tensorflow as tf
import random
import matplotlib.pyplot as plt
import numpy as np
import sys
from NASDAQ100dataset import NasdaqGenerator
from Luong import Luong
from Transformer import Transformer
sys.setrecursionlimit(1500)
def show_rand_sample(validY, validPredict, num_samples):
for i in range(num_samples):
rand = random.randrange(validY.shape[0])
rand2 = random.randrange(validY.shape[2])
validY_slice = validY[rand, :, rand2, 0]
validPredict_slice = validPredict[rand, :, rand2, 0]
validY_delta = []
validPredict_delta = []
for j in range(len(validY_slice) - 1):
validY_delta.append(validY_slice[j + 1] - validY_slice[j])
validPredict_delta.append(validPredict_slice[j + 1] - validPredict_slice[j])
plt.subplot(211)
plt.plot(validY_slice)
plt.plot(validPredict_slice)
plt.subplot(212)
plt.plot(validY_delta)
plt.plot(validPredict_delta)
plt.show()
def trend_accuracy(Y, Predict):
count = 0
total = 0
def get_delta(Y):
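        # Shift the series right by one step (repeating the first element); subtracting then yields per-step changes.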
Y_shiftright = np.concatenate(([Y[0]], Y), axis=0)
        Y_shiftright = np.delete(Y_shiftright, len(Y_shiftright) - 1, axis=0)
print(Y.shape)
print(Y_shiftright.shape)
return np.subtract(Y_shiftright, Y)
Y_delta = get_delta(Y)
Predict_delta = get_delta(Predict)
Y_delta = np.reshape(Y_delta, [-1])
Predict_delta = np.reshape(Predict_delta, [-1])
for i in range(len(Y_delta)):
if Y_delta[i] * Predict_delta[i] > 0: # if they are the same
count += 1
total += 1
return count / total
# tensorboard --host 127.0.0.1 --logdir=D:/Projects/tensor2/summary/Transformer
train_ratio = 0.8
seq_length = 32
output_count = 8
batch_size = 8
N = 6
filter_num = 12
kernel_size = 64
ffn_size = kernel_size * 4
epoch = 3000
learning_rate = 0.00001
mfile = './models/en2de.model.h5'
mfile_arch = './models/Transformer/en2de.model_arch.json'
with open(mfile_arch, 'r') as f:
model = tf.keras.models.model_from_json(f.read())
|
[
"noreply@github.com"
] |
Baes20.noreply@github.com
|
7d686f1717b013e5a16bfd46d17ede520136c2e1
|
1a90e935bf8bae9f352775cd5e4d7cb89e2c1827
|
/venv/Scripts/rst2html4.py
|
9d13b068a0a2df48b32120082b1d92c9fc6f6b72
|
[] |
no_license
|
KyleIrving01/KylePracticals
|
a3d3fd1c673c0a16050b530034caa9ec582e7261
|
7af28f1903f5d5d97f1a60d88e4b43c7c4bc6500
|
refs/heads/master
| 2022-10-22T05:28:22.330462
| 2018-04-11T08:08:32
| 2018-04-11T08:08:32
| 127,836,146
| 0
| 1
| null | 2022-10-01T16:15:00
| 2018-04-03T01:57:23
|
Python
|
UTF-8
|
Python
| false
| false
| 761
|
py
|
#!C:\Users\Kyle\PycharmProjects\KylePracs\venv\Scripts\python.exe
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
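# publish_cmdline reads reStructuredText from a file or stdin and writes the rendered (X)HTML.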
publish_cmdline(writer_name='html4', description=description)
|
[
"kyle.irving@my.jcu.edu.au"
] |
kyle.irving@my.jcu.edu.au
|
02a5bbc62239aa59ffed254631814c7b2f0e9001
|
b70e3c8d4b512212708eda8bba4e33a102c9edfd
|
/photo/tests.py
|
570f0a6d20fb4a2e8ed816e4a196c4ddab5d0706
|
[
"MIT"
] |
permissive
|
markmumba/Thee-Gallery
|
b8d378700a9696f57d3d2e0af184b4f635537c1b
|
fcdbeb35f26db6ecb68dd9c786e842fe2aea7772
|
refs/heads/master
| 2021-09-09T09:34:47.412904
| 2019-08-27T08:34:23
| 2019-08-27T08:34:23
| 203,953,586
| 0
| 0
| null | 2021-06-10T21:53:51
| 2019-08-23T08:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
from django.test import TestCase
from .models import Location, Image, Category
# Create your tests here.
class ImageTestClass(TestCase):
def setUp(self):
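        # setUp runs before each test; create the related Location and Category first so the Image can reference them.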
self.location = Location(name='nairobi')
self.location.save()
self.category = Category(name ='food')
self.category.save()
self.image = Image( id = 1,name ='bg.jpg',description = 'good', category=self.category, location=self.location)
def test_save(self):
        self.image.image_save()
        self.assertTrue(len(Image.objects.all()) > 0)
|
[
"markmumba01@gmail.com"
] |
markmumba01@gmail.com
|
a2a59919f86d0379d999ad9f21ac54fd2050ef58
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/dms_write_f/endpoint_delete.py
|
931dd6bbc2ae6f7170ea30b200a1a599d99271d0
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
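# Put the project root on sys.path so common.execute_command imports when this file runs as a script.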
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
create-endpoint : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/create-endpoint.html
describe-endpoints : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/describe-endpoints.html
modify-endpoint : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dms/modify-endpoint.html
"""
write_parameter("dms", "delete-endpoint")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
43b7ee085e4556264bd72e224d461a3231d7227a
|
051175a2e83077b1c55a9b927fa944f7d578954b
|
/class&object-python/untitled2.py
|
3c5f567b8dcc973c6da864961b20e587081da145
|
[] |
no_license
|
ajkashik/Class-object-python-Introduction
|
79cfec4db6310b796ef91bd4f80ca2fc1dfb970b
|
1e9549d59edcd48cad9d3596e242f224c4562b49
|
refs/heads/master
| 2022-11-06T09:10:19.210570
| 2020-06-28T19:17:44
| 2020-06-28T19:17:44
| 275,652,336
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
class bird:
def __init__(self):
print("This is a bird")
def name(self):
print("bird")
def swim(self):
print("Can swim")
class penguin(bird):
def __init__(self):
super().__init__()
print("This is penguin")
def name(self):
super().name()
print("Penguin")
def run(self):
super().swim()
print("Can run")
pop=penguin()
pop.name()
pop.run()
|
[
"noreply@github.com"
] |
ajkashik.noreply@github.com
|
b519651abbfcf68850825535a106d0edbcc9b006
|
baaefd1bbc39142294fef46f5cd4b1128a1b8e74
|
/data/getData.py
|
3772cba7a30c54d6c47390cc3df6d694a9959645
|
[] |
no_license
|
gutobenn/inf-visdados
|
f8853ca21057bded091a578339da1164fe69694e
|
eb6823b4d1815672c3102b7e60f45c5e79e0e3cb
|
refs/heads/master
| 2020-03-25T05:47:13.090246
| 2018-07-16T22:22:45
| 2018-07-16T22:22:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
import csv, sys
from collections import Counter
input_stream = open(sys.argv[1])
reader = csv.reader(input_stream, delimiter=';')
print(next(reader))  # skip header
data = [row[int(sys.argv[2])] for row in reader]
print(data)
for (k, v) in Counter(data).items():
    print("%s %d" % (k, v))
|
[
"fabriciommazzola@gmail.com"
] |
fabriciommazzola@gmail.com
|
92d0ea6cb275b2656032b22fc0bced5e32c93bac
|
535fcd46a4708f35406ac93def63728d3296ff70
|
/sign-websocket/lambda_function.py
|
39d6b17fb633f574b83477c9627c6065d160793e
|
[] |
no_license
|
maxzega/sondehub-infra
|
bd12087d633ec28a7bcc1d1358961635d47e211c
|
00fddada463f7f0a4a190026dd311c6b595c8c08
|
refs/heads/main
| 2023-04-15T21:31:43.310299
| 2021-04-23T10:33:54
| 2021-04-23T10:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
import boto3
import time
import uuid
import urllib.parse
import hmac, datetime, hashlib
import os
#todo this will need an iam role that has iot connection privs
def aws_sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def aws_getSignatureKey(key, dateStamp, regionName, serviceName):
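    # SigV4 key derivation: chain HMACs over the date, region, service, and the fixed string "aws4_request".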
kDate = aws_sign(("AWS4" + key).encode("utf-8"), dateStamp)
kRegion = aws_sign(kDate, regionName)
kService = aws_sign(kRegion, serviceName)
kSigning = aws_sign(kService, "aws4_request")
return kSigning
def aws_presign(
access_key=None,
secret_key=None,
session_token=None,
host=None,
region=None,
method=None,
protocol=None,
uri=None,
service=None,
expires=3600,
payload_hash=None,
):
# method=GET, protocol=wss, uri=/mqtt service=iotdevicegateway
assert 604800 >= expires >= 1, "Invalid expire time 604800 >= %s >= 1" % expires
# Date stuff, first is datetime, second is just date.
t = datetime.datetime.utcnow()
date_time = t.strftime("%Y%m%dT%H%M%SZ")
date = t.strftime("%Y%m%d")
# Signing algorithm used
algorithm = "AWS4-HMAC-SHA256"
# Scope of credentials, date + region (eu-west-1) + service (iot gateway hostname) + signature version
credential_scope = date + "/" + region + "/" + service + "/" + "aws4_request"
# Start building the query-string
canonical_querystring = "X-Amz-Algorithm=" + algorithm
canonical_querystring += "&X-Amz-Credential=" + urllib.parse.quote_plus(
access_key + "/" + credential_scope
)
canonical_querystring += "&X-Amz-Date=" + date_time
canonical_querystring += "&X-Amz-Expires=" + str(expires)
canonical_querystring += "&X-Amz-SignedHeaders=host"
if payload_hash is None:
if service == "iotdevicegateway":
payload_hash = hashlib.sha256(b"").hexdigest()
else:
payload_hash = "UNSIGNED-PAYLOAD"
canonical_headers = "host:" + host + "\n"
canonical_request = (
method
+ "\n"
+ uri
+ "\n"
+ canonical_querystring
+ "\n"
+ canonical_headers
+ "\nhost\n"
+ payload_hash
)
string_to_sign = (
algorithm
+ "\n"
+ date_time
+ "\n"
+ credential_scope
+ "\n"
+ hashlib.sha256(canonical_request.encode()).hexdigest()
)
signing_key = aws_getSignatureKey(secret_key, date, region, service)
signature = hmac.new(
signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
).hexdigest()
canonical_querystring += "&X-Amz-Signature=" + signature
if session_token:
canonical_querystring += "&X-Amz-Security-Token=" + urllib.parse.quote(
session_token
)
return protocol + "://" + host + uri + "?" + canonical_querystring
def lambda_handler(event, context):
#get aws creds
session = boto3.Session()
credentials = session.get_credentials()
current_credentials = credentials.get_frozen_credentials()
url = aws_presign(
access_key=current_credentials.access_key,
secret_key=current_credentials.secret_key,
session_token=current_credentials.token,
method="GET",
protocol="wss",
uri="/mqtt",
service="iotdevicegateway",
host=os.getenv("IOT_ENDPOINT"),
region=session.region_name,
)
return {"statusCode": 200, "body": url}
if __name__ == "__main__":
print(lambda_handler({}, {}))
|
[
"git@michaela.lgbt"
] |
git@michaela.lgbt
|
89e1240e6d4e1e68240d8ff9adcd8c6e1613b0e9
|
e3f3def1b627eb1ba58395b23c732b124bc76b05
|
/PMCHealthCare/HealthRecord/models.py
|
a2c6535ae6de0f685a718ccb751229e8b6514528
|
[] |
no_license
|
ShraddhaVarat/HealthRecordApp
|
d59e3c582e4d66a4a0d8cbb88002cf5e7d685e3d
|
029f41885ef29440899f720bcf3377991a773008
|
refs/heads/master
| 2020-03-27T22:32:15.291007
| 2018-10-11T19:26:31
| 2018-10-11T19:26:31
| 147,239,811
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
from mongoengine import *
from django.utils import timezone
import datetime
from datetime import datetime
from django.db.models import (
DateField, DateTimeField, IntegerField, TimeField, Transform,
)
from PMCHealthCare.settings import DBNAME
connect(DBNAME)
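# Open the default mongoengine connection; every Document subclass below persists through it.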
class Doctor(Document):
doctor_id = StringField(max_length=50)
password = StringField(max_length=50)
name = StringField(max_length=100)
phone1 = StringField(max_length=15)
phone2 = StringField(max_length=15)
address = StringField(max_length=50)
email = StringField(max_length=254,required=False)
registration_no = StringField(max_length=50)
qualification = ListField(StringField(max_length=50))
hospitals_associated = SortedListField(StringField(max_length=50))
class Hospital(Document):
hospital_id = StringField(max_length=50)
password = StringField(max_length=50)
name = StringField(max_length=100)
phone_no = StringField(max_length=15)
helpline = StringField(max_length=15)
email = StringField(max_length=254,required=False)
address = StringField(max_length=100)
pincode = StringField(max_length=6)
registration_no = StringField(max_length=50)
hospital_type = StringField(max_length=50)
doctors_associated = SortedListField(ReferenceField(Doctor))
latitude = DecimalField(max_digits=10, decimal_places=7)
longitude = DecimalField(max_digits=10, decimal_places=7)
Location_Coordinates = StringField(max_length=50, blank=True, null=True)
Subtown = StringField(max_length=50, blank=True, null=True)
Total_Num_Beds = IntField(blank=True, null=True)
Facilities = StringField(max_length=50, blank=True, null=True)
District_ID = StringField(max_length=50, blank=True, null=True)
Specialties = ListField(StringField(max_length=50, blank=True, null=True))
Town = StringField(max_length=50, blank=True, null=True)
Website = StringField(max_length=50, blank=True, null=True)
Number_DoctorVillage = IntField(blank=True, null=True)
State_ID = StringField(max_length=50 , blank=True, null=True)
class Checkup_Details(EmbeddedDocument):
date = DateTimeField(blank=True, null=True)
hospital_id = StringField(max_length=50)
doctor = StringField(max_length=50)
symptoms = ListField(StringField(max_length=50))
provisional_diagnosis = ListField(StringField(max_length=50))
severity = IntField()
class Prescription(EmbeddedDocument):
prescription_id = StringField(max_length=50)
medicines = ListField(EmbeddedDocumentField('Medicine'))
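    # 'Medicine' is passed as a string because the class is defined further down.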
class Medicine(EmbeddedDocument):
medicine_name = StringField(max_length=50)
morning = FloatField(required=False)
afternoon = FloatField(required=False)
evening = FloatField(required=False)
no_of_days = IntField()
class Patient(Document):
patient_id = StringField(max_length=50)
password = StringField(max_length=50)
name = StringField(max_length=100)
phone1 = StringField(max_length=15)
phone2 = StringField(max_length=15)
email_id = EmailField(max_length=254,required=False)
aadhar_no = DecimalField( max_digits=12, decimal_places=0)
permanent_addr = StringField(max_length=250)
local_addr = StringField(max_length=250)
dob = StringField(null=True, blank=True)
gender = StringField(max_length=1)
profession = StringField(max_length=50)
marital_status = StringField(max_length=1)
blood_grp = StringField(max_length=10)
spouse_name = StringField(max_length=50,required=False)
checkup = ListField(EmbeddedDocumentField('Checkup_Details'))
prescription = ListField(EmbeddedDocumentField('Prescription'),blank=True,null=True)
class Pharmacist(Document):
pharmacist_id = StringField(max_length=50)
password = StringField(max_length=50)
name = StringField(max_length=100)
phone1 = StringField(max_length=15)
phone2 = StringField(max_length=15)
email = EmailField(max_length=254,required=False)
address = StringField(max_length=100)
registration_no = StringField(max_length=50)
#Sahyadri = Hospital(hospital_id="PMCH001",name="Sahyadri", phone_no="12345",email_id="abc@gmai.com",facilties=["MRI","X-ray"])
#Sahyadri.save()
#for e in Hospital.objects.all():
# print(e["registration_no"],e["hospital_id"])
|
[
"shraddhavarat77@gmail.com"
] |
shraddhavarat77@gmail.com
|
33034e06cf155cf17a64e4d985bd5693ace235fb
|
00affc541697bb828548f227d6b07d6bdba78eeb
|
/Report/grafik/amplitude-plot-snorken.py
|
ce353dcce2dbccf208122d2e8d593ff0fd39d374
|
[] |
no_license
|
lgrave11/P8-Report
|
c0872f5d11814974a8b31a90f4112142635bfc5c
|
67bcafddc7ba670560cf018d14ebd5f3d88b11f7
|
refs/heads/master
| 2016-09-05T19:12:52.511855
| 2015-06-23T08:35:52
| 2015-06-23T08:35:52
| 30,405,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
import datetime
import matplotlib
import matplotlib.dates
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
def main():
f = open(r"snorken_amplitudes.txt", "r").readlines()
lst = [(datetime.datetime.strptime(x.split("\t")[0].strip(), "%Y-%m-%d %H:%M:%S.%f"), x.split("\t")[1]) for x in f]
lst = [x for x in lst if x[0] >= datetime.datetime(2015, 4, 24, 19, 6, 14) and x[0] <= datetime.datetime(2015, 4, 25, 9, 0, 0)]
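    # Unzip into parallel sequences: lst2[0] holds timestamps, lst2[1] holds amplitudes.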
    lst2 = list(zip(*lst))
format = matplotlib.dates.DateFormatter('%H:%M')
dates = matplotlib.dates.date2num(lst2[0])
matplotlib.use('PDF')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot_date(dates, lst2[1], 'o-', color="red", markerfacecolor='red', markeredgecolor='red', markersize=2, label="Amplitude", rasterized=True)
plt.legend(["Amplitude"])
ax.set_ylabel('Maks amplitude')
ax.set_xlabel('Tidspunkt')
plt.ylabel('Maks amplitude', rotation="vertical")
plt.xlabel('Tidspunkt', rotation="horizontal")
ax.xaxis.set_major_formatter(format)
ax.autoscale_view()
ax.grid(True)
fig.autofmt_xdate()
plt.savefig(r"amplitude-plot-snorken.pdf", dpi=400)
if __name__ == '__main__':
main()
|
[
"gravesenlasse@gmail.com"
] |
gravesenlasse@gmail.com
|
8a5e5ac52df2299bd07b8855ea97880603375668
|
48fcd5b9203c5f34dcad9483259c0f3d46f5d48b
|
/coursera-python/strings_evaluation.py
|
ab068659dcdc5aa7585549a1d1bec40e7c103fbf
|
[] |
no_license
|
ssaulrj/codes-python
|
438dd691815d0a688d264928eb07187ba30c2138
|
04b75b001de60a5e202ad373f3379864753ce203
|
refs/heads/master
| 2022-11-17T11:40:18.883096
| 2020-07-06T00:57:58
| 2020-07-06T00:57:58
| 234,440,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
#The is_palindrome function checks if a string is a palindrome. A palindrome is a string that can be equally read from left to right or right to left, omitting blank spaces, and ignoring capitalization. Examples of palindromes are words like kayak and radar, and phrases like "Never Odd or Even".
def is_palindrome(input_string):
reverse_string = ""
new_string = ""
# Traverse through each letter of the input string
for letter in input_string.lower():
if letter != ' ':
new_string += letter
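            # Prepending each letter (instead of appending) builds the reversed string.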
reverse_string = letter + reverse_string[0:]
#print(reverse_string)
# Compare the strings
if new_string == reverse_string:
return True
return False
print(is_palindrome("Never Odd or Even")) # Should be True
print(is_palindrome("abc")) # Should be False
print(is_palindrome("kayak")) # Should be True
#Using the format method, fill in the gaps in the convert_distance function so that it returns the phrase "X miles equals Y km", with Y having only 1 decimal place. For example, convert_distance(12) should return "12 miles equals 19.2 km".
def convert_distance(miles):
km = miles * 1.6
result = "{} miles equals {:.1f} km".format(miles, km)
return result
print(convert_distance(12)) # Should be: 12 miles equals 19.2 km
print(convert_distance(5.5)) # Should be: 5.5 miles equals 8.8 km
print(convert_distance(11)) # Should be: 11 miles equals 17.6 km
#Fill in the nametag function so that it uses the format method to return first_name and the first initial of last_name followed by a period.
def nametag(first_name, last_name):
return("{} {}.".format(first_name, last_name[0]))
print(nametag("Jane", "Smith"))
# Should display "Jane S."
print(nametag("Francesco", "Rinaldi"))
# Should display "Francesco R."
print(nametag("Jean-Luc", "Grand-Pierre"))
# Should display "Jean-Luc G."
#The replace_ending function replaces the old string in a sentence with the new string, but only if the sentence ends with the old string. If there is more than one occurrence of the old string in the sentence, only the one at the end is replaced, not all of them. For example, replace_ending("abcabc", "abc", "xyz") should return abcxyz, not xyzxyz or xyzabc. The string comparison is case-sensitive, so replace_ending("abcabc", "ABC", "xyz") should return abcabc (no changes made).
def replace_ending(sentence, old, new):
# Check if the old string is at the end of the sentence
if sentence.endswith(old):
# Using i as the slicing index, combine the part
# of the sentence up to the matched string at the
# end with the new string
i = sentence.rindex(old)
print(i)
new_sentence = sentence[:i] + new
return new_sentence
# Return the original sentence if there is no match
return sentence
print(replace_ending("It's raining cats and cats", "cats", "dogs"))
# Should display "It's raining cats and dogs"
print(replace_ending("She sells seashells by the seashore", "seashells", "donuts"))
# Should display "She sells seashells by the seashore"
print(replace_ending("The weather is nice in May", "may", "april"))
# Should display "The weather is nice in May"
print(replace_ending("The weather is nice in May", "May", "April"))
# Should display "The weather is nice in April"
|
[
"noreply@github.com"
] |
ssaulrj.noreply@github.com
|
2d4f81484db9931750caf5780609c291d546af57
|
7828deb2e1c37adafaac9a527887e67e0d2f98ca
|
/venv/Scripts/easy_install-script.py
|
b5a08e4f01dc3ed7c0549c7e42567214091492c7
|
[] |
no_license
|
arunindia95/automation-first
|
78bd565feaad77b86759afff1a12ab71f00ce5e1
|
9b403105ada7e62c020f6752ca619befadbdeb73
|
refs/heads/main
| 2023-03-29T11:16:27.456232
| 2021-03-21T10:40:21
| 2021-03-21T10:40:21
| 349,966,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!C:\Users\Admin\PycharmProjects\saleprac\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"arun.konagutti06@gmail.com"
] |
arun.konagutti06@gmail.com
|
06e38ae0557bd579e0f1e8e62c7e68ce5eee8d42
|
6d4c0a2e0997fc2dd850c0ceb2584d65c17a5fb3
|
/crud/apps/sr_users/models.py
|
c3af94bb101e3ba397fdeee81a54e108980bc44b
|
[] |
no_license
|
cd-chicago-june-cohort/django_orm_Alyssa
|
77eb155c46ff79a4c27a36f4154d4f2e0f65161e
|
17df629ba00edac1d1cd23dfc93729ff4818f468
|
refs/heads/master
| 2021-01-01T06:46:28.394999
| 2017-07-19T21:39:40
| 2017-07-19T21:39:40
| 97,506,652
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
def user_validator(self, post_data):
errors = {}
if len(post_data['first_name']) == 0 or len(post_data['last_name']) == 0 or len(post_data['email']) == 0:
errors['required']='All fields are required'
if not EMAIL_REGEX.match(post_data['email']):
errors['email']='Invalid Email Address!'
return errors
class User(models.Model):
first_name = models.CharField(max_length=128)
last_name = models.CharField(max_length=128)
email = models.CharField(max_length=128)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
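    # Attach the custom manager so User.objects.user_validator(...) is available.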
objects = UserManager()
|
[
"alyssa@nickow.com"
] |
alyssa@nickow.com
|
eee5575235971f6b7417c1981655c6d18d4f5064
|
15cebfba74a77de9d633addb278e57917ade3a14
|
/models/articulation_estimator.py
|
9d9d48ff314f0d84ab97e9de202fbe2838559b06
|
[] |
no_license
|
liuliu66/articulation_estimator_slim
|
75cc04502a24872a0e07dd249c849df825645478
|
561fa07a9901692c4543648349b555711c9b18d0
|
refs/heads/main
| 2023-06-12T17:08:30.176028
| 2021-07-08T07:35:56
| 2021-07-08T07:35:56
| 370,897,114
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
import os, sys
import torch
import torch.nn as nn
from .pointnet2 import PointNet2
from .estimation_head import EstimationHead
class ArticulationEstimator(nn.Module):
def __init__(self,
in_channels=3,
n_max_parts=7):
super(ArticulationEstimator, self).__init__()
self.n_max_parts = n_max_parts
self.backbone = PointNet2(in_channels)
self.nocs_head = EstimationHead(n_max_parts, mixed_pred=True)
def forward(self, return_loss=True, **input):
if return_loss:
return self.forward_train(**input)
else:
return self.forward_test(**input)
def forward_train(self, **input):
P = input['parts_pts']
if 'parts_pts_feature' in input.keys():
P_feature = input['parts_pts_feature']
else:
P_feature = None
feat, feat_encode = self.backbone(P, P_feature)
pred_dict = self.nocs_head(feat, feat_encode)
loss_result = self.nocs_head.loss(pred_dict, mode='train', **input)
return loss_result
def forward_test(self, **input):
P = input['pts']
if 'pts_feature' in input.keys():
P_feature = input['pts_feature']
else:
P_feature = None
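        # Add a batch dimension when a single unbatched sample is passed.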
        if P.dim() == 2:
            P = P.unsqueeze(0)
        if P_feature is not None and P_feature.dim() == 2:
            P_feature = P_feature.unsqueeze(0)
feat, feat_encode = self.backbone(P, P_feature)
pred_dict = self.nocs_head(feat, feat_encode)
return pred_dict
@property
def with_nocs(self):
return hasattr(self, 'nocs_head') and self.nocs_head is not None
|
[
"312989161@qq.com"
] |
312989161@qq.com
|
91abdc4a15ab120bb5b7538c6841258ddc4d1d8e
|
89e430e5e47642132b272ab454c0bd40344c40a7
|
/LeetcodePython/LeetCode/0.base.py
|
ec95524357a6fac2d831c3c93deac528204ec2b9
|
[] |
no_license
|
selonsy/leetcode
|
bebde23e0e13ba236adb3d905a701a34602f98df
|
b8f705a77cfcdb7d498d3422f9c4ee88fd61a3b3
|
refs/heads/master
| 2021-06-28T06:23:36.422766
| 2021-02-23T12:34:40
| 2021-02-23T12:34:40
| 220,758,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
import time
import copy
class Solution:
def func1(self, x):
pass
def getVarLen(self):
co = self.func1.__code__
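        # co_argcount includes self, so callers subtract one to get the real arity.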
return co.co_argcount
ori_data = [1]
expect_data = [None]
assert len(ori_data) == len(expect_data), 'input and expected data counts must match'
s = Solution()
funcs = []
var_len = s.getVarLen() - 1 # exclude self
for key in Solution.__dict__.keys():
if "func" in key:
        funcs.append(getattr(s, key))  # fetch each bound func* method via reflection
for f in range(len(funcs)):
func = funcs[f]
    assert func is not None, 'func must not be None!'
    begin = time.time()  # TODO: the timing here is inaccurate and needs improvement
data_length = len(expect_data)
    input_data = copy.deepcopy(ori_data)  # earlier funcs may mutate the test data, so deep-copy
for i in range(data_length):
if var_len == 1:
res = func(input_data[i])
assert res == expect_data[i], "func{0}({3}): expected = {1}, but actually = {2}".format(f+1,expect_data[i], res,input_data[i])
elif var_len == 2:
res = func(input_data[i][0],input_data[i][1])
assert res == expect_data[i], "func{0}({3},{4}): expected = {1}, but actually = {2}".format(f+1,expect_data[i], res,input_data[i][0],input_data[i][1])
elif var_len == 3:
res = func(input_data[i][0],input_data[i][1],input_data[i][2])
assert res == expect_data[i], "func{0}({3},{4},{5}): expected = {1}, but actually = {2}".format(f+1,expect_data[i], res,input_data[i][0],input_data[i][1],input_data[i][2])
elif var_len == 4:
res = func(input_data[i][0],input_data[i][1],input_data[i][2],input_data[i][3])
assert res == expect_data[i], "func{0}({3},{4},{5},{6}): expected = {1}, but actually = {2}".format(f+1,expect_data[i], res,input_data[i][0],input_data[i][1],input_data[i][2],input_data[i][3])
end = time.time()
print("func{0} : {1:.4f} ms".format(f+1, (end-begin)*1000/data_length))
print("done")
|
[
"selonsy@gmail.com"
] |
selonsy@gmail.com
|
34a936681fc993105ca2d6ae90223c3fbde5699e
|
9a17439a485041a0c17fe2b1be2106e8345a686b
|
/music_controller/polls/apiviews.py
|
85345cfc6c89b4abeef87f904d196aea98ba44f7
|
[] |
no_license
|
dom-inic/music_controller
|
15cc79310919116db310b55d3d6c7df2314fee06
|
73f6b225fd95719dc7244e278b80d6f587a2e921
|
refs/heads/master
| 2023-02-22T19:26:41.918556
| 2021-01-26T08:57:19
| 2021-01-26T08:57:19
| 326,018,333
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from rest_framework import generics
from .models import Question, Choice
from .serializers import PollSerializer, ChoiceSerializer, VoteSerializer
# class PollList(APIView):
# def get(self, request):
# polls = Question.objects.all()[:20]
# data = PollSerializer(polls, many=True).data
# return Response(data)
# class PollDetail(APIView):
# def get (self, request, pk):
# poll = get_object_or_404(Question, pk=pk)
# data = PollSerializer(poll).data
# return Response(data)
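# The generic class-based views below replace the hand-written APIView versions commented out above.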
class PollList(generics.ListCreateAPIView):
queryset = Question.objects.all()
serializer_class = PollSerializer
class PollDetail(generics.RetrieveDestroyAPIView):
queryset = Question.objects.all()
serializer_class = PollSerializer
class ChoiceList(generics.ListCreateAPIView):
queryset = Choice.objects.all()
serializer_class = ChoiceSerializer
class CreateVote(generics.CreateAPIView):
serializer_class = VoteSerializer
|
[
"dominicnyambane22@gmail.com"
] |
dominicnyambane22@gmail.com
|
32a6be3181eaf73bfdbe1274daba814817ea932b
|
6624f41d6a58b91f080b482727b9b7fbd70c3480
|
/kannada_brat/coreference_ui.py
|
6c440557e1189ac83799a204bec575f7a3ed73a0
|
[] |
no_license
|
swarooplr/fp
|
ac423d4d3c8a7aa9972e95e6bc191e1495eda5d1
|
ad5559689f141149f91d410fbe7cd05fe0fd53b7
|
refs/heads/master
| 2020-05-07T10:25:18.657882
| 2019-07-14T14:08:23
| 2019-07-14T14:08:23
| 180,417,247
| 0
| 1
| null | 2019-05-03T17:12:22
| 2019-04-09T17:28:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
import json
from english_brat import final_builder_ui
import os
import random
def build_brat(text, annotated):
docData = {}
docData['text'] = text
docData['entities'] = []
docData['relations'] = []
start_index = 0
end_index = 0
counter = 0
search_string = text
id_map = {}
finished_map = {}
counter = 0
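    # Assign each character offset a stable numeric ID so the brat entity IDs ('T0', 'T1', ...) stay consistent.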
for token in annotated:
        if token[0] not in id_map:
id_map[token[0]] = counter
counter += 1
finished_map[token[0]] = True
        if token[2] not in id_map:
id_map[token[2]] = counter
counter += 1
finished_map[token[2]] = True
for token in annotated:
entity = []
entity.append('T' + str(id_map[token[0]]))
entity.append('Entity')
entity.append([[token[0],token[1]]])
docData['entities'].append(entity)
counter += 1
entity = []
entity.append('T' + str(id_map[token[2]]))
entity.append('Anaphor')
entity.append([[token[2], token[3]]])
docData['entities'].append(entity)
counter += 1
docData['relations'].append(
['R'+str(counter-1), 'Co-Reference', [['Anaphor', 'T' + str(id_map[token[2]])], ['Entity','T' + str(id_map[token[0]])]]]
)
collData = {}
collData['entity_types'] = []
tags = set()
tags.add("Anaphor")
tags.add("Entity")
colours = open(os.path.join(os.path.dirname(__file__), '../misc/colours.txt')).read().splitlines()
collData['relation_types'] = [{
"type": 'Co-Reference',
"labels": ['Co-Reference'],
"dashArray": '3,3',
"color": 'purple',
"args": [
{"role": 'Anaphor', "targets": ['Anaphor']},
{"role": 'Entity', "targets": ['Entity']}
]
}]
for tag in tags:
entity = {}
entity['type'] = tag
entity['labels'] = [tag]
entity['borderColor'] = 'darken'
entity['bgColor'] = colours[random.randint(0,len(colours)-1)]
collData['entity_types'].append(entity)
docData = json.dumps(docData,sort_keys=True,indent=4)
collData = json.dumps(collData,sort_keys=True,indent=4)
final_builder_ui.build(docData,collData)
|
[
"swarooplr13@gmail.com"
] |
swarooplr13@gmail.com
|
8ad3174e81f9c26f2ddd4b70d68fe27555f25a45
|
957c9b285d508d56d865d60889e1485b34239e92
|
/firecares/firestation/migrations/0040_auto_20170126_1640.py
|
4a6f93a469ea12d7ef2956004348627b1842f7bc
|
[
"MIT"
] |
permissive
|
FireCARES/firecares
|
677fd4a3c6c554b735fa276fc1cd6a4b67ce42f6
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
refs/heads/develop
| 2022-12-11T22:45:11.378689
| 2021-04-22T22:00:12
| 2021-04-22T22:00:12
| 39,472,578
| 12
| 14
|
MIT
| 2022-12-08T00:02:40
| 2015-07-21T22:16:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0039_auto_20170126_0857'),
]
sql = """
CREATE OR REPLACE FUNCTION department_fts_document(integer) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
    RAISE NOTICE 'WRONG FUNCTION';
SELECT fd.name, add.city, fd.state, states.state_name, add.postal_code
INTO name, city, state, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=$1;
SELECT concat_ws(' ', name, city, state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
-- Overload the department_fts_document by calling this version the same name but accepting a different argument type.
-- This one takes a Fire Department object.
CREATE OR REPLACE FUNCTION department_fts_document(department firestation_firedepartment) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
SELECT add.city, states.state_name, add.postal_code
INTO city, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=department.id;
SELECT concat_ws(' ', department.name, city, department.state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION department_fts_document_trigger() RETURNS TRIGGER AS $$
BEGIN
raise warning 'before set %', NEW;
NEW.fts_document=department_fts_document(NEW);
raise warning 'after set';
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
operations = [
migrations.RunSQL(sql)
]
|
[
"garnertb@gmail.com"
] |
garnertb@gmail.com
|
816ce6df5e1915ef71909f8e745acf6d747e9179
|
ab621c65fc91f5194c4032d68e750efaa5f85682
|
/purchase_invoice_line_percentage/wizards/purchase_make_invoice_advance.py
|
66c903b489b5ea00b42dfe67281d9b5bf22f137a
|
[] |
no_license
|
pabi2/pb2_addons
|
a1ca010002849b125dd89bd3d60a54cd9b9cdeef
|
e8c21082c187f4639373b29a7a0905d069d770f2
|
refs/heads/master
| 2021-06-04T19:38:53.048882
| 2020-11-25T03:18:24
| 2020-11-25T03:18:24
| 95,765,121
| 6
| 15
| null | 2022-10-06T04:28:27
| 2017-06-29T10:08:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,974
|
py
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class purchase_advance_payment_inv(osv.osv_memory):
_name = "purchase.advance.payment.inv"
_description = "Purchase Advance Payment Invoice"
_columns = {
'line_percent': fields.float(
'Installment',
digits_compute=dp.get_precision('Account'),
help="The % of installment to be used to "
"calculate the quantity to invoice"),
}
_defaults = {
'amount': 0.0,
}
def create_invoices(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context)
# Additional case, Line Percentage
if wizard.line_percent:
# Getting PO Line IDs of this PO
purchase_obj = self.pool.get('purchase.order')
purchase_ids = context.get('active_ids', [])
order = purchase_obj.browse(cr, uid, purchase_ids[0])
if order.invoiced_rate + wizard.line_percent > 100:
raise osv.except_osv(
_('Warning!'),
_('This percentage is too high, '
'it make overall invoiced rate exceed 100%!'))
order_line_ids = []
for order_line in order.order_line:
order_line_ids.append(order_line.id)
# Assign them into active_ids
context.update({'active_ids': order_line_ids})
context.update({'line_percent': wizard.line_percent})
purchase_order_line_make_invoice_obj = self.pool.get(
'purchase.order.line_invoice')
res = purchase_order_line_make_invoice_obj.makeInvoices(
cr, uid, ids, context=context)
if not context.get('open_invoices', False):
return {'type': 'ir.actions.act_window_close'}
return res
return super(purchase_advance_payment_inv, self).create_invoices(
cr, uid, ids, context=context)
def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
""" open a view on one of the given invoice_ids """
ir_model_data = self.pool.get('ir.model.data')
form_res = ir_model_data.get_object_reference(
cr, uid, 'account', 'invoice_supplier_form')
form_id = form_res and form_res[1] or False
tree_res = ir_model_data.get_object_reference(
cr, uid, 'account', 'invoice_tree')
tree_id = tree_res and tree_res[1] or False
return {
'name': _('Advance Invoice'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'account.invoice',
'res_id': invoice_ids[0],
'view_id': False,
'views': [(form_id, 'form'), (tree_id, 'tree')],
'context': "{'type': 'in_invoice'}",
'type': 'ir.actions.act_window',
}
|
[
"kittiu@gmail.com"
] |
kittiu@gmail.com
|
09b1804b7637a7d48d1db6c42af093f42603907e
|
fcbe21026e7ae483c535f6eb38ffbfaaa2aa06c2
|
/.history/main_20210320181238.py
|
c4c23e154eacb243057172f4c0f7578559179576
|
[] |
no_license
|
robertstrauss/discordAIchatbot
|
8e4c85920f73d49daeb1394afbd1ce8baffbb97b
|
8347e6a591d352ace1b8fe4c4629e831763eb0ba
|
refs/heads/master
| 2023-04-03T12:55:07.641825
| 2021-04-13T17:54:57
| 2021-04-13T17:54:57
| 357,641,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
import discord
import re
import requests
import random
import time
import threading
import asyncio
client = discord.Client()
pingchannel_id = 763821567695126588  # channels can only be resolved after login, so store the ID and look it up lazily
pinginterval = 2 # seconds
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    global pinging, pinginterval
    if str(message.author) == "Eon#8669":
        pinging = False
if message.author == client.user:
return
if "Eon#8669" in [str(mention) for mention in message.mentions]:
pingean()
if message.content.startswith("hey bot, ping ean!"):
# pingchannel = message.channel
pinging = True
while pinging:
            pingchannel = client.get_channel(pingchannel_id)
            await pingchannel.send('<@549394254695235594>')
            await asyncio.sleep(pinginterval)  # non-blocking sleep so the "hey bot, stop!" handler can run
if message.content.startswith("hey bot, stop!"):
pinging = False
# await message.channel.send(makepenis(random.randint(0,10)))
with open('apikey.txt', 'r') as apikeytxt:
api_key = apikeytxt.read()
api_url = 'https://www.alphavantage.co/query'
def getQuote(symbol):
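    # Query Alpha Vantage's GLOBAL_QUOTE endpoint for the latest quote of the given symbol.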
data = {
'function': 'GLOBAL_QUOTE',
'symbol': symbol,
'apikey': api_key
}
return requests.get(api_url, params=data).json()['Global Quote']
with open('token.txt', 'r') as tokentxt:
client.run(tokentxt.read())
|
[
"robert.strauss@protonmail.com"
] |
robert.strauss@protonmail.com
|
082ae6ae67757ef03811610dd0fd91fdaf230ab3
|
b9a4864e7952918f79761f9c2df9b0e3fc93f45c
|
/ml-agents/mlagents/trainers/torch/action_log_probs.py
|
eee4cab8c2701dbe834f549f0d37750123a68f6f
|
[
"Apache-2.0"
] |
permissive
|
418sec/ml-agents
|
bc9e2c4544f4adacacf0b960001316b0c0e6ee9b
|
26a7040c9780fc093c6a84653f33627d207bf117
|
refs/heads/master
| 2023-02-26T04:47:03.195404
| 2021-01-29T19:12:11
| 2021-01-29T19:12:11
| 334,611,921
| 0
| 1
|
Apache-2.0
| 2021-02-08T15:38:37
| 2021-01-31T08:53:42
| null |
UTF-8
|
Python
| false
| false
| 4,426
|
py
|
from typing import List, Optional, NamedTuple, Dict
from mlagents.torch_utils import torch
import numpy as np
from mlagents.trainers.torch.utils import ModelUtils
from mlagents_envs.base_env import _ActionTupleBase
class LogProbsTuple(_ActionTupleBase):
"""
An object whose fields correspond to the log probs of actions of different types.
Continuous and discrete are numpy arrays
Dimensions are of (n_agents, continuous_size) and (n_agents, discrete_size),
respectively. Note, this also holds when continuous or discrete size is
zero.
"""
@property
def discrete_dtype(self) -> np.dtype:
"""
The dtype of a discrete log probability.
"""
return np.float32
class ActionLogProbs(NamedTuple):
"""
A NamedTuple containing the tensor for continuous log probs and list of tensors for
discrete log probs of individual actions as well as all the log probs for an entire branch.
Utility functions provide numpy <=> tensor conversions to be used by the optimizers.
:param continuous_tensor: Torch tensor corresponding to log probs of continuous actions
:param discrete_list: List of Torch tensors each corresponding to log probs of the discrete actions that were
sampled.
:param all_discrete_list: List of Torch tensors each corresponding to all log probs of
a discrete action branch, even the discrete actions that were not sampled. all_discrete_list is a list of Tensors,
each Tensor corresponds to one discrete branch log probabilities.
"""
continuous_tensor: torch.Tensor
discrete_list: Optional[List[torch.Tensor]]
all_discrete_list: Optional[List[torch.Tensor]]
@property
def discrete_tensor(self):
"""
Returns the discrete log probs list as a stacked tensor
"""
return torch.stack(self.discrete_list, dim=-1)
@property
def all_discrete_tensor(self):
"""
Returns the discrete log probs of each branch as a tensor
"""
return torch.cat(self.all_discrete_list, dim=1)
def to_log_probs_tuple(self) -> LogProbsTuple:
"""
Returns a LogProbsTuple. Only adds if tensor is not None. Otherwise,
LogProbsTuple uses a default.
"""
log_probs_tuple = LogProbsTuple()
if self.continuous_tensor is not None:
continuous = ModelUtils.to_numpy(self.continuous_tensor)
log_probs_tuple.add_continuous(continuous)
if self.discrete_list is not None:
discrete = ModelUtils.to_numpy(self.discrete_tensor)
log_probs_tuple.add_discrete(discrete)
return log_probs_tuple
def _to_tensor_list(self) -> List[torch.Tensor]:
"""
Returns the tensors in the ActionLogProbs as a flat List of torch Tensors. This
is private and serves as a utility for self.flatten()
"""
tensor_list: List[torch.Tensor] = []
if self.continuous_tensor is not None:
tensor_list.append(self.continuous_tensor)
if self.discrete_list is not None:
tensor_list.append(self.discrete_tensor)
return tensor_list
def flatten(self) -> torch.Tensor:
"""
A utility method that returns all log probs in ActionLogProbs as a flattened tensor.
This is useful for algorithms like PPO which can treat all log probs in the same way.
"""
return torch.cat(self._to_tensor_list(), dim=1)
@staticmethod
def from_dict(buff: Dict[str, np.ndarray]) -> "ActionLogProbs":
"""
A static method that accesses continuous and discrete log probs fields in an AgentBuffer
and constructs the corresponding ActionLogProbs from the retrieved np arrays.
"""
continuous: torch.Tensor = None
discrete: List[torch.Tensor] = None # type: ignore
if "continuous_log_probs" in buff:
continuous = ModelUtils.list_to_tensor(buff["continuous_log_probs"])
if "discrete_log_probs" in buff:
discrete_tensor = ModelUtils.list_to_tensor(buff["discrete_log_probs"])
# This will keep discrete_list = None which enables flatten()
if discrete_tensor.shape[1] > 0:
discrete = [
discrete_tensor[..., i] for i in range(discrete_tensor.shape[-1])
]
return ActionLogProbs(continuous, discrete, None)
|
[
"noreply@github.com"
] |
418sec.noreply@github.com
|
b0bc231c424535e2de8e165f77a9dec6fc51a064
|
8bff98c58d7d5894b9b9095fb4dac049c7b4a77c
|
/NERYS-product-monitor-master/supreme.py
|
ba15a427444c4e21539d6e4c03373e59d518e6db
|
[
"MIT"
] |
permissive
|
mascher/supreme-nikemonitor
|
cf401e4fadbeb8ff812f514525530e5b85f1f3d4
|
52e5f1e3f333802a2ef2a4fe9495b2f06e1f9e9f
|
refs/heads/master
| 2020-03-27T20:01:18.383492
| 2018-08-21T02:22:25
| 2018-08-21T02:22:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,840
|
py
|
'''
NERYS
supreme module
left to do:
save products to sql db
load products from sql db on startup
'''
import requests
from bs4 import BeautifulSoup as soup
import random
from log import log as log
from threading import Thread
from discord_hooks import Webhook
import time
class Product:
def __init__(self, link, image, title = "", stock = False):
self.link = link
self.image = image
self.title = title
self.stock = stock
def read_from_txt(path):
'''
    (str) -> list of str
Loads up all sites from the sitelist.txt file in the root directory.
Returns the sites as a list
'''
# Initialize variables
raw_lines = []
lines = []
# Load data from the txt file
try:
f = open(path, "r")
raw_lines = f.readlines()
f.close()
# Raise an error if the file couldn't be found
except:
log('e', "Couldn't locate <" + path + ">.")
        raise FileNotFoundError("Couldn't locate <" + path + ">.")
if(len(raw_lines) == 0):
        raise ValueError("No data loaded from <" + path + ">.")
# Parse the data
for line in raw_lines:
lines.append(line.strip("\n"))
# Return the data
return lines
def get_proxy(proxy_list):
'''
(list) -> dict
Given a proxy list <proxy_list>, a proxy is selected and returned.
'''
# Choose a random proxy
proxy = random.choice(proxy_list)
# Set up the proxy to be used
proxies = {
"http": str(proxy),
"https": str(proxy)
}
# Return the proxy
return proxies
def send_embed(alert_type, product):
'''
    (str, Product) -> None
Sends a discord alert based on info provided.
'''
# Set webhook
url = discord_webhook
# Create embed to send to webhook
embed = Webhook(url, color=123123)
# Set author info
embed.set_author(name='Premier Cooks', icon='https://pbs.twimg.com/profile_images/1031725580075585536/s0GlPWIB_400x400.jpg')
# Set product details
if(alert_type == "RESTOCK"):
embed.set_desc("RESTOCK: " + product.title)
elif(alert_type == "NEW"):
embed.set_desc("NEW: " + product.title)
embed.add_field(name="Product", value=product.title)
embed.add_field(name="Link", value=product.link)
embed.add_field(name="Stock", value=str(product.stock))
# Set product image
embed.set_thumbnail(product.image)
embed.set_image(product.image)
# Set footer
embed.set_footer(text='Supreme Monitor by @premiercooks', icon='https://pbs.twimg.com/profile_images/1031725580075585536/s0GlPWIB_400x400.jpg', ts=True)
# Send Discord alert
embed.post()
def monitor():
# GET "view all" page
link = "http://www.supremenewyork.com/shop/all"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36"
}
proxies = get_proxy(proxy_list)
try:
r = requests.get(link, timeout=5, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed. Retrying...")
try:
if(use_proxies):
proxies = get_proxy(proxy_list)
r = requests.get(link, proxies=proxies, timeout=8, verify=False)
else:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
return
page = soup(r.text, "html.parser")
products = page.findAll("div", {"class": "inner-article"})
log('i', "Checking stock of Supreme products...")
for product in products:
link = "https://www.supremenewyork.com" + product.a["href"]
monitor_supreme_product(link, product)
def monitor_supreme_product(link, product):
# Product info
image = "https:" + product.a.img["src"]
if(product.text == "sold out"):
stock = False
else:
stock = True
# Product already in database
try:
if(stock is True and products_list[link].stock is False):
log('s', products_list[link].title + " is back in stock!")
products_list[link].stock = True
send_embed("RESTOCK", products_list[link])
elif(stock is False and products_list[link].stock is True):
log('s', products_list[link].title + " is now out of stock.")
products_list[link].stock = False
# Add new product to database
except:
# GET product name
try:
if(use_proxies):
proxies = get_proxy(proxy_list)
r = requests.get(link, proxies=proxies, timeout=8, verify=False)
else:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed. Retrying...")
try:
if(use_proxies):
proxies = get_proxy(proxy_list)
r = requests.get(link, proxies=proxies, timeout=8, verify=False)
else:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
return
title = soup(r.text, "html.parser").find("title").text
# Add product to database
products_list[link] = Product(link, image, title, stock)
log('s', "Added " + title + " to the database.")
send_embed("NEW", products_list[link])
def build_db():
# GET "view all" page
link = "http://www.supremenewyork.com/shop/all"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36"
}
proxies = get_proxy(proxy_list)
try:
r = requests.get(link, timeout=5, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed. Retrying...")
try:
if(use_proxies):
proxies = get_proxy(proxy_list)
r = requests.get(link, proxies=proxies, timeout=8, verify=False)
else:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
return
page = soup(r.text, "html.parser")
products = page.findAll("div", {"class": "inner-article"})
    log('i', "Building the database of Supreme products...")
for product in products:
link = "https://www.supremenewyork.com" + product.a["href"]
# Product info
image = "https:" + product.a.img["src"]
if(product.text == "sold out"):
stock = False
else:
stock = True
# GET product name
try:
if(use_proxies):
proxies = get_proxy(proxy_list)
r = requests.get(link, proxies=proxies, timeout=8, verify=False)
else:
r = requests.get(link, timeout=8, verify=False)
        except:
            log('e', "Connection to URL <" + link + "> failed. Retrying...")
            try:
                if(use_proxies):
                    proxies = get_proxy(proxy_list)
                    r = requests.get(link, proxies=proxies, timeout=8, verify=False)
                else:
                    r = requests.get(link, timeout=8, verify=False)
            except:
                log('e', "Connection to URL <" + link + "> failed.")
                continue
title = soup(r.text, "html.parser").find("title").text
# Add product to database
products_list[link] = Product(link, image, title, stock)
log('s', "Added " + title + " to the database.")
if(__name__ == "__main__"):
# Ignore insecure messages
requests.packages.urllib3.disable_warnings()
# Load proxies (if available)
proxy_list = read_from_txt("proxies.txt")
log('i', "Loaded " + str(len(proxy_list)) + " proxies.")
if(len(proxy_list) == 0):
use_proxies = False
else:
use_proxies = True
# Initialize variables
products_list = {}
proxies = get_proxy(proxy_list)
discord_webhook = "https://discordapp.com/api/webhooks/466338049207435297/Qlm-eq1c2_oil7AJGRU1U2j93TGD4IvCJuo8PYfWXY0ghuTdk-lCiYkq5KbboeTvC4ds" # Put your webhook here
# Build database
build_db()
# Monitor products
while(True):
monitor()
time.sleep(8)
|
[
"noreply@github.com"
] |
mascher.noreply@github.com
|
2603a62d3d40aa583bfcf5eff0ea7b99e1bfbdfd
|
c3ff4d4d945867d6019438a7ed0ca4cd59e31dfa
|
/tests/platform_tests/api/test_component.py
|
9257e4ad20379554e2be12ed6a6f4b0c7fb71e3f
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
monipko/sonic-mgmt
|
9b80386039b98470e093e8e23c3b98bf71a5eb70
|
9927942d83f749579fb99ed92e26a2963b20a0d9
|
refs/heads/master
| 2021-08-07T04:06:04.760399
| 2020-12-11T07:28:44
| 2020-12-11T07:28:44
| 230,930,317
| 1
| 1
|
NOASSERTION
| 2020-09-25T12:42:00
| 2019-12-30T14:33:16
|
Python
|
UTF-8
|
Python
| false
| false
| 10,169
|
py
|
import logging
import re
import pytest
import yaml
from tests.common.helpers.assertions import pytest_assert
from tests.common.helpers.platform_api import chassis, component
from platform_api_test_base import PlatformApiTestBase
###################################################
# TODO: Remove this after we transition to Python 3
import sys
if sys.version_info.major == 3:
STRING_TYPE = str
else:
STRING_TYPE = basestring
# END Remove this after we transition to Python 3
###################################################
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.disable_loganalyzer, # disable automatic loganalyzer
pytest.mark.topology('any')
]
image_list = [
"current",
"next"
]
@pytest.fixture(scope="class")
def gather_facts(request, duthost):
# Get platform facts from platform.json file
request.cls.chassis_facts = duthost.facts.get("chassis")
@pytest.mark.usefixtures("gather_facts")
class TestComponentApi(PlatformApiTestBase):
"""Platform API test cases for the Component class"""
num_components = None
chassis_facts = None
# This fixture would probably be better scoped at the class level, but
# it relies on the platform_api_conn fixture, which is scoped at the function
# level, so we must do the same here to prevent a scope mismatch.
@pytest.fixture(scope="function", autouse=True)
def setup(self, platform_api_conn):
if self.num_components is None:
try:
self.num_components = int(chassis.get_num_components(platform_api_conn))
except:
pytest.fail("num_components is not an integer")
#
# Helper functions
#
def compare_value_with_platform_facts(self, key, value, component_idx):
expected_value = None
if self.chassis_facts:
expected_components = self.chassis_facts.get("components")
if expected_components:
expected_value = expected_components[component_idx].get(key)
if self.expect(expected_value is not None,
"Unable to get expected value for '{}' from platform.json file for component {}".format(key, component_idx)):
self.expect(value == expected_value,
"'{}' value is incorrect. Got '{}', expected '{}' for component {}".format(key, value, expected_value, component_idx))
#
# Functions to test methods inherited from DeviceBase class
#
def test_get_name(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
name = component.get_name(platform_api_conn, i)
if self.expect(name is not None, "Component {}: Unable to retrieve name".format(i)):
self.expect(isinstance(name, STRING_TYPE), "Component {}: Name appears incorrect".format(i))
self.compare_value_with_platform_facts('name', name, i)
self.assert_expectations()
def test_get_presence(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
presence = component.get_presence(platform_api_conn, i)
if self.expect(presence is not None, "Component {}: Unable to retrieve presence".format(i)):
self.expect(isinstance(presence, bool), "Component {}: Presence appears incorrect".format(i))
# All components are expected to be present on DuT
self.expect(presence is True, "Component {} not present".format(i))
self.assert_expectations()
def test_get_model(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
model = component.get_model(platform_api_conn, i)
if self.expect(model is not None, "Component {}: Unable to retrieve model".format(i)):
self.expect(isinstance(model, STRING_TYPE), "Component {}: Model appears incorrect".format(i))
self.assert_expectations()
def test_get_serial(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
serial = component.get_serial(platform_api_conn, i)
if self.expect(serial is not None, "Component {}: Unable to retrieve serial number".format(i)):
self.expect(isinstance(serial, STRING_TYPE), "Component {}: Serial number appears incorrect".format(i))
self.assert_expectations()
def test_get_status(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
status = component.get_status(platform_api_conn, i)
if self.expect(status is not None, "Component {}: Unable to retrieve status".format(i)):
self.expect(isinstance(status, bool), "Component {}: Status appears incorrect".format(i))
self.assert_expectations()
def test_get_position_in_parent(self, platform_api_conn):
for i in range(self.num_components):
position = component.get_position_in_parent(platform_api_conn, i)
if self.expect(position is not None, "Failed to perform get_position_in_parent for component {}".format(i)):
self.expect(isinstance(position, int), "Position value must be an integer value for component {}".format(i))
self.assert_expectations()
def test_is_replaceable(self, platform_api_conn):
for i in range(self.num_components):
replaceable = component.is_replaceable(platform_api_conn, i)
if self.expect(replaceable is not None, "Failed to perform is_replaceable for component {}".format(i)):
self.expect(isinstance(replaceable, bool), "Replaceable value must be a bool value for component {}".format(i))
self.assert_expectations()
#
# Functions to test methods defined in ComponentBase class
#
def test_get_description(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
description = component.get_description(platform_api_conn, i)
if self.expect(description is not None, "Component {}: Failed to retrieve description".format(i)):
self.expect(isinstance(description, STRING_TYPE), "Component {}: Description appears to be incorrect".format(i))
self.assert_expectations()
def test_get_firmware_version(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
fw_version = component.get_firmware_version(platform_api_conn, i)
if self.expect(fw_version is not None, "Component {}: Failed to retrieve firmware version".format(i)):
self.expect(isinstance(fw_version, STRING_TYPE), "Component {}: Firmware version appears to be incorrect".format(i))
self.assert_expectations()
def test_get_available_firmware_version(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
            for image in image_list:
avail_fw_version = component.get_available_firmware_version(platform_api_conn, i, image)
if self.expect(avail_fw_version is not None, "Component {}: Failed to retrieve available firmware version from image {}".format(i, image)):
self.expect(isinstance(avail_fw_version, STRING_TYPE), "Component {}: Available Firmware version appears to be incorrect from image {}".format(i, image))
self.assert_expectations()
def test_get_firmware_update_notification(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
            for image in image_list:
notif = component.get_firmware_update_notification(platform_api_conn, i, image)
# Can return "None" if no update required.
pytest_assert(isinstance(notif, STRING_TYPE), "Component {}: Firmware update notification appears to be incorrect from image {}".format(i, image))
def test_install_firmware(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
            for image in image_list:
                install_status = component.install_firmware(platform_api_conn, i, image)
                if self.expect(install_status is not None, "Component {}: Failed to install firmware from image {}".format(i, image)):
                    self.expect(isinstance(install_status, bool), "Component {}: Return of Firmware installation appears to be incorrect from image {}".format(i, image))
self.assert_expectations()
def test_update_firmware(self, duthost, localhost, platform_api_conn):
if self.num_components == 0:
pytest.skip("No components found on device")
for i in range(self.num_components):
            for image in image_list:
update_status = component.update_firmware(platform_api_conn, i, image)
if self.expect(update_status is not None, "Component {}: Failed to update firmware from image {}".format(i, image)):
self.expect(isinstance(update_status, bool), "Component {}: Return of Firmware update appears to be incorrect from image {}".format(i, image))
self.assert_expectations()
|
[
"noreply@github.com"
] |
monipko.noreply@github.com
|
6976cdd0d4b19e359e9b77f84aa77a7431923896
|
fb4e9ddb628ae19634eb1d51f02fa33d093ca5d1
|
/tensorflow-master/tensorflow/contrib/memory_stats/__init__.py
|
e310f9fd5861cf93c02d83f38c706173a96e5ffe
|
[
"Apache-2.0"
] |
permissive
|
zhentaowang/machine-learning
|
68189bbc9bd052cecf068fb5fc7e88c04ec24e34
|
1dbba7bbe7f5c8c1449c312fb7e0c008581b90be
|
refs/heads/master
| 2021-06-14T14:08:51.889455
| 2017-03-20T02:10:43
| 2017-03-20T02:10:43
| 63,773,867
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics.
@@MaxBytesInUse
"""
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import MaxBytesInUse
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
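# Illustrative usage (a minimal sketch, assuming TF 1.x graph mode and an
# available GPU device):
#
#   import tensorflow as tf
#   from tensorflow.contrib.memory_stats import MaxBytesInUse
#
#   with tf.device('/device:GPU:0'):
#       max_bytes = MaxBytesInUse()  # scalar op: peak bytes in use on the device
#   with tf.Session() as sess:
#       print(sess.run(max_bytes))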
|
[
"wangzhentao@zhiweicloud.com"
] |
wangzhentao@zhiweicloud.com
|
4743280e06c6a802965a5557e13204fe16802350
|
873a5c8e5191c41d914d6572163487f7d7112ad0
|
/ex012.py
|
acc51f137666e8014e68e282ddca74d5e39feffc
|
[] |
no_license
|
VitrSantos/cursoemvideo
|
425d999ec260605fa07efa11253bb145fea0483b
|
addb10f1db260106edf953655cac9850f158c2b0
|
refs/heads/master
| 2022-11-21T01:18:20.241974
| 2020-07-20T15:41:31
| 2020-07-20T15:41:31
| 265,569,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# Python Exercise 12: Write an algorithm that reads the price of a product and shows its new price with a 5% discount.
preço = (float(input('Qual o preço do produto? R$ ')))
print(f'Com 5% de desconto seu produto custará R$ \033[032m{preço * 0.95:.2f}')
|
[
"63022943+VitrSantos@users.noreply.github.com"
] |
63022943+VitrSantos@users.noreply.github.com
|
d4bc95e2ad34b3a267b55bb7ddd18f6dfd28ef93
|
6ff23c7157fce6ede541db849cdd7f71a6f07d34
|
/ls.py
|
2795433b60edc38ca06942909db4fe26b9b19dea
|
[] |
no_license
|
czachariah/Load_Balancing_DNS_Servers
|
f425f797dad4ea45042f4ada626318ed791e6cd0
|
1cf68fefbc371830880b394e7ba82327e81b60cf
|
refs/heads/master
| 2022-04-12T08:36:59.800541
| 2020-04-06T17:39:07
| 2020-04-06T17:39:07
| 247,522,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,122
|
py
|
import socket
import sys
import select
import threading
# main
if __name__ == "__main__":
LS = threading.Thread(name='LSserver')
LS.start()
# need to make sure that the port number is given as an argument
if len(sys.argv) != 6:
        print("[LS]: ERROR: Need to include the correct number of arguments: "
+ "python ls.py lsListenPort ts1Hostname ts1ListenPort ts2Hostname ts2ListenPort")
exit()
# check to make sure that the port numbers given are integer numbers
try:
lsPortNum = int(sys.argv[1])
ts1PortNum = int(sys.argv[3])
ts2PortNum = int(sys.argv[5])
except Exception as err:
print("[LS]: Please make sure to enter positive Integers greater than 1023 for the port numbers.\n")
exit()
# make sure the port numbers given are greater than 1023
if lsPortNum <= 1023 or ts1PortNum <= 1023 or ts2PortNum <= 1023:
print("[LS]: Please make sure the port numbers are all greater than 1023.\n")
exit()
# this function is used to connect to the TS servers and receive any messages
def connectToTSServers(URL, TS1HostName, TS1PortNum , TS2HostName, TS2PortNum):
# make the sockets
try:
ts1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ts2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("[LS]: Sockets created to connect to TS1 and TS2 server.")
except socket.error as err:
print('[LS]: Error in creating sockets: {} \n'.format(err))
exit()
try:
# get the host name and the port number ready to be ready to connect to the TS1 and TS2 servers
ts1_addr = socket.gethostbyname(TS1HostName)
ts2_addr = socket.gethostbyname(TS2HostName)
# now connect to the TS1 and TS2 servers
ts1_server_binding = (ts1_addr, TS1PortNum)
ts2_server_binding = (ts2_addr, TS2PortNum)
ts1.settimeout(5)
ts2.settimeout(5)
ts1.connect(ts1_server_binding)
ts2.connect(ts2_server_binding)
            print("[LS]: Connected to the TS1 and TS2 servers.\n")
# send URL to look up
message = URL
ts1.send(message.encode('utf-8'))
ts2.send(message.encode('utf-8'))
print("[LS]: Sending host name " + message + " to both the servers for IP lookup ...\n")
except Exception as error:
print("[LS]: There was an error in connecting to the TS servers. Closing all sockets. Please try again.")
ts1.close()
ts2.close()
raise Exception()
# these are the connections to the TS servers that select() can use to read info from
inputs = [ts1, ts2]
while inputs:
# select will return 3 types of lists (respectively) : read_from , write_to , exceptions
readable, writable, exceptional = select.select(inputs, [], [], 5)
# we only care about reading from the TS sockets, so look into both sockets to get an IP
for s in readable:
# trying to get info from TS1
if s is ts1:
data = s.recv(1024)
if data:
print("[LS]: TS1 has returned an IP for " + URL + " : " + data)
ts1.close()
ts2.close()
return data
# TS1 did not have the IP, so check TS2
if s is ts2:
data = s.recv(1024)
if data:
print("[LS]: TS2 has returned an IP for " + URL + " : " + data)
ts1.close()
ts2.close()
return data
            # both TS1 and TS2 did not have the IP, so after the 5 second timeout, return "NOTHING" to the caller
if not (readable or writable or exceptional):
print("[LS]: The connections have timed out. Both TS1 and TS2 do not have the IP for: " + URL)
return "NOTHING"
# create the socket for the ls server
try:
ls = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("[LS]: Server socket created")
except socket.error as err:
print('[LS]: socket open error: {}\n'.format(err))
exit()
# bind the socket to the port to listen for clients
server_binding = ('', lsPortNum)
ls.bind(server_binding)
ls.listen(10)
host = socket.gethostname()
print("[LS]: Server host name is {}".format(host))
localhost_ip = (socket.gethostbyname(host))
print("[LS]: Server IP address is {}".format(localhost_ip))
print("\n")
# wait for client connections
while True:
csockid, addr = ls.accept()
print ("[LS]: Got a connection request from a client at {}".format(addr))
data_from_client = csockid.recv(500)
print("[LS]: Connection received. Looking up : {}".format(data_from_client.decode('utf-8')) + " ...")
try:
msg = connectToTSServers(data_from_client, sys.argv[2], ts1PortNum, sys.argv[4], ts2PortNum)
except:
ls.close()
exit()
if msg == "NOTHING":
msg = "" + data_from_client + " - " + "Error:HOST NOT FOUND"
print("[LS]: Message from TS server: " + str(msg) + " , now sending to client ...")
# send message back to the client
csockid.send(str(msg))
print("\n")
|
[
"zachariahchris@yahoo.com"
] |
zachariahchris@yahoo.com
|
0c874e87c7dd0378e2cec7afc6bd82c8f21b69a7
|
738fd4e3ed9966269657ceb2ecbca4b72b5d5526
|
/common/binary_search.py
|
4304d3ffa361cdc1c29ac93916b8cca4d820a685
|
[] |
no_license
|
abelishi/leetcode
|
128aa02b5caf509bf130f58eee3243ef52571230
|
1787c4c00a57210c8d47764791cef5f056c7e115
|
refs/heads/master
| 2023-06-14T01:13:29.113271
| 2021-07-17T07:03:24
| 2021-07-17T07:03:24
| 182,582,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
from typing import List
class BinarySearch(object):
@staticmethod
def upper_bound(sorted_list: List, target) -> int:
        # Index of the first element strictly greater than target
        # (len(sorted_list) if every element is <= target).
        left = 0
        right = len(sorted_list)
        while left < right:
            middle = left + (right - left) // 2
            if sorted_list[middle] <= target:
                left = middle + 1
            else:
                right = middle
        return left
@staticmethod
def lower_bound(sorted_list: List, target) -> int:
left = 0
right = len(sorted_list) - 1
while left <= right:
middle = left + (right - left) // 2
if sorted_list[middle] >= target:
right = middle - 1
else:
left = middle + 1
return left
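# Illustrative check (a minimal sketch; `data` and the probed targets are example
# values). upper_bound/lower_bound should agree with the standard library's
# bisect_right/bisect_left on a sorted list.
if __name__ == "__main__":
    import bisect
    data = [1, 2, 4, 4, 7]
    for target in [0, 1, 4, 5, 7, 9]:
        assert BinarySearch.lower_bound(data, target) == bisect.bisect_left(data, target)
        assert BinarySearch.upper_bound(data, target) == bisect.bisect_right(data, target)
    print("BinarySearch agrees with bisect on the sample data")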
|
[
"abelishi@163.com"
] |
abelishi@163.com
|
06405e42c32db838d8491f73ed075fbf28e05614
|
f8602a239b61ff0386be9c3c1230e1f5035952bd
|
/week_8/codingBat/string2/string2_count_hi.py
|
d2a62fb5529e35097a63d851e1dcf8e339a3f86b
|
[] |
no_license
|
MaratNurzhan/WD
|
4a7dcf05855b1965de5074503f609b4bee0ea45f
|
bb629660b8045568934693360f2a8793b158ce6e
|
refs/heads/master
| 2023-01-22T13:48:00.828829
| 2020-04-26T10:33:57
| 2020-04-26T10:33:57
| 237,237,374
| 0
| 0
| null | 2023-01-07T17:10:36
| 2020-01-30T15:01:14
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
def count_hi(str):
    count = 0
    for i in range(len(str)-1):
        if str[i] == 'h' and str[i+1] == 'i':
            count += 1
    return count
print(count_hi('abc hi ho'))  # expected: 1
|
[
"nurzhan.marat.2000@mail.ru"
] |
nurzhan.marat.2000@mail.ru
|
4ac24e02ed56a766960633f19c1674b3162a837f
|
88556921c3b76cf1b4d72e0654ad9b53b7ef1546
|
/ex9.py
|
7e982d5c9c9adfa4277eb76566ef2f1d1e2c57d5
|
[] |
no_license
|
jonasrosland/lpthw
|
74c5131eb6df467a37591edb21b0960105aada8d
|
423dda6623f018e7090ee3a21b0147c173604ba1
|
refs/heads/master
| 2021-01-10T21:21:26.121628
| 2014-02-03T19:31:19
| 2014-02-03T19:31:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# Here's some new strange stuff, remember to type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "\nJan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days: ", days
print "Here are the months: ", months
print """
There's something going on here.
With the three double quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
|
[
"jonas.rosland@gmail.com"
] |
jonas.rosland@gmail.com
|
cb42cfea10a03acaae91cbaec8b87a6af184ffd3
|
e79888cd68177e7ec5125270cdc52f888e211e78
|
/Naoto/chapter05/knock45.py
|
8720639a538757187bdeb96acf3bcfdd6516618a
|
[] |
no_license
|
cafenoctua/100knock2019
|
ec259bee27936bdacfe0097d42f23cc7500f0a07
|
88717a78c4290101a021fbe8b4f054f76c9d3fa6
|
refs/heads/master
| 2022-06-22T04:42:03.939373
| 2019-09-03T11:05:19
| 2019-09-03T11:05:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
'''
45. Extracting verb case patterns
Regarding the text used here as a corpus, we want to investigate the cases that Japanese predicates can take. Treat a verb as a predicate and the particles of the chunks (bunsetsu) that depend on the verb as its cases, and output predicates and cases in tab-separated format. The output should satisfy the following specification:
In a chunk containing verbs, use the base form of the leftmost verb as the predicate
Treat the particles that depend on the predicate as its cases
When multiple particles (chunks) depend on the predicate, list all particles in dictionary order, separated by spaces
Consider the example sentence 「吾輩はここで始めて人間というものを見た」 (the 8th sentence of neko.txt.cabocha). This sentence contains two verbs, 「始める」 and 「見る」. If the chunk depending on 「始める」 is analyzed as 「ここで」 and the chunks depending on 「見る」 as 「吾輩は」 and 「ものを」, the output should be:
始める で
見る は を
Save the output of this program to a file and use UNIX commands to check the following:
The combinations of predicates and case patterns that appear frequently in the corpus
The case patterns of the verbs 「する」, 「見る」 and 「与える」 (ordered by frequency of appearance in the corpus)
'''
from knock41 import load_cabocha_iter
def main():
path = "case_pattern.txt"
with open(path, "w") as f:
for chunks in load_cabocha_iter():
case_patterns = {}
            # {dst id: [verb base form, [particle, particle, ...]]}
for chunk in chunks:
if chunk.dst == -1:
continue
particles = [chunk.morphs[-1].surface]
# particles = [morph.surface for morph in chunk.morphs if morph.pos == '助詞']
verbs = [morph.base for morph in chunks[chunk.dst].morphs if morph.pos == '動詞']
if not particles or not verbs:
continue
if chunk.dst not in case_patterns:
case_patterns[chunk.dst] = [verbs[0], particles]
else:
case_patterns[chunk.dst][1].extend(particles)
for value in case_patterns.values():
f.write(f'{value[0]}\t{" ".join(sorted(value[1]))}\n')
if __name__ == "__main__":
main()
'''
sort case_pattern.txt | uniq -c | sort -n -r | head
grep "^する\s" case_pattern.txt | sort | uniq -c | sort -n -r | head
'''
|
[
"naoto_nakazawa@NaotonoMacBook-Pro.local"
] |
naoto_nakazawa@NaotonoMacBook-Pro.local
|
0394f0ff941f7f6c1eab4c4933df6023fe6baf0a
|
e4e523ebc00eec4b6126f8884944c8d20156af48
|
/server.py
|
4223313e8bd9e958c973f39b5c1e382f2ebcd08d
|
[] |
no_license
|
emish/B.A.R.D.
|
e3d7811ca40606c7dafef61f3a78e78a77d997b6
|
f1ec9ee39c2a1334f2c0301fbe7d4763b57c9176
|
refs/heads/master
| 2021-01-20T10:41:36.641614
| 2012-01-19T22:57:20
| 2012-01-19T22:57:20
| 3,222,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
import threading, socket, sys, re, httplib, os
#default server host and port number
host,port_num = "localhost", 9999
#two files: whitelist, blacklist
## shared whitelist, need lock
white_lock = threading.Lock()
## shared history, need lock
black_lock = threading.Lock()
whitelist = 'whitelist'
blacklist = 'blacklist'
class slave_thread(threading.Thread):
## Initializes the thread class
# @param conn Connection
# @ param addr Address
def __init__(self, conn,addr):
threading.Thread.__init__(self)
self.conn,self.addr = conn,addr
        self.name = ""
## Gets data from connection.
# 1 = white, 2 = black
def get_data(self):
"""server_thread.get_data() -> data
If the connection goes down, returns 0 length
string. Otherwise, buffers the data and returns it as a
string."""
data = []
while 1:
d = self.conn.recv(1024)
data.extend(d)
if len(d)<1024: break
return "".join(data)
def add_blacklist(self, atom):
black_lock.acquire()
try:
blacklist_f = open(blacklist, 'a')
blacklist_f.write(atom)
blacklist_f.close()
except:
print "Error. Invalid file location."
black_lock.release()
sys.exit()
black_lock.release()
    def add_whitelist(self, atom):
        white_lock.acquire()
        try:
            whitelist_f = open(whitelist, 'a')
            whitelist_f.write(atom)
            whitelist_f.close()
        except:
            print "Error. Invalid file location."
            white_lock.release()
            sys.exit()
        white_lock.release()
## The main thread loop. Receives message from the client, echoes them back and logs them in the history lists. Once a socket error or a 0 length string is received, the loop breaks, the socket is closed and the thread returns.
def run(self):
"""run() -> None"""
#helper to keep track of which mode you are in
message = False
while 1:
try:
#first get information from the socket
data = self.get_data()
#report it
print "Got:",data
#send it back
if data == "1": #user has chosen to update whitelist
self.conn.send("whitelist")
atom = self.get_data()
print atom
self.add_whitelist(atom)
print "added to the whitelist"
self.conn.send("ok")
elif data == "2": #user has chosen to update blacklist
self.conn.send("blacklist")
atom = self.get_data()
self.add_blacklist(atom)
self.conn.send("ok")
elif data == "3": #send user whitelist
white_lock.acquire()
whitelist_f = open(whitelist, 'r')
strTosend = ""
for line in whitelist_f:
strTosend += line
self.conn.send(strTosend)
whitelist_f.close()
white_lock.release()
elif data == "4": #send user blacklist
black_lock.acquire()
blacklist_f = open(blacklist, 'r')
strTosend = ""
for line in blacklist_f:
strTosend += line
self.conn.send(strTosend)
blacklist_f.close()
black_lock.release()
elif data == "exit" or not data: #check 0 data here
break #break loop
else:
self.conn.send("error")
break
except (KeyboardInterrupt,EOFError):
#capture Ctrl-C and Ctrl-D, exit smoothly
break #we're out of here
except socket.error,e:
print >>sys.stderr, "Got a socket error in server thread:",str(e)
break
self.conn.close() #close up, we're done
print "Thread from,",self.addr,"is closing"
## Main method allows user to specify port number for server using -p flag
def main(argv):
global port_num
#allow user to specify port number for server
    if len(argv) == 2 and argv[0] == '-p':
        try:
            port_num = int(argv[1])
        except:
            print "Error. Invalid port number input."
            sys.exit()
    elif len(argv) != 0:
        print "Invalid command line arguments."
        sys.exit()
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
sock.bind((host, port_num))
except:
print "Error. Cannot connect to port."
sys.exit()
sock.listen(5)
print "Server Started on:", (host,port_num)
#listen for a new client connections and spawn a thread to deal with it
while 1:
try:
conn,addr = sock.accept()
print "New connection from:", addr
th = slave_thread(conn,addr)
th.start()
except socket.error,e:
print >>sys.stderr, "Got an error in accept:",str(e)
break
except KeyboardInterrupt:
#Ctrl-C capture
#no guarantee that this thread will recieve the signal,
#but at some point, it should after multiple attempts
print "Exiting ..."
break
sock.close() #close the server socket
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"a.mamish@gmail.com"
] |
a.mamish@gmail.com
|
894fd3899b8ea36c619c66d7f18fe72bdefa2997
|
dfb2250e8815783a97f1797b171793d903103f4b
|
/pom/pages/homepage.py
|
27f7994d17169f907c67204fa37b6a06b0c0034e
|
[] |
no_license
|
aniljadesh/Python_automation_Selenium_POM
|
e17118bcd8628c52e9c29e81191d1c6edaf25bd5
|
1e743057b4536a2abcd420b33ee4870561fb6e03
|
refs/heads/main
| 2023-03-06T19:24:46.888693
| 2021-02-23T16:49:07
| 2021-02-23T16:49:07
| 341,621,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from pom.locator.locator import Locator
class HomePage():
def __init__(self, driver):
self.driver = driver
self.welcome_link_id = Locator.welcome_link_id
self.logout_link_linkText = Locator.logout_link_linkText
    def click_welcome_link(self):
        self.driver.find_element_by_id(self.welcome_link_id).click()
    def click_logout_link(self):
        self.driver.find_element_by_link_text(self.logout_link_linkText).click()
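# Illustrative usage (a minimal sketch; assumes a configured Selenium WebDriver
# and the locator values defined in pom.locator.locator.Locator):
#
#   home = HomePage(driver)
#   home.click_welcome_link()
#   home.click_logout_link()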
|
[
"noreply@github.com"
] |
aniljadesh.noreply@github.com
|
53ce1d81f3614d58a433ad4a62f454b8288a211a
|
a5711713e8e2a931aa1e931951128be3a2ddf914
|
/pta_sim/scripts/ng12p5_fl_sinusoid.py
|
7bdb2ce4ba6e48b8d64ff658c96e853aada439ca
|
[
"MIT"
] |
permissive
|
Hazboun6/pta_sim
|
b35021352202625817d707105956c77f1754207b
|
8023ec2d795abaca426e88ed146c15f4e57cfaa1
|
refs/heads/master
| 2023-05-25T19:15:20.973598
| 2023-05-17T21:32:56
| 2023-05-17T21:32:56
| 182,192,948
| 1
| 3
|
MIT
| 2022-12-26T20:47:20
| 2019-04-19T03:07:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import sys, os, glob, json, pickle, copy
import cloudpickle
import logging
from enterprise_extensions import models, model_utils, hypermodel, sampler
from enterprise.signals.signal_base import PTA
from enterprise.signals import gp_signals, signal_base, deterministic_signals, parameter, selections, white_signals, utils
from enterprise.signals import gp_bases as gpb
from enterprise.signals import gp_priors as gpp
from enterprise import constants as const
from enterprise_extensions.models import model_singlepsr_noise
from enterprise_extensions import blocks
from enterprise_extensions import gp_kernels as gpk
from enterprise_extensions import chromatic as chrom
import la_forge.core as co
import pta_sim
import pta_sim.parse_sim as parse_sim
from pta_sim.bayes import chain_length_bool, save_core, get_freqs, filter_psr_path
args = parse_sim.arguments()
logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO)
#Is chain longer than niter?
# longer = chain_length_bool(args.outdir, int(args.niter//10))
# if longer:
# sys.exit() #Hmmmm what to do here?
# else:
# pass
with open(args.noisepath, 'r') as fin:
    noise = json.load(fin)
with open('{0}'.format(args.pickle), "rb") as f:
psrs = pickle.load(f)
psr = psrs[args.process]
#### Nihan's Sine wave
dataset_tmin = 4597873783.54894 #np.min([psr.toas.min() for psr in psrs])
@parameter.function
def sine_wave(toas, flags, A = -9, f = -9, phase = 0.0):
return 10 ** A * np.sin(2 * np.pi * (10 ** f) * (toas - dataset_tmin) + phase)
def sine_signal(A, f, phase, name = ""):
return deterministic_signals.Deterministic(sine_wave(A = A, f = f, phase = phase), name = name)
day_seconds = 86400
sin = sine_signal(A = parameter.Uniform(-9, -4)('common_sin_A'), f = parameter.Uniform(-9, -7)('common_sin_f'), phase = parameter.Uniform(0, 2 * np.pi)('common_sin_phase'))
### Turn SW model off. Add in stand alone SW model and common process. Return model.
kwargs={'white_vary':args.vary_wn,
'extra_sigs':sin,
'red_var': True,
'tm_marg':True,
'tnequad':True}
if args.gfl:
kwargs.update({'red_var':False,
'factorized_like':True,
'psd':'spectrum',
'Tspan':args.tspan,
'gw_components':30,
'fact_like_logmin':-14.2,
'fact_like_logmax':-1.2,})
if args.gwb_on:
kwargs.update({'factorized_like':True,
'Tspan':args.tspan,
'gw_components':args.n_gwbfreqs,
'fact_like_gamma':args.gamma_gw,})
pta = model_singlepsr_noise(psr, **kwargs)
pta.set_default_params(noise)
groups = sampler.get_parameter_groups(pta)
groups.extend(sampler.get_psr_groups(pta))
Sampler = sampler.setup_sampler(pta, outdir=args.outdir+f'{psr.name}/', resume=True,
empirical_distr = args.emp_distr, groups=groups)
Sampler.addProposalToCycle(Sampler.jp.draw_from_empirical_distr, 120)
try:
achrom_freqs = get_freqs(pta, signal_id='gw')
np.savetxt(args.outdir + f'{psr.name}/achrom_rn_freqs.txt', achrom_freqs, fmt='%.18e')
except:
pass
x0 = np.hstack([p.sample() for p in pta.params])
Sampler.sample(x0, args.niter, SCAMweight=200, AMweight=100,
DEweight=200, burn=3000, writeHotChains=args.writeHotChains,
hotChain=args.hot_chain, Tskip=100, Tmax=args.tempmax)
|
[
"jeffrey.hazboun@gmail.com"
] |
jeffrey.hazboun@gmail.com
|
e6bfeca1286f905ae510abf5bb85d71353232e7b
|
1bc2a635a93b5bc84606edf9ac2226851cac9e6d
|
/rolling/gui/map/widget.py
|
b94f40ca6be86257b40af7209cb820c7bc71038c
|
[
"MIT"
] |
permissive
|
coolkat64/rolling
|
819149cbb1e11a455b93a030477f9da91e2f93e4
|
4c3ee2401128e993a52ac9b52cdbd32e17728129
|
refs/heads/master
| 2022-11-29T00:35:14.058665
| 2020-07-31T20:37:15
| 2020-07-31T20:37:15
| 285,312,272
| 0
| 0
|
MIT
| 2020-08-05T14:25:48
| 2020-08-05T14:25:47
| null |
UTF-8
|
Python
| false
| false
| 6,183
|
py
|
# coding: utf-8
import typing
import urwid
from urwid import BOX
from rolling.exception import CantMoveBecauseSurcharge
from rolling.exception import MoveToOtherZoneError
from rolling.gui.connector import ZoneMapConnector
from rolling.gui.dialog import SimpleDialog
from rolling.gui.map.render import MapRenderEngine
from rolling.gui.play.zone import ChangeZoneDialog
from rolling.map.source import ZoneMapSource
from rolling.model.zone import MoveZoneInfos
from rolling.util import CornerEnum
if typing.TYPE_CHECKING:
from rolling.gui.controller import Controller
class MapWidget(urwid.Widget):
_sizing = frozenset([BOX])
def __init__(self, controller: "Controller", render_engine: MapRenderEngine) -> None:
self._controller = controller
self._render_engine = render_engine
self._horizontal_offset = 0
self._vertical_offset = 0
self._current_row_size = 0
self._current_col_size = 0
self._first_display = True
@property
def render_engine(self) -> MapRenderEngine:
return self._render_engine
def render(self, size, focus=False):
self._current_col_size, self._current_row_size = size
return self._render(size, focus)
def _render(self, size, focus=False):
self._render_engine.render(
self._current_col_size,
self._current_row_size,
offset_horizontal=self._horizontal_offset,
offset_vertical=self._vertical_offset,
)
self._controller.loop.set_alarm_in(0.25, lambda *_, **__: self._invalidate())
return urwid.TextCanvas(
text=self._render_engine.rows,
attr=self._render_engine.attributes,
maxcol=self._current_col_size,
)
def selectable(self):
return True
def keypress(self, size, key):
pass
def _offset_change(self, new_offset: typing.Tuple[int, int]) -> None:
pass
class WorldMapWidget(MapWidget):
pass
# TODO BS 2019-01-22: Rename into ZoneMapWidget
class TileMapWidget(MapWidget):
def __init__(
self,
controller: "Controller",
render_engine: MapRenderEngine,
zone_map_source: ZoneMapSource,
) -> None:
super().__init__(controller, render_engine)
self._connector = ZoneMapConnector(self, self._controller, zone_map_source=zone_map_source)
def _offset_change(self, new_offset: typing.Tuple[int, int]) -> None:
try:
if not self._connector.move_is_possible(new_offset):
return
except MoveToOtherZoneError as exc:
# FIXME BS 2019-03-06: Manage (try) change zone case
self._change_zone_dialog(exc.corner)
return
except CantMoveBecauseSurcharge:
if not self._first_display:
self._controller.display_cant_move_because_surcharge()
return
# move player
self._connector.player_move(new_offset)
character_col_i = self._controller.display_objects_manager.current_player.col_i
character_row_i = self._controller.display_objects_manager.current_player.row_i
# center on player
self._horizontal_offset = self._current_col_size // 2 - character_col_i
self._vertical_offset = self._current_row_size // 2 - character_row_i
def _render(self, size, focus=False):
if self._first_display:
self._offset_change((0, 0)) # to compute offset with player position
self._first_display = False
return super()._render(size, focus)
def _change_zone_dialog(self, corner: CornerEnum) -> None:
zone_map_widget = self._controller._view.main_content_container.original_widget
world_row_i, world_col_i = self._connector.get_zone_coordinates(corner)
move_zone_infos: MoveZoneInfos = self._controller.client.get_move_zone_infos(
character_id=self._controller.player_character.id,
world_row_i=world_row_i,
world_col_i=world_col_i,
)
if not move_zone_infos.can_move:
self._controller._view.main_content_container.original_widget = SimpleDialog(
kernel=self._controller.kernel,
controller=self._controller,
original_widget=self._controller.view.main_content_container.original_widget,
title=f"Vous ne pouvez pas marcher vers là-bas, "
f"cela nécessiterait {move_zone_infos.cost} points d'actions",
go_back=True,
)
return
zones = self._controller.kernel.world_map_source.geography.rows
try:
zones[world_row_i][world_col_i] # test if zone exist
self._controller._view.main_content_container.original_widget = ChangeZoneDialog(
kernel=self._controller.kernel,
controller=self._controller,
original_widget=zone_map_widget,
title="Marchez vers là bas ?",
text=f"Marchez pour arrivez à votre destination "
f"vous coutera {move_zone_infos.cost} points d'actions",
world_row_i=world_row_i,
world_col_i=world_col_i,
)
except IndexError:
self._controller._view.main_content_container.original_widget = SimpleDialog(
kernel=self._controller.kernel,
controller=self._controller,
original_widget=self._controller.view.main_content_container.original_widget,
title="Vous êtes au bord du monde ! Vous ne pouvez pas aller au delà.",
go_back=True,
)
def keypress(self, size, key):
new_offset = None
if key == "up":
new_offset = (1, 0)
if key == "down":
new_offset = (-1, 0)
if key == "left":
new_offset = (0, 1)
if key == "right":
new_offset = (0, -1)
if key == "enter":
self._controller.display_zone_actions_on_place()
if new_offset is not None:
self._offset_change(new_offset)
self._invalidate()
|
[
"sevajol.bastien@gmail.com"
] |
sevajol.bastien@gmail.com
|
ddeb4292f997109b6ee0fbcc98d0a359bdbea648
|
20fedfd55a0cddff39769278544aea7a0d235c08
|
/qutebrowser/themes/base16-qutebrowser/themes/base16-spacemacs.config.py
|
111e8f65036fcee09fdffbd274e58ebb9f625135
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
ebzzry/dotfiles
|
1d2942c6ea0fe847f8fb6c5aacb229b392444c82
|
9ba2d1a3b5aff2f2c65d2c0dd17257e8f9e1f51a
|
refs/heads/main
| 2023-06-09T07:20:35.676866
| 2023-05-26T03:45:26
| 2023-05-26T03:45:26
| 64,131,018
| 30
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,387
|
py
|
# base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Spacemacs scheme by Nasser Alshammari (https://github.com/nashamri/spacemacs-theme)
base00 = "#1f2022"
base01 = "#282828"
base02 = "#444155"
base03 = "#585858"
base04 = "#b8b8b8"
base05 = "#a3a3a3"
base06 = "#e8e8e8"
base07 = "#f8f8f8"
base08 = "#f2241f"
base09 = "#ffa500"
base0A = "#b1951d"
base0B = "#67b11d"
base0C = "#2d9574"
base0D = "#4f97d7"
base0E = "#a31db1"
base0F = "#b03060"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base03
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base01
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base0A
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base0A
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base0A
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base0A
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base03
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base03
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base00
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base05
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base00
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base05
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
|
[
"rommel.martinez@valmiz.com"
] |
rommel.martinez@valmiz.com
|
72c751c8c83b4c95a2bbf363e97a5089829c4045
|
e75f36c9fbe70fec4a20b14765670730532f8ebc
|
/hand_object_detector/demo_original.py
|
20244f3b6d1692b691432516470921b2d7673d58
|
[
"MIT"
] |
permissive
|
luigman/CSCI5561ProjectFall2020
|
1f48332753896b78ae0584e48e4bf8fb5f0ce5a7
|
f1c8fc7c0d2c7a78dca7fe4046979d8b4d2b1ec6
|
refs/heads/main
| 2023-08-07T18:02:20.400537
| 2021-09-17T17:59:17
| 2021-09-17T17:59:17
| 302,421,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,558
|
py
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from PIL import Image
import torchvision.transforms as transforms
import torchvision.datasets as dset
from scipy.misc import imread
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections, vis_detections_PIL, vis_detections_filtered_objects_PIL, vis_detections_filtered_objects # (1) here add a function to viz
from model.utils.blob import im_list_to_blob
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import pdb
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/res101.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models',
default="models")
parser.add_argument('--image_dir', dest='image_dir',
help='directory to load images for demo',
default="images")
parser.add_argument('--save_dir', dest='save_dir',
help='directory to save results',
default="images_det")
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=8, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=89999, type=int, required=True)
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
default=True)
parser.add_argument('--webcam_num', dest='webcam_num',
help='webcam ID number',
default=-1, type=int)
parser.add_argument('--thresh_hand',
type=float, default=0.5,
required=False)
parser.add_argument('--thresh_obj', default=0.5,
type=float,
required=False)
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
if __name__ == '__main__':
args = parse_args()
# print('Called with args:')
# print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.USE_GPU_NMS = args.cuda
np.random.seed(cfg.RNG_SEED)
# load model
model_dir = args.load_dir + "/" + args.net + "_handobj_100K" + "/" + args.dataset
if not os.path.exists(model_dir):
raise Exception('There is no input directory for loading network from ' + model_dir)
load_name = os.path.join(model_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
pascal_classes = np.asarray(['__background__', 'targetobject', 'hand'])
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32, 64]', 'ANCHOR_RATIOS', '[0.5, 1, 2]']
    # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(pascal_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(pascal_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(pascal_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
if args.cuda > 0:
checkpoint = torch.load(load_name)
else:
checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
    # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
box_info = torch.FloatTensor(1)
# ship to cuda
if args.cuda > 0:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
with torch.no_grad():
if args.cuda > 0:
cfg.CUDA = True
if args.cuda > 0:
fasterRCNN.cuda()
fasterRCNN.eval()
start = time.time()
max_per_image = 100
thresh_hand = args.thresh_hand
thresh_obj = args.thresh_obj
vis = args.vis
# print(f'thresh_hand = {thresh_hand}')
# print(f'thnres_obj = {thresh_obj}')
webcam_num = args.webcam_num
# Set up webcam or get image directories
if webcam_num >= 0 :
cap = cv2.VideoCapture(webcam_num)
num_images = 0
else:
print(f'image dir = {args.image_dir}')
print(f'save dir = {args.save_dir}')
imglist = os.listdir(args.image_dir)
num_images = len(imglist)
print('Loaded Photo: {} images.'.format(num_images))
while (num_images >= 0):
total_tic = time.time()
if webcam_num == -1:
num_images -= 1
# Get image from the webcam
if webcam_num >= 0:
if not cap.isOpened():
raise RuntimeError("Webcam could not open. Please check connection.")
ret, frame = cap.read()
im_in = np.array(frame)
# Load the demo image
else:
im_file = os.path.join(args.image_dir, imglist[num_images])
im_in = np.array(imread(im_file))
# resize
# im_in = np.array(Image.fromarray(im_in).resize((640, 360)))
if len(im_in.shape) == 2:
im_in = im_in[:,:,np.newaxis]
im_in = np.concatenate((im_in,im_in,im_in), axis=2)
# rgb -> bgr
im = im_in[:,:,::-1]
blobs, im_scales = _get_image_blob(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs
im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
im_data_pt = torch.from_numpy(im_blob)
im_data_pt = im_data_pt.permute(0, 3, 1, 2)
im_info_pt = torch.from_numpy(im_info_np)
with torch.no_grad():
im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
gt_boxes.resize_(1, 1, 5).zero_()
num_boxes.resize_(1).zero_()
box_info.resize_(1, 1, 5).zero_()
# pdb.set_trace()
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label, loss_list = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, box_info)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
        # extract predicted params
contact_vector = loss_list[0][0] # hand contact state info
offset_vector = loss_list[1][0].detach() # offset vector (factored into a unit vector and a magnitude)
lr_vector = loss_list[2][0].detach() # hand side info (left/right)
# get hand contact
_, contact_indices = torch.max(contact_vector, 2)
contact_indices = contact_indices.squeeze(0).unsqueeze(-1).float()
# get hand side
lr = torch.sigmoid(lr_vector) > 0.5
lr = lr.squeeze(0).float()
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4)
else:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= im_scales[0]
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im2show = np.copy(im)
obj_dets, hand_dets = None, None
for j in xrange(1, len(pascal_classes)):
# inds = torch.nonzero(scores[:,j] > thresh).view(-1)
if pascal_classes[j] == 'hand':
inds = torch.nonzero(scores[:,j]>thresh_hand).view(-1)
elif pascal_classes[j] == 'targetobject':
inds = torch.nonzero(scores[:,j]>thresh_obj).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
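          # each det row: box (4), score, contact state, offset vector
          # (unit vector + magnitude), and left/right hand side flag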
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1), contact_indices[inds], offset_vector.squeeze(0)[inds], lr[inds]), 1)
cls_dets = cls_dets[order]
keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if pascal_classes[j] == 'targetobject':
obj_dets = cls_dets.cpu().numpy()
if pascal_classes[j] == 'hand':
hand_dets = cls_dets.cpu().numpy()
if vis:
# visualization
im2show,bbox_array = vis_detections_filtered_objects_PIL(im2show, obj_dets, hand_dets, thresh_hand, thresh_obj)
misc_toc = time.time()
nms_time = misc_toc - misc_tic
if webcam_num == -1:
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(num_images + 1, len(imglist), detect_time, nms_time))
sys.stdout.flush()
if vis and webcam_num == -1:
folder_name = args.save_dir
os.makedirs(folder_name, exist_ok=True)
result_path = os.path.join(folder_name, imglist[num_images][:-4] + "_det.png")
im2show.save(result_path)
else:
im2showRGB = cv2.cvtColor(im2show, cv2.COLOR_BGR2RGB)
cv2.imshow("frame", im2showRGB)
total_toc = time.time()
total_time = total_toc - total_tic
frame_rate = 1 / total_time
print('Frame rate:', frame_rate)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if webcam_num >= 0:
cap.release()
cv2.destroyAllWindows()
# --- /hashes_dict/cat_builder.py (pooja1506/Beginner_python_code) ---
""" Write a method cat_builder that takes in a name, color, and age. The method should return a dictionary representing a cat with those values. """
def cat_builder(name_str, color_str, age_num):
final_dict = {}
final_dict["name"] = name_str
final_dict["color"] = color_str
final_dict["age"] = age_num
return final_dict
# => {"name": "Whiskers", "color": "orange", "age": 3}
print(cat_builder("Whiskers", "orange", 3))
# => {"name": "Salem", "color": "black", "age": 100}
print(cat_builder("Salem", "black", 100))
# --- /tests/lib/stubs/server/list_line_up/get_table_by_name.py (louiscklaw/QA_test_scripts) ---
#!/usr/bin/env python3
import os, sys
from pprint import pprint
import json
import requests
from list_all_table import listAllTable
def getTableByName(queue_user_name):
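    """Return the {'lid': ...} record for the table named queue_user_name."""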
all_table_json = listAllTable()
all_table_by_name={}
for table_json in all_table_json:
all_table_by_name[table_json['name']]={
'lid': table_json['lid']
}
return all_table_by_name[queue_user_name]
if __name__ == '__main__':
print(getTableByName('louis_finger_print_1'))
# --- /gdance/apps/users/migrations/0002_schedule_modalidad.py (gdanceapp/gdance) ---
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-11-17 15:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='schedule',
name='modalidad',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='base.Modalidad'),
preserve_default=False,
),
]
# --- /迁移bert tf2/migrate_to_tf2_bert/第一步命令行/bert-master-tf1/run_tokenTypeEmb_class_yr.py (2877992943/information_extracion) ---
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
import sys
eps=0.0000001
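# small constant that keeps tf.log arguments strictly positive in the losses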
# ##### use the transformer decoder (commented out below)
# path_to_be_add='../models_src_part'
# sys.path.insert(0, path_to_be_add)
# print (sys.path)
# from official.transformer.model import model_params
# from official.transformer.model import transformer_decoder
# from official.transformer.utils import metrics
# from official.transformer import transformer_main
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
flags.DEFINE_string("target_vocab", None,
"The vocabulary file that the BERT model was fine-tuned.")
flags.DEFINE_string("target_vocab_size", None,
"The vocabulary sz that the BERT model was fine-tuned.")
flags.DEFINE_string("data_dir", None,
"The vocabulary file that the BERT model was fine-tuned.")
flags.DEFINE_string("pos_vocab", None,
"The vocabulary file that the BERT model was fine-tuned.")
flags.DEFINE_string("pos_vocab_size", None,
"The vocabulary sz that the BERT model was fine-tuned.")
flags.DEFINE_string("max_seq_length_y", None,
"The vocabulary sz that the BERT model was fine-tuned.")
flags.DEFINE_float(
    "label_smoothing", 0.1,
    "Label smoothing factor.")
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
# class knowledgeExample(object):
# """A single training/test example for simple sequence classification.
#
# For examples without an answer, the start and end position are -1.
# """
#
# def __init__(self,
# text,
# doc_tokens,
# tokentype,
# ytoken
# ):
# self.text=text
# self.doc_tokens=doc_tokens
# self.tokentype=tokentype
# self.ytoken=ytoken
#
# def __str__(self):
# return self.__repr__()
#
# def __repr__(self):
# s = ""
#
# s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
# if self.text:
# s += ", start_position: %d" % (self.text)
# if self.ytoken:
# s += ", end_position: %d" % (self.ytoken)
#
# return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
#example_index,
#doc_span_index,
#tokens,
#token_to_orig_map,
#token_is_max_context,
input_ids,
input_mask,
segment_ids, # token type
target_ids,
num_of_target
#start_position=None,
#end_position=None,
#is_impossible=None
):
self.unique_id = unique_id
#self.example_index = example_index
#self.doc_span_index = doc_span_index
#self.tokens = tokens
#self.token_to_orig_map = token_to_orig_map
#self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.target_ids=target_ids
self.num_of_target=num_of_target
#self.start_position = start_position
#self.end_position = end_position
#self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
  for entry in input_data[:2]:  # NOTE: debug truncation; only first 2 entries
    for paragraph in entry["paragraphs"][:2]:  # NOTE: debug truncation
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def read_knowledge_example(inputfile,is_training):
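  """Read one JSON object per line; 'posseq' carries the char/POS-tag sequence
  and 'spo_list' carries the gold predicates for the sentence."""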
examples=[]
reader=open(inputfile)
for line in reader.readlines():
line=line.strip()
if len(line)==0:continue
#
d=json.loads(line)
spo_list=d['spo_list']
posseq=d['posseq']
text=[cell['char'] for cell in posseq];#print (''.join(text))
postag=[cell['tag'] for cell in posseq];#print (' '.join(postag))
predicate=[cell['predicate'] for cell in spo_list];#print (' '.join(predicate))
examples.append({'text':text,'tag':postag,'predicate':predicate})
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn,
tokenizer_y,
max_seq_length_y
):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
def convert_examples_to_features1(examples,
tokenizer,
tokenizer_pos,
max_seq_length,
is_training,
output_fn,
tokenizer_y,
max_seq_length_y
):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1
for (example_index, example) in enumerate(examples):
#query_tokens = tokenizer.tokenize(example.question_text)
charll,posll,predicatell=example['text'],example['tag'],example['predicate']
# text_token=tokenizer.tokenize(' '.join(charll))
# pos_token=tokenizer_pos.tokenize(' '.join(posll))
# predicate_token=tokenizer_y.tokenize(' '.join(predicatell))
text_token=[w.lower() for w in charll]
pos_token=[w.lower() for w in posll]
predicate_token=[w.lower() for w in predicatell]
    # truncate to max_seq_length - 1 so the [CLS]/PAD token prepended below
    # still fits (the original slice kept one token too many)
    if len(text_token) > max_seq_length - 1:
      text_token = text_token[0:max_seq_length - 1]
    if len(pos_token) > max_seq_length - 1:
      pos_token = pos_token[0:max_seq_length - 1]
    if len(predicate_token) > max_seq_length_y:
      predicate_token = predicate_token[0:max_seq_length_y]

    text_token = ['[CLS]'] + text_token
    pos_token = ['PAD'] + pos_token
    #predicate_token = predicate_token + ['EOS']
input_ids = tokenizer.convert_tokens_to_ids(text_token)
segment_ids=tokenizer_pos.convert_tokens_to_ids(pos_token)
predicate_ids=tokenizer_y.convert_tokens_to_ids(predicate_token)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# zero pad y seq
while len(predicate_ids) < max_seq_length_y:
predicate_ids.append(0)
assert len(predicate_ids) == max_seq_length_y
### num of target
num_y=len([yi for yi in predicate_ids if yi!=0])
#tf.logging.info("char: %s" % (' '.join([str(w) for w in input_ids])))
#tf.logging.info("char mask: %s" % (' '.join([str(w) for w in input_mask])))
#tf.logging.info("pos: %s" % (' '.join([str(w) for w in segment_ids])))
#tf.logging.info("predicate: %s" % (' '.join([str(w) for w in predicate_ids])))
#tf.logging.info("predicate number: %d" % (num_y))
feature = InputFeatures(
unique_id=unique_id,
#example_index=example_index,
#doc_span_index=doc_span_index,
#tokens=tokens,
#token_to_orig_map=token_to_orig_map,
#token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
target_ids=predicate_ids,
num_of_target=num_y
#start_position=start_position,
#end_position=end_position,
#is_impossible=example.is_impossible
)
# Run callback
output_fn(feature)
unique_id += 1
#if unique_id>10:break #yr
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  #   Question: What country is the top exporter of electronics?
  #   Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings,
targets,
#GLOBAL_PARAMS_target
):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
#final_hidden = model.get_sequence_output() # [8 384 768]
first_token=model.get_pooled_output() #[batch step]
# final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
# batch_size = final_hidden_shape[0]
# seq_length = final_hidden_shape[1]
# hidden_size = final_hidden_shape[2]
### decode part
# my_decoder = transformer_decoder.TransformerDecoder(GLOBAL_PARAMS_target,train=is_training)###??
# if is_training :
# logits = my_decoder(input_ids,final_hidden,targets)
# else:
# logits = my_decoder(input_ids,final_hidden)
# return logits
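  # Two heads over the pooled [CLS] vector: `multi` scores each predicate
  # independently (multi-label) and `num_of_class` predicts how many
  # predicates the sentence contains.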
logits_multic=tf.layers.dense(inputs=first_token,name='multi',
units=FLAGS.target_vocab_size)
logits_num = tf.layers.dense(inputs=first_token,name='num_of_class',
units=FLAGS.target_vocab_size)
return logits_multic,logits_num # #[batch vocabsz]
# output_weights = tf.get_variable(
# "cls/squad/output_weights", [2, hidden_size],
# initializer=tf.truncated_normal_initializer(stddev=0.02))
#
# output_bias = tf.get_variable(
# "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
#
# final_hidden_matrix = tf.reshape(final_hidden,
# [batch_size * seq_length, hidden_size])
# logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
# logits = tf.nn.bias_add(logits, output_bias)
#
# logits = tf.reshape(logits, [batch_size, seq_length, 2])
# logits = tf.transpose(logits, [2, 0, 1])
#
# unstacked_logits = tf.unstack(logits, axis=0) # [2, 8batch, 384step]
#
# (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
#
# return (start_logits, end_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings,
#GLOBAL_PARAMS_target
):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
target_ids=features['target_ids'] #[batch step]
num_target=features['num_of_target'] #[batchsz,]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
logits_multic,logits_num = create_model(
targets=target_ids,
#GLOBAL_PARAMS_target=GLOBAL_PARAMS_target,
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings) # [batch vocabsz]
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
# xentropy, weights = metrics.padded_cross_entropy_loss(
# logits, targets, FLAGS.label_smoothing, FLAGS.vocab_size_symptom)
# loss = tf.reduce_sum(xentropy) / tf.reduce_sum(weights)
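      # total loss = cross-entropy over the predicate count (loss1)
      # plus per-class sigmoid cross-entropy over the predicate set (loss2)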
##### loss 1 number of target
y=tf.one_hot(num_target,depth=FLAGS.target_vocab_size)#[batch vocabsz]
prob_num_target=tf.nn.softmax(logits_num,axis=-1)
loss1=-tf.reduce_mean(tf.reduce_sum(y*tf.log(prob_num_target+eps),axis=-1))
##### loss2 multi label
y=tf.reduce_sum(tf.one_hot(target_ids,depth=FLAGS.target_vocab_size),axis=1)[:,1:]
# [batch numClass25 vocabsz]->[batch vocabsz] -> remove padding
prob_y=tf.nn.sigmoid(logits_multic)[:,1:] #[batch vocabsz]
loss2=y*tf.log(eps+prob_y) + (1-y)*tf.log(1.-prob_y+eps)
loss2=tf.reduce_mean(-loss2)
loss=loss1+loss2
train_op = optimization.create_optimizer(loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
#"unique_ids": unique_ids,
#"start_logits": start_logits,
#"end_logits": end_logits,
"logits_multic": logits_multic,
'logits_num':logits_num
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length,
seq_length_y,
is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features['target_ids']=tf.FixedLenFeature([seq_length_y], tf.int64)
name_to_features["num_of_target"]= tf.FixedLenFeature([], tf.int64)
# name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
# name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min mull score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
  # Therefore, we have to apply a semi-complicated alignment heuristic between
  # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["target_ids"] = create_int_feature(feature.target_ids)
features["num_of_target"] = create_int_feature([feature.num_of_target])
# features["start_positions"] = create_int_feature([feature.start_position])
# features["end_positions"] = create_int_feature([feature.end_position])
# impossible = 0
# if feature.is_impossible:
# impossible = 1
# features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tokenizer_y = tokenization.FullTokenizer_word(
vocab_file=FLAGS.target_vocab, do_lower_case=FLAGS.do_lower_case)###???
tokenizer_pos = tokenization.FullTokenizer(
vocab_file=FLAGS.pos_vocab, do_lower_case=FLAGS.do_lower_case) ###???
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
###############
# generate data
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = read_knowledge_example(
inputfile=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
    # Pre-shuffle the input to avoid having to make a very large shuffle
    # buffer in the `input_fn`.
rng = random.Random(12345)
#rng.shuffle(train_examples)
if FLAGS.do_train:
tf_filename=os.path.join(FLAGS.output_dir, "train.tf_record")
if not os.path.exists(tf_filename): # not exist tf.record
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
is_training=True)
convert_examples_to_features1(
examples=train_examples,
tokenizer=tokenizer,
tokenizer_y=tokenizer_y,
tokenizer_pos=tokenizer_pos,
max_seq_length_y=FLAGS.max_seq_length_y,
max_seq_length=FLAGS.max_seq_length,
is_training=True,
output_fn=train_writer.process_feature)
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
###########
# build model
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
# GLOBAL_PARAMS_target=GLOBAL_PARAMS_target
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
##########
train_input_fn = input_fn_builder(
#input_file=train_writer.filename,
input_file=tf_filename,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
seq_length_y=FLAGS.max_seq_length_y)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
local_flag = False
##
import os
if local_flag == True:
OLD_BERT_MODEL_DIR = '/Users/admin/Desktop/previous/bert2019/download_model_cn/chinese_L-12_H-768_A-12'
XIAOBAI_DATA_DIR = '../bert_demo_data/knowlege/'
FLAGS.data_dir = '../tmp/'
FLAGS.output_dir = '../model_specifiedTask/'
elif local_flag == False:
OLD_BERT_MODEL_DIR = '/code/bert_download/download_model_cn/chinese_L-12_H-768_A-12'
XIAOBAI_DATA_DIR = '/code/bert_train_yr_squad_v0101/bert_demo_data/knowlege'
FLAGS.data_dir = '/code/bert_train_yr_squad_v0101/tmp/'
#FLAGS.output_dir = '/code/bert_train_yr_squad_v0101/model_specifiedTask/'
FLAGS.output_dir='/models/bert_train_yr_squad_v0101/'
FLAGS.bert_config_file = os.path.join(OLD_BERT_MODEL_DIR, 'bert_config.json')
FLAGS.do_train = True
FLAGS.init_checkpoint = os.path.join(OLD_BERT_MODEL_DIR, 'bert_model.ckpt')
FLAGS.num_train_epochs = 10000
FLAGS.learning_rate = 3e-5
FLAGS.train_batch_size = 8
FLAGS.label_smoothing = 0.1
####
f1=['dev1.json','train1.json']
FLAGS.train_file=os.path.join(XIAOBAI_DATA_DIR,'tmp',f1[1])
FLAGS.vocab_file=os.path.join(OLD_BERT_MODEL_DIR,'vocab.txt')
### target seq
FLAGS.target_vocab=os.path.join(XIAOBAI_DATA_DIR,'dict_v1231','spo.txt')
FLAGS.target_vocab_size = 52
### pos vocab
FLAGS.pos_vocab=os.path.join(XIAOBAI_DATA_DIR,'dict_v1231','pos.txt')
FLAGS.pos_vocab_size=26
FLAGS.save_checkpoints_steps = 3000
# GLOBAL_PARAMS_target = model_params.BASE_PARAMS.copy()
# GLOBAL_PARAMS_target["vocab_size"] = FLAGS.target_vocab_size
# GLOBAL_PARAMS_target['num_hidden_layers']=3
# GLOBAL_PARAMS_target['hidden_size']=128
FLAGS.max_seq_length_y=25
FLAGS.max_seq_length=301
tf.app.run()
# --- /models.py (c404err/ml) ---
import nn
import numpy
class PerceptronModel(object):
def __init__(self, dimensions):
"""
Initialize a new Perceptron instance.
A perceptron classifies data points as either belonging to a particular
class (+1) or not (-1). `dimensions` is the dimensionality of the data.
For example, dimensions=2 would mean that the perceptron must classify
2D points.
"""
self.w = nn.Parameter(1, dimensions)
def get_weights(self):
"""
Return a Parameter instance with the current weights of the perceptron.
"""
return self.w
def run(self, x):
"""
Calculates the score assigned by the perceptron to a data point x.
Inputs:
x: a node with shape (1 x dimensions)
Returns: a node containing a single number (the score)
"""
"*** YOUR CODE HERE ***"
return nn.DotProduct(self.w, x)
def get_prediction(self, x):
"""
Calculates the predicted class for a single data point `x`.
Returns: 1 or -1
"""
"*** YOUR CODE HERE ***"
if (nn.as_scalar(self.run(x)) >= 0):
return 1
return -1
def train(self, dataset):
"""
Train the perceptron until convergence.
"""
"*** YOUR CODE HERE ***"
        converged = False
        while not converged:
            converged = True
            for x, y in dataset.iterate_once(1):
                if nn.as_scalar(y) != self.get_prediction(x):
                    converged = False
                    self.w.update(x, nn.as_scalar(y))
class RegressionModel(object):
"""
A neural network model for approximating a function that maps from real
numbers to real numbers. The network should be sufficiently large to be able
to approximate sin(x) on the interval [-2pi, 2pi] to reasonable precision.
"""
def __init__(self):
# Initialize your model parameters here
"*** YOUR CODE HERE ***"
self.w0 = nn.Parameter(1, 100)
self.b0 = nn.Parameter(1, 100)
self.w1 = nn.Parameter(100, 1)
self.b1 = nn.Parameter(1, 1)
def run(self, x):
"""
Runs the model for a batch of examples.
Inputs:
x: a node with shape (batch_size x 1)
Returns:
A node with shape (batch_size x 1) containing predicted y-values
"""
"*** YOUR CODE HERE ***"
xm0 = nn.Linear(x, self.w0)
r = nn.ReLU(nn.AddBias(xm0, self.b0))
xm1 = nn.Linear(r, self.w1)
return nn.AddBias(xm1, self.b1)
def get_loss(self, x, y):
"""
Computes the loss for a batch of examples.
Inputs:
x: a node with shape (batch_size x 1)
y: a node with shape (batch_size x 1), containing the true y-values
to be used for training
Returns: a loss node
"""
"*** YOUR CODE HERE ***"
return nn.SquareLoss(self.run(x), y)
def train(self, dataset):
"""
Trains the model.
"""
"*** YOUR CODE HERE ***"
while (True):
for x, y in dataset.iterate_once(1):
g = nn.gradients(self.get_loss(x, y), [self.w0, self.w1, self.b0, self.b1])
self.w0.update(g[0], -0.005)
self.w1.update(g[1], -0.005)
self.b0.update(g[2], -0.005)
self.b1.update(g[3], -0.005)
if (nn.as_scalar(self.get_loss(nn.Constant(dataset.x), nn.Constant(dataset.y))) < 0.02):
return
class DigitClassificationModel(object):
"""
A model for handwritten digit classification using the MNIST dataset.
Each handwritten digit is a 28x28 pixel grayscale image, which is flattened
into a 784-dimensional vector for the purposes of this model. Each entry in
the vector is a floating point number between 0 and 1.
The goal is to sort each digit into one of 10 classes (number 0 through 9).
(See RegressionModel for more information about the APIs of different
methods here. We recommend that you implement the RegressionModel before
working on this part of the project.)
"""
def __init__(self):
# Initialize your model parameters here
"*** YOUR CODE HERE ***"
self.b0 = nn.Parameter(1, 100)
self.w0 = nn.Parameter(784, 100)
self.b1 = nn.Parameter(1, 10)
self.w1 = nn.Parameter(100, 10)
def run(self, x):
"""
Runs the model for a batch of examples.
Your model should predict a node with shape (batch_size x 10),
containing scores. Higher scores correspond to greater probability of
the image belonging to a particular class.
Inputs:
x: a node with shape (batch_size x 784)
Output:
A node with shape (batch_size x 10) containing predicted scores
(also called logits)
"""
"*** YOUR CODE HERE ***"
xm0 = nn.Linear(x, self.w0)
r0 = nn.ReLU(nn.AddBias(xm0, self.b0))
xm1 = nn.Linear(r0, self.w1)
return nn.AddBias(xm1, self.b1)
def get_loss(self, x, y):
"""
Computes the loss for a batch of examples.
The correct labels `y` are represented as a node with shape
(batch_size x 10). Each row is a one-hot vector encoding the correct
digit class (0-9).
Inputs:
x: a node with shape (batch_size x 784)
y: a node with shape (batch_size x 10)
Returns: a loss node
"""
"*** YOUR CODE HERE ***"
return nn.SoftmaxLoss(self.run(x), y)
def train(self, dataset):
"""
Trains the model.
"""
"*** YOUR CODE HERE ***"
while (True):
for x, y in dataset.iterate_once(1):
g = nn.gradients(self.get_loss(x, y), [self.w0, self.w1, self.b0, self.b1])
self.w0.update(g[0], -0.005)
self.w1.update(g[1], -0.005)
self.b0.update(g[2], -0.005)
self.b1.update(g[3], -0.005)
            # validation accuracy is a fraction in [0, 1]; stop at the 97% target
            if dataset.get_validation_accuracy() > 0.97:
return
class LanguageIDModel(object):
"""
A model for language identification at a single-word granularity.
(See RegressionModel for more information about the APIs of different
methods here. We recommend that you implement the RegressionModel before
working on this part of the project.)
"""
def __init__(self):
# Our dataset contains words from five different languages, and the
# combined alphabets of the five languages contain a total of 47 unique
# characters.
# You can refer to self.num_chars or len(self.languages) in your code
self.num_chars = 47
self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
# Initialize your model parameters here
"*** YOUR CODE HERE ***"
self.w = nn.Parameter(47, 400)
self.b0 = nn.Parameter(1, 400)
self.wHidden = nn.Parameter(400, 400)
self.b1 = nn.Parameter(1, 400)
self.wFinal = nn.Parameter(400, 5)
self.b2 = nn.Parameter(1, 5)
def run(self, xs):
"""
Runs the model for a batch of examples.
Although words have different lengths, our data processing guarantees
that within a single batch, all words will be of the same length (L).
Here `xs` will be a list of length L. Each element of `xs` will be a
node with shape (batch_size x self.num_chars), where every row in the
array is a one-hot vector encoding of a character. For example, if we
have a batch of 8 three-letter words where the last word is "cat", then
xs[1] will be a node that contains a 1 at position (7, 0). Here the
index 7 reflects the fact that "cat" is the last word in the batch, and
the index 0 reflects the fact that the letter "a" is the inital (0th)
letter of our combined alphabet for this task.
Your model should use a Recurrent Neural Network to summarize the list
`xs` into a single node of shape (batch_size x hidden_size), for your
choice of hidden_size. It should then calculate a node of shape
(batch_size x 5) containing scores, where higher scores correspond to
greater probability of the word originating from a particular language.
Inputs:
xs: a list with L elements (one per character), where each element
is a node with shape (batch_size x self.num_chars)
Returns:
A node with shape (batch_size x 5) containing predicted scores
(also called logits)
"""
"*** YOUR CODE HERE ***"
        first = True
        for x in xs:
            xm0 = nn.Linear(x, self.w)
            r0 = nn.ReLU(nn.AddBias(xm0, self.b0))
            xm1 = nn.Linear(r0, self.wHidden)
            if first:
                ans = nn.ReLU(nn.AddBias(xm1, self.b1))
                first = False
            else:
                ans = nn.Add(nn.AddBias(xm1, self.b1), nn.Linear(nn.ReLU(ans), self.wHidden))
return nn.AddBias(nn.Linear(nn.ReLU(ans), self.wFinal), self.b2)
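        # Writing f(x) = ReLU(x @ W + b0) @ W_hidden + b1 for the per-character
        # map built above, the hidden state unrolls as
        #   h_1 = ReLU(f(x_1)),   h_t = f(x_t) + ReLU(h_{t-1}) @ W_hidden
        # and the returned logits are ReLU(h_L) @ W_final + b2.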
def get_loss(self, xs, y):
"""
Computes the loss for a batch of examples.
The correct labels `y` are represented as a node with shape
(batch_size x 5). Each row is a one-hot vector encoding the correct
language.
Inputs:
xs: a list with L elements (one per character), where each element
is a node with shape (batch_size x self.num_chars)
y: a node with shape (batch_size x 5)
Returns: a loss node
"""
"*** YOUR CODE HERE ***"
return nn.SoftmaxLoss(self.run(xs), y)
def train(self, dataset):
"""
Trains the model.
"""
"*** YOUR CODE HERE ***"
while (True):
for x, y in dataset.iterate_once(2):
g = nn.gradients(self.get_loss(x, y), [self.w, self.wHidden, self.wFinal, self.b0, self.b1, self.b2])
self.w.update(g[0], -0.005)
self.wHidden.update(g[1], -0.005)
self.wFinal.update(g[2], -0.005)
self.b0.update(g[3], -0.005)
self.b1.update(g[4], -0.005)
self.b2.update(g[5], -0.005)
if (dataset.get_validation_accuracy() >= 0.86):
return
|
[
"noreply@github.com"
] |
c404err.noreply@github.com
|
11a26793bdcb54e200cccb110f50fcb7e3a865ef
|
5a89613dffb83ae99946c5529ff30d62918e0401
|
/code/training_data/district_training_data.py
|
a45f971fc627be598134b6d6a4ac7298a356c3f5
|
[
"MIT"
] |
permissive
|
sanja7s/JJ
|
59a400deb431072fd525dffac368578d08cd68d0
|
6facc5fe4aa19ca5927d3929f73fc8017f35d522
|
refs/heads/master
| 2021-04-21T21:36:39.114032
| 2020-05-26T10:38:57
| 2020-05-26T10:38:57
| 249,817,535
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
import pandas as pd
# network_type = "vgg16_4096"
# PCA_components = 64
# LABELING_METHOD = "maximum"
# AVERAGING_METHOD = "kaist"
# normalize = False
norm_dict = {
True:"normalized_",
False: "",
"Total": "normalized_across_all_"
}
data_dir = "preprocessed/"
def get_training_data(network_type, PCA_components,\
LABELING_METHOD, AVERAGING_METHOD,\
pred_type, normalize, data_dir=data_dir):
f = get_features(network_type, PCA_components,\
LABELING_METHOD, AVERAGING_METHOD, data_dir)
l = get_labels(pred_type, normalize, data_dir)
df = f.merge(l, on=["city_district"])
# print (df.head())
return df
def get_features(network_type, PCA_components,\
LABELING_METHOD, AVERAGING_METHOD, data_dir):
features_dir = data_dir + "training_data/features/districts/"
if network_type == "vgg19":
features_file = "Italy_6_cities_vgg19_pca"+str(PCA_components)+"_linear_fc_thirdlast_layer.csv"
elif network_type == "resnet50":
features_file = "Italy_6_cities_resnet_pca"+str(PCA_components)+"_second_last_layer.csv"
elif network_type == "vgg16_4096":
features_file = "Italy_6_cities_resnet_pca" + str(PCA_components) + "_vgg16_4096.csv"
ff = features_file.replace(".csv", "_" + \
LABELING_METHOD + "_" + AVERAGING_METHOD +"_features.csv")
features = pd.read_csv(features_dir + ff)
# print (features.head())
return features
def get_labels(pred_type, normalize, data_dir):
label_dir = data_dir + "training_data/labels/districts/"
ll = norm_dict[normalize] + "district_" \
+ pred_type + "_labels.csv"
labels = pd.read_csv(label_dir + ll)
labels.rename(columns={l:"label_" + l for l in \
labels.columns if l != "city_district"},\
inplace=True)
# print (labels.head())
return labels
# get_training_data()
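# Example call (the pred_type value is hypothetical; the other arguments
# mirror the commented-out defaults at the top of this file):
# df = get_training_data(network_type="vgg16_4096", PCA_components=64,
#                        LABELING_METHOD="maximum", AVERAGING_METHOD="kaist",
#                        pred_type="income", normalize=False)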
|
[
"sanja.scepanovic@nokia-bell-labs.com"
] |
sanja.scepanovic@nokia-bell-labs.com
|
9db06d452195f3e4a3b11e54e8ddb1f4fad30f3b
|
15445418c3df26032a5b715b77aa9073d8ff76c5
|
/Data_Structures_and_Algorithms_in_Python/06_12_Quiz_Grapth_Traversal_Practice.py
|
39684fe33c092aa41af2188a586eaac6893409c6
|
[] |
no_license
|
DenisWilsonDev/Courses
|
441d9cc14f05065eb41e47167ea28b77a4b76227
|
fd668d1122dd1eef5834e6510004279b385ff4df
|
refs/heads/master
| 2020-08-24T11:29:46.199368
| 2019-11-05T13:07:38
| 2019-11-05T13:07:38
| 216,817,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,995
|
py
|
class Node(object):
def __init__(self, value):
self.value = value
self.edges = []
self.visited = False
class Edge(object):
def __init__(self, value, node_from, node_to):
self.value = value
self.node_from = node_from
self.node_to = node_to
# You only need to change code with docs strings that have TODO.
# Specifically: Graph.dfs_helper and Graph.bfs
# New methods have been added to associate node numbers with names
# Specifically: Graph.set_node_names
# and the methods ending in "_names" which will print names instead
# of node numbers
class Graph(object):
def __init__(self, nodes=None, edges=None):
self.nodes = nodes or []
self.edges = edges or []
self.node_names = []
self._node_map = {}
def set_node_names(self, names):
"""The Nth name in names should correspond to node number N.
Node numbers are 0 based (starting at 0).
"""
self.node_names = list(names)
def insert_node(self, new_node_val):
"Insert a new node with value new_node_val"
new_node = Node(new_node_val)
self.nodes.append(new_node)
self._node_map[new_node_val] = new_node
return new_node
def insert_edge(self, new_edge_val, node_from_val, node_to_val):
"Insert a new edge, creating new nodes if necessary"
nodes = {node_from_val: None, node_to_val: None}
for node in self.nodes:
if node.value in nodes:
nodes[node.value] = node
if all(nodes.values()):
break
for node_val in nodes:
nodes[node_val] = nodes[node_val] or self.insert_node(node_val)
node_from = nodes[node_from_val]
node_to = nodes[node_to_val]
new_edge = Edge(new_edge_val, node_from, node_to)
node_from.edges.append(new_edge)
node_to.edges.append(new_edge)
self.edges.append(new_edge)
def get_edge_list(self):
"""Return a list of triples that looks like this:
(Edge Value, From Node, To Node)"""
return [(e.value, e.node_from.value, e.node_to.value)
for e in self.edges]
def get_edge_list_names(self):
"""Return a list of triples that looks like this:
(Edge Value, From Node Name, To Node Name)"""
return [(edge.value,
self.node_names[edge.node_from.value],
self.node_names[edge.node_to.value])
for edge in self.edges]
def get_adjacency_list(self):
"""Return a list of lists.
        The indices of the outer list represent "from" nodes.
Each section in the list will store a list
of tuples that looks like this:
(To Node, Edge Value)"""
max_index = self.find_max_index()
adjacency_list = [[] for _ in range(max_index)]
for edg in self.edges:
from_value, to_value = edg.node_from.value, edg.node_to.value
adjacency_list[from_value].append((to_value, edg.value))
return [a or None for a in adjacency_list] # replace []'s with None
def get_adjacency_list_names(self):
"""Each section in the list will store a list
of tuples that looks like this:
(To Node Name, Edge Value).
Node names should come from the names set
with set_node_names."""
adjacency_list = self.get_adjacency_list()
def convert_to_names(pair, graph=self):
node_number, value = pair
return (graph.node_names[node_number], value)
def map_conversion(adjacency_list_for_node):
if adjacency_list_for_node is None:
return None
return map(convert_to_names, adjacency_list_for_node)
return [map_conversion(adjacency_list_for_node)
for adjacency_list_for_node in adjacency_list]
def get_adjacency_matrix(self):
"""Return a matrix, or 2D list.
Row numbers represent from nodes,
column numbers represent to nodes.
Store the edge values in each spot,
and a 0 if no edge exists."""
max_index = self.find_max_index()
adjacency_matrix = [[0] * (max_index) for _ in range(max_index)]
for edg in self.edges:
from_index, to_index = edg.node_from.value, edg.node_to.value
adjacency_matrix[from_index][to_index] = edg.value
return adjacency_matrix
def find_max_index(self):
"""Return the highest found node number
Or the length of the node names if set with set_node_names()."""
if len(self.node_names) > 0:
return len(self.node_names)
max_index = -1
if len(self.nodes):
for node in self.nodes:
if node.value > max_index:
max_index = node.value
return max_index
def find_node(self, node_number):
"Return the node with value node_number or None"
return self._node_map.get(node_number)
def _clear_visited(self):
for node in self.nodes:
node.visited = False
def dfs_helper(self, start_node):
"""TODO: Write the helper function for a recursive implementation
of Depth First Search iterating through a node's edges. The
output should be a list of numbers corresponding to the
values of the traversed nodes.
ARGUMENTS: start_node is the starting Node
MODIFIES: the value of the visited property of nodes in self.nodes
RETURN: a list of the traversed node values (integers).
"""
ret_list = [start_node.value]
# Your code here
start_node.visited = True
for edge in start_node.edges:
if edge.node_from == start_node:
if not edge.node_to.visited:
ret_list += self.dfs_helper(edge.node_to)
return ret_list
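        # The recursion yields a preorder walk: each node's value is recorded
        # before any of its unvisited outgoing neighbours are explored.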
def dfs(self, start_node_num):
"""Outputs a list of numbers corresponding to the traversed nodes
in a Depth First Search.
ARGUMENTS: start_node_num is the starting node number (integer)
MODIFIES: the value of the visited property of nodes in self.nodes
RETURN: a list of the node values (integers)."""
self._clear_visited()
start_node = self.find_node(start_node_num)
return self.dfs_helper(start_node)
def dfs_names(self, start_node_num):
"""Return the results of dfs with numbers converted to names."""
return [self.node_names[num] for num in self.dfs(start_node_num)]
def bfs(self, start_node_num):
"""TODO: Create an iterative implementation of Breadth First Search
iterating through a node's edges. The output should be a list of
numbers corresponding to the traversed nodes.
ARGUMENTS: start_node_num is the node number (integer)
MODIFIES: the value of the visited property of nodes in self.nodes
RETURN: a list of the node values (integers)."""
        node = self.find_node(start_node_num)
        self._clear_visited()
        ret_list = [node.value]
        # Your code here
        node.visited = True
        queue = [node]
        while len(queue) > 0:
            node = queue.pop(0)
            for edge in node.edges:
                if edge.node_from == node and not edge.node_to.visited:
                    edge.node_to.visited = True
                    ret_list.append(edge.node_to.value)
                    queue.append(edge.node_to)
        return ret_list
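        # Using the list as a FIFO queue (pop(0)) gives level order: every
        # node at distance k from the start is emitted before any node at
        # distance k + 1.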
def bfs_names(self, start_node_num):
"""Return the results of bfs with numbers converted to names."""
return [self.node_names[num] for num in self.bfs(start_node_num)]
graph = Graph()
# You do not need to change anything below this line.
# You only need to implement Graph.dfs_helper and Graph.bfs
graph.set_node_names(('Mountain View', # 0
'San Francisco', # 1
'London', # 2
'Shanghai', # 3
'Berlin', # 4
'Sao Paolo', # 5
'Bangalore')) # 6
graph.insert_edge(51, 0, 1) # MV <-> SF
graph.insert_edge(51, 1, 0) # SF <-> MV
graph.insert_edge(9950, 0, 3) # MV <-> Shanghai
graph.insert_edge(9950, 3, 0) # Shanghai <-> MV
graph.insert_edge(10375, 0, 5) # MV <-> Sao Paolo
graph.insert_edge(10375, 5, 0) # Sao Paolo <-> MV
graph.insert_edge(9900, 1, 3) # SF <-> Shanghai
graph.insert_edge(9900, 3, 1) # Shanghai <-> SF
graph.insert_edge(9130, 1, 4) # SF <-> Berlin
graph.insert_edge(9130, 4, 1) # Berlin <-> SF
graph.insert_edge(9217, 2, 3) # London <-> Shanghai
graph.insert_edge(9217, 3, 2) # Shanghai <-> London
graph.insert_edge(932, 2, 4) # London <-> Berlin
graph.insert_edge(932, 4, 2) # Berlin <-> London
graph.insert_edge(9471, 2, 5) # London <-> Sao Paolo
graph.insert_edge(9471, 5, 2) # Sao Paolo <-> London
# (6) 'Bangalore' is intentionally disconnected (no edges)
# for this problem and should produce None in the
# Adjacency List, etc.
import pprint
pp = pprint.PrettyPrinter(indent=2)
print "Edge List"
pp.pprint(graph.get_edge_list_names())
print "\nAdjacency List"
pp.pprint(graph.get_adjacency_list_names())
print "\nAdjacency Matrix"
pp.pprint(graph.get_adjacency_matrix())
print "\nDepth First Search"
pp.pprint(graph.dfs_names(2))
# Should print:
# Depth First Search
# ['London', 'Shanghai', 'Mountain View', 'San Francisco', 'Berlin', 'Sao Paolo']
print "\nBreadth First Search"
pp.pprint(graph.bfs_names(2))
# test error reporting
# pp.pprint(['Sao Paolo', 'Mountain View', 'San Francisco', 'London', 'Shanghai', 'Berlin'])
# Should print:
# Breadth First Search
# ['London', 'Shanghai', 'Berlin', 'Sao Paolo', 'Mountain View', 'San Francisco']
|
[
"dev@deniswilson.ru"
] |
dev@deniswilson.ru
|
6b80c599f791e236d4a4c9241f42382c94f932bd
|
77be118e15e523834b1095ab8dfb45e4a66afa52
|
/blog/migrations/0001_initial.py
|
8a7d7efac15670e7d57609e0aa9f0dee33a1b487
|
[] |
no_license
|
rs07/my-first-blog
|
0c7fbbc6d9b389a350c5d2af71f7f18fe9525ff0
|
dd3babfcbce70035e7aecdfbf37728086160c1cb
|
refs/heads/master
| 2021-09-02T07:35:34.067644
| 2017-12-31T14:40:53
| 2017-12-31T14:40:53
| 115,858,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-31 10:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"superrishabh0@gmail.com"
] |
superrishabh0@gmail.com
|
d04b346b78f0c5154d23427b65333dae9753261a
|
01d2db8a3c5d0ed96c3da3af71e928b1276f7f43
|
/src/main/python/generate_tweet.py
|
5a01f81f5f4e7396f3cd314f2a8aeba22d8348a3
|
[] |
no_license
|
tobakk/twitter-ai
|
6c78413e904f973378c634a476adb499251dab08
|
a8a20f7aed8c75a37eddc68aed44a4a7b3cc05f9
|
refs/heads/master
| 2022-06-05T02:24:00.756355
| 2020-05-04T11:19:25
| 2020-05-04T11:19:25
| 261,021,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
import random
import sys
import numpy as np
import tensorflow as tf
from lib import build_dataset, read_training_data, to_string
data_path = 'data/'
training_file = data_path + 'data.txt'
model_file = data_path + 'model.h5'
n_input = 10
model = tf.keras.models.load_model(
    model_file, custom_objects=None, compile=True
)
training_data = read_training_data(training_file)
dictionary, reverse_dictionary = build_dataset(training_data)
def generate_tweet(model, word_id_arr, number_of_words=20):
out = []
words = list(word_id_arr.copy())
for i in range(number_of_words):
keys = np.reshape(np.array(words), [-1, n_input])
onehot_pred = model(keys).numpy()[0]
pred_index = onehot_pred.argmax(axis=1)
pred = pred_index[-1]
out.append(pred)
words = words[1:]
words.append(pred)
sentence = to_string(out, reverse_dictionary)
return sentence
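# generate_tweet works on a sliding window: each step feeds the last n_input
# word ids to the model, takes the argmax prediction for the final position,
# appends it to the output, and drops the oldest id so the window length
# stays fixed at n_input.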
words = random.choices(training_data, k=n_input)
symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))]
print(generate_tweet(model, symbols_in_keys))
sys.exit(0)
|
[
"17847628+tobakk@users.noreply.github.com"
] |
17847628+tobakk@users.noreply.github.com
|
6d845d03bb3bfd7ccce2e4fe0ca367421bebae5c
|
1378bb34a1c6e2f686eac54f62d6adba37acd5e4
|
/config.py
|
ce61fbe9f4f9577fdaa81f0f13ccc25e06ff9603
|
[] |
no_license
|
colabearwd/final_demo
|
2ae5c36608b576181b544d7897acf651f82db361
|
abf656bf772480d33882aeebabd79c0585c21188
|
refs/heads/master
| 2020-03-27T19:34:40.380456
| 2018-09-19T11:46:08
| 2018-09-19T11:46:08
| 146,998,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# encoding: utf-8
import os
DEBUG = True
SECRET_KEY = os.urandom(24)
DIALECT = 'mysql'
DRIVER = 'mysqldb'
USERNAME = 'root'
PASSWORD = 'root'
HOST = 'localhost'
PORT = '3306'
DATABASE = 'final_demo'
SQLALCHEMY_DATABASE_URI = "{}+{}://{}:{}@{}:{}/{}?charset=utf8".format(DIALECT,DRIVER,USERNAME,
PASSWORD,HOST,PORT,DATABASE)
SQLALCHEMY_TRACK_MODIFICATIONS = False
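# With the values above, the assembled connection string is:
#   mysql+mysqldb://root:root@localhost:3306/final_demo?charset=utf8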
|
[
"wz_jxnu@163.com"
] |
wz_jxnu@163.com
|
cc722694f4d5c2b3b5cb507e5fabb0250dacb484
|
0b933ccc03c117cfe599fe064a34714fa56ee16b
|
/reachable.py
|
2805c0ce91cc534a34bb2bb5297dd60670385a2f
|
[] |
no_license
|
fanbbs/ori_coop_server
|
c2e15a2b77d494ca5a95582bcf1903b2d5d19354
|
ab0f92e03b3d3430168f41af85a934bbd1aa0f66
|
refs/heads/master
| 2020-03-08T21:45:59.384983
| 2018-04-05T22:55:53
| 2018-04-05T22:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
import xml.etree.ElementTree as XML
from collections import defaultdict, Counter
class PlayerState(object):
name_from_id = {
("SK",0): 'Bash', ("SK",2): 'ChargeFlame', ("SK",3): 'WallJump', ("SK",4): 'Stomp', ("SK",5): 'DoubleJump',
("SK",8): 'ChargeJump', ("SK",12): 'Climb', ("SK",14): 'Glide', ("SK",50): 'Dash', ("SK",51): 'Grenade',
("EV", 0): 'GinsoKey', ("EV", 1): 'Water', ("EV", 2): 'ForlornKey', ("EV", 3): 'Wind', ("EV", 4): 'HoruKey',
("TP","Swamp"): 'TPSwamp', ("TP","Grove"): 'TPGrove', ("TP","Valley"): 'TPValley',
("TP","Grotto"): 'TPGrotto', ("TP","Forlorn"): 'TPForlorn', ("TP","Sorrow"): 'TPSorrow'
}
def __init__(self):
self.has = Counter()
@staticmethod
def from_player(player):
inst = PlayerState()
inst.build_from_codes([(h.pickup_code, h.pickup_id, h.removed) for h in player.history])
return inst
@staticmethod
def from_codes(codes):
inst = PlayerState()
inst.build_from_codes(codes)
return inst
def build_from_codes(self, pickinfos):
wv = ss = gs = 0
for code,id,removed in pickinfos:
if code in ["EX", "AC"]:
continue
id = id if code=="TP" else int(id)
if (code,id) in PlayerState.name_from_id:
self.has[PlayerState.name_from_id[(code,id)]] = (0 if removed else 1)
elif code == "RB":
if id == 17:
wv += (-1 if removed else 1)
elif id == 19:
gs += (-1 if removed else 1)
if id == 21:
ss += (-1 if removed else 1)
elif code in ["HC","EC","KS", "MS"]:
self.has[code] += (-id if removed else id)
if wv >= 3:
self.has['GinsoKey'] = 1
if gs >= 3:
self.has['ForlornKey'] = 1
if ss >= 3:
self.has['HoruKey'] = 1
print self.has["MS"]
class Area(object):
def __init__(self, name):
self.name = name
self.conns = []
def get_reachable(self, state, modes):
return [conn.target for conn in self.conns if conn.is_active(state, modes)]
class Connection(object):
def __init__(self, target):
self.target = target
self.reqs = defaultdict(list)
def is_active(self, state, modes):
for mode in modes:
for reqs in self.reqs[mode]:
if not reqs.cnt - state.has:
return True
return False
class Requirement(object):
def __init__(self, raw):
self.cnt = Counter([r for r in raw.split('+') if r != "Free"])
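    # Example: Requirement("WallJump+KS+KS").cnt == Counter({'KS': 2, 'WallJump': 1});
    # "Free" on its own yields an empty Counter, which subtracts to empty against
    # any state, so the connection is always open. ("WallJump" is illustrative;
    # the real tokens come from seedbuilder/areas.xml.)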
class Map(object):
areas = {}
@staticmethod
def build():
tree = XML.parse("seedbuilder/areas.xml")
root = tree.getroot()
for child in root:
area = Area(child.attrib["name"])
for c in child.find("Connections"):
conn = Connection(c.find("Target").attrib["name"])
for req in c.find("Requirements"):
conn.reqs[req.attrib["mode"]].append(Requirement(req.text))
area.conns.append(conn)
Map.areas[area.name] = area
@staticmethod
def get_reachable_areas(state, modes):
if not Map.areas:
Map.build()
unchecked_areas = set(["SunkenGladesRunaway"])
checked_areas = set()
while(len(unchecked_areas) > 0):
curr = unchecked_areas.pop()
checked_areas.add(curr)
unchecked_areas |= set([r for r in Map.areas[curr].get_reachable(state, modes) if r not in checked_areas])
mapstone_cnt = min(len([a for a in checked_areas if "MapStone" in a]), state.has["MS"])
if mapstone_cnt == 9 and state.has["MS"] < 11:
mapstone_cnt -= 1
if mapstone_cnt == 8 and state.has["MS"] < 9:
mapstone_cnt -= 1
ms_areas = ["MS%s"%i for i in range(1,mapstone_cnt +1) ]
print ms_areas
return list(checked_areas) + ms_areas
|
[
"worst.mirari@gmail.com"
] |
worst.mirari@gmail.com
|
ab3ac1779fcfb178dd41e6ab57196771e8196e5d
|
0f6eaef1598e32193fd553a00f1c53e9aa367614
|
/exercicios_capitulo4/cube.py
|
7c4611b35eadd778664bb4230852ce46f8ee1d7c
|
[] |
no_license
|
IfDougelseSa/cursoIntensivoPython
|
72be288cfd3314e084374abed02d2c9075e7bf3b
|
c3da310488bec268fb6db416c6c8d54164a180a9
|
refs/heads/main
| 2023-06-11T19:46:20.066751
| 2021-07-06T13:07:58
| 2021-07-06T13:07:58
| 365,917,050
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
cubes = [cube**3 for cube in range(1, 10)]
print(cubes)
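# prints: [1, 8, 27, 64, 125, 216, 343, 512, 729]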
|
[
"doug_ccortez@outlook.com"
] |
doug_ccortez@outlook.com
|
77b7bc3323f62d7bb1fc73cbf1d52dcb5d3e932c
|
4bf050eea24a8144af61438db12845432d2f73a5
|
/strings/split_join.py
|
93528c565c294617c0e950d656f97c366f450678
|
[] |
no_license
|
Yzdesarrollo/FundamentosPython
|
43228eae6fe37770ff500b67203703bb2804c636
|
16ca8c36bb688e0ec29cf638ae6f64fa79e2f3f6
|
refs/heads/master
| 2022-10-10T16:41:28.373987
| 2020-06-11T16:33:24
| 2020-06-11T16:33:24
| 271,595,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
lenguajes = "Python; Java; Ruby; PHP; Swift; Javascript; C#; C; C++"
# resultado = lenguajes.split() # we can build a list from a string
# print(resultado)
separador = "; "
resultado = lenguajes.split(separador) # we can build a list from a string
# print(resultado)
nuevo_string = "_".join(resultado) # we can build a string from a list
print(resultado)
print(nuevo_string)
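# nuevo_string is now "Python_Java_Ruby_PHP_Swift_Javascript_C#_C_C++"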
# texto = """Este es un texto
# con
# saltos
# de
# linea"""
# resultado = texto.splitlines() # Podemos generar una lista apartir de un texto
# print(resultado)
|
[
"yeison.ariel@accenture.com"
] |
yeison.ariel@accenture.com
|
08bad906274b22b88b6b3513859e43028c45c6da
|
20953bded8c44d09fea22dab32d269e50ea1d35e
|
/pset4/ps4a.py
|
53ea171fb2c8371f9df2d94e7a5a3898b0e61c37
|
[] |
no_license
|
utamhank1/MIT_6.0001x
|
e417ebfd9fecbcad9f9223b2f48be97809e44bcf
|
927aab8c51dd8c4a7019ab6943aab6b4cb226c48
|
refs/heads/master
| 2020-12-29T21:24:23.465373
| 2020-02-06T16:54:43
| 2020-02-06T16:54:43
| 238,736,789
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
# Problem Set 4A
# Name: Ujjwal Tamhankar
# Collaborators: none
# Time Spent: ~ 5hrs
def get_permutations(sequence):
'''
Enumerate all permutations of a given string
sequence (string): an arbitrary string to permute. Assume that it is a
non-empty string.
You MUST use recursion for this part. Non-recursive solutions will not be
accepted.
Returns: a list of all permutations of sequence
Example:
>>> get_permutations('abc')
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: depending on your implementation, you may return the permutations in
a different order than what is listed here.
'''
# Base case, if the sequence length = 1, return that value.
if len(sequence) == 1:
return[sequence]
# If the sequence length is greater than 1, enumerate the sequence and iterate over each piece.
    # Call get_permutations() on each remaining piece and iteratively compose
    # the 'permutations' list, then return the final permutations list.
else:
permutations = []
for i, counter in enumerate(sequence):
for j in get_permutations(sequence[:i] + sequence[i + 1:]):
permutations = permutations + [counter + j]
return permutations
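        # Each loop pass fixes sequence[i] as the leading character and permutes
        # the remainder, so an n-character input yields n! results (including
        # duplicates when the input has repeated characters).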
if __name__ == '__main__':
# #EXAMPLE
# example_input = 'abc'
# print('Input:', example_input)
# print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
# print('Actual Output:', get_permutations(example_input))
# # Put three example test cases here (for your sanity, limit your inputs
# to be three characters or fewer as you will have n! permutations for a
# sequence of length n)
# Test case 1:
sequence = 'a'
output = get_permutations(sequence)
expected_output = ['a']
print(f'Input = {sequence}, Output = {output} ')
print(f'Expected Output: {expected_output}')
if output == expected_output:
print('\nSingle Letter Permutation Test Passed!\n')
else:
print('\nSingle Letter Permutation Test Failed!')
# Test case 2:
sequence = 'ab'
output = get_permutations(sequence)
expected_output = ['ab', 'ba']
print(f'Input = {sequence}, Output = {output} ')
print(f'Expected Output: {expected_output}')
if output == expected_output:
print('\nDouble Letter Permutation Test Passed!\n')
else:
print('\nDouble Letter Permutation Test Failed!\n')
# Test case 3:
sequence = '123'
output = get_permutations(sequence)
expected_output = ['123', '132', '213', '231', '312', '321']
print(f'Input = {sequence}, Output = {output} ')
print(f'Expected Output: {expected_output}')
if output == expected_output:
print('\nTriple Number Permutation Test Passed!\n')
else:
print('\nTriple Number Permutation Test Failed!\n')
|
[
"utamhank1@gmail.com"
] |
utamhank1@gmail.com
|
7bdf0ae10d88057b630ada47ec7a4bbc25e4837a
|
1fe03131ad139e2415fd0c0c73697b4541e5b862
|
/.history/src/_fighter_20190422144508.py
|
b8423e2a178b0c458e0f63ee9a1a09d7fe1b40f4
|
[
"MIT"
] |
permissive
|
vidalmatheus/pyKombat
|
d83175a7a952663e278a8247d43349f87192fde3
|
6646020c59367ba0424d73a5861e13bbc0daac1f
|
refs/heads/master
| 2021-06-20T09:35:07.950596
| 2020-08-06T14:08:13
| 2020-08-06T14:08:13
| 172,716,161
| 1
| 1
|
MIT
| 2019-12-25T10:54:10
| 2019-02-26T13:24:31
|
Python
|
UTF-8
|
Python
| false
| false
| 38,909
|
py
|
from pygame_functions import *
import fightScene
import engine
import menu
class Fighter:
fighterNames = ["Sub-Zero", "Scorpion"]
fightMoves = [["w", "s", "a", "d"], ["up", "down", "left", "right"]]
combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]]
danceLimit = 7
walkLimit = 9
jumpLimit = 3
crouchLimit = 3
punchLimit = [3, 11, 3, 5, 3]
kickLimit = [7, 9, 7, 6, 3]
hitLimit = [3, 3, 6, 2, 3, 14, 11, 10]
blockLimit = 3
specialLimit = [4,7]
victoryLimit = 3
fatalityLimit = 20
dizzyLimit = 7
    # index constants
    # moves
    dance = 0
    walk = 1
    jump = 2
    crouch = 3
    # punches
    Apunch = 4 # weak punch
    Bpunch = 5 # strong punch
    Cpunch = 6 # weak crouching punch
    Dpunch = 7 # strong crouching punch: uppercut
    # kicks
    Akick = 8 # weak kick
    Bkick = 9 # strong kick
    Ckick = 10 # weak crouching kick
    Dkick = 11 # strong crouching kick: sweep
    # hits
    Ahit = 12 # hit by a weak punch
    Bhit = 13 # hit by a weak kick
    Chit = 14 # hit by a strong punch
    Dhit = 15 # hit by a weak crouching punch
    Ehit = 16 # hit by a weak crouching kick or punch
    Fhit = 17 # hit by a strong kick or a strong crouching punch (uppercut)
    Ghit = 18 # hit by a strong crouching kick: sweep
#Hhit = 19 # specialMove
#fatalityHit = 20 # fatality hit
# block
Ablock = 19
Bblock = 20
# special move
special = 21
# fatality
fatality = 24
def __init__(self, id, scenario):
self.fighterId = id
self.name = self.fighterNames[id]
self.move = self.fightMoves[id]
self.combat = self.combatMoves[id]
# Position
self.x = 150+id*500
if scenario == 1:
self.y = 350
elif scenario == 2:
self.y = 370
elif scenario == 3:
self.y = 400
elif scenario == 4:
self.y = 370
elif scenario == 5:
self.y = 380
elif scenario == 6:
self.y = 380
elif scenario == 7:
self.y = 360
elif scenario == 8:
self.y = 395
# Loading sprites
self.spriteList = []
# moves
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/dance.png', self.danceLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/walk.png', self.walkLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/jump.png', self.jumpLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/crouch.png', self.crouchLimit))
# Punch sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Apunch.png', self.punchLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bpunch.png', self.punchLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Cpunch.png', self.punchLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dpunch.png', self.punchLimit[3]))
# Kick sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Akick.png', self.kickLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bkick.png', self.kickLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ckick.png', self.kickLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dkick.png', self.kickLimit[3]))
# Hit sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ahit.png', self.hitLimit[0])) # soco fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bhit.png', self.hitLimit[1])) # chute fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Chit.png', self.hitLimit[2])) # soco forte
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dhit.png', self.hitLimit[3])) # chute agrachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ehit.png', self.hitLimit[4])) # soco agachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Fhit.png', self.hitLimit[5])) # chute forte e soco forte agachado (gancho)
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ghit.png', self.hitLimit[6])) # chute agachado forte: banda
#self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Hhit.png', self.hitLimit[7])) # specialMove
# blocking sprites
        self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ablock.png', self.blockLimit)) # standing block
        self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bblock.png', self.blockLimit)) # crouching block
        # special sprite ----------------------------------
        self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Special.png', self.specialLimit[self.fighterId])) # Special
self.act()
def act(self):
# Combat control
combat = False
block = False
alive = False
fatality = False
dizzyCounter = 1
dizzyCounterAux = 1
fatalityCounter = 8
fatalityCounterAux = 1
# Control reflection var
reflection = False
# Dance vars
self.dancing = True
self.frame_dance = 0
self.dance_step = 1
# Walk vars
self.frame_walk = 0
        self.walking = False # status flag
        # Jump vars
        self.jumpHeight = 10 # jump height
        self.jumpCounter = 1 # counter tracking the rise and fall of the jump
        self.jumping = False # status flag
        self.frame_jumping = 0
        self.jump_step = 1
        self.end_jump = True
        # Crouch vars
        self.crouching = False # status flag
self.frame_crouching = 0
self.crouch_step = 1
# Punch vars
self.Apunching = False
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
self.Bpunching = False
self.frame_Bpunching = 0
self.Bpunch_step = 1
self.end_Bpunch = True
self.Cpunching = False
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
self.Dpunching = False
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# Kick vars
self.Akicking = False
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
self.Bkicking = False
self.frame_Bkicking = 0
self.Bkick_step = 1
self.end_Bkick = True
self.Ckicking = False
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
self.Dkicking = False
self.frame_Dkicking = 0
self.Dkick_step = 1
self.end_Dkick = True
# Blocking vars
self.Ablocking = False
self.frame_Ablocking = 0
self.Ablock_step = 1
self.Bblocking = False
self.frame_Bblocking = 0
self.Bblock_step = 1
# Special vars
self.specialMove = False
self.end_special = True
self.frame_special = 0
self.special_step = 1
# Hit vars
self.hit = False
self.downHit = False
self.hitName = ""
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.Hhitting = False
self.frame_Ahit = 0
self.frame_Bhit = 0
self.frame_Chit = 0
self.frame_Dhit = 0
self.frame_Ehit = 0
self.frame_Fhit = 0
self.frame_Ghit = 0
self.frame_Hhit = 0
self.hit_step = 1
# Life Vars
X_inicio = 37
X_atual = X_inicio
X_fim = X_inicio + 327
self.posFighter()
def fight(self, time, nextFrame):
frame_step = 60
if not self.jumping:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if keyPressed(self.move[0]) and not self.hit:
self.jumping = True
self.end_jump = False
self.curr_sprite = self.spriteList[self.jump]
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> right
elif keyPressed(self.move[3]) and not self.hit:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x += 6
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
# so the modulus 9 allows it to loop
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> left
            elif keyPressed(self.move[2]) and not self.hit:  # SECOND CHANGE: and not self.jumping
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x -= 6
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.walkLimit-1-self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
elif (keyPressed(self.move[1]) and not self.hit) or self.downHit:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit:
self.curr_sprite = self.spriteList[self.crouch]
self.crouching = self.setState()
self.setEndState()
if time > nextFrame:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit:
moveSprite(self.spriteList[self.crouch], self.x, self.y, True)
self.setSprite(self.spriteList[self.crouch])
changeSpriteImage(self.spriteList[self.crouch], self.frame_crouching)
self.frame_crouching = (self.frame_crouching+self.crouch_step) % self.crouchLimit
if self.frame_crouching == self.crouchLimit - 2:
self.crouch_step = 0
# reset block (hold type)
                self.frame_Bblocking = 0
                self.Bblock_step = 1
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and jab
if ( (keyPressed(self.combat[0]) and self.end_Cpunch) or (not self.end_Cpunch) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Cpunch]
self.Cpunching = self.setState()
self.setEndState()
self.end_Cpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Cpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Cpunch])
changeSpriteImage(self.spriteList[self.Cpunch], self.frame_Cpunching)
self.frame_Cpunching = (self.frame_Cpunching+self.Cpunch_step) % (self.punchLimit[2]+1)
if (self.frame_Cpunching == self.punchLimit[2]-1):
self.Cpunch_step = -1
if (self.frame_Cpunching == self.punchLimit[2]):
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Dpunch) or ( not self.end_Dpunch) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Dpunch]
self.Dpunching = self.setState()
self.setEndState()
self.end_Dpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Dpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dpunch])
changeSpriteImage(self.spriteList[self.Dpunch], self.frame_Dpunching)
self.frame_Dpunching = (self.frame_Dpunching+self.Dpunch_step) % (self.punchLimit[3]+1)
if (self.frame_Dpunching == self.punchLimit[3]-1):
self.Dpunch_step = -1
if (self.frame_Dpunching == self.punchLimit[3]):
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and kick
elif ( (keyPressed(self.combat[2]) and self.end_Ckick) or ( not self.end_Ckick) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Ckick]
self.Ckicking = self.setState()
self.end_Ckick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ckick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ckick])
changeSpriteImage(self.spriteList[self.Ckick], self.frame_Ckicking)
self.frame_Ckicking = (self.frame_Ckicking+self.Ckick_step) % (self.kickLimit[2]+1)
if (self.frame_Ckicking == self.kickLimit[2]-1):
self.Ckick_step = -1
if (self.frame_Ckicking == self.kickLimit[2]):
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> Crouch and strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Dkick) or ( not self.end_Dkick) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Dkick]
self.Dkicking = self.setState()
self.end_Dkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Dkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dkick])
changeSpriteImage(self.spriteList[self.Dkick], self.frame_Dkicking)
self.frame_Dkicking = (self.frame_Dkicking+self.Dkick_step) % self.kickLimit[3]
if (self.frame_Dkicking == 0):
self.end_Dkick = True
                # -------------- Hits while crouching --------------
                # Hhit = 19 # specialMove
                # BblockHit = 21 # hit while crouching
                # Ehit = 16 # weak crouching kick or punch
elif self.downHit and self.hitName == "Ehit":
self.curr_sprite = self.spriteList[self.Ehit]
self.Ehitting = self.setState()
self.crouching = True
moveSprite(self.spriteList[self.Ehit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ehit])
changeSpriteImage(self.spriteList[self.Ehit], self.frame_Ehit)
if time > nextFrame:
self.frame_Ehit = (self.frame_Ehit+self.hit_step) % self.hitLimit[4]
if (self.frame_Ehit == self.hitLimit[4] - 1):
self.hit_step = -1
if (self.frame_Ehit == 0):
self.hit_step = 1
self.downHit = False
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> defesa em pé
elif keyPressed(self.combat[5]) and not self.hit and not self.downHit:
self.curr_sprite = self.spriteList[self.Bblock]
self.Bblocking = self.setState()
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bblock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bblock])
changeSpriteImage(self.spriteList[self.Bblock], self.frame_Bblocking)
self.frame_Bblocking = (self.frame_Bblocking+self.Bblock_step) % self.blockLimit
                        if self.frame_Bblocking == self.blockLimit - 2:
self.Bblock_step = 0
                # BblockHit = 21 # hit while crouching
elif (self.downHit or self.hit) and self.hitName == "Bblocking":
self.curr_sprite = self.spriteList[self.Bblock]
self.Bblocking = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bblock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bblock])
changeSpriteImage(self.spriteList[self.Bblock], self.frame_Bblocking)
self.frame_Bblocking = (self.frame_Bblocking+self.hit_step) % self.blockLimit
if self.frame_Bblocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Bblocking == 1:
self.hit_step = 1
self.hit = False
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> jab
elif ((keyPressed(self.combat[0]) and self.end_Apunch) or ( not self.end_Apunch) ) and (not self.hit) :
print("flag!")
self.curr_sprite = self.spriteList[self.Apunch]
self.Apunching = self.setState()
self.setEndState()
self.end_Apunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Apunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Apunch])
changeSpriteImage(self.spriteList[self.Apunch], self.frame_Apunching)
self.frame_Apunching = (self.frame_Apunching+self.Apunch_step) % (self.punchLimit[0]+1)
if (self.frame_Apunching == self.punchLimit[0]-1):
self.Apunch_step = -1
if (self.frame_Apunching == self.punchLimit[0]):
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Bpunch) or ( not self.end_Bpunch) ) and (not self.hit) :
self.curr_sprite = self.spriteList[self.Bpunch]
self.Bpunching = self.setState()
self.end_Bpunch = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bpunch])
changeSpriteImage(self.spriteList[self.Bpunch], self.frame_Bpunching)
self.frame_Bpunching = (self.frame_Bpunching+self.Bpunch_step) % self.punchLimit[1]
if (self.frame_Bpunching == 0):
self.end_Bpunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> kick
elif ( (keyPressed(self.combat[2]) and self.end_Akick) or ( not self.end_Akick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Akick]
self.Akicking = self.setState()
self.end_Akick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Akick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Akick])
changeSpriteImage(self.spriteList[self.Akick], self.frame_Akicking)
self.frame_Akicking = (self.frame_Akicking+self.Akick_step) % (self.kickLimit[0]+1)
if (self.frame_Akicking == self.kickLimit[0]-1):
self.Akick_step = -1
if (self.frame_Akicking == self.kickLimit[0]):
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Bkick) or ( not self.end_Bkick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Bkick]
self.Bkicking = self.setState()
self.end_Bkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bkick])
changeSpriteImage(self.spriteList[self.Bkick], self.frame_Bkicking)
self.frame_Bkicking = (self.frame_Bkicking+self.Bkick_step) % self.kickLimit[1]
if (self.frame_Bkicking == 0):
self.end_Bkick = True
nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> standing block
elif keyPressed(self.combat[5]) and not self.hit:
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.Ablock_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 2:
self.Ablock_step = 0
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> special move
elif ((keyPressed(self.combat[4]) and self.end_special) or ( not self.end_special) ) and (not self.hit):
print("SpecialMove")
self.curr_sprite = self.spriteList[self.special]
self.specialMove = self.setState()
self.setEndState()
self.end_special = False
if time > nextFrame:
moveSprite(self.spriteList[self.special], self.x, self.y, True)
self.setSprite(self.spriteList[self.special])
changeSpriteImage(self.spriteList[self.special], self.frame_special)
self.frame_special = (self.frame_special+self.special_step) % (self.specialLimit[self.fighterId]+1)
if (self.frame_special == self.specialLimit[self.fighterId]-1):
self.special_step = -1
if (self.frame_special == self.specialLimit[self.fighterId]):
self.frame_special = 0
self.special_step = 1
self.end_special = True
nextFrame += 1*frame_step
# just dance :)
elif not self.hit:
# reset block (hold type)
self.frame_Ablocking = 0
self.Ablock_step = 1
# reset down (hold type)
self.frame_crouching = 0
self.crouch_step = 1
# reset other movement
self.frame_walk = self.frame_jumping = 0
# reset combat frames
self.frame_Apunching = self.frame_Bpunching = self.frame_Cpunching = self.frame_Dpunching = self.frame_Akicking = self.frame_Bkicking = self.frame_Ckicking = self.frame_Dkicking = 0
self.setEndState()
# start to dance
self.curr_sprite = self.spriteList[self.dance]
self.dancing = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.dance], self.x, self.y, True)
self.setSprite(self.spriteList[self.dance])
changeSpriteImage(self.spriteList[self.dance], self.frame_dance)
self.frame_dance = (self.frame_dance+self.dance_step) % self.danceLimit
if (self.frame_dance == self.danceLimit-1):
self.dance_step = -1
if (self.frame_dance == 0):
self.dance_step = 1
nextFrame += frame_step
            # -------------- Hits while standing --------------
            # Hhit = 19 # specialMove
            # BblockHit = 21 # hit while crouching
            # Ouch! Punch to the face (Ahit = 12 # weak punch)
elif self.hit and self.hitName == "Apunching":
self.curr_sprite = self.spriteList[self.Ahit]
self.Ahitting = self.setState()
moveSprite(self.spriteList[self.Ahit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ahit])
changeSpriteImage(self.spriteList[self.Ahit], self.frame_Ahit)
if time > nextFrame:
self.frame_Ahit = (self.frame_Ahit+self.hit_step) % self.hitLimit[0]
if (self.frame_Ahit == self.hitLimit[0] - 1):
self.hit_step = -1
if (self.frame_Ahit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            # Ouch! Kick to the face (Bhit = 13 # weak kick)
elif self.hit and self.hitName == "Akicking":
self.curr_sprite = self.spriteList[self.Bhit]
self.Bhitting = self.setState()
if self.fighterId == 0:
self.x -=0.8
else: self.x +=0.8
moveSprite(self.spriteList[self.Bhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bhit])
changeSpriteImage(self.spriteList[self.Bhit], self.frame_Bhit)
if time > nextFrame:
# There are 8 frames of animation in each direction
self.frame_Bhit = (self.frame_Bhit+self.hit_step) % self.hitLimit[1]
if (self.frame_Bhit == self.hitLimit[1] - 1):
self.hit_step = -1
if (self.frame_Bhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            # Ouch! Combo punch (Chit = 14 # strong punch)
elif self.hit and self.hitName == "Bpunching":
self.curr_sprite = self.spriteList[self.Chit]
self.Chitting = self.setState()
if self.fighterId == 0:
self.x -=2
else: self.x +=2
moveSprite(self.spriteList[self.Chit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Chit])
changeSpriteImage(self.spriteList[self.Chit], self.frame_Chit)
if time > nextFrame:
self.frame_Chit = (self.frame_Chit+self.hit_step) % self.hitLimit[2]
if (self.frame_Chit == self.hitLimit[2] - 1):
self.hit_step = -1
if (self.frame_Chit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            # Dhit = 15 # weak crouching punch
elif self.hit and self.hitName == "Cpunching":
self.curr_sprite = self.spriteList[self.Dhit]
self.Dhitting = self.setState()
moveSprite(self.spriteList[self.Dhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dhit])
changeSpriteImage(self.spriteList[self.Dhit], self.frame_Dhit)
if time > nextFrame:
self.frame_Dhit = (self.frame_Dhit+self.hit_step) % self.hitLimit[3]
if (self.frame_Dhit == self.hitLimit[3] - 1):
self.hit_step = -1
if (self.frame_Dhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            # Fhit = 17 # strong kick and strong crouching punch (uppercut)
elif self.hit and self.hitName == "Bkicking":
self.curr_sprite = self.spriteList[self.Fhit]
self.Fhitting = self.setState()
if self.frame_Fhit <= 6:
if self.fighterId == 0:
self.x -=5
else: self.x +=5
moveSprite(self.spriteList[self.Fhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Fhit])
changeSpriteImage(self.spriteList[self.Fhit], self.frame_Fhit)
if time > nextFrame:
self.frame_Fhit = (self.frame_Fhit+self.hit_step) % self.hitLimit[5]
if (self.frame_Fhit == self.hitLimit[5] - 1):
self.hit = False
nextFrame += 1.2*frame_step
            # Ghit = 18 # strong crouching kick: sweep
elif self.hit and self.hitName == "Dkicking":
self.curr_sprite = self.spriteList[self.Ghit]
self.Ghitting = self.setState()
moveSprite(self.spriteList[self.Ghit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ghit])
changeSpriteImage(self.spriteList[self.Ghit], self.frame_Ghit)
if time > nextFrame:
self.frame_Ghit = (self.frame_Ghit+self.hit_step) % self.hitLimit[6]
if (self.frame_Ghit == self.hitLimit[6] - 1):
self.hit = False
nextFrame += 1.2*frame_step
            # blockHit! Standing block.
elif self.hit and self.hitName == "Ablocking":
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.hit_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Ablocking == 1:
self.hit_step = 1
self.hit = False
nextFrame += 1*frame_step
else:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if time > nextFrame:
if keyPressed(self.move[2]):
self.x -= 15
if keyPressed(self.move[3]):
self.x += 15
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.setSprite(self.spriteList[self.jump])
self.y -= (self.jumpHeight-self.jumpCounter)*7
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
                if (self.jumpCounter < self.jumpHeight -1 or self.jumpCounter > self.jumpHeight +1): # rising or falling
self.frame_jumping = 1
                if (self.jumpHeight - 1 <= self.jumpCounter <= self.jumpHeight + 1): # nearly stationary
self.frame_jumping = 2
if (self.jumpCounter == 2*self.jumpHeight-1):
self.frame_jumping = 0
self.jumpCounter = -1
if clock() > nextFrame:
self.setSprite(self.spriteList[self.jump])
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
                self.end_jump = self.setState()  # CHANGE
                self.jumping = self.setEndState()  # CHANGE
self.jumpCounter += 2
nextFrame += 1*frame_step
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
tick(120)
return nextFrame
def getX(self):
return self.x
def getY(self):
return self.y
def setX(self,X):
self.x = X
moveSprite(self.curr_sprite,self.x,self.y,True)
def setY(self,Y):
self.y = Y
moveSprite(self.curr_sprite,self.x,self.y,True)
def isWalking(self):
return self.walking
def isCrouching(self):
return self.crouching
def isDancing(self):
return self.dancing
def isApunching(self):
return self.Apunching
def isBpunching(self):
return self.Bpunching
def isCpunching(self):
return self.Cpunching
def isDpunching(self):
return self.Dpunching
def isAkicking(self):
return self.Akicking
def isBkicking(self):
return self.Bkicking
def isCkicking(self):
return self.Ckicking
def isDkicking(self):
return self.Dkicking
def isAblocking(self):
return self.Ablocking
def isHit(self):
return self.hit
def killPlayer(self):
for i in range(0,len(self.spriteList)):
killSprite(self.spriteList[i])
def currentSprite(self):
return self.curr_sprite
def takeHit(self,by):
self.hit = True
self.hitName = by
def takeDownHit(self,by):
self.downHit = True
print("flag")
self.hitName = by
def stopHit(self):
self.hit = False
self.hitName = ""
def setState(self):
# moves
self.walking = False
self.dancing = False
self.jumping = False
self.crouching = False
# punches
self.Apunching = False
self.Bpunching = False
self.Cpunching = False
self.Dpunching = False
# kicks
self.Akicking = False
self.Bkicking = False
self.Ckicking = False
self.Dkicking = False
# punch hits
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.Hhitting = False
# blocks
self.Ablocking = False
self.Bblocking = False
# special move
self.specialMove = False
# fatality
self.fatality = False
# actual states
return True
def setEndState(self):
self.end_jump = True
self.end_Apunch = True
self.end_Bpunch = True
self.end_Cpunch = True
self.end_Dpunch = True
self.end_Akick = True
self.end_Bkick = True
self.end_Ckick = True
self.end_Dkick = True
self.end_special = True
return False
def setSprite(self,sprite):
for i in range(0,len(self.spriteList)):
if (not sprite == self.spriteList[i]):
hideSprite(self.spriteList[i])
showSprite(sprite)
def posFighter(self):
for i in range(0,len(self.spriteList)):
moveSprite(self.spriteList[i], self.x, self.y, True)
|
[
"matheusvidaldemenezes@gmail.com"
] |
matheusvidaldemenezes@gmail.com
|
47433678731e79969ebc60e04c75a9ed7ad837c7
|
6e362245c5933cb89b636c7a7c853dd8bb1f52cc
|
/hardware/raspio_pro_hat/pulse_rgbled.py
|
2ed9dbd2c202d90acf7bd8fb0d255679a829b3a9
|
[
"MIT"
] |
permissive
|
iidylsnicc/raspi_code
|
4133ab5ba6093ae40c19284259792cc9e6019ac3
|
ee089b8eae29dd4ddf8d45e101affa1e8c5efca4
|
refs/heads/master
| 2023-08-17T04:31:39.413107
| 2021-09-20T18:00:32
| 2021-09-20T18:00:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
from gpiozero import RGBLED
from time import sleep
led = RGBLED(red=17, green=18, blue=19)
delay = 0.02
while True:
for x in range(100):
led.red = x/100
sleep(delay)
for x in range(100, -1, -1):
led.red = x/100
sleep(delay)
for x in range(100):
led.green = x/100
sleep(delay)
for x in range(100, -1, -1):
led.green = x/100
sleep(delay)
for x in range(100):
led.blue = x/100
sleep(delay)
for x in range(100, -1, -1):
led.blue = x/100
sleep(delay)
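# The same pulse can be written as one loop over the channels (a sketch that
# relies only on the RGBLED red/green/blue attributes already used above):
# for channel in ("red", "green", "blue"):
#     for x in list(range(100)) + list(range(100, -1, -1)):
#         setattr(led, channel, x / 100)
#         sleep(delay)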
|
[
"github@cfmacrae.fastmail.co.uk"
] |
github@cfmacrae.fastmail.co.uk
|
309dcb9d3ae4eb56482f54d31c6b2d71bef2c8dc
|
d279d30912212aea1b0f9b6d71032748f91b8f17
|
/src/message_tests/message_edit_test.py
|
8014fe7b3a8eadee0ee545410f51dfa1414e9903
|
[] |
no_license
|
HeadYak/comp1531-flock
|
3c51300d5957ff8299902204a566b9f0728bcbca
|
7e4082a21e3ab68b46376e18481eff3bad00bb3a
|
refs/heads/master
| 2023-04-14T19:41:15.693088
| 2020-11-15T06:10:51
| 2020-11-15T06:10:51
| 365,741,010
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
'''
Necessary imports
'''
import pytest
from channel import channel_join
from message import message_send, message_edit
from channels import channels_create
from auth import auth_register
from error import AccessError
from other import clear
from global_data import channels
def test_message_edit():
'''
    Testing the message_edit function
'''
clear()
#Creating users to create channels
user1 = auth_register("user1@gmail.com", "user1pass", "user1", "last1", None)
user2 = auth_register("user2@gmail.com", "user2pass", "user2", "last2", None)
token1 = user1['token']
token2 = user2['token']
#creating channels
ch_id1 = channels_create(token1, "aGreatChannel", True)['channel_id']
ch_id2 = channels_create(token2, "yetAnotherChannel", True)['channel_id']
#creating channel messages
m_id1 = message_send(token1, ch_id1, 'hello')['message_id']
message_send(token1, ch_id1, 'hey')
m_id3 = message_send(token2, ch_id2, "hello")['message_id']
message_send(token2, ch_id2, "hello")
message_send(token2, ch_id2, "hello")
channel_join(token1, ch_id2)
m_id4 = message_send(token1, ch_id2, "hello")['message_id']
with pytest.raises(AccessError):
        #user did not create the message and isn't an owner
message_edit(token2, m_id1, "message")
    #user editing their own message
message_edit(token1, m_id1, "newMessage")
    #owner of channel editing another user's message / empty string test
message_edit(token2, m_id4, "")
    #owner of flock editing another user's message
message_edit(token1, m_id3, "yoooo")
    #checking the old message is replaced by the new message
found = False
for channel in channels:
if channel['channel_id'] == ch_id1:
for msg in channel['messages']:
if msg['message_id'] == m_id1:
assert msg['message'] == "newMessage"
if msg['message_id'] == m_id4:
assert msg['message'] == ""
found = True
if msg['message_id'] == m_id3:
assert msg['message'] == "yoooo"
    #checking that when the given string is empty the message is removed
assert found == False
|
[
"z5257072@cse.unsw.edu.au"
] |
z5257072@cse.unsw.edu.au
|
527af3a4e7634240faba6c5b05c3ee119377049a
|
f20e531a213f6892991653e7c6f2288f07c15f9c
|
/test/test_districtadmins_deleted.py
|
63bb48496a9567e2c7f865ef52ab99501a91d8d2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Clever/clever-python
|
54ec744ef997d2c739ea056ae223aa35ef487876
|
c123d339f9af16aa32405cdf4051ad4afbdc5bd2
|
refs/heads/master
| 2022-07-09T11:33:03.804757
| 2022-06-24T01:38:24
| 2022-06-24T01:38:24
| 7,064,597
| 18
| 21
|
Apache-2.0
| 2022-06-24T01:38:35
| 2012-12-08T06:12:45
|
Python
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
# coding: utf-8
"""
Clever API
The Clever API
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import clever
from clever.rest import ApiException
from clever.models.districtadmins_deleted import DistrictadminsDeleted
class TestDistrictadminsDeleted(unittest.TestCase):
""" DistrictadminsDeleted unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testDistrictadminsDeleted(self):
"""
Test DistrictadminsDeleted
"""
# FIXME: construct object with mandatory attributes with example values
#model = clever.models.districtadmins_deleted.DistrictadminsDeleted()
pass
if __name__ == '__main__':
unittest.main()
|
[
"amelia.jones@clever.com"
] |
amelia.jones@clever.com
|
cb78f9c09bc5a9f6ff2a1f449e9edcce8749ea83
|
ce469cd389c8bbcd80a167bbc03da4d68e1988f5
|
/views/api.py
|
08e51b99b00d1ec92d596c473c637d5a735118cc
|
[] |
no_license
|
dhydrated/kan-banana-web
|
7189ecab6092ab105d79bb0817b7921e49280e78
|
87418d2f5fcdd62e63b0f1284cc078945f404ef6
|
refs/heads/master
| 2021-01-10T22:01:28.497874
| 2012-11-15T11:11:01
| 2012-11-15T11:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
import webapp2
import logging
from google.appengine.api import urlfetch
from template_engine import TemplateEngine
class API(webapp2.RequestHandler):
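    # Proxies incoming PUT bodies to the kan-banana App Engine service and
    # relays the JSON response back to the caller.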
def put(self):
logging.debug('request: %s', self.request)
response = urlfetch.fetch('http://kan-banana.appspot.com/project',
payload=self.request.body,
method='PUT',
headers={'Content-Type':'application/json'},
allow_truncated=False,
follow_redirects=True,
deadline=60,
validate_certificate=None)
logging.debug('response: %s', response.content)
self.response.out.write(response.content)
def get(self):
template_engine = TemplateEngine()
self.response.out.write(template_engine.render('main.html', []))
|
[
"dhydrated@gmail.com"
] |
dhydrated@gmail.com
|
12c3b876d4a666adb377f0019f3af6e7e7c3f34f
|
c0e279a689a29bd69a22463c6e32d56069005c8c
|
/2016-10-21/workshop_02.py
|
d60f87b6e78f45315bba5f34c60cd0059e9784ba
|
[] |
no_license
|
RinaldoBuratti/ggpl
|
f1f41d9afc219f35d57c45d7dc0443ec9ff94a6a
|
fb603002215a4af866e8bf1f0065662c38de4104
|
refs/heads/master
| 2020-05-23T06:23:37.399426
| 2017-01-31T17:55:28
| 2017-01-31T17:55:28
| 70,234,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,938
|
py
|
from pyplasm import *
from larlib import *
import csv
def search(string, char):
"""
Function to search a character in a string
Args:
string: a string in which we have to find the character char
char: a character
Returns:
An index corresponding to the position of the character within the string
"""
index = 0
while index < len(string):
if string[index] == char:
return index
index = index + 1
return -1
def loadDataFromCsv(string):
"""
    Function to load the content of a csv file
Returns:
A set of element's list
"""
with open(string,'r') as f:
dati=csv.reader(f, delimiter=':', quotechar=' ')
#my_list = map(tuple, dati)
frameDistances = [] #per la traslazione sull'asse x
yDistances = [] #per la traslazione sull'asse y
zDistances = [] #per la traslazione sull'asse z
beamsSections = []
beamsDistances = []
pillarsSections = []
pillarsDistances = []
for line in dati:
if(len(line) == 1):
s = line[0]
frameDistances.extend([float(s[0:search(s,",")])])
s = s[search(s,",")+1:len(s)]
yDistances.extend([float(s[0:search(s,",")])])
zDistances.extend([float(s[search(s,",")+1:len(s)])])
else:
pillarsDistances.append(line[0])
beamsDistances.append(line[1])
pillarsSections.append(line[2])
beamsSections.append(line[3])
tmp = []
for i in range (0, len(pillarsDistances)):
el2 = []
for j in range (0, len(pillarsDistances[i])):
if(j%2 == 0):
el2.extend([float(pillarsDistances[i][j])])
tmp.append(el2)
pillarsDistances = tmp
tmp = []
for i in range (0, len(beamsDistances)):
el2 = []
for j in range (0, len(beamsDistances[i])):
if(j%2 == 0):
el2.extend([float(beamsDistances[i][j])])
tmp.append(el2)
beamsDistances = tmp
ps= []
val = search(pillarsSections[0], ",")
for i in range(0,len(pillarsSections)):
tmp = pillarsSections[i]
px = float(tmp[0:val])
py = float(tmp[val+1:len(tmp)])
p = (px,py)
ps.append(p)
pillarsSections = ps
bs= []
val = search(beamsSections[0], ",")
for i in range(0,len(beamsSections)):
tmp = beamsSections[i]
bx = float(tmp[0:val])
by = float(tmp[val+1:len(tmp)])
b = (bx,by)
bs.append(b)
beamsSections = bs
return (pillarsDistances, beamsDistances, pillarsSections, beamsSections, frameDistances)
def buildFrame(beamSection, pillarSection, pillarDistances, beamDistances) :
"""
Creates a parametric frame in reinforced concrete
Args:
        :param beamSection: dimensions of the beam cross-section
        :param pillarSection: dimensions of the pillar cross-section
        :param pillarDistances: distances between the axes of the pillars
        :param beamDistances: interstory heights
Returns:
3D value of type HPC representing a single frame of the building
"""
(bx, bz) = beamSection
(px, py) = pillarSection
#pillar creation
pillarX = []
for i in range(0, len(pillarDistances)):
pillarX.extend([py, -pillarDistances[i]])
pillarX.extend([py])
pillarY = []
for i in range(0, len(beamDistances)):
pillarY.extend([beamDistances[i], -bz])
pillarSimple = PROD([QUOTE(pillarX), QUOTE(pillarY)])
pillarComplete = PROD([pillarSimple, Q(px)])
#beam creations
beamX = []
for i in range(0, len(pillarDistances)):
if i == 0 or i == len(pillarDistances) - 1 :
beamSize = pillarDistances[i] + py + py/2.0
else:
beamSize = pillarDistances[i] + py
beamX.extend([beamSize])
beamY = []
for i in range(0, len(beamDistances)):
beamY.extend([-beamDistances[i], bz])
beamSimple = PROD([QUOTE(beamX), QUOTE(beamY)])
beamComplete = PROD([beamSimple, Q(bx)])
structure = STRUCT([pillarComplete, beamComplete])
structure = R([1,2])(PI/2.0)(structure)
return R([1,3])(-PI/2.0)(structure)
def buildAllFrames(beamSection, pillarSection, pillarDistances, beamDistances, frameDistances) :
"""
Build multiple frames
Args:
        :param beamSection: dimensions of the beam cross-section
        :param pillarSection: dimensions of the pillar cross-section
        :param pillarDistances: distances between the axes of the pillars
        :param beamDistances: interstory heights
:param frameDistances: distances between frames
Returns:
3D value of type HPC representing the frames of the building
"""
allFrames = []
framesHeight = []
framesWidth = []
for i in range(0, len(frameDistances)):
temp = 0
currentPillarDistances = pillarDistances[i]
currentBeamDistances = beamDistances[i]
for j in range(0, len(currentBeamDistances)):
temp = temp + currentBeamDistances[j]
framesHeight.extend([temp])
temp = 0
for j in range(0, len(currentPillarDistances)):
temp = temp + currentPillarDistances[j]
framesWidth.extend([temp])
currentPillarSection = pillarSection[i]
currentBeamSection = beamSection[i]
frame = buildFrame(currentBeamSection, currentPillarSection, currentPillarDistances, currentBeamDistances)
allFrames.extend([T(1) (frameDistances[i]), frame])
return (STRUCT(allFrames), framesHeight, framesWidth)
def buildBeamsBetweenFrames(beamSection, pillarSection, pillarDistances, beamDistances, frameDistances, heights, widths):
"""
Build beams between frames
Args:
        :param beamSection: dimensions of the beam cross-section
        :param pillarSection: dimensions of the pillar cross-section
        :param pillarDistances: distances between the axes of the pillars
        :param beamDistances: interstory heights
:param frameDistances: distances between frames
Returns:
3D value of type HPC representing the internal beams of the building
"""
planes = []
dist = 0
for j in range(0, len(frameDistances)-1):
minHeight = 0
if(heights[j] <= heights[j+1]):
minHeight = j
else:
minHeight = j+1
minWidth = 0
if(widths[j] <= widths[j+1]):
minWidth = j
else:
minWidth = j+1
(by, bz) = beamSection[minHeight]
(px, py) = pillarSection[minWidth]
bx = frameDistances[j+1]
yDistances = pillarDistances[minWidth]
zDistances = beamDistances[minHeight]
if(j == 0):
dist = px
el = PROD([Q(bx), Q(by)])
el = PROD([el, Q(bz)])
el = STRUCT([el])
tmp = []
for i in range(0, len(yDistances)):
tmp.extend([el, T(2)(yDistances[i]+py)])
tmp.extend([el])
tmp = STRUCT(tmp)
floors = []
pred = 0
for i in range(0, len(zDistances)):
floors.extend([T(3)(zDistances[i] + pred), tmp])
pred = bz
floors = STRUCT(floors)
planes.extend([T(1)(dist), floors])
dist = bx
planes = STRUCT(planes)
return planes
def ggpl_bone_structure(filename):
"""
creates a bone structure of a building
Args:
:param filename: the name of a .csv file that contais input values
Returns:
3D value of type HPC representing the bone structure of a building
"""
values = loadDataFromCsv(filename)
beamSection = values[3]
pillarSection = values[2]
pillarDistances = values[0]
beamDistances = values[1]
frameDistances = values[4]
(allFrames, heights, widths) = buildAllFrames(beamSection, pillarSection, pillarDistances, beamDistances, frameDistances)
allBeams = buildBeamsBetweenFrames(beamSection, pillarSection, pillarDistances, beamDistances, frameDistances, heights, widths)
structure = buildStructure(allFrames, allBeams)
return structure
def buildStructure(frames, floorsBeams):
structure = STRUCT([frames, floorsBeams])
return structure
if __name__ == "__main__":
structure = ggpl_bone_structure('frame_data_438537.csv')
VIEW(structure)
|
[
"rinaldoburatti@gmail.com"
] |
rinaldoburatti@gmail.com
|
333121f2cc279a1bf0888d6748253b0b79d00f6d
|
9f826b53122cba44a30bf41c3c610f40a2100457
|
/employee.py
|
a0d4db1d26b005a1258fa72f911f3aeec534b11b
|
[] |
no_license
|
sai444/nkcbackup
|
2f76adea23023b61a74f1c6aaca323d360946fcd
|
4d691f8e221c1aa2dfe655cd30269dcab9de0150
|
refs/heads/main
| 2023-06-28T11:01:56.380565
| 2021-07-21T07:08:37
| 2021-07-21T07:08:37
| 388,026,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from flask import jsonify, Blueprint
employee_api = Blueprint('employee_api', __name__)
@employee_api.route("/employee")
def employee():
return jsonify({"msg": "Im in employee file"})
|
[
"31222470+sai444@users.noreply.github.com"
] |
31222470+sai444@users.noreply.github.com
|
a26b75d618b32140ec9d1bc5934a9358f20d796b
|
932ca541a9c0ec65522777a3b4d14ae50ec6f668
|
/Desktop/Deep-learning/project_1/train2.py
|
71a77b138eb2d8e234fd83553e2b326eabb68cc8
|
[] |
no_license
|
Khuongb1609777/text-recognition
|
e5e9c733052b86f57371b5dd25481c2ab9538b95
|
67b51d9f9fa074f1589c5e61f6d47e7bd7e92779
|
refs/heads/master
| 2022-12-16T06:23:20.632386
| 2020-09-16T02:36:54
| 2020-09-16T02:36:54
| 295,902,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,166
|
py
|
import cv2 as cv2
import os
import numpy as np
import glob
from skimage.feature import hog
from sklearn.svm import LinearSVC
from keras.datasets import mnist
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from tkinter import *
# loading Python Imaging Library
from PIL import ImageTk, Image
# To get the dialog box to open when required
from tkinter import filedialog
def get_digit_data(path):  # returns (digit_list, label_list)
digit_list = []
label_list = []
for number in range(12):
for img_org_path in glob.iglob(path + str(number) + '/*.jpg'):
img = cv2.imread(img_org_path, 0)
img = np.array(img)
digit_list.append(img)
label_list.append(int(number))
return digit_list,label_list
# load the training data
digit_path_train = "Desktop/data_svm_train/"
digit_list, label_list = get_digit_data(digit_path_train)
X_train = np.array(digit_list, dtype=np.float32)
y_train = np.array(label_list)
# load the test data
digit_path_test = "Desktop/data_svm_test/"
digit_list, label_list = get_digit_data(digit_path_test)
X_test = np.array(digit_list, dtype=np.float32)
y_test = np.array(label_list)
# Feature extraction for the training set
# Parameter notes:
# pixels_per_cell is the size of one cell (in pixels)
# pixels_per_cell = 5,5 on a 60x30 image gives 6 * 12 = 72 cells
#--------------------------------------------------------------------------
# # Extract features for the training set
# X_train_feature = []
# for i in range(len(X_train)):
# feature = hog(X_train[i],orientations=9,pixels_per_cell=(5,5),cells_per_block=(1,1),block_norm="L2")
# X_train_feature.append(feature)
# X_train_feature = np.array(X_train_feature,dtype = np.float32)
# # Extract features for the test set
# X_test_feature = []
# for i in range(len(X_test)):
# feature = hog(X_test[i],orientations=9,pixels_per_cell=(5,5),cells_per_block=(1,1),block_norm="L2")
# X_test_feature.append(feature)
# X_test_feature = np.array(X_test_feature,dtype=np.float32)
#--------------------------------------------------------------------------
# Feature extraction function
import cv2 as cv2
import os
import numpy as np
import glob
from skimage.feature import hog
from sklearn.svm import LinearSVC
from keras.datasets import mnist
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
def get_digit_data(path):  # returns (digit_list, label_list)
digit_list = []
label_list = []
for number in range(12):
for img_org_path in glob.iglob(path + str(number) + '/*.jpg'):
img = cv2.imread(img_org_path, 0)
img = np.array(img)
digit_list.append(img)
label_list.append(int(number))
return digit_list,label_list
# load the training data
digit_path_train = "./data_svm_train/"
digit_list, label_list = get_digit_data(digit_path_train)
X_train = np.array(digit_list, dtype=np.float32)
y_train = np.array(label_list)
# load the test data
digit_path_test = "./data_svm_test/"
digit_list, label_list = get_digit_data(digit_path_test)
X_test = np.array(digit_list, dtype=np.float32)
y_test = np.array(label_list)
#--------------------------------------------------------------------------
# # Extract features for the training set
# X_train_feature = []
# for i in range(len(X_train)):
# feature = hog(X_train[i],orientations=9,pixels_per_cell=(5,5),cells_per_block=(1,1),block_norm="L2")
# X_train_feature.append(feature)
# X_train_feature = np.array(X_train_feature,dtype = np.float32)
# # Extract features for the test set
# X_test_feature = []
# for i in range(len(X_test)):
# feature = hog(X_test[i],orientations=9,pixels_per_cell=(5,5),cells_per_block=(1,1),block_norm="L2")
# X_test_feature.append(feature)
# X_test_feature = np.array(X_test_feature,dtype=np.float32)
#--------------------------------------------------------------------------
# Feature extraction function
def feature(x):
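    # HOG over a 60x30 image with 5x5 cells and 9 orientations yields
    # 12*6 = 72 cells, i.e. 72*9 = 648 features per digit image.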
X_feature = []
if len(x.shape) == 2:
feature = hog(x,orientations=9,pixels_per_cell=(5,5),cells_per_block=(1,1),block_norm="L2")
X_feature.append(feature)
else:
for i in range(len(x)):
feature = hog(x[i],orientations=9,pixels_per_cell=(5,5),cells_per_block=(1,1),block_norm="L2")
X_feature.append(feature)
X_feature = np.array(X_feature)
return (X_feature)
# Label prediction function
def predict(x):
X_feature = feature(x)
y_pred = model.predict(X_feature)
return (y_pred)
# Extract the features of X_train and X_test
X_train_feature = feature(X_train)
X_test_feature = feature(X_test)
# Import model
model = LinearSVC(C=10)
# Fit the model
model.fit(X_train_feature,y_train)
# Predict labels for X_test
y_predict = model.predict(X_test_feature)
# Print the accuracy
print(accuracy_score(y_test,y_predict))
def get_digit_predicted(image):
im_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
im,thre = cv2.threshold(im_gray,90,255,cv2.THRESH_BINARY_INV)
    # Find the contours
contours,hierachy = cv2.findContours(thre,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # Find the 3 contours with the largest area
area_cnt = [cv2.contourArea(cnt) for cnt in contours]
area_sort = np.argsort(area_cnt)[::-1]
area_sort_3 = area_sort[:3]
contours_3 = []
for i in area_sort_3:
contours_3.append(contours[i])
    # Get the boundingRect coordinates of the contours
rects = [cv2.boundingRect(cnt) for cnt in contours_3]
    # Sort the contours left to right by x coordinate
contours_LTR = []
rects_sort = sorted(rects)
for i in range(len(rects_sort)):
for j in range(len(rects)):
if rects_sort[i] == rects[j]:
contours_LTR.append(contours_3[j])
    # rects_LTR holds x,y,w,h: the position, width and height of each bounding box
rects_LTR = [cv2.boundingRect(cnt) for cnt in contours_LTR]
contours = contours_LTR
    # List to hold the recognised digits and signs
list_digit = []
    # Walk the contours and predict a label for each
for i in range(len(contours)):
        # Draw a rectangular bounding box around the contour;
        # (x, y) is the top-left corner of the rectangle
(x,y,w,h) = cv2.boundingRect(contours[i])
h = h + 6
w = w + 6
x = x - 3
y = y - 3
        # Nudge x,y outward so the top corner clears the glyph when drawing,
        # and enlarge h and w for a slightly wider bounding box
cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
        # Draw the bounding box
roi = thre[y:y+h,x:x+w]
        # thre is the thresholded image matrix
roi = np.pad(roi,(20,20),'constant',constant_values=(0,0))
roi = cv2.resize(roi, (60, 30), interpolation=cv2.INTER_AREA)
roi = cv2.dilate(roi, (3, 3))
        # extract HOG features for this contour
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(5, 5), cells_per_block=(1, 1),block_norm="L2")
nbr = model.predict(np.array([roi_hog_fd], np.float32))
list_digit.append(int(nbr[0]))
kytu = ""
if list_digit[i] == 10:
kytu = '+'
elif list_digit[i] == 11:
kytu = '-'
else:
kytu = str(int(list_digit[i]))
cv2.putText(image, kytu, (x, y),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
return list_digit
# Extract the operands and the operator sign
def get_operation(image):
list_digit = get_digit_predicted(image)
toanhang = []
pheptoan = []
for digit in list_digit:
if digit == 10:
pheptoan.append('+')
elif digit == 11:
pheptoan.append('-')
else:
toanhang.append(digit)
return toanhang,pheptoan
# Compute the result of the expression
def kq(image):
toanhang,pheptoan = get_operation(image)
error = []
ketqua = 0
    # Check whether an operator and two operands were recognised
    if len(pheptoan) < 1 :
        error.append("No operator detected")
    elif len(pheptoan) > 1 :
        error.append("More than one operator detected")
    elif len(toanhang) > 2:
        error.append("More than two operands detected")
    elif len(toanhang) < 2:
        error.append("Fewer than two operands detected")
else:
if pheptoan[0] == "+":
ketqua = ketqua + ( toanhang[0] + toanhang[1])
elif pheptoan[0] == "-":
ketqua = ketqua + ( toanhang[0] - toanhang[1])
return error,ketqua
# Get the positions used for drawing the result
# Returns the bounding boxes; the caller draws the result at an x offset from them
def get_location(image):
im_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
im,thre = cv2.threshold(im_gray,90,255,cv2.THRESH_BINARY_INV)
    # Find the contours
contours,hierachy = cv2.findContours(thre,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # Find the contours with the largest areas
area_cnt = [cv2.contourArea(cnt) for cnt in contours]
area_sort = np.argsort(area_cnt)[::-1]
    area_sort = area_sort[:3]  # keep the three largest (the bare slice was a no-op)
contours_3 = []
for i in area_sort:
contours_3.append(contours[i])
    # Get the boundingRect coordinates of the contours
rects = [cv2.boundingRect(cnt) for cnt in contours_3]
    # Sort the contours left to right by x coordinate
contours_LTR = []
rects_sort = sorted(rects)
for i in range(len(rects_sort)):
for j in range(len(rects)):
if rects_sort[i] == rects[j]:
contours_LTR.append(contours_3[j])
    # Get x, y, width w and height h of each bounding box
rects_LTR = [cv2.boundingRect(cnt) for cnt in contours_LTR]
return rects_LTR
# Render the result onto the image
def show(image):
error,ketqua = kq(image)
rects_LTR = get_location(image)
if len(error) != 0:
x = rects_LTR[2][0] + 160
y = rects_LTR[1][1]
kytu = "error"
cv2.putText(image, kytu, (x, y),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
else:
x1 = rects_LTR[2][0] + 300
y1 = rects_LTR[1][1]
x2 = x1 + 60
y2 = y1
cv2.putText(image, "=", (x1, y1),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
cv2.putText(image, str(int(ketqua)), (x2, y2),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
return image
def main(image):
img = show(image)
cv2.imshow("im",img)
cv2.waitKey()
# Extract the operands and the operator sign
def get_operation(image):
list_digit = get_digit_predicted(image)
toanhang = []
pheptoan = []
for digit in list_digit:
if digit == 10:
pheptoan.append('+')
elif digit == 11:
pheptoan.append('-')
else:
toanhang.append(digit)
return toanhang,pheptoan
# Compute the result of the expression
def kq(image):
toanhang,pheptoan = get_operation(image)
error = []
ketqua = 0
    # Check whether an operator and two operands were recognised
    if len(pheptoan) < 1 :
        error.append("No operator detected")
    elif len(pheptoan) > 1 :
        error.append("More than one operator detected")
    elif len(toanhang) > 2:
        error.append("More than two operands detected")
    elif len(toanhang) < 2:
        error.append("Fewer than two operands detected")
else:
if pheptoan[0] == "+":
ketqua = ketqua + ( toanhang[0] + toanhang[1])
elif pheptoan[0] == "-":
ketqua = ketqua + ( toanhang[0] - toanhang[1])
return error,ketqua
# Get the positions used for drawing the result
# Returns the bounding boxes; the caller draws the result at an x offset from them
def get_location(image):
im_gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
im,thre = cv2.threshold(im_gray,90,255,cv2.THRESH_BINARY_INV)
    # Find the contours
contours,hierachy = cv2.findContours(thre,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # Find the contours with the largest areas
area_cnt = [cv2.contourArea(cnt) for cnt in contours]
area_sort = np.argsort(area_cnt)[::-1]
    area_sort = area_sort[:3]  # keep the three largest (the bare slice was a no-op)
contours_3 = []
for i in area_sort:
contours_3.append(contours[i])
    # Get the boundingRect coordinates of the contours
rects = [cv2.boundingRect(cnt) for cnt in contours_3]
    # Sort the contours left to right by x coordinate
contours_LTR = []
rects_sort = sorted(rects)
for i in range(len(rects_sort)):
for j in range(len(rects)):
if rects_sort[i] == rects[j]:
contours_LTR.append(contours_3[j])
    # Get x, y, width w and height h of each bounding box
rects_LTR = [cv2.boundingRect(cnt) for cnt in contours_LTR]
return rects_LTR
# Render the result onto the image
def show(image):
error,ketqua = kq(image)
rects_LTR = get_location(image)
if len(error) != 0:
x = rects_LTR[2][0] + 160
y = rects_LTR[1][1]
kytu = "error"
cv2.putText(image, kytu, (x, y),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
else:
x1 = rects_LTR[2][0] + 300
y1 = rects_LTR[1][1]
x2 = x1 + 60
y2 = y1
cv2.putText(image, "=", (x1, y1),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
cv2.putText(image, str(int(ketqua)), (x2, y2),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 3)
return image,ketqua
def main(image):
img,ketqua = show(image)
# error,result = kq(img)
# if len(error) == 0:
# print("Kết quả của phép toán: ",result)
# else:
# print("error: ",error)
# cv2.imshow("im",img)
# cv2.waitKey()
return (img,ketqua)
# GUI
# ----------------------------------------------------------------------------------------
# Create the upload window
root = Tk()
# Set the title of the upload window
root.title("Image Loader")
# Set the window size
root.geometry("1024x512")
# Allow the window to be resized
root.resizable(width = True, height = True)
def open_img_predict():
# Select the Imagename from a folder
x = openfilename()
img = cv2.imread(x)
img,ketqua = main(img)
    img = Image.fromarray(img)  # convert the numpy array to a PIL Image
# # opens the image
# img = Image.open(x)
# # # resize the image and apply a high-quality down sampling filter
img = img.resize((512, 256), Image.ANTIALIAS)
# # PhotoImage class is used to add image to widgets, icons etc
img = ImageTk.PhotoImage(img)
# print(img)
# create a label
panel = Label(root, image = img)
# # set the image as img
panel.image = img
panel.grid(row = 4)
def open_img():
# Select the Imagename from a folder
x = openfilename()
img = cv2.imread(x)
# opens the image
img = Image.open(x)
# # resize the image and apply a high-quality down sampling filter
img = img.resize((512, 256), Image.ANTIALIAS)
# print(img)
# # PhotoImage class is used to add image to widgets, icons etc
img = ImageTk.PhotoImage(img)
# print(img)
# create a label
panel = Label(root, image = img)
# # set the image as img
panel.image = img
panel.grid(row = 4)
def openfilename():
# open file dialog box to select image
# The dialogue box has a title "Open"
filename = filedialog.askopenfilename(title ='"pen')
# print(filename)
return filename
# Create a button and place it into the window using grid layout
btn = Button(root, text ='open image', command = open_img).grid(row = 1, columnspan = 4)
btn_2 = Button(root, text = 'predict', command = open_img_predict).grid(row = 3, columnspan = 4)
root.mainloop()
# img = cv2.imread("./data_svm_new/3cong8.jpg")
# # cv2.imshow("im",img)
# # cv2.waitKey()
# main(img)
|
[
"khuong3493455@gmail.com"
] |
khuong3493455@gmail.com
|
5d22b9accad63d48c5d37b3da688b2eb233b0256
|
580ed1224a2635893324cc8bb292b73ea8e833c7
|
/udp_server.py
|
3ac0237e90a651e67a4ff9d7c0daa2f6273886e6
|
[
"MIT"
] |
permissive
|
mytianya/python-scripts
|
661a48c4114f34be418da3ee4cb3e41a025bb938
|
8f94c5549486494de43c41fda24a039fd4b7b7e5
|
refs/heads/master
| 2023-07-21T05:45:22.194397
| 2021-08-27T09:28:44
| 2021-08-27T09:28:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
'''
Create a simple UDP server
'''
from socketserver import BaseRequestHandler,UDPServer
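# For a UDPServer, each handler's self.request is a (data, client_socket) pair.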
class TimeHandler(BaseRequestHandler):
def handle(self):
        print('Client address:', self.client_address)
msg,sock=self.request
print(msg)
if __name__=='__main__':
serv=UDPServer(('',9111),TimeHandler)
serv.serve_forever()
|
[
"dsyslove@163.com"
] |
dsyslove@163.com
|
71868391ab4ff3d83072fcfbfc7ab3faab8917e7
|
6a16b796c788d2ee9b54684b0ee30ea146a29aa3
|
/columnApplet/columnApplet.pyde
|
2206a454ca0c6263580661e48efd8d5ccddc4906
|
[] |
no_license
|
dbt-ethz/MASdfab1819
|
8b7d574afdd35f21e88de379f0824321e86223df
|
bc84112187b65f9ae692f27ffe6e503aa7078d53
|
refs/heads/master
| 2020-03-27T10:50:58.491849
| 2019-03-28T23:01:47
| 2019-03-28T23:01:47
| 146,448,900
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,153
|
pyde
|
add_library('peasycam')
add_library('controlP5')
import math, datetime
import engineSineSubdiv as engine
import gui as gui
import mola.io as io
import mola.slicer as slicer
import mola.renderP5 as renderer
import mola.color as coloriser
def setup():
global sliceZ,pshapeSlice,guiDimX
guiDimX=400
pshapeSlice=None
sliceZ=0
size(1600, 900, P3D)
#fullScreen(P3D)
gui.cam = PeasyCam(this,350)
gui.cp5 = ControlP5(this)
gui.cp5.setAutoDraw(False)
gui.initialize()
engine.initialize()
update()
def draw():
if gui.bExport.getValue():
io.exportOBJ(engine.column,sketchPath()+'/data/mesh_exp1.obj')
println("exported")
gui.bExport.setValue(False)
engine.guiEvents()
if engine.doUpdate:
update()
if mouseX < guiDimX or mouseX>width-guiDimX:
gui.cam.setActive(False)
else:
gui.cam.setActive(True)
background(255)
hint(DISABLE_DEPTH_TEST)
gui.cam.beginHUD()
image(gui.backgroundImage, 0, 0)
gui.cam.endHUD()
hint(ENABLE_DEPTH_TEST)
display3D()
display2D()
def update():
global pshapeColumn
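    # Re-colour every face for the selected display mode, then rebuild the mesh PShape.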
fill(200)
if gui.displayMode=="White":
for f in engine.column.faces:
f.color=(1,1,1,1)
if gui.displayMode=="Curvature":
coloriser.colorFacesByCurvature(engine.column.faces)
if gui.displayMode=="Area":
coloriser.colorFacesByArea(engine.column.faces)
if gui.displayMode=="Perimeter":
coloriser.colorFacesByPerimeter(engine.column.faces)
if gui.displayMode=="Compactness":
coloriser.colorFacesByCompactness(engine.column.faces)
if gui.displayMode=="Vertical Angle":
coloriser.colorFacesByVerticalAngle(engine.column.faces)
if gui.displayMode=="Horizontal Angle":
coloriser.colorFacesByHorizontalAngle(engine.column.faces)
if gui.displayMode=="Group":
faceGroups={}
randomSeed(1)
for f in engine.column.faces:
faceGroups[f.group]=(random(0,1),random(0,1),random(0,1),1)
for f in engine.column.faces:
f.color= faceGroups[f.group]
noStroke()
pshapeColumn=renderer.createMeshShape(engine.column)
def display2D():
global screen
gui.cam.beginHUD()
gui.cp5.draw()
image(gui.logo1, width-100, height-100)
#image(gui.logo2, width-170, height-100)
pushMatrix()
translate(guiDimX/2-20,height-guiDimX/2-100)
scale(2.5)
fill(70, 100)
rect(-60,-60,120,120)
fill(0)
ellipse(0,0,12.5,12.5)
if pshapeSlice!=None:
strokeWeight(2)
stroke(255, 0, 155)
shape(pshapeSlice)
popMatrix()
gui.cam.endHUD()
def display3D():
global sliceZ,pshapeSlice, rotZ
if sliceZ!=gui.sliderSlice.getValue():
sliceZ=gui.sliderSlice.getValue()
stroke(255,0,155)
pshapeSlice=renderer.createLinesShape(slicer.slice(engine.column,sliceZ))
selectedIndex = gui.listDisplay.getValue()
selectedDisplayMode = gui.listDisplay.getItem(int(selectedIndex)).get("text")
if selectedDisplayMode!=gui.displayMode:
gui.displayMode=selectedDisplayMode
update()
# mesh rendering
directionalLight(255, 255, 255, 1, 1, 1)
directionalLight(255, 255, 255, -1, -1, -1)
noStroke()
pushMatrix()
if gui.bDance.getBooleanValue():
fill(255,50)
shape(gui.dancer)
rotateX(math.pi*0.5)
translate(0,0,-150)
if gui.bRot.getBooleanValue():
rotZ+=0.01
else:
rotZ=0
rotateZ(rotZ)
if gui.bBase.getBooleanValue():
pushMatrix()
translate(0, 0, -10)
fill(150)
#rect(-60,-60,120,120)
box(120, 120, 20)
popMatrix()
if pshapeColumn!=None:
shape(pshapeColumn)
if gui.bDisplaySlice.getValue():
pushMatrix()
translate(0,0,sliceZ)
fill(200,100)
rect(-60,-60,120,120)
popMatrix()
if pshapeSlice!=None:
shape(pshapeSlice)
popMatrix()
|
[
"noreply@github.com"
] |
dbt-ethz.noreply@github.com
|
850589bb644ae4eab56ab52c3b5cf0289278f50a
|
0ba98a960871dba4b81337161c65c4db505ced45
|
/attikmoney/core/migrations/0031_auto_20191222_0759.py
|
abd8707f82d3d15f55f57624ac5890bd6f313a26
|
[] |
no_license
|
felipetsi/attikmoney
|
7323e9e5f7eb5e96a39c2d5495d6dc6cc5208659
|
5fac3341acee35a1d3f9b61537848a65609aa6f7
|
refs/heads/master
| 2022-01-18T19:13:28.814336
| 2022-01-02T01:25:24
| 2022-01-02T01:25:24
| 84,989,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Generated by Django 2.2.7 on 2019-12-22 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0030_auto_20191222_0759'),
]
operations = [
migrations.AlterField(
model_name='yieldtype',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
),
]
|
[
"felipe.pereira@attik.com.br"
] |
felipe.pereira@attik.com.br
|
c29eef2004e1aa97dff132a0c0327f2c1991e18a
|
ea6cf1c8df3955bb168454d437e23180c94d2c47
|
/Run01/Trans/1.importdata.py
|
938c4371d697f983f5628ab906427857903561db
|
[] |
no_license
|
MohanSha/Employee-attendance-manager
|
ba0b94b3639d66e6294e1d5390c05fd346f1cd62
|
792a54f3b8ae03904bbe005059ba68674073344c
|
refs/heads/master
| 2021-07-19T13:18:34.450484
| 2017-10-28T11:24:38
| 2017-10-28T11:24:38
| 108,643,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
import xlrd
import MySQLdb
from array import *
# helper: left(s, n) returns the first n characters of s
def left(s, amount = 1, substring = ""):
if (substring == ""):
return s[:amount]
else:
if (len(substring) > amount):
substring = substring[:amount]
return substring + s[:-amount]
# Open the workbook and define the worksheet
filename = raw_input('Enter Table Excel Filename : ')
book = xlrd.open_workbook(filename)
sheet = book.sheet_by_name("Sheet1")
tbl_filename = left(filename,(len(filename)-4))
book_tbl = xlrd.open_workbook(tbl_filename+"_tbl.xls")
sheet_tbl = book_tbl.sheet_by_name("Sheet1")
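# The companion "<name>_tbl.xls" workbook is expected to list the target
# table's column names in its first column (read below via cell(row, 0)).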
# Establish a MySQL connection
database = MySQLdb.connect (host="localhost", user = "root", passwd = "", db = "hr")
# Get the cursor, which is used to traverse the database, line by line
cursor = database.cursor()
# Create the INSERT INTO sql query
queryBOL = """INSERT INTO `"""+tbl_filename+"""`("""
column_list = ""
value_list = ""
print "sheet_tbl.nrows = "+str(sheet_tbl.nrows)
for cname in range(1, sheet_tbl.nrows):
if cname==sheet_tbl.nrows-1 :
column_list = column_list + "`"+sheet_tbl.cell(cname,0).value+"`"
value_list = value_list +"%s"
else :
column_list = column_list + "`"+sheet_tbl.cell(cname,0).value+"`, "
value_list = value_list +"%s,"
#print column_list
#print value_list
query2 = column_list+""") VALUES ("""+value_list
queryEOL = """);"""
query = queryBOL+query2+queryEOL
print query
select_qry = """select * from """+tbl_filename+""" ;"""
# Create a For loop to iterate through each row in the XLS file, starting at row 2 to skip the headers
inputrow = []
for r in range(1, sheet.nrows):
# if r > 50 :
# break
#print str(r)+" Out of "+str(sheet.nrows)
for c in range(0, sheet.ncols):
inputrow.append(str(sheet.cell(r,c).value))
print inputrow
# Execute sql Query
cursor.execute(query, inputrow)
inputrow = []
#Run select_qry to check the data uploaded
#cursor.execute(select_qry)
#result = cursor.fetchall()
#print result
# Close the cursor
cursor.close()
# Commit the transaction
database.commit()
# Close the database connection
database.close()
# Print results
print ""
print "All Done!"
print ""
columns = str(sheet.ncols)
rows = str(sheet.nrows)
print "I just imported " + columns + " columns and " + rows + " rows to MySQL!"
|
[
"mohansha.don@gmail.com"
] |
mohansha.don@gmail.com
|
e88456d798261f16179505051514fc0b8381e1ed
|
df04d39a56d35b63e51c14bca4dab30bcab7ad8f
|
/models.py
|
12113781ab4d6574ab4ab76405db969676a41fb0
|
[] |
no_license
|
javaDer/Crm_Android
|
810898273fe090dd36d34481334fe1e6e4e3cb09
|
7b61d1da7347f9e3f4f9e4ca196a30869dc0e339
|
refs/heads/master
| 2021-05-09T18:00:25.568825
| 2018-01-29T09:36:49
| 2018-01-29T09:36:49
| 119,151,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo
def get_coll():
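    # Connect to a local MongoDB instance and return the user collection of the "test" db.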
client = pymongo.MongoClient("127.0.0.1", 27017)
db = client.test
user = db.user_collection
return user
class User(object):
def __init__(self, name, email):
self.name = name
self.email = email
def save(self):
user = {"name": self.name, "email": self.email}
coll = get_coll()
id = coll.insert(user)
print id
@staticmethod
def query_user():
users = get_coll().find()
return users
|
[
"fa20091001@163.com"
] |
fa20091001@163.com
|
2097ef464f73ecb7859b6ffa930ed474e10f8c30
|
df11b3a4129d660a1eb588334a4f38a73620b9e0
|
/api/urls.py
|
7d95a920eed945cf47ae06ab44274238b3c96ab3
|
[] |
no_license
|
lebaoworks/Django-Example
|
65a581222027b6c9c873db9dcc7813dcc2f0783a
|
58540b310a00f0981d685819d7ff5b134ed4e985
|
refs/heads/main
| 2023-01-10T05:36:45.078991
| 2020-10-24T21:54:08
| 2020-10-24T21:54:08
| 306,936,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from django.urls import include, path
from api import views
urlpatterns = [
path('api', views.ListAPIs.as_view()),
]
|
[
"noreply@github.com"
] |
lebaoworks.noreply@github.com
|
4e8cf4ed512607410548a93332fffac910539696
|
b1917395e5d0ae5e3a9d379b1eefaf976d4e6b5d
|
/utils/plot_error.py
|
284427ce6a992cb0a9317675445bce8fa674234e
|
[] |
no_license
|
gupta-nikita/Actions
|
056ec046c2cfa03e6733408f0aad8a1cfea712c5
|
3534f3fc442cff6d8f06525262c1eee674e6aaa3
|
refs/heads/master
| 2021-01-18T22:10:20.182809
| 2017-03-29T15:09:44
| 2017-03-29T15:09:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
py
|
rnn_type = 'rnn' # rnn | lstm | gru
filepath = '/media/HDD1/Models/Actions/abhi/Models/rnn/'
import matplotlib.pyplot as plt
def read_file(file_to_read, image_name, fig_xlabel, fig_title):
file = open(file_to_read, 'r')
file.readline()
error_value = list()
for line in file:
        error_value.append(int(line))
    file.close()
    plt.plot(error_value)  # the original never plotted the data, so the saved figure was empty
    plt.xlabel(fig_xlabel)
    plt.ylabel('Error')
    plt.title(fig_title)
    plt.grid(True)
    plt.savefig(image_name)
    plt.clf()  # clear the figure so the next call starts from a blank canvas
filename = ['error', 'error_bw']
fig_xlabel_list = ['epochs', 'frames']
fig_title_list = ['Error every epoch using ' + rnn_type.upper(), 'Error every frame using ' + rnn_type.upper()]
for i in range(2):
file_to_read = filepath + filename[i] + '.txt'
image_name = filepath + filename[i] + '.png'
read_file(file_to_read, image_name, fig_xlabel_list[i], fig_title_list[i])
|
[
"abhishek.chaurasia29@hotmail.com"
] |
abhishek.chaurasia29@hotmail.com
|
fe537df3daf1b7d3a23e18aa6a6536c6f46e31db
|
9e984c2a5455efc193983a6ea18ff7b6e13ba500
|
/Image procesing-calculate dimentional/old Sourse files/contour-extreme-points/ww.py
|
0caaf6689929300da58c7c163017b37b9dc2074c
|
[] |
no_license
|
Isharathilina/Human_Dimensional_Calculator
|
6f028f2f0934ed6ca4f33e026ab6a7d218685c36
|
722129c28773415d8f96825654624c7615ce1770
|
refs/heads/master
| 2023-01-25T03:49:48.820622
| 2020-11-27T17:52:11
| 2020-11-27T17:52:11
| 155,625,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
# USAGE
# python extreme_points.py
# import the necessary packages
import imutils
import cv2
# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread("sdf.jpg")
#image = cv2.IMREAD_GRAYSCALE("qq.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
# threshold the image, then perform a series of erosions +
# dilations to remove any small regions of noise
thresh = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=2)
# find contours in thresholded image, then grab the largest
# one
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
c = max(cnts, key=cv2.contourArea)
# determine the most extreme points along the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
# draw the outline of the object, then draw each of the
# extreme points, where the left-most is red, right-most
# is green, top-most is blue, and bottom-most is teal
cv2.drawContours(image, [c], -1, (0, 255, 255), 2)
#cv2.circle(image, extLeft, 6, (0, 0, 255), -1)
#cv2.circle(image, extRight, 6, (0, 255, 0), -1)
cv2.circle(image, extTop, 6, (255, 0, 0), -1)
cv2.circle(image, extBot, 6, (255, 255, 0), -1)
# show the output image
cv2.imshow("Image", image)
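# Rough pixel-height estimate: distance between the bottom-most and top-most
# extreme points; h2 = h1/6 splits it into six equal units (apparently a body-proportion heuristic).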
point1 = min(extTop)
point2 = max(extBot)
print(point1)
print("and")
print(point2)
h1 = point2 - point1
h2 = h1/6
print(c)
cv2.waitKey(0)
|
[
"isharawap@.com"
] |
isharawap@.com
|
fa81e752ab4f58210563f97e6a552c0b346a39bc
|
dc17b7519e595c57eb1efb4673e1e4171724ba15
|
/models/state.py
|
5051d86fc57fd181d16c903d4dfca9cf7869a831
|
[] |
no_license
|
FeliPrado31/AirBnB_clone
|
87cff5c3542903dc0400ae9d81fb5050c2e2d705
|
a3543de35bfd5df0ef5800539867f576e6ab76f5
|
refs/heads/master
| 2021-02-07T08:13:41.874436
| 2020-03-02T04:13:20
| 2020-03-02T04:13:20
| 244,001,793
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
#!/usr/bin/python3
""" class user """
from models.base_model import BaseModel
class State(BaseModel):
""" State class """
name = ""
|
[
"srgatoiscool@gmail.com"
] |
srgatoiscool@gmail.com
|
769e8e8412b7cff1cc93539541ca1f040e150fa4
|
470b64850fcd9f14ebe7920f773b24d0499e8ec4
|
/trailer/index.py
|
04141c1d166e8559f6db3b2a637f2a5ca6dd2ea1
|
[] |
no_license
|
rishiosaur/pragmathic
|
c8ebcae1cf9a8a3efb6cd64123c523c752b6d3c8
|
63538a2e7cc1804186b0d51bbede0f8a1ec4d691
|
refs/heads/master
| 2020-08-18T17:20:56.572440
| 2020-01-11T22:13:00
| 2020-01-11T22:13:00
| 215,814,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
from manimlib.imports import *
class Intro(Scene):
def construct(self):
boring = TextMobject("School", "math is boring.").set_color_by_tex_to_color_map({"School": RED})
math = TextMobject("Math", "math is hella fun.").set_color_by_tex_to_color_map({"Math": BLUE})
welcome = TextMobject("Welcome to ", "Pragmathic.").set_color_by_tex_to_color_map({"Pragmathic": BLUE})
# pragmathic = TextMobject("Pragmathic.").next_to(welcome, RIGHT).set_color_by_gradient(BLUE, GREEN)
eq1 = TexMobject(r"y(x_{0})=y_{0},y'(x_{0})=y'_{0},y''(x_{0})=y''_{0},\cdots ").next_to(welcome, UP, buff=2)
eq2 = TexMobject(r"f_{n}(x){\frac {\mathrm {d} ^{n}y}{\mathrm {d} x^{n}}}+\cdots +f_{1}(x){\frac {\mathrm {d} y}{\mathrm {d} x}}+f_{0}(x)y=g(x)").shift(DOWN*1.5)
eq3 = TexMobject(r"\operatorname {li}(x)=\int _{0}^{x}{\frac {dt}{\log(t)}}.").shift(RIGHT*2)
self.play(Write(boring))
self.wait()
self.play(Transform(boring, math))
self.wait()
self.play(FadeOut(boring))
self.play(Write(welcome))
# self.play(Write(eq1),Write(eq2),Write(eq3))
|
[
"itsrishikothari@gmail.com"
] |
itsrishikothari@gmail.com
|
4a0330f3d35565a1fd716d6052f8a5199813fd3f
|
6915d2d83086cf1200340a59248cf4f2a556248d
|
/appfacturacion/app/migrations/0001_initial.py
|
13f3fb55508f3eaa239838c4ad7282df13d38708
|
[] |
no_license
|
jcuadradoh2/appfacturacion
|
5278de851ffbae191c848e16c68c2cbb8784749c
|
4151b8ed4f0168bdd2453410efd76129a75d5366
|
refs/heads/master
| 2022-12-04T05:33:47.437006
| 2020-08-09T05:01:26
| 2020-08-09T05:01:26
| 286,120,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,403
|
py
|
# Generated by Django 2.2.14 on 2020-08-08 06:47
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ruc', models.CharField(max_length=13)),
('nombre', models.CharField(max_length=300)),
('direccion', models.TextField(blank=True, null=True)),
('creacion', models.DateTimeField(default=datetime.datetime(2020, 8, 8, 1, 47, 18, 575020))),
],
options={
'verbose_name': 'Cliente',
'verbose_name_plural': 'Clientes',
'ordering': ['-creacion'],
},
),
migrations.CreateModel(
name='Producto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.CharField(max_length=100)),
('precio', models.FloatField(default=0)),
('stock', models.FloatField(default=0)),
('iva', models.BooleanField(default=True)),
('creacion', models.DateTimeField(default=datetime.datetime(2020, 8, 8, 1, 47, 18, 572018))),
],
options={
'verbose_name': 'Producto',
'verbose_name_plural': 'Productos',
'ordering': ['-creacion'],
},
),
migrations.CreateModel(
name='Factura',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fecha', models.DateField()),
('total', models.FloatField(default=0)),
('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Cliente')),
],
options={
'verbose_name': 'Factura',
'verbose_name_plural': 'Factura',
'ordering': ['-fecha'],
},
),
migrations.CreateModel(
name='DetalleFactura',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cantidad', models.FloatField(default=0)),
('precio', models.FloatField(default=0)),
('subtotal', models.FloatField(default=0)),
('creacion', models.DateTimeField(default=datetime.datetime(2020, 8, 8, 1, 47, 18, 584015))),
('factura', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Factura')),
('producto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Producto')),
],
options={
'verbose_name': 'DetalleFactura',
'verbose_name_plural': 'DetalleFactura',
'ordering': ['-creacion'],
},
),
migrations.AddField(
model_name='cliente',
name='producto',
field=models.ManyToManyField(to='app.Producto'),
),
]
|
[
"jcuadradoh2@unemi.edu.ec"
] |
jcuadradoh2@unemi.edu.ec
|
4de0862f8cd2eb71a885c5f1753c6f087dbe5fcf
|
d8bf91fc51b4fd05246097e5c7e5aa07771b1068
|
/photo_gallery/settings.py
|
064b96358e315a7162f2260c2c31ffcc17e32e11
|
[] |
no_license
|
falcon1996/Gallery
|
9d51bfba32fe06600a9b49991c99c106003a945f
|
e1c37d1e7cd02d1d878d5ea0107292248e4fdce9
|
refs/heads/master
| 2021-06-17T04:16:14.233354
| 2017-04-23T00:29:57
| 2017-04-23T00:29:57
| 82,963,158
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
"""
Django settings for photo_gallery project.
Generated by 'django-admin startproject' using Django 1.9.12.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i)+&*2&x3ra#e6biaj^3t%siho7b!swv@l*@em%cx)hb3+ge1s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photos',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'photo_gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'photo_gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'  # URL prefix for serving user-uploaded media
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "mediafiles")  # filesystem path where uploads are stored
|
[
"dhruvparashar6@gmail.com"
] |
dhruvparashar6@gmail.com
|
1311ce208ad1940ea37daa9f4b2901497e5572a2
|
758c5566f4598dbfd7d12d1e8e678458912f5ba0
|
/draw_image_copy.py
|
07497c27e6386a8005a65ceafcda5ec60228d335
|
[] |
no_license
|
isamrx72/PyGames
|
989734674b868a5ae781bb6fa74221095c2ddf63
|
eb9126072078e01329549b5e2be335457145605a
|
refs/heads/master
| 2021-09-01T03:24:26.111401
| 2017-12-24T14:05:18
| 2017-12-24T14:05:18
| 115,063,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
# """ draw_image4.py """
import sys
import pygame
from pygame.locals import QUIT
pygame.init()
SURFACE = pygame.display.set_mode((400, 300))
FPSCLOCK = pygame.time.Clock()
def main():
""" main routine """
logo = pygame.image.load("pythonlogo.jpg")
theta = 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
theta += 1
SURFACE.fill((225, 225, 225))
        # Rotate the logo and draw it centered at (200, 150)
new_logo = pygame.transform.rotate(logo, theta)
rect = new_logo.get_rect()
rect.center = (200, 150)
SURFACE.blit(new_logo, rect)
pygame.display.update()
FPSCLOCK.tick(30)
if __name__ == '__main__':
main()
|
[
"isamrx73@gmail.com"
] |
isamrx73@gmail.com
|
830d31a9a9c3b434f9a5e3d0c62520978bfd12b7
|
b911744e6b7e464e7f7bc4151b5cc170e33701b2
|
/dashborad/zabbix/zb.py
|
d55530abbc715d2ffdd205b9e86aebc548b1b974
|
[] |
no_license
|
Wstc2013/reboot_lianxi
|
199bc9cf2ce6ef9a017ca3e1589b2596fcd283e6
|
5de1a5c818e7e764a8cd5ed9950ce646c7daf458
|
refs/heads/master
| 2021-01-21T12:21:16.824326
| 2017-06-26T02:58:25
| 2017-06-26T02:58:25
| 91,792,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
#!/usr/bin/env python
#-*- coding:utf8 -*-
from zabbix_client import ZabbixServerProxy
from django.conf import settings
class Zabbix(object):
def __init__(self):
self.s = ZabbixServerProxy(settings.ZABBIX_URL)
self.s.user.login(user=settings.ZABBIX_USER, password=settings.ZABBIX_PASS)
def get_zabbix_group_info(self):
ret = self.s.hostgroup.get(output=['groupid', 'name'])
return ret
def create_hosts(self, params):
return self.s.host.create(**params)
def get_host(self,hostid):
ret = self.s.host.getobjects(hostid=hostid)
return ret
def get_template(self, ids=None):
kwargs = {"output": ['templateid', 'name']}
if ids:
kwargs['hostids'] = ids
ret = self.s.template.get(**kwargs)
return ret
|
[
"xiaoyong.feng@cnsha-61418-mac.local"
] |
xiaoyong.feng@cnsha-61418-mac.local
|
0cb7c21367baf5f8c424541c5fd87bf0e6c9605f
|
8d67c77c4572a20d4a66ad0b55befe559a1d4ee9
|
/dface/prepare_data/gen_Rnet_train_data.py
|
4f8888f201b292b638047df0f9c208571cd5cbec
|
[
"Apache-2.0"
] |
permissive
|
ratmcu/DFace
|
459c2243b5b56af137682ccc8505fa165ff3c2a6
|
e99604a85f9c7d732d9f1749350e8b3f01aae9a2
|
refs/heads/master
| 2020-06-03T15:13:13.223357
| 2019-06-12T23:04:35
| 2019-06-12T23:04:35
| 191,622,342
| 0
| 0
|
Apache-2.0
| 2019-06-12T18:12:14
| 2019-06-12T18:12:14
| null |
UTF-8
|
Python
| false
| false
| 7,659
|
py
|
import argparse
import cv2
import numpy as np
from dface.core.detect import MtcnnDetector,create_mtcnn_net
from dface.core.imagedb import ImageDB
from dface.core.image_reader import TestImageLoader
import time
import os
import cPickle
from dface.core.utils import convert_to_square,IoU
import dface.config as config
import dface.core.vision as vision
def gen_rnet_data(data_dir, anno_file, pnet_model_file, prefix_path='', use_cuda=True, vis=False):
pnet, _, _ = create_mtcnn_net(p_model_path=pnet_model_file, use_cuda=use_cuda)
mtcnn_detector = MtcnnDetector(pnet=pnet,min_face_size=12)
imagedb = ImageDB(anno_file,mode="test",prefix_path=prefix_path)
imdb = imagedb.load_imdb()
image_reader = TestImageLoader(imdb,1,False)
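    # Run the trained PNet over every image and collect the calibrated candidate boxes.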
all_boxes = list()
batch_idx = 0
for databatch in image_reader:
if batch_idx % 100 == 0:
print ("%d images done" % batch_idx)
im = databatch
t = time.time()
boxes, boxes_align = mtcnn_detector.detect_pnet(im=im)
if boxes_align is None:
all_boxes.append(np.array([]))
batch_idx += 1
continue
if vis:
rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
vision.vis_two(rgb_im, boxes, boxes_align)
t1 = time.time() - t
t = time.time()
all_boxes.append(boxes_align)
batch_idx += 1
# save_path = model_store_path()
save_path = config.MODEL_STORE_DIR
if not os.path.exists(save_path):
os.mkdir(save_path)
save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time()))
with open(save_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
gen_rnet_sample_data(data_dir,anno_file,save_file,prefix_path)
def gen_rnet_sample_data(data_dir,anno_file,det_boxs_file,prefix_path):
neg_save_dir = os.path.join(data_dir, "24/negative")
pos_save_dir = os.path.join(data_dir, "24/positive")
part_save_dir = os.path.join(data_dir, "24/part")
for dir_path in [neg_save_dir, pos_save_dir, part_save_dir]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# load ground truth from annotation file
# format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image
with open(anno_file, 'r') as f:
annotations = f.readlines()
image_size = 24
net = "rnet"
im_idx_list = list()
gt_boxes_list = list()
num_of_images = len(annotations)
print ("processing %d images in total" % num_of_images)
for annotation in annotations:
annotation = annotation.strip().split(' ')
im_idx = os.path.join(prefix_path,annotation[0])
boxes = map(float, annotation[1:])
boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4)
im_idx_list.append(im_idx)
gt_boxes_list.append(boxes)
save_path = config.ANNO_STORE_DIR
if not os.path.exists(save_path):
os.makedirs(save_path)
f1 = open(os.path.join(save_path, 'pos_%d.txt' % image_size), 'w')
f2 = open(os.path.join(save_path, 'neg_%d.txt' % image_size), 'w')
f3 = open(os.path.join(save_path, 'part_%d.txt' % image_size), 'w')
det_handle = open(det_boxs_file, 'r')
det_boxes = cPickle.load(det_handle)
print(len(det_boxes), num_of_images)
assert len(det_boxes) == num_of_images, "incorrect detections or ground truths"
# index of neg, pos and part face, used as their image names
n_idx = 0
p_idx = 0
d_idx = 0
image_done = 0
for im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list):
if image_done % 100 == 0:
print("%d images done" % image_done)
image_done += 1
if dets.shape[0] == 0:
continue
img = cv2.imread(im_idx)
dets = convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
for box in dets:
x_left, y_top, x_right, y_bottom = box[0:4].astype(int)
width = x_right - x_left + 1
height = y_bottom - y_top + 1
# ignore box that is too small or beyond image border
if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1:
continue
# compute intersection over union(IoU) between current box and all gt boxes
Iou = IoU(box, gts)
cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]
resized_im = cv2.resize(cropped_im, (image_size, image_size),
interpolation=cv2.INTER_LINEAR)
# save negative images and write label
if np.max(Iou) < 0.3:
# Iou with all gts must below 0.3
save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
f2.write(save_file + ' 0\n')
cv2.imwrite(save_file, resized_im)
n_idx += 1
else:
# find gt_box with the highest iou
idx = np.argmax(Iou)
assigned_gt = gts[idx]
x1, y1, x2, y2 = assigned_gt
# compute bbox reg label
offset_x1 = (x1 - x_left) / float(width)
offset_y1 = (y1 - y_top) / float(height)
offset_x2 = (x2 - x_right) / float(width)
offset_y2 = (y2 - y_bottom) / float(height)
# save positive and part-face images and write labels
if np.max(Iou) >= 0.65:
save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx)
f1.write(save_file + ' 1 %.2f %.2f %.2f %.2f\n' % (
offset_x1, offset_y1, offset_x2, offset_y2))
cv2.imwrite(save_file, resized_im)
p_idx += 1
elif np.max(Iou) >= 0.4:
save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx)
f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\n' % (
offset_x1, offset_y1, offset_x2, offset_y2))
cv2.imwrite(save_file, resized_im)
d_idx += 1
f1.close()
f2.close()
f3.close()
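# A worked example of the offset labels above (numbers are hypothetical, not from the
# dataset): a detection box (x_left, y_top, x_right, y_bottom) = (10, 10, 33, 33) has
# width = height = 33 - 10 + 1 = 24. Matched against a ground truth box (12, 8, 36, 30),
# the regression targets become:
#   offset_x1 = (12 - 10) / 24 =  0.083
#   offset_y1 = (8 - 10)  / 24 = -0.083
#   offset_x2 = (36 - 33) / 24 =  0.125
#   offset_y2 = (30 - 33) / 24 = -0.125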
def model_store_path():
return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+"/model_store"
def parse_args():
parser = argparse.ArgumentParser(description='Test mtcnn',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dface_traindata_store', dest='traindata_store', help='dface train data temporary folder,include 12,24,48/postive,negative,part,landmark',
default='../data/wider/', type=str)
parser.add_argument('--anno_file', dest='annotation_file', help='wider face original annotation file',
default=os.path.join(config.ANNO_STORE_DIR,"wider_origin_anno.txt"), type=str)
parser.add_argument('--pmodel_file', dest='pnet_model_file', help='PNet model file path',
default='/idata/workspace/dface/model_store/pnet_epoch.pt', type=str)
parser.add_argument('--gpu', dest='use_cuda', help='with gpu',
default=config.USE_CUDA, type=bool)
parser.add_argument('--prefix_path', dest='prefix_path', help='annotation file image prefix root path',
default='', type=str)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
gen_rnet_data(args.traindata_store, args.annotation_file, args.pnet_model_file, args.prefix_path, args.use_cuda)
|
[
"314127900@qq.com"
] |
314127900@qq.com
|
0ec647632384f519175799e633398d1817864915
|
f5487faeed295a97a71b28447e2c4d6c4e115651
|
/Sudoku solver/backtrack_solve.py
|
f73a44e8538455abcb3e868ede8cdb66a2b3903b
|
[] |
no_license
|
Razeem-r/Personal-projects
|
ffbca37bd4474df1421932388477a69730ac5b5d
|
08fb6ab61035b72234e73765338e77227b788317
|
refs/heads/master
| 2023-01-25T00:48:22.729453
| 2020-11-28T09:23:28
| 2020-11-28T09:23:28
| 291,892,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,521
|
py
|
from array import *
import numpy as np
# Sample sudoku board (left active so the solver below has input to run on)
board = [[0, 0, 6, 0, 0, 5, 0, 0, 0],
         [0, 0, 8, 0, 9, 0, 0, 0, 0],
         [0, 0, 2, 0, 0, 0, 8, 1, 7],
         [4, 0, 0, 3, 0, 8, 0, 0, 0],
         [0, 3, 0, 0, 5, 0, 0, 4, 0],
         [0, 0, 0, 2, 0, 6, 0, 0, 9],
         [8, 1, 5, 0, 0, 0, 4, 0, 0],
         [0, 0, 0, 0, 7, 0, 9, 0, 0],
         [0, 0, 0, 1, 0, 0, 6, 0, 0]]
# Convert all numbers to strings and all empty cells (zeroes) to '.'
for i in range(9):
for j in range(9):
board[i][j]= str(board[i][j])
if board[i][j]=='0':
board[i][j]='.'
def check(test,i,j,board,f):
    # row and column: any other cell already holding `test` invalidates it
    for x in range(9):
        if board[i][x] != '.' and x != j and int(board[i][x]) == test:
            f = 0
        if board[x][j] != '.' and x != i and int(board[x][j]) == test:
            f = 0
    # 3x3 block containing (i, j): the nine hard-coded block branches collapse
    # into computing the block's top-left corner
    bi, bj = 3 * (i // 3), 3 * (j // 3)
    for x in range(bi, bi + 3):
        for y in range(bj, bj + 3):
            if board[x][y] != '.' and (x, y) != (i, j) and int(board[x][y]) == test:
                f = 0
    return f
def checkcriteria(test,i,j,board,flag):
global c
if flag!=-1:
c=flag
while True:
f = 1
f = check(test,i,j,board,f)
if f==1 and test<10:
board[i][j]=int(test)
break
if f==0 and test<10:
test=test+1
if test>9:
break
# print("board show ",i,j,f,type(board[i][j]))
# print(c)
# print(np.asarray(board))
if i<9:
if board[i][j]==0 or board[i][j]==c:
board[i][j]=0;
c=0
if not (i<=0 and j<=0):
return -1
else:
c=0
return 1
return 0
def checkdot(test,i,j,board,nav):
# print("in checkdot")
if i<9:
if nav==1 :
i,j = front(i,j,board)
if i<9:
if type(board[i][j])==int:
board[i][j]=0
if board[i][j]=='.' or board[i][j]==0 :
board[i][j]=int('0')
test=1
nav = checkcriteria(test,i,j,board,-1)
if nav==-1 :
i,j=back(i,j,board)
g=board[i][j]
#print(' g ',g)
if board[i][j]!=9:
k=board[i][j]+1
else:
k=9
nav = checkcriteria(k,i,j,board,g)
if nav==1 or nav ==-1:
checkdot(test,i,j,board,nav)
def back(i,j,board):
if i<9:
while True:
j-=1
if j<0:
i-=1
j=8
if type(board[i][j])==int:
break
return i,j
def front(i,j,board):
if i<9:
while True:
j+=1
if j>=9:
i+=1
j=0
if i<9:
if type(board[i][j])!=str or board[i][j]=='.':
break
if i==9:
break
return i,j
c=0
n=1
f=1
i=0
j=-1
test=1
checkdot(test,i,j,board,n)
print(np.asarray(board))
|
[
"noreply@github.com"
] |
Razeem-r.noreply@github.com
|
25c685a5ff1dcef0d1c62a2ea534caa466549562
|
832e0b42c321db1f70e7d826111f0b796cac6247
|
/ljspeech.py
|
467eb8f7ad1441c71738600224dd4340340d1fe0
|
[
"MIT"
] |
permissive
|
mitsu-h/deepvoice3
|
2615b61b92216ac11063792e29ef2f72d3a71fa9
|
fb652a9274dd1b94d374a424726e34287cd94186
|
refs/heads/master
| 2022-12-10T00:44:42.899129
| 2020-08-24T11:00:08
| 2020-08-24T11:00:08
| 230,847,732
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import audio
from hparams import hparams
import time
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
'''Preprocesses the LJ Speech dataset from a given input path into a given output directory.
Args:
in_dir: The directory where you have downloaded the LJ Speech dataset
out_dir: The directory to write the output into
num_workers: Optional number of worker processes to parallelize across
tqdm: You can optionally pass tqdm to get a nice progress bar
Returns:
A list of tuples describing the training examples. This should be written to train.txt
'''
    # We use ProcessPoolExecutor to parallelize across processes. This is just an optimization and you
# can omit it and just call _process_utterance on each input if you want.
executor = ProcessPoolExecutor(max_workers=num_workers)
futures = []
index = 1
with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
for line in f:
parts = line.strip().split('|')
wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
text = parts[2]
if len(text) < hparams.min_text:
continue
futures.append(executor.submit(
partial(_process_utterance, out_dir, index, wav_path, text)))
index += 1
return [future.result() for future in tqdm(futures)]
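# A minimal usage sketch of build_from_path (the paths and worker count are hypothetical,
# not part of this repo):
#   from tqdm import tqdm
#   metadata = build_from_path('./LJSpeech-1.1', './training', num_workers=4, tqdm=tqdm)
#   # each entry of `metadata` is the tuple returned by _process_utterance below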
def _process_utterance(out_dir, index, wav_path, text):
'''Preprocesses a single utterance audio/text pair.
This writes the mel and linear scale spectrograms to disk and returns a tuple to write
to the train.txt file.
Args:
out_dir: The directory to write the spectrograms into
index: The numeric index to use in the spectrogram filenames.
wav_path: Path to the audio file containing the speech input
text: The text spoken in the input audio file
Returns:
A (spectrogram_filename, mel_filename, n_frames, text) tuple to write to train.txt
'''
# Load the audio to a numpy array:
wav = audio.load_wav(wav_path)
if hparams.rescaling:
wav = wav / np.abs(wav).max() * hparams.rescaling_max
# Compute the linear-scale spectrogram from the wav:
spectrogram = audio.spectrogram(wav).astype(np.float32)
n_frames = spectrogram.shape[1]
# Compute a mel-scale spectrogram from the wav:
mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
#world parameters
f0,sp,ap = audio.world(wav,hparams.sample_rate)
f0 = (f0 / hparams.f0_norm).astype(np.float32) #normalize
sp = audio._normalize(sp).astype(np.float32)
    ap = ap.astype(np.float32)  # ap only takes values in [0, 1], so no normalization is needed
world_frames = f0.shape[0]
# Write the spectrograms to disk:
spectrogram_filename = 'ljspeech-spec-%05d.npy' % index
mel_filename = 'ljspeech-mel-%05d.npy' % index
f0_filename = 'ljspeech-f0-%05d.npy' % index
sp_filename = 'ljspeech-sp-%05d.npy' % index
ap_filename = 'ljspeech-ap-%05d.npy' % index
np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
np.save(os.path.join(out_dir, f0_filename), f0, allow_pickle=False)
np.save(os.path.join(out_dir, sp_filename), sp, allow_pickle=False)
np.save(os.path.join(out_dir, ap_filename), ap, allow_pickle=False)
# Return a tuple describing this training example:
return (spectrogram_filename, mel_filename, n_frames, f0_filename, sp_filename, ap_filename, world_frames, text)
'''
audio_filename = 'ljspeech-spec-%05d.npy' % index
np.save(os.path.join(out_dir, audio_filename), wav, allow_pickle=False)
return (audio_filename, wav.shape[0], text)
'''
|
[
"u.world96@gmail.com"
] |
u.world96@gmail.com
|
ffc98858f61ec1894a60bd58273d0805e9f8f653
|
4a86874d3e740b4cd22a8bc6d439f7aba9bd52c0
|
/434.字符串中的单词数.py
|
4c7c3a0afdac2b8ef53d7ce0a09a274c3f756c5c
|
[] |
no_license
|
Jassy930/leetcode_main
|
4563f9634692d0b09a638114f59f6836edaa73f6
|
a2e4e05f2b220702dc874718e34150d1066f6ac1
|
refs/heads/master
| 2020-04-28T15:35:49.448682
| 2019-09-16T03:49:23
| 2019-09-16T03:49:23
| 175,380,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
#
# @lc app=leetcode.cn id=434 lang=python3
#
# [434] 字符串中的单词数
#
class Solution:
def countSegments(self, s: str) -> int:
ss = s.split(' ')
return len(ss)-ss.count('')
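# e.g. countSegments("Hello, my name is John") -> 5, and countSegments("  a  b ") -> 2:
# split(' ') yields an empty string for every extra space, which ss.count('') subtracts out.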
|
[
"wafe93039@163.com"
] |
wafe93039@163.com
|
5b9d10a94bd28fb08b016b9daded4640791af4b1
|
489384e8ebf66db4169980f70408742c0cb083f4
|
/apps/organization/migrations/0010_courseorg_tag.py
|
ad5fc1ceb2074092a6cd616d97daee521ebe929c
|
[] |
no_license
|
carryaimp/OnlineSchool
|
2188b24ddf23a6ef71b24bf5ef7df3e2c2617691
|
992b3bc53e66c649acec5c44b71dc621885709e7
|
refs/heads/master
| 2023-07-07T12:19:54.786264
| 2021-04-02T10:16:29
| 2021-04-02T10:16:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 2.0.2 on 2018-02-20 22:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0009_teacher_age'),
]
operations = [
migrations.AddField(
model_name='courseorg',
name='tag',
field=models.CharField(default='全国知名', max_length=4, verbose_name='机构标签'),
),
]
|
[
"jia2jiayuan@163.com"
] |
jia2jiayuan@163.com
|
4231a87f3283ae55b93184de8966fa632a908023
|
adf7952e7b92e29e3d66a98fc88cb0b1ab2af215
|
/dsnoti_.py
|
35d47416537e3b3d3310bc4298bd57d5654ed9ef
|
[] |
no_license
|
KimHyoRim/hellparty
|
a7c3c57f4d24f516d9cf98ac55f327d85db4e4d4
|
3c5416346734cd75bf2f727a1c9a6a6e64f6a659
|
refs/heads/master
| 2020-05-23T16:16:14.218500
| 2019-06-19T17:27:32
| 2019-06-19T17:27:32
| 186,844,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,689
|
py
|
#!/usr/bin/python
# coding=utf-8
import sys
import time
import re
import sqlite3
import traceback
import urllib.request
from urllib.request import urlopen
from urllib.parse import quote
from xml.etree import ElementTree
from xml.dom.minidom import parse, parseString
from datetime import date, datetime, timedelta
from pprint import pprint
import telepot
from bs4 import BeautifulSoup
key = "475bf83ac2911d034998c841c40225c7"
TOKEN = '856816592:AAHDHTv27olsdjqmTv96DdXP2YBH49iYAxc'
MAX_MSG_LENGTH = 300
baseurl = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList" \
".xml?key="+key
bot = telepot.Bot(TOKEN)
def getData( date_param):
res_list = []
url = baseurl+'&targetDt='+date_param
print(url)
req = urllib.request.Request(url)
data = urllib.request.urlopen(req).read()
tree = ElementTree.fromstring(data)
    itemElements = tree.iter("dailyBoxOffice")  # getiterator() was deprecated and removed in Python 3.9
MmovieNm = []
Mrank = []
MshowRange = []
MopenDt = []
MaudiAcc = []
MrankInten = []
MsalesAcc = []
Mimage = []
for item in itemElements:
movieNm = item.find("movieNm")
rank = item.find("rank")
openDt = item.find("openDt")
        audiAcc = item.find("audiAcc")  # cumulative audience count
        salesAcc = item.find("salesAcc")  # cumulative box-office revenue
MmovieNm.append(movieNm.text)
Mrank.append(rank.text)
MopenDt.append(openDt.text)
MaudiAcc.append(audiAcc.text)
MsalesAcc.append(salesAcc.text)
    for i in range(10):
        try:
            row = Mrank[i] + "등\n\n" \
                  + "영화 제목 : " + MmovieNm[i] + "\n\n" \
                  + "영화 개봉일 : " + MopenDt[i] + "\n\n" \
                  + "누적 매출액 : " + MsalesAcc[i] + "원\n\n" \
                  + "누적 관객수 : " + MaudiAcc[i] + "명\n\n"
        except IndexError:
            break
        # collect every row; previously only the last row built survived the loop
        print(row)
        res_list.append(row)
return res_list
def sendMessage(user, msg):
try:
bot.sendMessage(user, msg)
except:
traceback.print_exc(file=sys.stdout)
def run(date_param, param='11710'):
conn = sqlite3.connect('logs.db')
cursor = conn.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS logs( user TEXT, log TEXT, PRIMARY KEY(user, log) )')
conn.commit()
user_cursor = sqlite3.connect('users.db').cursor()
user_cursor.execute('CREATE TABLE IF NOT EXISTS users( user TEXT, location TEXT, PRIMARY KEY(user, location) )')
user_cursor.execute('SELECT * from users')
for data in user_cursor.fetchall():
user = data[0]
print(user, date_param)
res_list = getData(date_param)
msg = ''
for r in res_list:
try:
                cursor.execute('INSERT INTO logs (user, log) VALUES (?, ?)', (user, r))  # parameterized to avoid quoting bugs / SQL injection
except sqlite3.IntegrityError:
pass
else:
print( str(datetime.now()).split('.')[0], r )
if len(r+msg)+1>MAX_MSG_LENGTH:
sendMessage( user, msg )
msg = r+'\n'
else:
msg += r+'\n'
if msg:
sendMessage( user, msg )
conn.commit()
if __name__=='__main__':
today = date.today()
    target_date = today.strftime('%Y%m%d')  # the daily box-office API expects a full yyyymmdd date, not just a month
print( '[',today,']received token :', TOKEN )
pprint( bot.getMe() )
    run(target_date)
|
[
"noreply@github.com"
] |
KimHyoRim.noreply@github.com
|
ca76788dd821eb07d06500f9c0ec39b6a0165448
|
977f5aa46679d1da33a28177c6e22d7fdfc24c66
|
/TehFolio/apps/homepage/admin.py
|
88540eb6bd308d07dae991ae23f2f600f5425e5a
|
[] |
no_license
|
Tehtehteh/Tehportfolio
|
06c2d905b86da147ae2c4cd974d365330b5ce9d2
|
25e70a15c1e03e190e129a46bd61fd712f6a8ea1
|
refs/heads/master
| 2021-01-21T18:34:00.647887
| 2016-10-08T21:08:41
| 2016-10-08T21:08:41
| 68,407,372
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
from django.contrib import admin
from .models import BlogPost
# Register your models here.
admin.site.register(BlogPost)
|
[
"tehfvpr@gmail.com"
] |
tehfvpr@gmail.com
|
c39173555469868d74d4277658f382eee8e338d3
|
87e4d69d4a6bdc73a30c0c537e2b13bc27c62a91
|
/store_management_system/Accounts/admin.py
|
5eea4e48bf56d1705606d6665d65c141cc37d763
|
[] |
no_license
|
YashGoyal28/store_management_system
|
7409ec79bc17a4464edcc2c75265e191836f0b46
|
92238bb1f9144b12807ab82c175d22266825a1ac
|
refs/heads/main
| 2023-01-19T08:42:20.790691
| 2020-11-24T10:53:27
| 2020-11-24T10:53:27
| 307,624,622
| 1
| 0
| null | 2020-11-23T08:16:16
| 2020-10-27T07:51:49
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
from django.contrib import admin
from .models import ListOfProducts, Bill
admin.site.register(Bill)
admin.site.register(ListOfProducts)
|
[
"y.g.2001yashgoyal@gmail.com"
] |
y.g.2001yashgoyal@gmail.com
|
5e0dbf237d23e97db1acb499c7473f995063c95c
|
5492b2501f3aeebbf6e228f9bbf608bd682e6787
|
/converter.py
|
4997dab5aff68c23cfdb919fd13737d7a8b3c2d7
|
[] |
no_license
|
chainchomp440/chainchomp
|
b74bfea213f04113f5de9eaa154f76d76b112563
|
cd3c015e0b03f2b1008dc9db7282a1edd6546406
|
refs/heads/master
| 2020-08-29T04:46:21.384362
| 2019-10-28T01:21:09
| 2019-10-28T01:21:09
| 217,932,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
## This is a test code
rmb_str_value=input('please insert cny amount')
rmb_value = float(rmb_str_value)  # float() instead of eval(): never evaluate raw user input
usd_vs_rmb= 6.77
usd_value=rmb_value / usd_vs_rmb
print ('usd value =',usd_value)
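# e.g. entering 677 gives usd_value = 677 / 6.77 ~= 100.0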
|
[
"tonyshi@TonyShis-MacBook-Air.local"
] |
tonyshi@TonyShis-MacBook-Air.local
|
29a519f17e93e1f2edbd1437486e383d9684ced6
|
0d8486c1d55c40bebea7c5428930f18165d2d0e9
|
/tests/sat/Models/c775.180.SAT.dimacs.test.py
|
8987ff256e8dbfaada30f05c291ec2c34515e155
|
[
"Apache-2.0"
] |
permissive
|
bernardocuteri/wasp
|
6f81bf6aa8fb273c91bbf68ecce4ecb195a55953
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
refs/heads/master
| 2021-06-08T11:58:25.080818
| 2020-10-05T16:57:37
| 2020-10-05T16:57:37
| 124,245,808
| 0
| 0
|
Apache-2.0
| 2018-03-07T14:13:16
| 2018-03-07T14:13:16
| null |
UTF-8
|
Python
| false
| false
| 10,721
|
py
|
input = """
c num blocks = 1
c num vars = 180
c minblockids[0] = 1
c maxblockids[0] = 180
p cnf 180 775
71 -116 -138 0
23 147 13 0
114 -167 -50 0
-6 64 34 0
-129 175 -92 0
172 -58 19 0
-40 164 122 0
-146 -145 -18 0
39 82 -45 0
77 130 47 0
-27 125 -165 0
90 56 41 0
162 -132 63 0
-139 143 -30 0
108 -133 -143 0
-49 -156 145 0
-111 -89 139 0
-99 -61 9 0
102 -177 -16 0
109 -7 -179 0
-146 143 -40 0
-65 36 -122 0
30 67 102 0
12 -159 176 0
-47 141 -30 0
-123 -124 41 0
-175 -26 -107 0
-160 -141 112 0
-20 32 51 0
-15 -155 106 0
83 110 -87 0
-107 -112 7 0
-99 -30 -35 0
47 169 108 0
-33 -68 -53 0
-154 107 79 0
-26 174 -108 0
119 81 -1 0
-36 -101 47 0
-47 7 113 0
-102 -84 -106 0
-24 169 68 0
-141 145 103 0
139 -26 95 0
-4 35 -145 0
-65 -28 6 0
-81 150 164 0
-153 -110 -138 0
80 138 -39 0
97 172 7 0
168 -165 139 0
-39 138 20 0
-28 117 -50 0
-58 19 -33 0
-42 141 98 0
106 -76 60 0
-31 -144 -158 0
173 -180 -62 0
131 -39 150 0
-109 7 -14 0
-163 -29 7 0
-150 140 -136 0
-86 84 58 0
-55 -156 -179 0
-142 121 -19 0
-122 -133 -13 0
-99 143 122 0
-144 160 -134 0
-157 -97 166 0
89 80 -68 0
174 88 -32 0
-137 105 -13 0
23 -134 -102 0
8 115 -50 0
43 -75 -140 0
130 137 26 0
-82 -26 38 0
-42 64 174 0
44 43 154 0
59 45 -159 0
-152 31 -51 0
-57 134 -6 0
36 3 25 0
37 166 -88 0
13 25 110 0
-91 126 175 0
-149 138 163 0
154 2 30 0
10 -22 -155 0
-99 -21 165 0
-26 -69 23 0
33 -111 -20 0
156 71 -164 0
38 -17 -163 0
-22 77 14 0
60 -39 21 0
150 -73 40 0
166 -66 -160 0
-20 -41 107 0
-146 18 168 0
-13 71 39 0
-145 1 13 0
-132 155 -45 0
-122 -115 45 0
31 -96 -60 0
-20 -50 178 0
176 91 -15 0
112 139 67 0
60 94 -173 0
119 79 174 0
126 177 55 0
124 128 151 0
123 -5 67 0
-42 78 55 0
118 157 152 0
27 -28 -137 0
-9 -37 -172 0
-77 -42 9 0
-178 -30 70 0
-105 109 -45 0
118 8 -17 0
-87 35 -77 0
31 121 113 0
-87 71 -180 0
-91 26 82 0
-70 126 -125 0
109 -46 55 0
92 -174 153 0
171 86 -92 0
-2 -78 -162 0
-19 -71 -81 0
-66 77 -145 0
68 -58 157 0
52 -37 -88 0
64 -73 16 0
158 -98 55 0
-155 2 -106 0
-8 -72 119 0
-126 -68 140 0
64 90 -115 0
142 44 -160 0
54 -134 -175 0
132 12 37 0
7 26 -78 0
-127 75 -73 0
114 -151 -68 0
155 167 -128 0
143 -61 -131 0
19 -4 66 0
-75 -130 -49 0
-53 -38 -97 0
6 170 -59 0
169 116 99 0
153 -98 127 0
-151 -101 -178 0
110 9 -179 0
48 -16 55 0
-81 -152 -103 0
-10 118 -26 0
78 -172 79 0
85 -31 -77 0
30 -129 -102 0
46 165 -119 0
-30 -9 27 0
109 58 -167 0
107 67 94 0
63 -125 -27 0
-87 173 174 0
-80 -116 155 0
-140 -49 -58 0
138 61 51 0
44 -130 73 0
-157 -71 84 0
-22 -50 28 0
-48 -145 177 0
146 -158 8 0
-113 129 27 0
78 76 -27 0
74 123 76 0
-18 173 124 0
-13 -69 -80 0
101 81 -144 0
154 52 35 0
178 120 -37 0
-113 3 83 0
103 155 -81 0
-15 24 163 0
-178 162 -165 0
-98 -180 -9 0
101 156 44 0
-167 31 76 0
-122 37 -110 0
79 -58 -126 0
-168 175 -30 0
-137 78 -148 0
66 -57 23 0
-131 103 74 0
-22 122 105 0
84 -32 -4 0
15 22 -62 0
-42 40 109 0
-111 -12 -20 0
-3 -113 87 0
29 52 -112 0
141 91 -108 0
141 69 -62 0
63 72 -34 0
-144 -82 -173 0
71 160 45 0
-148 -29 24 0
-148 84 53 0
22 109 179 0
-152 76 -130 0
21 53 146 0
-142 6 -167 0
-66 90 89 0
167 118 -117 0
155 -24 39 0
-119 -69 -8 0
-95 92 118 0
-110 111 180 0
-55 25 30 0
29 119 109 0
23 86 -107 0
104 -65 38 0
-89 102 -68 0
-28 -42 75 0
36 44 -148 0
89 101 82 0
117 122 -93 0
28 111 -55 0
66 139 -81 0
35 36 83 0
52 65 -101 0
71 68 -64 0
177 -131 136 0
86 -97 180 0
20 176 50 0
-12 -53 115 0
79 -24 -162 0
-98 96 79 0
119 -128 -11 0
-17 52 -47 0
64 -99 17 0
-101 -152 77 0
-131 -113 119 0
54 149 152 0
138 -68 -149 0
144 -62 89 0
103 -1 119 0
-74 76 137 0
60 -115 110 0
-158 100 57 0
10 36 -109 0
-13 -55 99 0
-116 -119 -53 0
-109 -73 125 0
-74 -125 -22 0
103 -124 -44 0
-160 116 16 0
-24 -143 138 0
124 -31 149 0
-145 -10 171 0
21 -94 121 0
26 13 111 0
128 58 -107 0
-180 -9 165 0
92 141 -95 0
-70 147 77 0
-178 -124 -42 0
116 178 112 0
-117 -41 -75 0
-20 -35 28 0
-141 -160 50 0
26 -46 180 0
-39 -166 -88 0
76 53 -13 0
127 -94 -119 0
-76 152 -134 0
-13 125 170 0
-16 -71 6 0
131 114 -73 0
167 101 -19 0
-18 -139 -152 0
-125 -162 -89 0
-2 147 -99 0
-96 155 138 0
-108 -130 148 0
-21 -73 -20 0
2 40 -176 0
117 -85 -65 0
-158 107 -20 0
-67 -171 128 0
-50 28 -13 0
128 -6 -60 0
49 -46 95 0
-84 101 -109 0
-108 90 75 0
54 132 69 0
-162 68 -86 0
-93 69 177 0
-38 153 124 0
23 -21 -29 0
169 50 116 0
77 51 -153 0
40 101 -123 0
169 143 -27 0
-47 158 -35 0
-179 75 -113 0
-50 83 -7 0
141 52 -161 0
-126 -176 131 0
36 -126 1 0
174 84 43 0
70 -179 26 0
41 67 -164 0
138 153 -23 0
56 -146 -59 0
-29 23 -40 0
174 49 -125 0
71 -166 167 0
31 -170 -70 0
-29 58 -61 0
-45 130 53 0
1 -75 14 0
128 -6 -68 0
-68 31 -135 0
-49 -131 35 0
83 -119 102 0
175 -149 172 0
-166 116 -46 0
-69 -170 83 0
-175 22 -156 0
20 127 50 0
62 49 -135 0
13 129 -43 0
175 -64 -9 0
-41 178 40 0
71 145 -118 0
91 -33 129 0
-153 -161 132 0
94 -43 -15 0
-168 -98 48 0
76 101 -25 0
105 -3 -127 0
12 -159 44 0
6 -172 -27 0
-134 -37 -7 0
-61 138 35 0
177 -42 -179 0
19 9 -90 0
50 -133 20 0
-131 56 -77 0
47 35 36 0
110 -98 9 0
110 1 41 0
-26 -171 138 0
79 -110 15 0
-135 146 60 0
-51 13 109 0
129 -69 -83 0
168 109 -95 0
180 -165 116 0
143 90 97 0
60 -113 76 0
-79 177 107 0
13 135 -38 0
-149 98 -62 0
-123 -58 -4 0
65 108 13 0
121 -10 -126 0
-89 -39 -77 0
-119 36 95 0
19 38 -78 0
156 108 -78 0
66 -75 -8 0
160 79 101 0
161 -100 -167 0
144 -49 28 0
150 -71 -15 0
163 46 85 0
132 89 -165 0
84 -172 88 0
121 -148 127 0
-96 81 -46 0
-90 8 45 0
-125 -17 60 0
177 -93 -36 0
-46 123 -47 0
12 -27 -59 0
100 -22 -128 0
-71 -104 82 0
-176 -71 -47 0
-36 -43 143 0
140 -55 -128 0
-165 -60 108 0
-69 28 175 0
-79 -143 -156 0
-12 -48 170 0
-52 154 133 0
11 -78 -159 0
-131 -93 170 0
-5 12 -23 0
45 -11 140 0
-29 93 -106 0
116 -156 -72 0
61 -168 -144 0
-58 146 22 0
-42 -156 178 0
-154 -42 -22 0
-79 -12 91 0
-121 164 103 0
96 114 -147 0
27 32 127 0
171 -15 -67 0
-33 4 -147 0
-83 133 -114 0
23 15 42 0
76 -63 110 0
113 -126 45 0
16 -41 98 0
-123 -73 119 0
55 -123 33 0
-56 153 -144 0
-136 137 178 0
53 108 121 0
-35 -10 -153 0
-70 -37 53 0
-160 40 176 0
-89 -69 23 0
-21 34 -71 0
-3 147 -49 0
-97 -138 117 0
-79 -19 54 0
-74 119 -2 0
-73 -140 -75 0
-146 -109 83 0
124 -87 -147 0
-127 152 -129 0
-25 88 142 0
107 164 -52 0
-72 -51 101 0
-151 127 58 0
151 -152 75 0
54 111 74 0
126 -99 169 0
-97 4 102 0
106 -94 -81 0
-171 -50 136 0
-20 66 81 0
75 89 115 0
-66 -81 -34 0
75 112 -94 0
-162 -175 -16 0
-67 -80 -26 0
-69 -55 -138 0
-25 71 -85 0
57 -167 99 0
49 -161 -178 0
-56 148 67 0
-152 168 59 0
-49 -134 60 0
71 -115 30 0
85 -3 -65 0
-41 127 7 0
75 -156 119 0
-125 73 -150 0
-30 18 121 0
-106 5 -151 0
174 -45 -146 0
117 102 -5 0
26 -163 -99 0
-172 105 155 0
-168 172 -1 0
-174 -7 -90 0
17 140 -11 0
-119 120 -180 0
-158 -72 -13 0
-142 55 61 0
-109 5 136 0
57 35 -173 0
-82 33 117 0
79 155 60 0
12 -56 -76 0
167 154 -130 0
-179 175 -157 0
55 -13 47 0
-20 -137 78 0
14 62 74 0
-86 -106 -51 0
-161 63 24 0
84 -89 145 0
-130 -135 65 0
-42 -130 23 0
-20 76 101 0
46 31 125 0
-68 160 128 0
-10 -167 34 0
165 16 -38 0
93 -111 -112 0
61 138 63 0
55 -83 -17 0
169 -147 49 0
60 146 83 0
-157 112 135 0
-102 -174 -94 0
-122 -20 156 0
3 78 -138 0
-139 -107 175 0
-10 138 -156 0
34 8 -72 0
157 -90 106 0
-148 -22 -98 0
-121 99 81 0
59 -177 110 0
-150 93 -26 0
19 157 -132 0
-106 149 86 0
127 6 -108 0
169 -38 163 0
17 -37 -45 0
115 -38 54 0
-10 155 117 0
49 170 34 0
-45 -149 31 0
116 140 -171 0
-119 -171 -117 0
-98 -38 133 0
97 -8 -11 0
-15 -39 121 0
-116 -107 144 0
-72 -85 -92 0
113 112 51 0
101 -65 93 0
-111 101 73 0
-111 143 145 0
23 -143 111 0
-86 130 179 0
-50 -10 -80 0
60 17 -30 0
167 -19 -149 0
-145 73 82 0
45 20 172 0
-56 -17 149 0
87 -156 106 0
-177 -135 -50 0
37 -26 84 0
-19 -52 132 0
21 -76 88 0
-136 -156 129 0
-11 -92 -74 0
42 -177 9 0
37 123 10 0
25 30 169 0
58 -100 27 0
122 -119 90 0
-98 -143 -1 0
-116 94 -19 0
-106 74 152 0
92 135 -134 0
-88 58 -154 0
3 58 123 0
141 40 38 0
-12 142 75 0
-40 122 -173 0
57 -14 114 0
151 -88 69 0
157 -111 162 0
151 20 -78 0
-77 105 -48 0
-145 -104 -52 0
-39 69 79 0
-4 86 -153 0
-168 -127 180 0
-42 -106 -130 0
109 -107 114 0
-2 -67 -120 0
53 96 1 0
177 -141 147 0
130 80 -124 0
-47 -180 -31 0
31 81 19 0
-107 56 114 0
162 92 52 0
82 169 79 0
-120 -79 139 0
54 -73 -23 0
146 -73 29 0
110 139 -124 0
-63 109 -102 0
167 -3 144 0
-72 57 66 0
88 81 -15 0
130 -140 -32 0
83 45 -48 0
-147 162 -91 0
-40 -45 62 0
-90 -105 -148 0
-140 17 133 0
-50 -167 21 0
-61 -45 165 0
-68 14 140 0
145 41 -150 0
-86 110 -9 0
-160 -21 -134 0
-75 30 -135 0
135 -106 -51 0
-165 91 -51 0
-179 10 177 0
167 -19 -67 0
-65 14 -20 0
-163 -170 21 0
-43 -136 -67 0
-50 -63 -68 0
81 13 61 0
24 96 -57 0
-46 -91 -52 0
62 -20 86 0
44 -97 167 0
-51 15 62 0
-116 106 -9 0
-151 -155 128 0
155 -60 -95 0
74 -10 -108 0
-43 146 -85 0
134 117 36 0
148 -8 20 0
18 -180 28 0
-73 -132 150 0
109 -1 169 0
134 -133 -126 0
-152 117 37 0
-80 -64 -135 0
164 -180 54 0
142 50 -175 0
-180 102 -8 0
63 -141 -15 0
-161 64 -179 0
12 -88 -25 0
-40 46 38 0
99 -140 36 0
150 86 66 0
-133 89 -69 0
-104 65 -106 0
69 80 -70 0
123 49 129 0
63 7 172 0
129 -51 63 0
-173 -122 -93 0
-47 145 141 0
72 25 -94 0
-70 -110 -39 0
115 116 43 0
-99 178 -157 0
-5 -139 -119 0
79 179 -171 0
91 41 -59 0
-65 -55 86 0
141 82 -130 0
161 54 -39 0
88 70 150 0
30 -16 24 0
56 -51 68 0
-131 -169 73 0
-137 -139 -8 0
-33 136 170 0
-114 29 14 0
124 -149 48 0
64 138 -120 0
54 92 134 0
114 164 -106 0
-68 -75 -32 0
104 -159 -144 0
-99 116 -139 0
-158 -112 -170 0
-134 112 -142 0
72 -175 -73 0
55 4 147 0
126 4 -56 0
-65 119 -72 0
-178 46 78 0
31 -172 150 0
-76 -97 121 0
-120 -81 36 0
180 52 -159 0
-108 -70 101 0
-94 135 -2 0
90 85 -150 0
140 2 120 0
-158 -104 103 0
81 -162 -118 0
-150 -178 15 0
-168 -176 37 0
-22 -26 -164 0
126 161 170 0
-115 -140 25 0
-42 -2 19 0
-98 152 28 0
124 -28 80 0
150 73 25 0
52 154 -144 0
-102 148 -76 0
164 112 -123 0
-154 110 -71 0
-124 84 172 0
-29 38 31 0
-114 -16 -36 0
-91 -125 -19 0
-107 -74 -174 0
-92 -97 173 0
-75 3 179 0
110 -149 155 0
140 74 176 0
-57 50 -100 0
106 -16 103 0
85 -39 -51 0
-10 -20 -94 0
-105 170 -149 0
149 -152 129 0
-176 164 85 0
-178 46 -173 0
-120 -90 72 0
58 112 -97 0
128 168 -33 0
-61 -103 -86 0
-89 -31 -81 0
-144 -110 175 0
46 -53 -100 0
-73 -109 -137 0
32 -120 -108 0
83 -173 -179 0
11 29 -25 0
178 56 55 0
-127 -90 -46 0
-175 -83 -124 0
-103 -79 -105 0
-167 168 -14 0
-72 180 -71 0
-174 -52 147 0
171 56 153 0
-149 87 -31 0
-1 -81 69 0
118 49 -31 0
-46 -6 -103 0
-28 -80 -162 0
19 85 91 0
69 -110 -37 0
-58 -106 -64 0
-135 74 -98 0
136 143 63 0
9 4 -83 0
-103 -5 128 0
6 92 128 0
-90 34 -136 0
108 72 -7 0
74 -91 154 0
-155 -23 -90 0
73 -10 -60 0
158 -66 4 0
-172 -116 14 0
-1 15 40 0
-53 119 40 0
160 14 107 0
46 -108 57 0
81 -62 122 0
-19 42 -174 0
70 -13 159 0
-120 109 -16 0
4 102 147 0
75 96 -150 0
-33 9 31 0
161 24 -117 0
-130 36 -108 0
58 172 52 0
12 -173 -127 0
"""
output = "SAT"
|
[
"mario@alviano.net"
] |
mario@alviano.net
|
d0be3c0cb82a1c5bd0a09ef09decb96875981c29
|
e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a
|
/Systest/tests/cdr/CDR_FUN_003.py
|
8f2ed09b839658b011aae872c85d3bfe7bdbe5c0
|
[] |
no_license
|
muttu2244/MyPython
|
1ddf1958e5a3514f9605d1f83c0930b24b856391
|
984ca763feae49a44c271342dbc15fde935174cf
|
refs/heads/master
| 2021-06-09T02:21:09.801103
| 2017-10-10T07:30:04
| 2017-10-10T07:30:04
| 13,803,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,439
|
py
|
#!/usr/bin/env python2.5
"""
##############################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
##############################################################################
DESCRIPTION: Verify that CDR data is still generated for a given session after the SSX is reloaded.
TEST PLAN: CDR Test plans
TEST CASES: CDR_FUN_003
TOPOLOGY DIAGRAM:
|---------------| |----------------|
| | | |
| LINUX | ------ | SSX |
| 17.1.1.1/24 |e1 2/3| 17.1.1.2/16 |
|---------------| |----------------|
AUTHOR: suhasini@primesoftsolutionsinc.com
REVIEWER: alok@primesoftsolutionsinc.com
"""
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
# Frame-work libraries
from SSX import *
from Linux import *
from log import *
from StokeTest import test_case, test_suite, test_runner
from log import buildLogger
from logging import getLogger
from cdr import *
from helpers import is_healthy
import re
#import configs file
from config import *
from topo import *
#import private libraries
from ike import *
from misc import *
class test_CDR_FUN_003(test_case):
myLog = getLogger()
def setUp(self):
#Establish a telnet session to the SSX box.
self.ssx = SSX(ssx["ip_addr"])
self.linux=Linux(xpress_vpn1['ip_addr'],xpress_vpn1['user_name'],xpress_vpn1['password'])
self.ssx.telnet()
self.linux.telnet()
# Clear the SSX config
self.ssx.clear_config()
# wait for card to come up
self.ssx.wait4cards()
self.ssx.clear_health_stats()
def tearDown(self):
# Close the telnet session of SSX
self.ssx.close()
self.linux.close()
def test_CDR_FUN_003(self):
# Enable debug logs for iked
self.ssx.cmd("debug module iked all")
self.ssx.cmd("debug module aaad all")
#changing context and clearing ip counters
self.ssx.cmd("context %s" %(script_var['context_name']))
#clearing sessions on ssx
self.ssx.cmd("clear session all")
#Clearing already existing files on the linux machine
self.linux.cmd("su root")
time.sleep(5)
self.linux.cmd("su krao")
time.sleep(5)
self.linux.cmd("cd")
time.sleep(5)
self.linux.cmd("rm -rf *.asn1")
self.linux.cmd("rm -rf *.xml")
self.linux.cmd("rm -rf *.ttlv")
self.linux.cmd("exit")
self.linux.cmd("exit")
self.linux.cmd("cd /tftpboot/")
#self.linux.cmd("sudo rm *.*")
self.linux.cmd("sudo rm -rf *.asn1")
self.linux.cmd("sudo rm -rf *.xml")
self.linux.cmd("sudo rm -rf *.ttlv")
#configuring interface on linux machine
self.linux.configure_ip_interface(p1_ssx_xpressvpn1[1], script_var['xpress_phy_iface1_ip_mask'])
# Push xpress vpn config on linux
self.linux.write_to_file(script_var['autoexec_config'],"autoexec.cfg","/xpm/")
self.linux.write_to_file(script_var['add_iptakama'],"add_ip_takama","/xpm/")
# Push SSX config
self.ssx.config_from_string(script_var['CDR_FUN_002'])
#Vgrouping the Topology
#vgroup_new(vlan_cfg_str)
# Initiate IKE Session from Xpress VPN Client (takama)
self.linux.cmd("cd")
self.linux.cmd("cd /xpm")
self.linux.cmd("sudo chmod 777 add_ip_takama")
self.linux.cmd("sudo ./add_ip_takama")
time.sleep(5)
self.linux.cmd("sudo ./start_ike")
time.sleep(10)
op1 = self.ssx.configcmd("show session")
self.ssx.configcmd("exit")
        self.failUnless("IPSECv4" in op1, "Failed: no IPSECv4 session found")
self.linux.cmd("!ping %s -I %s -w 2 -c 2" %(script_var['ses_loopip'],script_var['pool_ip']))
self.linux.cmd("quit")
#Changing the running configuration
self.ssx.config_from_string(script_var['CDR_FUN_002A'])
#Saving the configuration.
self.ssx.cmd("save configuration")
time.sleep(2)
#Displaying the saved configuration
saved_conf = self.ssx.cmd("show configuration cdr")
self.myLog.output(saved_conf)
#Reloading the SSX.
self.myLog.info("Reloading the SSX")
#self.ssx.cmd("reload")
self.ssx.reload_device(timeout=500)
#time.sleep(500)
self.myLog.info("\n\n")
self.myLog.info("*" *50)
self.myLog.info("Waiting for 60 seconds to get the files generated on the linux machine-%s ...... " % linux['ip_addr'])
self.myLog.info("*" *50)
self.myLog.info("\n\n")
time.sleep(30)
self.linux.cmd("ls -rth /tftpboot/ | grep \".xml\" ")
#self.linux.cmd("ls -rth ~krao | grep \".xml\" ")
#self.linux.cmd("ls -rth / | grep \".xml\" ")
linuxip = self.linux.cmd("ls -rth /tftpboot/ | grep \".xml\" | tail -n 1")
linuxip1 = linuxip.strip()
self.myLog.output(linuxip1)
self.failUnless(linuxip1,"Failed to generate XML files after changing the configuration")
self.myLog.output("CDR data generation passed for a given session even after changing and saving of running configuration")
self.myLog.info("*" *50)
self.myLog.output("XML files were generated instead of TTLV files")
self.myLog.info("*" *50)
time.sleep(2)
# Checking SSX Health
hs = self.ssx.get_health_stats()
self.failUnless(is_healthy( hs), "Platform is not healthy")
if __name__ == '__main__':
if os.environ.has_key('TEST_LOG_DIR'):
os.mkdir(os.environ['TEST_LOG_DIR'])
os.chdir(os.environ['TEST_LOG_DIR'])
filename = os.path.split(__file__)[1].replace('.py','.log')
log = buildLogger(filename, debug=True, console=True)
suite = test_suite()
suite.addTest(test_CDR_FUN_003)
test_runner(stream=sys.stdout).run(suite)
|
[
"muttu2244@yahoo.com"
] |
muttu2244@yahoo.com
|
5836d969a34cd9162fc29c0eaf3f3c12370e941f
|
0fa2cb199a567f1ed81faa9a592c9d8a959f69b9
|
/adventofcode/2015/13.py
|
b6a6324619c1979229c2981430dbd6f2bd2e2591
|
[
"MIT"
] |
permissive
|
hacktoolkit/code_challenges
|
ee5d407b3cfd8e27e1ec1254e3db129ce7a8db7d
|
fa35a3cf8afd18b4c32170f64d5364e90dedd5a6
|
refs/heads/master
| 2023-06-21T17:09:42.342328
| 2022-12-25T06:18:26
| 2022-12-25T06:18:26
| 30,107,170
| 12
| 6
| null | 2022-12-29T13:58:31
| 2015-01-31T08:37:24
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
# Python Standard Library Imports
import re
from collections import defaultdict
from itertools import permutations
from utils import (
Re,
ingest,
)
INPUT_FILE = '13.in'
EXPECTED_ANSWERS = (618, 601, )
# INPUT_FILE = '13.test.in'
# EXPECTED_ANSWERS = (330, 286, )
def main():
solution = Solution()
answers = (solution.solve1(), solution.solve2(), )
print(answers)
assert(answers == EXPECTED_ANSWERS)
class Solution:
def __init__(self):
self.data = ingest(INPUT_FILE)
self.seating_chart = SeatingChart(self.data)
def solve1(self):
        best_arrangement, best_score = self.seating_chart.find_optimal_seating()
answer = best_score
return answer
def solve2(self):
self.seating_chart.add_player('undefined_player')
        best_arrangement, best_score = self.seating_chart.find_optimal_seating()
answer = best_score
return answer
class SeatingChart:
SEATING_REGEX = re.compile(r'^(?P<name>[A-Z][a-z]+) would (?P<change>(gain)|(lose)) (?P<amount>\d+) happiness units by sitting next to (?P<partner>[A-Z][a-z]+)\.$')
def __init__(self, rules):
self.rules = rules
chart = defaultdict(lambda: defaultdict(int))
for rule in rules:
regex = Re()
if regex.match(self.SEATING_REGEX, rule):
m = regex.last_match
name, change, amount, partner = (
m.group('name'),
m.group('change'),
int(m.group('amount')),
m.group('partner'),
)
multiplier = 1 if change == 'gain' else -1
score = multiplier * amount
chart[name][partner] = score
else:
raise Exception('Bad seating rule: %s' % rule)
self.chart = chart
self.players = sorted(list(chart.keys()))
def add_player(self, name):
self.players.append(name)
def find_optimal_seating(self):
"""Variant of stable marriage problem
https://en.wikipedia.org/wiki/Stable_marriage_problem
https://en.wikipedia.org/wiki/Gale%E2%80%93Shapley_algorithm
"""
best_arrangement = None
best_score = None
num_players = len(self.players)
for p in permutations(self.players):
score = 0
for i in range(num_players):
player = p[i]
neighbors = (
p[(i+1) % num_players],
p[(i-1) % num_players],
)
for neighbor in neighbors:
score += self.chart[player][neighbor]
if best_arrangement is None or score > best_score:
best_arrangement = p
best_score = score
return best_arrangement, best_score
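# Scoring note: happiness is directed, so each adjacent pair (a, b) around the table
# contributes chart[a][b] + chart[b][a]. In the well-known puzzle sample, Alice gains 54
# next to Bob while Bob gains 83 next to Alice, so that edge is worth 54 + 83 = 137.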
if __name__ == '__main__':
main()
|
[
"hello@jontsai.com"
] |
hello@jontsai.com
|
7d2a692bfc7fae15da0ae6156a3b1e35a188a34a
|
8e8ffec676b76653555605b0e370803a3c4e3598
|
/scripts/tags.py
|
908d71c5a12d9192bddd78203180a4b659b02954
|
[] |
no_license
|
fbzekiyalniz/hub
|
04b56165d965ccf67d8ae77a0d8d2659b226b8d6
|
c72cc328a01abea0bfd3e1b8413a455b97300ee6
|
refs/heads/master
| 2020-08-03T11:51:32.008748
| 2019-10-16T17:47:41
| 2019-10-16T17:47:41
| 211,742,619
| 1
| 0
| null | 2019-09-30T00:05:57
| 2019-09-30T00:05:57
| null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
valid_tags = ['vision',
'nlp',
'generative',
'audio',
]
|
[
"ailzhang@fb.com"
] |
ailzhang@fb.com
|
76645e99a3ab8dda3a18c6fcd5ef21efa0165d58
|
cb43eaec0f296e3b216845c33417446038e7149f
|
/studnit/studnit_app/views.py
|
a03e204de73bab1bfefe81666fa9e5b1d84c3f9d
|
[
"MIT"
] |
permissive
|
sonali0901/hackathon-studnit
|
fc6d4aef4f0c5350ba5f427e0b31ce8876538c9c
|
41979fb59a33f2a95d9cd651940a6a8c8d0b7fc4
|
refs/heads/master
| 2021-01-01T04:44:30.380252
| 2016-04-17T16:45:26
| 2016-04-17T16:45:26
| 56,426,433
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
from django.shortcuts import render
from django.contrib.auth import authenticate, logout
from django.contrib.auth import login as login_1
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from studnit import settings
from .models import Chat  # Post() below uses Chat; assumed to live in this app's models.py
def login(request):
next = request.GET.get('next', '/home/')
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login_1(request, user)
print('test')
return HttpResponseRedirect('/home/')
else:
return HttpResponse("Account is not active at the moment.")
else:
return HttpResponseRedirect(settings.LOGIN_URL)
return render(request, "studnit_app/index.html", {'next': next})
def home(request):
return render(request, 'studnit_app/home.html', {})
def y12(request):
return render(request, 'studnit_app/y12.html',{})
def Post(request):
if request.method == "POST":
msg = request.POST.get('msgbox', None)
c = Chat(user=request.user, message=msg)
if msg != '':
c.save()
return JsonResponse({ 'msg': msg, 'user': c.user.username })
else:
return HttpResponse('Request must be POST.')
|
[
"sonaligpt0@gmail.com"
] |
sonaligpt0@gmail.com
|
a57090615a00f422ac7cb45b2f7d11e389791935
|
df82bc832254751da5b02d6ab780f79722fb292f
|
/definitions.py
|
f6f50fb3d8c820f80401b68e4fe2f0637246f105
|
[
"MIT"
] |
permissive
|
skilkis/GENX
|
9e84026f53cad5f035dce899e989bf140eacf557
|
049267ba7751013517d49939e4ce387484959500
|
refs/heads/master
| 2020-03-29T00:28:36.759126
| 2019-01-18T21:07:10
| 2019-01-18T21:07:10
| 149,341,276
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,077
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from constants import *
from utils import Undefined
import numpy as np
__author__ = 'San Kilkis'
class Component(property):
""" Renames the :py:class:`property` to be able to organize all engine components and retrieve them easily """
def __repr__(self):
return "<'{}' {} object at {}>".format(self.fget.__name__,
self.__class__.__name__,
hex(id(self)))
class FlowCondition(Constants):
__kwargs__ = None
# Providing dummy attributes for the debugger, these are partially overwritten at run-time by the constructor
mass_flow = Undefined('mass_flow')
corrected_mass_flow = Undefined('corrected_mass_flow')
mach = Undefined('mach')
velocity = Undefined('velocity')
t_static = Undefined('t_static')
t_total = Undefined('t_total')
p_static = Undefined('p_static')
p_total = Undefined('p_total')
medium = Undefined('medium')
rho = Undefined('rho')
station_number = Undefined('station_number')
    # TODO Finish documentation
def __init__(self, **kwargs):
"""
:param float mass_flow:
:param float mach:
:param float velocity:
:param float t_static:
:param float p_static:
:param str medium:
:param float t_total:
:param float p_total:
:param float rho: Density of the substance in SI kilogram per meter cubed [kg/m^3]
:param str station_number:
"""
self.__kwargs__ = kwargs
for key, value in zip(kwargs.keys(), kwargs.values()):
setattr(self, key, value)
# TODO add a nice representation for visualization in debugger and print statements
# def __repr__(self):
# return '<Undefined = {}>.format'
@Attribute
def velocity(self):
""" Computes flow velocity from the mach number if available in SI meter per second [m/s] """
return self.mach * np.sqrt(self.kappa * self.gas_constant * self.t_static)
@Attribute
def rho(self):
""" Computes the density from the static pressure and temperature in SI kilogram per meter cubed [kg/m^3] """
return self.p_static / (self.gas_constant * self.t_static)
@Attribute
def kappa(self):
""" Ratio of Specific Heat Selected Medium """
if self.medium == 'air':
return self.kappa_air
elif self.medium == 'gas':
return self.kappa_gas
else:
raise AttributeError("Data for the provided medium '{}' does not exist".format(self.medium))
@Attribute
def specific_heat(self):
""" Specific Heat of the Selected Medium at Constant Pressure c_p in SI Joule per kilogram Kelvin [J/kg K] """
if self.medium == 'air':
return self.specific_heat_air
elif self.medium == 'gas':
return self.specific_heat_gas
else:
raise AttributeError("Data for the provided medium '{}' does not exist".format(self.medium))
# TODO Add Attributes: Corrected Mass Flow if necessary otherwise mass flow
@Attribute
def corrected_mass_flow(self):
""" Returns the mass flow corrected for pressure and temperature effects in SI kilogram per second [kg s^-1] """
return self.mass_flow * self.c_ratio
@Attribute
def mass_flow(self):
""" Actual mass flow in SI kilogram per second [kg s^-1] """
return self.corrected_mass_flow / self.c_ratio
@Attribute
def t_total(self):
return self.t_static * self.t_ratio
@Attribute
def p_total(self):
return self.p_static * self.p_ratio
@Attribute
def t_static(self):
return self.t_total / self.t_ratio
@Attribute
def p_static(self):
return self.p_total / self.p_ratio
@Attribute
def t_ratio(self):
""" Total Temperature to Static Temperature Ratio """
return 1 + (((self.kappa - 1) / 2.) * self.mach**2)
@Attribute
def p_ratio(self):
""" Total Pressure to Static Pressure Ratio """
return (1 + (((self.kappa - 1) / 2.) * self.mach**2))**(self.kappa / (self.kappa - 1))
@Attribute
def c_ratio(self):
""" Correction Ratio for obtaining the Corrected Mass Flow """
numerator = np.sqrt(self.t_total / self.temperature_sl)
denominator = self.p_total / self.pressure_sl
return numerator / denominator
    @staticmethod
    def ensure_float(entry):
        # the original `if entry is not None or str` was always true, so None crashed float()
        return float(entry) if entry is not None else entry
class Stage(Constants):
    @Attribute
    def inflow(self):
        raise NotImplementedError('Implement an __init__ method to obtain the FlowCondition at the start of the stage')
    @Attribute
    def outflow(self):
        raise NotImplementedError('Implement methods to compute this parameter in subclasses')
if __name__ == '__main__':
obj = FlowCondition(mach=0.8, p_total=101325, medium='air')
print(obj.p_static)
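    # Sanity check of the printed value (computed here, not taken from the repo):
    # for mach = 0.8 in air (kappa = 1.4), t_ratio = 1 + 0.2 * 0.64 = 1.128 and
    # p_ratio = 1.128 ** 3.5 ~= 1.524, so p_static ~= 101325 / 1.524 ~= 66.5 kPa.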
|
[
"sankilkis@msn.com"
] |
sankilkis@msn.com
|
87afd56421d221bac59235bfc29654fa42999ffd
|
2b485c67c723151f73ec96da9f6337a0c9857dae
|
/easy/q125 validPalindrome.py
|
1845ff93d7ae6e5ea92ed9210954fc747d35d733
|
[] |
no_license
|
Anupya/leetcode
|
c7792e6ac61b655491a1c734f9167281356471d3
|
cb45e66a41e0c6a8583bb9c4bf846b470ef4bc0f
|
refs/heads/master
| 2022-10-10T14:01:22.189414
| 2022-09-07T21:36:24
| 2022-09-07T21:36:24
| 151,865,310
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Given a string s, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
class Solution:
def isPalindrome(self, s: str) -> bool:
s = ''.join(x for x in s if x.isalnum()).lower()
return s == s[::-1]
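# e.g. isPalindrome("A man, a plan, a canal: Panama") -> True: the filtered string is
# "amanaplanacanalpanama", which equals its own reverse.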
|
[
"anupya@hotmail.ca"
] |
anupya@hotmail.ca
|
cd066667c03cf7f004e710743a04ccb25008bd1f
|
5d91c8dc65df96816994b5e8cce10d2261294349
|
/natural-selection-sim 30-10-2020/organisms.py
|
6978731a0b29d6a3a8bc1ecd0ba2cd0a60ebf756
|
[] |
no_license
|
phletic/pythonEcosystemSimulation
|
9d8f90b53a6c37d0f7236404bd9997cb45daa8ce
|
9e20afacb1c5a44bca920a9f549962f46d437d5c
|
refs/heads/main
| 2023-01-06T15:27:14.272785
| 2020-10-31T04:14:44
| 2020-10-31T04:14:44
| 302,033,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,492
|
py
|
import math
import random
import string
import sys
from abc import ABC, abstractmethod
# noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences
from vectorMath import Vector
'''
This program stores all the animal classes.
Eventually the goal is to let users create their own organisms with custom behaviors, i.e. edit the program through a UI.
'''
# This Node is for the a* algorithm. The __repr__ method is rarely needed, but there is no harm in keeping it.
class Node:
"""
"""
def __init__(self, v, f):
self.v = v
self.f = f
def __repr__(self):
return f"{self.v},{self.f}"
class organisms(ABC):
"""
"""
def __init__(self, pos, energy, growthRate, vision, name, type):
letters = string.ascii_letters
result_str = ''.join(random.choice(letters) for i in range(10))
self.id = result_str
self.pos = pos
self.energy = energy
self.growthRate = growthRate
self.vision = vision
self.species = name
self.age = 0
self.type = type
def __repr__(self):
return self.species + "\t" + str(self.pos) + "\t" + str(self.energy) + "\t" + str(self.age) + "\t" + self.id
def getRandomLocation(self, distance, objs):
location = [i.pos for i in objs]
r = random.randint(1, distance)
theta = math.radians(random.randint(0, 359))
x = round(r * math.cos(theta) + self.pos.x)
y = round(r * math.sin(theta) + self.pos.y)
path = Vector(x, y)
while path in location:
path.x += random.randint(-1, 1)
path.y += random.randint(-1, 1)
return path
def die(self, _map):
"""
:param map: list
:return: map
"""
_map.remove(self)
return _map
@abstractmethod
def move(self, _map):
pass
# animal class. the parent
class animal(organisms):
"""
animals: Rules: When it run the move() function, it picks out all organisms in the area within its vision. If
there is another organism of differing gender, it will check if its expectations > other species attractiveness
and vice versa. When both conditions pass, it will mate
"""
# noinspection PyCompatibility
def __init__(self, pos: Vector, energy: int, growthRate: int, attractiveness: int, expectations: int, vision: int,
species: str, foodEat: list,
gender: str,
notMate: list = None):
if notMate is None:
notMate = []
self.wait = False
self.gender = gender
self.expectations = expectations
self.foodEat = foodEat
self.attractiveness = attractiveness
self.notMate = notMate
super().__init__(pos, energy, growthRate, vision, species, "animal")
# That one annoying a star program -- which in the end I had to redo
def alteredPathFindingAlgo(self, end, obstacle):
locations = [i.pos for i in obstacle]
possibleLocations = [Vector(0, 1), Vector(1, 0), Vector(0, -1), Vector(-1, 0)]
possibleLocations = [Node(v=(i + self.pos), f=(
Vector.Distance(i + self.pos, end) + Vector.Distance(self.pos, i + self.pos))) for i in
possibleLocations if i not in locations]
lowestF, index = sys.maxsize, 0
for e, i in enumerate(possibleLocations):
if i.f < lowestF:
lowestF = i.f
index = e
return possibleLocations[index].v
def reproduce(self, _map, partner):
        # random.randint(a, b) requires a <= b, so each stat orders its bounds before sampling
        # This code determines the stats of the baby
# the creature will not mate with the immediate parents
newEnergy = random.randint(self.energy, partner.energy) if partner.energy > self.energy else random.randint(
partner.energy, self.energy)
newGrowthRate = random.randint(self.growthRate,
partner.growthRate) if partner.growthRate > self.growthRate else random.randint(
partner.growthRate, self.growthRate)
newAttractiveness = random.randint(self.attractiveness,
partner.attractiveness) if partner.attractiveness > self.attractiveness else random.randint(
partner.attractiveness, self.attractiveness)
newExpectations = random.randint(self.expectations,
partner.expectations) if partner.expectations > self.expectations else random.randint(
partner.expectations, self.expectations)
newVision = random.randint(self.vision, partner.vision) if partner.vision > self.vision else random.randint(
partner.vision, self.vision)
newGender = random.choice(["M", "F"])
newPos = self.getRandomLocation(2, _map)
Baby = animal(newPos, newEnergy, newGrowthRate,
newAttractiveness, newExpectations, newVision, self.species, self.foodEat,
newGender, notMate=[self, partner])
self.notMate.append(partner)
self.notMate.append(Baby)
partner.notMate.append(self)
partner.notMate.append(Baby)
partner.wait = False
_map.append(Baby)
return _map
def move(self, _map):
# todo eat
# todo mate
# todo die
        locations = [i for i in _map if Vector.Distance(i.pos, self.pos) < self.vision and i.pos != self.pos]  # != not `is not`: compare values, not object identity
lowestDistance, index = sys.maxsize, 0
possibleLocations = []
for e, i in enumerate(locations):
distance = Vector.Distance(self.pos, i.pos)
if i.type == "animal":
if self.species in i.foodEat:
# Im prey I should run
possibleLocations.append(self.pos - i.pos + self.pos)
                if i.species == self.species and i not in self.notMate and self not in i.notMate:  # == not `is`: string identity is unreliable
if self.check(i) and i.check(self):
# Im mate
if distance <= 1:
print("reproduce")
_map = self.reproduce(_map,i)
return _map
possibleLocations.append(i.pos)
if i.species in self.foodEat:
# Im predator I should chase
if distance <= 1:
print("eat")
possibleLocations.append(i.pos)
if distance < lowestDistance:
lowestDistance = distance
index = e
if not possibleLocations:
possibleLocations.append(self.getRandomLocation(self.vision, _map))
index = 0
global path
try:
path = self.alteredPathFindingAlgo(possibleLocations[index], _map)
except Exception as e:
print(possibleLocations,index,self,e)
exit()
self.pos = path
return _map
def check(self, other):
if self.expectations <= other.attractiveness:
return True
else:
return False
class plant(organisms):
"""
"""
def __init__(self, pos, energy, spread, growthRate,
species, reproduceRate): # vision = spread of plant - how far it can produce new of itself
"""
:param pos:
:param energy:
:param vision:
:param growthRate:
:param species:
"""
self.reproduceRate = reproduceRate
# noinspection PyCompatibility
super().__init__(pos, energy, growthRate, spread, species, "plant")
def spread(self, _map):
for i in range(self.reproduceRate):
location = self.getRandomLocation(self.vision, _map)
newEnergy = self.energy + random.randint(-2, 2) if self.energy - 2 > 0 else self.energy + random.randint(0,
2)
newSpread = self.vision + random.randint(-2, 2) if self.vision - 2 > 0 else self.vision + random.randint(0,
2)
newGrowthRate = self.growthRate + random.randint(-2,
2) if self.growthRate - 2 > 0 else self.growthRate + random.randint(
0, 2)
_map.append(plant(location, newEnergy, newSpread, newGrowthRate, self.species, self.reproduceRate))
return _map
def move(self, _map):
self.age += self.growthRate
if self.age >= 100:
_map = self.spread(_map)
_map = self.die(_map)
return _map
# for testing purposes only
if __name__ == '__main__':
rabbit = animal(pos=Vector(2, 2), energy=10, growthRate=10, attractiveness=10, expectations=10, vision=10,
species="rabbit", foodEat=["grass"], gender="M")
rabbitF = animal(pos=Vector(2, 10), energy=10, growthRate=10, attractiveness=10, expectations=10, vision=10,
species="rabbit", foodEat=["grass"], gender="F")
_map = [rabbit, rabbitF]
while True:
input("")
for i in _map:
_map = i.move(_map)
print(i)
|
[
"chavezchendy@gmail.com"
] |
chavezchendy@gmail.com
|
e159944e0eb7ef079d843bc84419372a591efabe
|
6ac0aeea8229c4e2c7a041e85c3afeeb106c6b01
|
/KAPL_UTIL.py
|
0dfbd1eb5ca51bf24f664c098d8017ceaca2ed1d
|
[] |
no_license
|
waiteb15/py3intro
|
325dafaaa642052280d6c050eacf8b406b40e01d
|
68b30f147e7408220490a46d3e595acd60513e9e
|
refs/heads/master
| 2020-03-27T10:50:25.928836
| 2019-02-28T21:47:11
| 2019-02-28T21:47:11
| 146,448,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
#!/usr/bin/env python
def spam():
print("hello")
def ham():
print("ham")
def _eggs():
print("EGGS")\
|
[
"waiteb15@gmail.com"
] |
waiteb15@gmail.com
|
0a077646db3b2a032b608e4bfe75a1bc68314fae
|
24ea68723c92526ea2718df3e44bdff6868eae1d
|
/demo_app.py
|
44171bfdb3ce443a2f5cc15771de2e26a3ea3235
|
[] |
no_license
|
r3ap3rpy/gitlab-flask
|
87c5b677d4c8caf02063611b8c0bf816741017af
|
5ca851cf91bde5f1bfdb3d7ffcd74f1a1ae796f5
|
refs/heads/master
| 2022-12-29T22:57:46.420142
| 2020-10-18T07:58:18
| 2020-10-18T07:58:18
| 305,051,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "Hello World!"
@app.route("/cicd")
def cicd():
return "GitLab is awesome!"
if __name__ == '__main__':
app.run(host="localhost", port = 8080, debug = True)
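# Quick check once the server is running (host/port as configured above):
#   curl http://localhost:8080/cicd   -> "GitLab is awesome!"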
|
[
"r3ap3rpy@gmail.com"
] |
r3ap3rpy@gmail.com
|
4d5982f1088caa14f4770ab5d8df791193fb9f76
|
c60522de559312f47bcf576a774e5410de35eb35
|
/wm-test.py
|
a546dbae20b4f2a72674efd86b1af13cb40d5392
|
[] |
no_license
|
benoit-pierre/config-progs
|
e147af5b478c70fea5761aad1f8823aeab9709a7
|
5b84aff8c5e51c331ace2ba6a609e4a95484a93b
|
refs/heads/master
| 2021-01-18T23:07:59.497783
| 2018-12-18T12:44:54
| 2018-12-18T12:44:54
| 14,686,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,235
|
py
|
#!/usr/bin/env python2
import subprocess
import optparse
import struct
import signal
import copy
import time
import sys
import re
import os
def check_display(option, opt, value):
m = re.match('^\d+$', value)
if not m:
raise optparse.OptionValueError('invalid display ID: %s' % value)
return int(m.group(0))
def check_geometry(option, opt, value):
m = re.match('^(\d+)x(\d+)(:(\d+)x(\d+))?$', value)
if not m:
raise optparse.OptionValueError('invalid display geometry: %s' % value)
width, height = int(m.group(1)), int(m.group(2))
if m.group(3):
horizontal_screens, vertical_screens = int(m.group(4)), int(m.group(5))
else:
horizontal_screens, vertical_screens = 1, 1
return (width, height, horizontal_screens, vertical_screens)
class Option(optparse.Option):
TYPES = optparse.Option.TYPES + ('display', 'geometry',)
TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER['display'] = check_display
TYPE_CHECKER['geometry'] = check_geometry
parser = optparse.OptionParser(option_class=Option)
parser.add_option('-D', '--display',
dest='display', metavar='DISPLAY', default=2, type='display',
help='X11 display ID to use')
parser.add_option('-g', '--geometry',
dest='geometry', metavar='WxH[:WxH]', default='1024x768', type='geometry',
help='display geometry: total width/height (pixels), width/height (screens, optional, default to 1x1)')
parser.add_option('-d', '--debug',
action='store_true', dest='debug', default=False,
help='enable debug traces')
(options, args) = parser.parse_args()
xsession_debug = options.debug
class SigException(Exception):
    def __init__(self, signum):
        self.signum = signum
def xsession_sighandler(signum, frame):
if xsession_debug:
if signal.SIGUSR1 == signum:
signame = 'SIGUSR1'
elif signal.SIGUSR2 == signum:
signame = 'SIGUSR2'
else:
signame = str(signum)
print 'xsession_sighandler(' + signame + ')'
raise SigException(signum)
signal.signal(signal.SIGUSR1, xsession_sighandler)
signal.signal(signal.SIGUSR2, xsession_sighandler)
def dim_split(dim, num):
step = dim / num
values = []
for d in range(num - 1):
values.append(step)
dim -= step
values.append(dim)
return values
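# e.g. dim_split(1024, 3) -> [341, 341, 342]: Python 2 integer division gives step = 341
# and the final screen absorbs the remainder, so the widths always sum to `dim`.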
def display_geometry_to_screens(display_width, display_height,
horizontal_screens, vertical_screens):
screens_widths = dim_split(display_width, horizontal_screens)
screens_heights = dim_split(display_height, vertical_screens)
screens = []
x, y = 0, 0
for w in screens_widths:
for h in screens_heights:
screens.append((x, y, w, h))
y = (y + h) % display_height
x = (x + w) % display_width
return screens
xephyr_display = ':%u' % options.display
xephyr_pid = os.fork()
if 0 == xephyr_pid:
# This will make the xserver send us a SIGUSR1 when ready.
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
xephyr_cmd = [
'Xephyr', 'Xephyr',
xephyr_display,
'+xinerama',
'-ac',
'-noreset',
'-resizeable',
'-extension', 'GLX',
]
screens = display_geometry_to_screens(*options.geometry)
for x, y, w, h in screens:
xephyr_cmd.extend([
'-origin', '%u,%u' % (x, y),
'-screen', '%ux%u' % (w, h),
])
if xsession_debug:
print 'starting xephyr:', ' '.join(xephyr_cmd)
os.execlp(*xephyr_cmd)
xsession_signum = None
try:
# Wait for xserver to be ready.
if xsession_debug:
print 'waiting for xserver to be ready'
try:
signal.pause()
except KeyboardInterrupt:
sys.exit(0)
except SigException, e:
assert signal.SIGUSR1 == e.signum
if xsession_debug:
print 'xserver ready'
os.system('xrdb -query | xrdb -load -display %s -' % xephyr_display)
    # Ugly hack... xkbcomp only works after at least one keypress...
os.system('xdotool key space')
os.system('xkbcomp %s %s' % (os.environ['DISPLAY'], xephyr_display))
os.environ['DISPLAY'] = xephyr_display
if xsession_debug:
print 'starting dbus'
dbus_cmd = ['dbus-launch', '--binary-syntax']
dbus = subprocess.Popen(dbus_cmd, stdout=subprocess.PIPE)
dbus_env = dbus.communicate()[0]
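    # dbus-launch --binary-syntax emits a NUL-terminated bus address followed
    # by the daemon pid and the X window id as native-endian binary integers;
    # the unpacking below assumes an unsigned int pid and an unsigned long id.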
ulong_size = struct.calcsize('L')
uint_size = struct.calcsize('I')
dbus_pid, = struct.unpack('I', dbus_env[-(ulong_size+uint_size):-ulong_size])
dbus_xid, = struct.unpack('L', dbus_env[-ulong_size:])
dbus_address = dbus_env[:-(ulong_size+ulong_size+1)]
if xsession_debug:
print 'dbus_pid:', dbus_pid
print 'dbus_xid:', dbus_xid
print 'dbus_address:', dbus_address
try:
xsession_pid = os.getpid()
os.environ['XSESSION_PID'] = str(xsession_pid)
os.environ['DBUS_SESSION_BUS_ADDRESS'] = dbus_address
if xsession_debug:
print 'xsession_pid:', xsession_pid
wm_args = args[:]
if 0 == len(wm_args):
wm_args = [ 'xterm' ]
if xsession_debug:
print 'starting wm:', ' '.join(wm_args)
wm_pid = os.fork()
if 0 == wm_pid:
os.execvp(wm_args[0], wm_args)
if xsession_debug:
print 'wm_pid:', wm_pid
while True:
try:
os.waitpid(wm_pid, 0)
break
except KeyboardInterrupt:
sys.exit(0)
except SigException, e:
xsession_signum = e.signum
if xsession_debug:
print 'wm terminated'
finally:
if xsession_debug:
print 'killing dbus'
os.kill(dbus_pid, signal.SIGTERM)
finally:
if xsession_debug:
print 'killing xserver'
os.kill(xephyr_pid, signal.SIGTERM)
os.waitpid(xephyr_pid, 0)
if xsession_debug:
if signal.SIGUSR1 == xsession_signum:
print 'wm asked for reboot'
elif signal.SIGUSR2 == xsession_signum:
print 'wm asked for halt'
|
[
"benoit.pierre@gmail.com"
] |
benoit.pierre@gmail.com
|
c30aced17416596217d29686342924973fc2af5e
|
3b98d784a48191ec490c331c359c71c3416c7aaa
|
/core/filters.py
|
b42bda8b2e02e79e071e56a34c7c1b5bc1e044b4
|
[] |
no_license
|
brian-lai/django-docker
|
303718507500a844474c60e67b7aec790274b045
|
bf22ee921ffd98e3aecd530d5b6b5af80f693e8b
|
refs/heads/master
| 2022-12-28T05:13:36.197124
| 2020-10-13T23:08:10
| 2020-10-13T23:08:10
| 297,494,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import json


def filter_in(self, queryset, value):
    '''Takes a JSON-encoded list as an argument and filters by id.'''
    try:
        # json.loads instead of eval: user input is parsed, never executed
        value = json.loads(value)
    except (TypeError, ValueError):
        return queryset
    if isinstance(value, list):
        return queryset.filter(id__in=value)
    return queryset
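# Hypothetical wiring sketch (assumes django-filter and a Book model, neither
# of which appears in this fragment): filter_in is typically attached to a
# FilterSet through the method= argument, e.g.:
#
#   import django_filters
#
#   class BookFilter(django_filters.FilterSet):
#       ids = django_filters.CharFilter(method='filter_in')
#
#       class Meta:
#           model = Book
#           fields = []
#
# A request such as ?ids=[1,2,3] then narrows the queryset to those ids.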
|
[
"arijsilver@gmail.com"
] |
arijsilver@gmail.com
|
9853fc5a04732414092946eb9dc0fb337d7d3bba
|
589ac0a71099f4ee6857a31986305f0df2c16ede
|
/Doc/examples/local_blast.py
|
dbb1e5ee7f191d61d258bf9de3d1f192a4101aeb
|
[
"LicenseRef-scancode-biopython"
] |
permissive
|
barendt/biopython
|
802aad89005b302b6523a934071796edbd8ac464
|
391bcdbee7f821bff3e12b75c635a06bc1b2dcea
|
refs/heads/rna
| 2021-11-09T19:11:56.345314
| 2010-05-01T02:44:42
| 2010-05-01T02:44:42
| 636,700
| 0
| 0
|
NOASSERTION
| 2021-11-05T13:10:14
| 2010-04-29T02:35:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
#!/usr/bin/env python
"""Script demonstrating the ability to interact with local BLAST.
The contents of this script are described more fully in the available
documentation.
"""
# standard library
import os
import sys
# biopython
from Bio.Blast import NCBIStandalone
my_blast_db = os.path.join(os.getcwd(), 'at-est', 'a_cds-10-7.fasta')
my_blast_file = os.path.join(os.getcwd(), 'at-est', 'test_blast',
'sorghum_est-test.fasta')
my_blast_exe = os.path.join(os.getcwd(), 'blast', 'blastall')
print 'Running blastall...'
blast_out, error_info = NCBIStandalone.blastall(my_blast_exe, 'blastn',
my_blast_db, my_blast_file)
b_parser = NCBIStandalone.BlastParser()
b_iterator = NCBIStandalone.Iterator(blast_out, b_parser)
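# b_iterator.next() parses and returns one BLAST record per query sequence,
# and returns None once the output stream is exhausted, which ends the loop.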
while 1:
b_record = b_iterator.next()
if b_record is None:
break
E_VALUE_THRESH = 0.04
for alignment in b_record.alignments:
for hsp in alignment.hsps:
if hsp.expect < E_VALUE_THRESH:
print '****Alignment****'
print 'sequence:', alignment.title
print 'length:', alignment.length
print 'e value:', hsp.expect
if len(hsp.query) > 75:
dots = '...'
else:
dots = ''
print hsp.query[0:75] + dots
print hsp.match[0:75] + dots
print hsp.sbjct[0:75] + dots
|
[
"chapmanb"
] |
chapmanb
|
4b97325e71d846b3fefb8136d5b0df52f77ba248
|
2364b31ccc8843477295befb09bacbf3a723f1a9
|
/ContentFilter.py
|
cc186f93497f9c9637e0a02dde3189d3cc543a41
|
[] |
no_license
|
yenkuanlee/FindGoodIphone7s
|
c0bd95cdf9b3d67b2e0e302ef5e8b80cda208363
|
4f76a43e9afa768e216b6fbaa9bc57f359636cf6
|
refs/heads/master
| 2021-01-15T12:02:31.440716
| 2017-09-20T03:28:17
| 2017-09-20T03:28:17
| 99,645,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
# -*- coding: UTF-8 -*-
# Kevin Yen-Kuan Lee
import urllib2
import requests
import re
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def send_email(recipient, subject, body):
import smtplib
user = ""
pwd = ""
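    # Credentials are blank in the published source; fill in a Gmail address
    # and password (an app password on accounts with 2-step verification)
    # before running this script.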
gmail_user = user
gmail_pwd = pwd
FROM = user
TO = recipient if type(recipient) is list else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server_ssl = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server_ssl.ehlo() # optional, called by login()
server_ssl.login(gmail_user, gmail_pwd)
# ssl server doesn't support or need tls, so don't call server_ssl.starttls()
server_ssl.sendmail(FROM, TO, message)
#server_ssl.quit()
server_ssl.close()
print 'successfully sent the mail'
except Exception,e:
print "failed to send mail"
def getPrice(Kurl):
content = requests.get(
url = Kurl
#url= 'https://www.ptt.cc/bbs/' + board + '/index.html',
#cookies={'over18': '1'}
).content.decode('utf-8')
Time = "NO TIME"
try:
Time = content.split("時間</span><span class=\"article-meta-value\">")[1].split("<")[0]
except:
pass
try:
price = content.split("[交易價格]:")[1].split("\n")[0]
        price = price.replace(",", "")
        price = price.replace("，", "")  # full-width comma (U+FF0C), common in Chinese posts
return int(re.search(r'\d+', price).group()),Time
#return price
except:
return -1,Time
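# getPrice therefore returns a (price, post_time) tuple: the first integer
# found after the "[交易價格]:" tag, or -1 when no price can be parsed, with
# post_time falling back to "NO TIME" when the article header is missing.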
UrlSet = set()
Mlist = list()
fr = open('output.txt','r')
while True:
line = fr.readline()
if not line:
break
line = line.replace("\n","")
Mlist.append(line)
try:
UrlSet.add(line.split("\t")[1])
except:
pass
fr.close()
f = open(sys.argv[1],'r')
fw = open('output.txt','w')
while True:
line = f.readline()
if not line:
break
tmp = line.split("\t")
price,Time = getPrice(tmp[0])
if tmp[0] in UrlSet:
continue
if price < 30000:
try:
title = line.split("\t")[1].split("\n")[0]
Iurl = line.split("\t")[0]
send_email("yenkuanlee@gmail.com",title,Time+"\n\n"+title+"\n\n"+Iurl+"\n\n"+str(price)+" 元"+"\n\n")
send_email("mnbm03409@gmail.com",title,Time+"\n\n"+title+"\n\n"+Iurl+"\n\n"+str(price)+" 元"+"\n\n")
except:
pass
print Time+"\t"+line+str(price)+"\n"
fw.write(Time+"\t"+line+str(price)+"\n\n")
f.close()
for x in Mlist:
fw.write(x+"\n")
fw.close()
|
[
"yenkuanlee@gmail.com"
] |
yenkuanlee@gmail.com
|
f32ec776b07d6d6cb688e572fb3e6331462f5c8c
|
503d552818dd192e163c3cb5bd61a189865c1759
|
/demo/demo/urls.py
|
c96ca30cafd0495da058e4e66474476d32476c14
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dyve/django-perm
|
63c1c5f13a0510404eff7f53435ae2caf96d97a5
|
16a8f5991600aba68a16240b4dd319ce345d8197
|
refs/heads/develop
| 2023-03-26T14:57:22.909652
| 2017-10-29T09:33:20
| 2017-10-29T09:33:20
| 9,869,046
| 2
| 3
| null | 2014-08-10T06:05:08
| 2013-05-05T13:44:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from .views import HomeView, ServerErrorView, ObjectDoesNotExistView, PermissionDeniedView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# urlpatterns = patterns('',
# # Examples:
# # url(r'^$', 'demo.views.home', name='home'),
# # url(r'^demo/', include('demo.foo.urls')),
#
# # Uncomment the admin/doc line below to enable admin documentation:
# # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
#
# # Uncomment the next line to enable the admin:
# # url(r'^admin/', include(admin.site.urls)),
# )
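# Note: patterns() was deprecated in Django 1.8 and removed in 1.10; on newer
# versions urlpatterns is written as a plain list of url()/path() entries.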
urlpatterns = patterns('',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^permission_denied$', PermissionDeniedView.as_view(), name='permission_denied'),
url(r'^object_does_not_exist$', ObjectDoesNotExistView.as_view(), name='object_does_not_exist'),
url(r'^server_error$', ServerErrorView.as_view(), name='server_error'),
)
|
[
"dylan@zostera.nl"
] |
dylan@zostera.nl
|
33981ef3da670d488abacf13f279b5fa2b9779c1
|
fdbb74a95924e2677466614f6ab6e2bb13b2a95a
|
/third_party/python/Lib/xml/sax/__init__.py
|
f18dd425ad438770ce42e9cce873ce05d468670f
|
[
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"ISC"
] |
permissive
|
jart/cosmopolitan
|
fb11b5658939023977060a7c6c71a74093d9cb44
|
0d748ad58e1063dd1f8560f18a0c75293b9415b7
|
refs/heads/master
| 2023-09-06T09:17:29.303607
| 2023-09-02T03:49:13
| 2023-09-02T03:50:18
| 272,457,606
| 11,887
| 435
|
ISC
| 2023-09-14T17:47:58
| 2020-06-15T14:16:13
|
C
|
UTF-8
|
Python
| false
| false
| 3,679
|
py
|
"""Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
if __name__ == 'PYOBJ.COM':
import xml.sax
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
import io
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
if isinstance(string, str):
inpsrc.setCharacterStream(io.StringIO(string))
else:
inpsrc.setByteStream(io.BytesIO(string))
parser.parse(inpsrc)
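# A minimal usage sketch (the handler name here is hypothetical): subclass
# ContentHandler, override the callbacks you need, and pass an instance to
# parseString:
#
#   class TagCounter(ContentHandler):
#       def __init__(self):
#           ContentHandler.__init__(self)
#           self.count = 0
#       def startElement(self, name, attrs):
#           self.count += 1
#
#   handler = TagCounter()
#   parseString("<root><a/><b/></root>", handler)
#   # handler.count is now 3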
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.expatreader
import os, sys
if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ:
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError as e:
import sys
if parser_name in sys.modules:
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
if sys.platform[:4] == "java":
    def _create_parser(parser_name):
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()
else:
    def _create_parser(parser_name):
        drv_module = __import__(parser_name, {}, {}, ['create_parser'])
        return drv_module.create_parser()
del sys
|
[
"jtunney@gmail.com"
] |
jtunney@gmail.com
|
78ff37a966cfd4e8f0a4606de9a2ec9638393962
|
641c9eaa5c1df74e6edfb79628c9f18b6d5d2b91
|
/test/test_validate_country_response.py
|
3a669f45b6948c4f8613fe7cccf9466ccc8d5461
|
[
"Apache-2.0"
] |
permissive
|
Cloudmersive/Cloudmersive.APIClient.Python.Validate
|
d2d9d0ceb3fce350fd0d86466bc8b5e153a5d478
|
274d38d4a849650402f3bb931799ab82502b4306
|
refs/heads/master
| 2023-06-10T04:01:35.332273
| 2023-06-03T22:34:47
| 2023-06-03T22:34:47
| 138,435,277
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
# coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_validate_api_client
from cloudmersive_validate_api_client.models.validate_country_response import ValidateCountryResponse # noqa: E501
from cloudmersive_validate_api_client.rest import ApiException
class TestValidateCountryResponse(unittest.TestCase):
"""ValidateCountryResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testValidateCountryResponse(self):
"""Test ValidateCountryResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_validate_api_client.models.validate_country_response.ValidateCountryResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"35204726+Cloudmersive@users.noreply.github.com"
] |
35204726+Cloudmersive@users.noreply.github.com
|