| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e54416140692cc6de81bef6a7723085a7570ffe0
|
9b29c9fffd33489a35225b256d148f1a6146137b
|
/Server/StartAppium.py
|
1d03d6ae9dfaadb81966adff4b31f36bdb966eba
|
[] |
no_license
|
fghad1991/companion
|
748bb9fa0753d4bdcc1cd9f53ec21a56b7ef2aa3
|
8a836531f15400eb392f12c85d5cdfaad02f1db9
|
refs/heads/master
| 2020-12-30T14:20:40.653720
| 2017-05-15T08:30:53
| 2017-05-15T08:30:53
| 91,313,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import socket

__author__ = 'JJLanMo'

class appium():
    def port_is_free(self, host, port):
        # Minimal helper, assumed: the original file calls this but never defines it.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            return sock.connect_ex((host, int(port))) != 0
        finally:
            sock.close()

    def start(self, host, port, bootstrap_port, session_override):
        errormsg = ""
        appium_server_url = ""
        appium_log_path = ""
        # Example: appium -a 127.0.0.1 -p 4726 --bootstrap-port 4780 --session-override --log "E:/appium" --command-timeout 600
        if self.port_is_free(host, port):
            cmd = 'start /b appium -a ' + host + ' -p ' + str(port) + ' --bootstrap-port ' + str(
                bootstrap_port) + ' --session-override --log ' + '"' + appium_log_path + '" --command-timeout 600'
        else:
            print("port %s is already in use" % port)
|
[
"13564733570@163.com"
] |
13564733570@163.com
|
61c54a8826e2a96a5e82ca3db7fd51286bf2f868
|
2222fa632138465c237e0d501a17e78bd41c4fca
|
/predict.py
|
6158b5f1f965669746d8b42bfa9fd8b2aff23e9a
|
[] |
no_license
|
minji9583/ai_chatbot_project
|
e3d1936091650150090c642fcda335c4a5583e3a
|
dafdf565bddb0841d5dc38dfb85a1b41310b87c8
|
refs/heads/master
| 2023-01-06T17:32:54.982468
| 2019-10-15T02:20:37
| 2019-10-15T02:20:37
| 207,702,203
| 4
| 1
| null | 2023-01-01T13:24:51
| 2019-09-11T02:08:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,403
|
py
|
import tensorflow as tf
import data
import sys
import model as ml
from configs import DEFINES

def eval():
    char2idx, idx2char, vocabulary_length = data.load_voc()
    train_input, train_label, eval_input, eval_label = data.load_data()
    # Build the encoder input for the evaluation set.
    eval_input_enc, eval_input_enc_length = data.enc_processing(eval_input, char2idx)
    # Build the decoder target for the evaluation set.
    eval_target_dec, _ = data.dec_target_processing(eval_label, char2idx)
    classifier = tf.estimator.Estimator(
        model_fn=ml.Model,  # register the model function
        model_dir=DEFINES.check_point_path,  # register the checkpoint location
        params={  # parameters handed through to the model
            'hidden_size': DEFINES.hidden_size,  # hidden (weight) size
            'layer_size': DEFINES.layer_size,  # number of stacked layers
            'learning_rate': DEFINES.learning_rate,  # learning rate
            'teacher_forcing_rate': DEFINES.teacher_forcing_rate,  # teacher-forcing rate for decoder inputs during training
            'vocabulary_length': vocabulary_length,  # vocabulary size
            'embedding_size': DEFINES.embedding_size,  # embedding size
            'embedding': DEFINES.embedding,  # whether to use embeddings
            'multilayer': DEFINES.multilayer,  # whether to use multiple layers
            'attention': DEFINES.attention,  # whether to use attention
            'teacher_forcing': DEFINES.teacher_forcing,  # whether to use teacher forcing during training
            'loss_mask': DEFINES.loss_mask,  # mask PAD tokens when computing the loss
            'serving': DEFINES.serving  # whether to export the model for serving
        })
    eval_result = classifier.evaluate(input_fn=lambda: data.eval_input_fn(
        eval_input_enc, eval_target_dec, DEFINES.batch_size))
    print('\ntest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

def predict(input_data):
    # Build the vocabulary from the data.
    char2idx, idx2char, vocabulary_length = data.load_voc()
    # Build the test-time input: the encoder part.
    input = input_data
    predic_input_enc, predic_input_enc_length = data.enc_processing([input], char2idx)
    # This is not training, so no real decoder input or target exists;
    # an empty one is passed in only to satisfy the expected structure.
    predic_target_dec, _ = data.dec_target_processing([""], char2idx)
    if DEFINES.serving:
        # Point export_dir at the location where the model was saved.
        predictor_fn = tf.contrib.predictor.from_saved_model(
            export_dir="/home/evo_mind/DeepLearning/NLP/Work/ChatBot2_Final/data_out/model/1541575161"
        )
    else:
        # Build the estimator.
        classifier = tf.estimator.Estimator(
            model_fn=ml.Model,  # register the model function
            model_dir=DEFINES.check_point_path,  # register the checkpoint location
            params={  # parameters handed through to the model
                'hidden_size': DEFINES.hidden_size,  # hidden (weight) size
                'layer_size': DEFINES.layer_size,  # number of stacked layers
                'learning_rate': DEFINES.learning_rate,  # learning rate
                'teacher_forcing_rate': DEFINES.teacher_forcing_rate,  # teacher-forcing rate for decoder inputs during training
                'vocabulary_length': vocabulary_length,  # vocabulary size
                'embedding_size': DEFINES.embedding_size,  # embedding size
                'embedding': DEFINES.embedding,  # whether to use embeddings
                'multilayer': DEFINES.multilayer,  # whether to use multiple layers
                'attention': DEFINES.attention,  # whether to use attention
                'teacher_forcing': DEFINES.teacher_forcing,  # whether to use teacher forcing during training
                'loss_mask': DEFINES.loss_mask,  # mask PAD tokens when computing the loss
                'serving': DEFINES.serving  # whether to export the model for serving
            })
    if DEFINES.serving:
        predictions = predictor_fn({'input': predic_input_enc, 'output': predic_target_dec})
        # data.pred2string(predictions, idx2char)
    else:
        # Run the actual prediction.
        predictions = classifier.predict(
            input_fn=lambda: data.eval_input_fn(predic_input_enc, predic_target_dec, DEFINES.batch_size))
    # Convert the predicted indices back to text so the answer is readable.
    # data.pred2string(predictions, idx2char)
    return data.pred_next_string(predictions, idx2char)

def main(argv):
    char2idx, idx2char, vocabulary_length = data.load_voc()
    print(predict("졸려"))  # sample Korean input, "I'm sleepy"
    eval()

if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
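# Usage sketch: running the file directly answers the sample input and then runs
# evaluation; checkpoints, vocab, and hyperparameters all come from configs.DEFINES.
#   python predict.py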
|
[
"gpfhddl09@gmail.com"
] |
gpfhddl09@gmail.com
|
d4c173c2e18b79464f26d50aed0cb587a711543d
|
20e7921251e81518fccb87a583d0a0c8c284d539
|
/orayeb/asgi.py
|
db1930dd39872542af7b6425ca6c5dbba490fe2b
|
[] |
no_license
|
ysawiris/orayeb
|
fd2c88c39807d378178f2db7ac0626e3ed38b9d5
|
ba24bfc74f34edf0f9d7d553d39723ab32a6b21f
|
refs/heads/main
| 2023-02-23T14:41:29.949013
| 2021-01-12T23:24:57
| 2021-01-12T23:24:57
| 329,027,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
ASGI config for orayeb project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'orayeb.settings')
application = get_asgi_application()
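# Typical way to serve it (server choice is an assumption, not part of the repo):
#   uvicorn orayeb.asgi:application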
|
[
"youssef.sawiris@students.dominican.edu"
] |
youssef.sawiris@students.dominican.edu
|
1f4c69f1e02e3cd6bc5a0a58287d130163386f84
|
ad1e55b9a67c798cf4b4ce41c76b26977f8b4e8d
|
/vendor-local/celery/tests/test_events/test_events_state.py
|
b8817f5a83f3398df59f2048f1e18115062a33b1
|
[
"BSD-3-Clause"
] |
permissive
|
kumar303/rockit
|
7a6ac84bb8c37e5f3b65d7dcecf9b9c549902cf5
|
fc347b5b143835ddd77fd0c1ea4e6f2007a21972
|
refs/heads/master
| 2021-01-10T19:51:30.638073
| 2020-07-26T19:00:37
| 2020-07-26T19:00:37
| 4,219,328
| 0
| 2
|
BSD-3-Clause
| 2020-07-26T19:00:38
| 2012-05-03T22:03:24
|
Python
|
UTF-8
|
Python
| false
| false
| 10,736
|
py
|
from __future__ import absolute_import
from time import time
from itertools import count
from celery import states
from celery.events import Event
from celery.events.state import State, Worker, Task, HEARTBEAT_EXPIRE
from celery.utils import uuid
from celery.tests.utils import Case
class replay(object):
def __init__(self, state):
self.state = state
self.rewind()
self.setup()
def setup(self):
pass
def __iter__(self):
return self
def __next__(self):
try:
self.state.event(self.events[self.position()])
except IndexError:
raise StopIteration()
next = __next__
def rewind(self):
self.position = count(0).next
return self
def play(self):
for _ in self:
pass
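# Each replay subclass fills self.events in setup(); iterating an instance feeds
# those events one at a time into the wrapped State, simulating a monitor
# consuming a live event stream.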
class ev_worker_online_offline(replay):
def setup(self):
self.events = [
Event("worker-online", hostname="utest1"),
Event("worker-offline", hostname="utest1"),
]
class ev_worker_heartbeats(replay):
def setup(self):
self.events = [
Event("worker-heartbeat", hostname="utest1",
timestamp=time() - HEARTBEAT_EXPIRE * 2),
Event("worker-heartbeat", hostname="utest1"),
]
class ev_task_states(replay):
def setup(self):
tid = self.tid = uuid()
self.events = [
Event("task-received", uuid=tid, name="task1",
args="(2, 2)", kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname="utest1"),
Event("task-started", uuid=tid, hostname="utest1"),
Event("task-revoked", uuid=tid, hostname="utest1"),
Event("task-retried", uuid=tid, exception="KeyError('bar')",
traceback="line 2 at main", hostname="utest1"),
Event("task-failed", uuid=tid, exception="KeyError('foo')",
traceback="line 1 at main", hostname="utest1"),
Event("task-succeeded", uuid=tid, result="4",
runtime=0.1234, hostname="utest1"),
]
class ev_snapshot(replay):
def setup(self):
self.events = [
Event("worker-online", hostname="utest1"),
Event("worker-online", hostname="utest2"),
Event("worker-online", hostname="utest3"),
]
for i in range(20):
worker = not i % 2 and "utest2" or "utest1"
type = not i % 2 and "task2" or "task1"
self.events.append(Event("task-received", name=type,
uuid=uuid(), hostname=worker))
class test_Worker(Case):
def test_survives_missing_timestamp(self):
worker = Worker(hostname="foo")
worker.on_heartbeat(timestamp=None)
self.assertEqual(worker.heartbeats, [])
def test_repr(self):
self.assertTrue(repr(Worker(hostname="foo")))
class test_Task(Case):
def test_info(self):
task = Task(uuid="abcdefg",
name="tasks.add",
args="(2, 2)",
kwargs="{}",
retries=2,
result=42,
eta=1,
runtime=0.0001,
expires=1,
exception=1,
received=time() - 10,
started=time() - 8,
succeeded=time())
self.assertEqual(sorted(list(task._info_fields)),
sorted(task.info().keys()))
self.assertEqual(sorted(list(task._info_fields + ("received", ))),
sorted(task.info(extra=("received", ))))
self.assertEqual(sorted(["args", "kwargs"]),
sorted(task.info(["args", "kwargs"]).keys()))
def test_ready(self):
task = Task(uuid="abcdefg",
name="tasks.add")
task.on_received(timestamp=time())
self.assertFalse(task.ready)
task.on_succeeded(timestamp=time())
self.assertTrue(task.ready)
def test_sent(self):
task = Task(uuid="abcdefg",
name="tasks.add")
task.on_sent(timestamp=time())
self.assertEqual(task.state, states.PENDING)
def test_merge(self):
task = Task()
task.on_failed(timestamp=time())
task.on_started(timestamp=time())
task.on_received(timestamp=time(), name="tasks.add", args=(2, 2))
self.assertEqual(task.state, states.FAILURE)
self.assertEqual(task.name, "tasks.add")
self.assertTupleEqual(task.args, (2, 2))
task.on_retried(timestamp=time())
self.assertEqual(task.state, states.RETRY)
def test_repr(self):
self.assertTrue(repr(Task(uuid="xxx", name="tasks.add")))
class test_State(Case):
def test_repr(self):
self.assertTrue(repr(State()))
def test_worker_online_offline(self):
r = ev_worker_online_offline(State())
r.next()
self.assertTrue(r.state.alive_workers())
self.assertTrue(r.state.workers["utest1"].alive)
r.play()
self.assertFalse(r.state.alive_workers())
self.assertFalse(r.state.workers["utest1"].alive)
def test_worker_heartbeat_expire(self):
r = ev_worker_heartbeats(State())
r.next()
self.assertFalse(r.state.alive_workers())
self.assertFalse(r.state.workers["utest1"].alive)
r.play()
self.assertTrue(r.state.alive_workers())
self.assertTrue(r.state.workers["utest1"].alive)
def test_task_states(self):
r = ev_task_states(State())
# RECEIVED
r.next()
self.assertTrue(r.tid in r.state.tasks)
task = r.state.tasks[r.tid]
self.assertEqual(task.state, states.RECEIVED)
self.assertTrue(task.received)
self.assertEqual(task.timestamp, task.received)
self.assertEqual(task.worker.hostname, "utest1")
# STARTED
r.next()
self.assertTrue(r.state.workers["utest1"].alive,
"any task event adds worker heartbeat")
self.assertEqual(task.state, states.STARTED)
self.assertTrue(task.started)
self.assertEqual(task.timestamp, task.started)
self.assertEqual(task.worker.hostname, "utest1")
# REVOKED
r.next()
self.assertEqual(task.state, states.REVOKED)
self.assertTrue(task.revoked)
self.assertEqual(task.timestamp, task.revoked)
self.assertEqual(task.worker.hostname, "utest1")
# RETRY
r.next()
self.assertEqual(task.state, states.RETRY)
self.assertTrue(task.retried)
self.assertEqual(task.timestamp, task.retried)
self.assertEqual(task.worker.hostname, "utest1")
self.assertEqual(task.exception, "KeyError('bar')")
self.assertEqual(task.traceback, "line 2 at main")
# FAILURE
r.next()
self.assertEqual(task.state, states.FAILURE)
self.assertTrue(task.failed)
self.assertEqual(task.timestamp, task.failed)
self.assertEqual(task.worker.hostname, "utest1")
self.assertEqual(task.exception, "KeyError('foo')")
self.assertEqual(task.traceback, "line 1 at main")
# SUCCESS
r.next()
self.assertEqual(task.state, states.SUCCESS)
self.assertTrue(task.succeeded)
self.assertEqual(task.timestamp, task.succeeded)
self.assertEqual(task.worker.hostname, "utest1")
self.assertEqual(task.result, "4")
self.assertEqual(task.runtime, 0.1234)
def assertStateEmpty(self, state):
self.assertFalse(state.tasks)
self.assertFalse(state.workers)
self.assertFalse(state.event_count)
self.assertFalse(state.task_count)
def assertState(self, state):
self.assertTrue(state.tasks)
self.assertTrue(state.workers)
self.assertTrue(state.event_count)
self.assertTrue(state.task_count)
def test_freeze_while(self):
s = State()
r = ev_snapshot(s)
r.play()
def work():
pass
s.freeze_while(work, clear_after=True)
self.assertFalse(s.event_count)
s2 = State()
r = ev_snapshot(s2)
r.play()
s2.freeze_while(work, clear_after=False)
self.assertTrue(s2.event_count)
def test_clear_tasks(self):
s = State()
r = ev_snapshot(s)
r.play()
self.assertTrue(s.tasks)
s.clear_tasks(ready=False)
self.assertFalse(s.tasks)
def test_clear(self):
r = ev_snapshot(State())
r.play()
self.assertTrue(r.state.event_count)
self.assertTrue(r.state.workers)
self.assertTrue(r.state.tasks)
self.assertTrue(r.state.task_count)
r.state.clear()
self.assertFalse(r.state.event_count)
self.assertFalse(r.state.workers)
self.assertTrue(r.state.tasks)
self.assertFalse(r.state.task_count)
r.state.clear(False)
self.assertFalse(r.state.tasks)
def test_task_types(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(sorted(r.state.task_types()), ["task1", "task2"])
def test_tasks_by_timestamp(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_timestamp()), 20)
def test_tasks_by_type(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_type("task1")), 10)
self.assertEqual(len(r.state.tasks_by_type("task2")), 10)
def test_alive_workers(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.alive_workers()), 3)
def test_tasks_by_worker(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_worker("utest1")), 10)
self.assertEqual(len(r.state.tasks_by_worker("utest2")), 10)
def test_survives_unknown_worker_event(self):
s = State()
s.worker_event("worker-unknown-event-xxx", {"foo": "bar"})
s.worker_event("worker-unknown-event-xxx", {"hostname": "xxx",
"foo": "bar"})
def test_survives_unknown_task_event(self):
s = State()
s.task_event("task-unknown-event-xxx", {"foo": "bar",
"uuid": "x",
"hostname": "y"})
def test_callback(self):
scratch = {}
def callback(state, event):
scratch["recv"] = True
s = State(callback=callback)
s.event({"type": "worker-online"})
self.assertTrue(scratch.get("recv"))
|
[
"kumar.mcmillan@gmail.com"
] |
kumar.mcmillan@gmail.com
|
8b93e8f95d8ebe73848675dccb06fce575530545
|
994ae8f8dec8e4484ea490b3874991fd6f788b7e
|
/ARTools/ARUtils.py
|
08633d2b1a1e494907237337e3c97e0fd3b57b4f
|
[] |
no_license
|
Leslie-zh/autoRig_Tools
|
c249f6b942d3a065e5aba203a5172ed9e689c856
|
9aa833ce674095fb24e91a7f9c9b5cb6435b4655
|
refs/heads/master
| 2022-03-15T04:26:47.502259
| 2019-04-22T18:59:07
| 2019-04-22T18:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,732
|
py
|
from maya import cmds
from maya import OpenMaya
from maya import OpenMayaAnim
from maya import mel
import re
import pymel.core as pm
import math
import logging
logging.basicConfig()
logger = logging.getLogger('RigUtils:')
logger.setLevel(logging.DEBUG)
def snapIkFk(controller):
"""
    snap ik to fk, or fk to ik, depending on the current ikFk attribute value
:param controller(str or pm): Controller with ikFk shape attribute
:return:
"""
    # accept either a node name or a PyNode
    if isinstance(controller, str):
        ikCtr = pm.PyNode(controller)
    else:
        ikCtr = controller
########################################
## Find all controls and main objects ##
########################################
# get locator shape, it is common in all ik and fk controllers.
# also it has the ikFk info
locatorS = ikCtr.listRelatives(s=True, type=pm.nodetypes.Locator)[0]
logger.debug('locatorS: %s' % locatorS)
if not locatorS:
logger.info('is not a ik fk chain')
return
# ikFk attribute value
ikFkAttr = locatorS.ikFk
instances = locatorS.getInstances()
logger.debug('locator instances: %s' % instances)
ikCtrList = [] # system ik controllers
fkCtrList = [] # # system Fk controllers
# get controllers from instances of locator
for i in instances:
controller = i.split('|')[-2]
controller = pm.PyNode(controller)
if 'ik' in str(controller):
ikCtrList.append(controller)
elif 'fk' in str(controller):
fkCtrList.append(controller)
# reverse lists, from nearest to world to last child
ikCtrList = list(reversed(ikCtrList))
fkCtrList = list(reversed(fkCtrList))
# ik Stretch node with stretch value
ikStretchNode = None
try:
# use top fk controller, because it has the correct base name of the system.
ikStretchNode = str(fkCtrList[0]).split('_')[0:3] # base name of the system
ikStretchNode = pm.PyNode('_'.join(ikStretchNode+['ikStretch','stretchValue','condition'])) # name of the stretch ik node
except:
pass
# get system main joints
mainJointList = [pm.PyNode(str(fk).replace('fk', 'main').replace('ctr', 'joint')) for fk in fkCtrList]
# get pole controller
ikHandle = ikCtrList[0].listRelatives(ad=True, type='ikHandle')[0]
poleConstraint = ikHandle.poleVectorX.inputs(type='constraint')[0]
pole = poleConstraint.target[0].targetTranslate.inputs(type='transform')[0]
## find child controllers, fingers, foot controllers, etc ##
ikChildList = [ctr.getTransform() for ctr in ikCtrList[0].listRelatives(ad=True, type='nurbsCurve') if
ctr.getTransform() not in ikCtrList]
fkChildList = [ctr.getTransform() for ctr in fkCtrList[0].listRelatives(ad=True, type='nurbsCurve') if
ctr.getTransform() not in fkCtrList]
mainChildList = [ctr for ctr in mainJointList[0].listRelatives(ad=True, type='joint') if ctr not in mainJointList]
ikChildCommonCtr = []
fkChildCommonCtr = []
ikFkChildCommonCtr = [] # [ikCtrA, fkCtrA, ikCtrB, fkctrB,...] controllers only appear in ik and fk chains
mainChildCommonJnt = []
# get common controllers between lists
    # iterate over a copy of fkChildList, because we will remove members as we go
    for i, fkCtr in enumerate(list(fkChildList)):
        # check whether the matching ik control exists
        try:
            # a ctr may exist in ik and fk but not in main, like the generaToe ctr
ikCtr = pm.PyNode(str(fkCtr).replace('fk', 'ik'))
try:
# try if it exists in main too
mainJnt = pm.PyNode(str(fkCtr).replace('fk', 'main').replace('ctr', 'joint'))
mainChildCommonJnt.append(mainJnt)
mainChildList.remove(mainJnt)
except:
# if controller is not in main, it is a special controller, only for ik and fk chains
ikFkChildCommonCtr.append(ikCtr)
ikFkChildCommonCtr.append(fkCtr)
fkChildList.remove(fkCtr)
ikChildList.remove(ikCtr)
# continue next loop
continue
# append to common lists
fkChildCommonCtr.append(fkCtr)
ikChildCommonCtr.append(ikCtr)
# remove from child lists
fkChildList.remove(fkCtr)
ikChildList.remove(ikCtr)
except:
pass
    # find possible parents of the system, e.g. clavicle
    # use the skin skeleton, because it has a cleaner hierarchy and is easier to walk
skinJoint = pm.PyNode(str(fkCtrList[0]).replace('fk', 'skin').replace('ctr', 'joint'))
parentJoint = skinJoint.firstParent() # first parent for the test
parentFkCtrList = []
# iterate over the parents to find valid system parents, like clavicle
# valid parents only can have 1 joint child
while True:
childCount = parentJoint.getChildren(type='joint')
if len(childCount) > 1:
# more than 1 child joint, isn't valid
logger.debug('No parent ctr found')
break
else:
try:
parentCtr = pm.PyNode(str(parentJoint).replace('skin', 'fk').replace('joint','ctr'))
except:
# do not exists a valid ctr, break the iteration
logger.debug('No parent ctr found')
break
# save the control and try another joint
parentFkCtrList.append(parentCtr)
parentJoint = parentJoint.firstParent()
##########
#ik -> fk#
##########
if ikFkAttr.get():
# parent ctr, like clavicles
if parentFkCtrList:
parentRotation = parentFkCtrList[0].getRotation('world')
zoneName = str(parentFkCtrList[0]).split('_')[1] # zone str name
try:
# try if it has an auto attribute, if system does not has an auto attribute,
# it is not necessary apply rotation
locatorS.attr('auto%s' % zoneName.capitalize()).set(0)
parentFkCtrList[0].setRotation(parentRotation, 'world')
except:
pass
# copy stretch factor, if the system has stretch option
if ikStretchNode:
locatorS.fkStretch.set(ikStretchNode.outColorR.get())
# copy rotation from main joints
for i, mainj in enumerate(mainJointList):
fkCtrList[i].setRotation(mainj.getRotation())
# cmds.xform(fkControllers[i], a=True, eu=True, ro=cmds.xform(mainj, a=True, eu=True, q=True, ro=True))
# last system ctr, foot or hand
# if we have ikChilds, we have a foot system, so we snap to 'ball' controller (the last)
if ikChildList:
#print 'ballCtr', ikChildList[-1]
fkCtrList[-1].setRotation(ikChildList[-1].getRotation('world'), 'world')
# ikFk exclusive controllers
#for i in range(0, len(ikFkChildCommonCtr), 2):
#ikFkChildCommonCtr[i+1].setRotation(ikFkChildCommonCtr[i].getRotation('world'), 'world')
        # ik Fk main common controllers, general first
for i, fkCtr in enumerate(fkChildCommonCtr):
fkCtr.setRotation(mainChildCommonJnt[i].getRotation('world'), 'world')
ikFkAttr.set(0)
##########
#fk -> ik#
##########
elif not ikFkAttr.get():
# reset walk values
if ikChildList: # ikControllers only just like walk controllers
ikCtrAttributes = pm.listAttr(ikCtrList[0], ud=True, k=True)
for attr in ikCtrAttributes:
ikCtrList[0].attr('%s' % attr).set(0)
# set ikChildCtr to 0
for ikCtr in ikChildList:
ikCtr.setRotation((0,0,0))
pm.xform(ikCtrList[0], ws=True, m=pm.xform(fkCtrList[-1], q=True, ws=True, m=True))
        # ikFk exclusive controllers: snap each ik ctr to its paired fk ctr
        for i in range(0, len(ikFkChildCommonCtr), 2):
            ikFkChildCommonCtr[i].setRotation(ikFkChildCommonCtr[i+1].getRotation('world'), 'world')
        # ik Fk main common controllers, general first
for i, ikCtr in enumerate(ikChildCommonCtr):
ikCtr.setRotation(mainChildCommonJnt[i].getRotation('world'), 'world')
if pole:
            # poleVector: use the vector addition property
upperLegPos = mainJointList[0].getTranslation('world')
lowerLegPos = mainJointList[1].getTranslation('world')
footPos = mainJointList[2].getTranslation('world')
vector1 = pm.datatypes.Vector(lowerLegPos-upperLegPos)
vector1.normalize()
vector2 = pm.datatypes.Vector(lowerLegPos - footPos)
vector2.normalize()
poleVectorPos = vector1 + vector2
poleVectorPos.normalize()
# multiply the resultant vector by the value we want, this way we can control the distance
poleVectorPos = poleVectorPos * 20
# set pole vector position
pole.setTranslation(poleVectorPos+lowerLegPos, 'world')
ikFkAttr.set(1)
def neckHeadIsolateSnap(name, zone, controller, point, orient):
"""
TODO: valid with only 1 argument
isolate to 0 or 1 and snap controllers
args:
name(str): character name
zone(str): zone of controller
controller(str): controller type
point (bool): if true, snap translation
orient (bool): if true, snap rotation
"""
headControl = '%s_IK_%s_%s_1_ctr' % (name, zone, controller)
# check if exist
if not cmds.objExists(headControl):
        print('%s does not exist' % headControl)
return
# save transforms
headControlTranslate = cmds.xform(headControl, q=True, ws=True, m=True)
if orient:
# set orient
print ('set orient')
isolate = not cmds.getAttr('%s.isolateOrient' % headControl)
cmds.setAttr('%s.isolateOrient' % headControl, isolate)
if point:
# set position
print ('set point')
isolate = not cmds.getAttr('%s.isolatePoint' % headControl)
cmds.setAttr('%s.isolatePoint' % headControl, isolate)
# Transform head control
cmds.xform(headControl, ws=True, m=headControlTranslate)
## Proxies ##
# TODO: make class, include in the picker
#UI
def proxyShowUI(name):
"""
Activate proxies UI
Args:
name(str): name of the character
"""
windowName = '%sShowProxiesUI' % name.capitalize()
# check if window exists
if cmds.window(windowName, q=True, exists=True):
cmds.deleteUI(windowName)
cmds.window(windowName)
# proxy str
proxyStr = 'proxy'
# def window
column = cmds.columnLayout(adj=True, co=('both', 10))
cmds.text(label='%s Proxies' % name.capitalize())
cmds.separator(visible=True, h=20)
# row layout to store chBox and button
cmds.rowLayout(nc=2, adjustableColumn=2)
# check state
parent, state = checkState(name, proxyStr)
# ui widgets
chBox = cmds.checkBox(label='Parent', value=parent, enable=not state)
buttonName = 'Model' if state else 'Proxies'
button = cmds.button(buttonName, command=lambda x: proxyShowUIButton(name, chBox, button, proxyStr))
# show window
cmds.showWindow()
def proxyShowUIButton(name, chBox, button, proxyStr, *args):
"""
UI proxies button, turn on or off proxies.
Args:
name(str): name of the character
chBox(str): UI checkBox
button(str): UI button
proxyStr(str): common word for all the proxies
*args:
"""
chBoxValue = cmds.checkBox(chBox, q=True, v=True)
# list proxies
proxyList = cmds.ls('*%s' % proxyStr, type='transform')
# connect proxies by parenting
if chBoxValue:
value = proxyModelParent(name, proxyList, proxyStr)
# connect proxies by constraint
else:
value = proxyModelConstraints(name, proxyList, proxyStr)
# change button name and disable chBox
if value:
cmds.button(button, e=True, label='Model')
cmds.checkBox(chBox, e=True, enable=False)
# change button name and enable chBox
else:
cmds.button(button, e=True, label='Proxies')
cmds.checkBox(chBox, e=True, enable=True)
def checkState(name, proxyStr):
"""
check the state of the proxies
return:
(bool): True, parent. False, constraint
(bool): True, proxies active. False, model active
"""
proxyGrp = '%s_%s_grp' % (name, proxyStr)
proxyConstraints = cmds.listRelatives(proxyGrp, ad=True, type='constraint')
proxyTransforms = [proxy for proxy in cmds.listRelatives('%s_rig_grp' % name, ad=True, type='transform') if
proxyStr in proxy]
if proxyConstraints:
return False, True
elif proxyTransforms:
return True, True
else:
return False, False
# proxy scripts
def proxyModelConstraints(name, proxies, proxyStr):
"""
Connect proxy models to deform joints by constraints
Args:
name(str): name of the character
proxies(list(str)): list with proxies
proxyStr(str): common word for all the proxies
"""
proxyGrp = '%s_%s_grp' % (name, proxyStr)
proxyConstraints = cmds.listRelatives(proxyGrp, ad=True, type='constraint')
# disconnect proxies
if proxyConstraints:
ProxyDisconnectConstraints(name, 0)
cmds.delete(proxyConstraints)
cmds.setAttr('%s.visibility' % proxyGrp, 0)
return False # useful to change elements of the ui
# connect proxies
else:
ProxyDisconnectConstraints(name, 1)
cmds.setAttr('%s.visibility' % proxyGrp, 1)
for proxy in proxies:
try:
mainName = proxy.replace(proxyStr, 'main')
cmds.parentConstraint(mainName, proxy, maintainOffset=False)
cmds.scaleConstraint(mainName, proxy, maintainOffset=False)
except:
ctrName = proxy.replace(proxyStr, 'ctr')
cmds.parentConstraint(ctrName, proxy, maintainOffset=False)
cmds.scaleConstraint(ctrName, proxy, maintainOffset=False)
return True # useful to change elements of the ui
def proxyModelParent(name, proxies, proxyStr):
"""
Connect proxy models to deform joints by parenting them
Args:
name(str): name of the character
proxies(list(str)): list with proxies
proxyStr(str): common word for all the proxies
"""
proxyTransforms = [proxy for proxy in cmds.listRelatives('%s_rig_grp' % name, ad=True, type='transform') if
proxyStr in proxy]
proxyGrp = '%s_%s_grp' % (name, proxyStr)
# unparent proxies
if proxyTransforms:
        ProxyDisconnectConstraints(name, 0)  # value 0 -> connect
cmds.parent(proxyTransforms, '%s_%s_grp' % (name, proxyStr))
cmds.setAttr('%s.visibility' % proxyGrp, 0)
return False # useful to change elements of the ui
# parent proxies
else:
        ProxyDisconnectConstraints(name, 1)  # value 1 -> disconnect
for proxy in proxies:
try:
mainName = proxy.replace(proxyStr, 'main')
cmds.parent(proxy, mainName)
cmds.xform(proxy, os=True, t=(0, 0, 0), ro=(0, 0, 0), s=(1, 1, 1))
except:
ctrName = proxy.replace(proxyStr, 'ctr')
cmds.parent(proxy, ctrName)
cmds.xform(proxy, os=True, t=(0, 0, 0), ro=(0, 0, 0), s=(1, 1, 1))
return True # useful to change elements of the ui
def ProxyDisconnectConstraints(name, value):
"""
Turn off all deformable joint constraints, and skinClusters
Args:
name(str): Character name
value(int): 1->disconnect, 0->connect
"""
jointsGrp = '%s_joints_grp' % name
modelGrp = '%s_model_grp' % name
constraints = cmds.ls(jointsGrp, dag=True, type='constraint')
meshList = cmds.ls(modelGrp, dag=True, type='mesh')
# groups visibility
cmds.setAttr('%s.visibility' % jointsGrp, not value)
cmds.setAttr('%s.visibility' % modelGrp, not value)
for constraint in constraints:
cmds.setAttr('%s.nodeState' % constraint, value) # disconnect
cmds.setAttr('%s.frozen' % constraint, value)
for mesh in meshList:
skinNode = cmds.listConnections(mesh, d=True, type='skinCluster')
if skinNode:
cmds.setAttr('%s.nodeState' % skinNode[0], value) # disconnect
cmds.setAttr('%s.frozen' % skinNode[0], value)
"""
if __name__ == '__main__':
proxyShowUI('akona')
"""
|
[
"Jap3dWorks@gmail.com"
] |
Jap3dWorks@gmail.com
|
0894168cf481341050dd22efc8f324aea99c393d
|
040a735434f94168c34543c69291a6860d83ad01
|
/exel/xl_snake.py
|
87e8da4d19991f9450eba8e30d5ca4365132e651
|
[] |
no_license
|
ngphatpython190/tren-mang
|
a1fb5d9f01e68ac40f4bc9f8e12b5744fc5d7d87
|
9276e53f38218e97c5bcdecb74442fe01b87af4d
|
refs/heads/main
| 2023-09-03T11:19:12.657557
| 2021-11-06T11:35:11
| 2021-11-06T11:35:11
| 418,477,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,012
|
py
|
import xlwings as xw
from string import ascii_uppercase
from random import randint
from time import sleep
from sys import exit
class Snake:
def __init__(self,speed,width,height):
# Book setup
self.book = xw.Book()
self.sheet = self.book.sheets[0]
self.sheet.name = 'snake'
# board setup
self.speed = 1 / speed
self.width = width
self.height = height
self.board_setup()
# snake setup
self.body = [(int(height / 2),5),(int(height / 2),4),(int(height / 2),3)]
self.direction = (0,1)
self.eaten = False
self.create_apple()
def board_setup(self):
# background colors
game_cells = f'B2:{ascii_uppercase[self.width]}{self.height + 1}'
self.sheet[game_cells].color = board_color
control_cells = f'B{self.height + 2}:{ascii_uppercase[self.width]}{self.height + 6}'
self.sheet[control_cells].color = control_color
# buttons
self.exit_cell = f'{ascii_uppercase[self.width]}{self.height + 6}'
self.sheet[self.exit_cell].value = 'quit'
self.left_cell = f'C{self.height + 4}'
self.sheet[self.left_cell].value = 'left'
self.right_cell = f'E{self.height + 4}'
self.sheet[self.right_cell].value = 'right'
self.up_cell = f'D{self.height + 3}'
self.sheet[self.up_cell].value = 'up'
self.down_cell = f'D{self.height + 5}'
self.sheet[self.down_cell].value = 'down'
# button styling
for button in [self.exit_cell,self.left_cell,self.right_cell,self.up_cell,self.down_cell]:
self.sheet[button].color = button_color
self.sheet[button].font.color = text_color
# cell dimensions
self.sheet[f'B2:B{self.height + 6}'].row_height = 40
def display_game_elements(self):
# apple display
self.sheet[self.apple_pos].color = apple_color
# snake display
for index,cell in enumerate(self.body):
if index == 0:
self.sheet[cell].color = head_color
else:
self.sheet[cell].color = body_color
def create_apple(self):
# get a random cell (row,col)
row = randint(1,self.height)
col = randint(1,self.width)
# check if apple is below snake
while (row,col) in self.body:
row = randint(1,self.height)
col = randint(1,self.width)
self.apple_pos = (row,col)
def input(self):
selected_cell = self.book.selection.address.replace('$','')
if selected_cell == self.right_cell:
self.direction = (0,1)
elif selected_cell == self.left_cell:
self.direction = (0,-1)
elif selected_cell == self.up_cell:
self.direction = (-1,0)
elif selected_cell == self.down_cell:
self.direction = (1,0)
def exit_game(self):
selected_cell = self.book.selection.address.replace('$','')
if selected_cell == self.exit_cell:
self.book.close()
exit()
def move_snake(self):
if self.eaten:
new_body = self.body[:]
self.eaten = False
else:
lost_cell = self.body[-1]
new_body = self.body[:-1]
self.sheet[lost_cell].color = board_color
new_head = self.add_direction(new_body[0],self.direction)
new_body.insert(0,new_head)
self.body = new_body
def add_direction(self,cell,direction):
row = cell[0] + direction[0]
col = cell[1] + direction[1]
return(row,col)
def check_apple_collision(self):
if self.body[0] == self.apple_pos:
self.eaten = True
self.create_apple()
def check_fail(self):
head = self.body[0]
body = self.body[1:]
if head in body or head[1] <= 0 or head[1] >= self.width + 1 or head[0] <= 0 or head[0] >= self.height + 1:
self.book.close()
exit()
def run(self):
while True:
self.exit_game()
sleep(self.speed)
self.input()
self.move_snake()
self.check_apple_collision()
self.check_fail()
self.display_game_elements()
# COLORS
board_color = (226,227,223)
control_color = (46,50,51)
button_color = (81,91,94)
text_color = (255, 255, 255)
apple_color = (0,255,100)
head_color = (255,0,0)
body_color = (200,0,0)
snake = Snake(3,12,8)
snake.run()
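# The game is driven from the worksheet itself: selecting the left/right/up/down
# cells steers the snake on the next tick, and selecting the quit cell closes
# the workbook and exits.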
|
[
"noreply@github.com"
] |
ngphatpython190.noreply@github.com
|
bb88c6ede9da6acfa26ee404324f10121a2c33c1
|
9695228629e72a9074882211603d12dae51abf3e
|
/show/views.py
|
8c9a14b21b2aa359624f1980e18db48375fab06f
|
[] |
no_license
|
ken890728/maze
|
b875880adc08800f03adbb4a0ce3a6c6adbe8786
|
a30cca47121a238df0a84b10b00e5b245abf0bac
|
refs/heads/master
| 2021-01-15T22:08:51.058562
| 2017-08-10T07:17:04
| 2017-08-10T07:17:04
| 99,888,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
from django.shortcuts import render_to_response, redirect
from django.http import HttpResponse
from django.template import RequestContext
from models import Record
def index(request):
return render_to_response('index.html')
def add(request, seconds):
record = Record(seconds=seconds)
record.save()
return redirect("/show")
def show(request):
records = Record.objects.all().order_by("-id")
return render_to_response('show.html',{'records':records}, context_instance=RequestContext(request))
|
[
"ken22436653@gmail.com"
] |
ken22436653@gmail.com
|
88fc2053e80bc58ae209cdfe9ed49b6a4bcafc2c
|
c3082eb2adc43b311dd3c9ff16fd3ed9df85f266
|
/python/examples/gravatar.py
|
02ce05805f996cdb851c9e6a8fb3a644257d5e44
|
[] |
no_license
|
szabgab/slides
|
78818c7138331b3ba9e221c81da3678a46efe9b3
|
63bba06678554db737602f2fbcd6510c36037e8a
|
refs/heads/main
| 2023-08-31T07:13:51.536711
| 2023-08-29T13:17:59
| 2023-08-29T13:17:59
| 122,212,527
| 87
| 69
| null | 2023-05-19T06:55:11
| 2018-02-20T14:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
import hashlib
import sys
def gravatar(email):
return hashlib.md5(email.strip().lower().encode('utf8')).hexdigest()
if len(sys.argv) != 2:
exit(f"Usage: {sys.argv[0]} EMAIL")
email = sys.argv[1]
code = gravatar(email)
print(f"https://www.gravatar.com/avatar/{code}?s=100&d=blank")
|
[
"gabor@szabgab.com"
] |
gabor@szabgab.com
|
feee5c1b995b9b8d43dd9bb95364532994f3bf72
|
ff469ac1982a55d0ac209b5c42ee20c54f9fd5e5
|
/app/app.py
|
ebc61f9ce7ef40933a9b579bb6924a70aebf00e2
|
[] |
no_license
|
carlosrv999/demo-gcp
|
7667618e98751f95d25f6d70c885ee0dd208ae44
|
2c13e48a915acf908fa048e26b6d1d754a750df7
|
refs/heads/master
| 2023-05-13T17:21:17.470582
| 2020-05-05T23:46:04
| 2020-05-05T23:46:04
| 261,557,229
| 0
| 0
| null | 2023-05-01T21:39:47
| 2020-05-05T18:51:37
|
Shell
|
UTF-8
|
Python
| false
| false
| 584
|
py
|
from flask import request, Flask
import socket
app = Flask(__name__)
@app.route("/healthz")
def healthz():
return "Healthy"
@app.route("/")
def rootpage():
return "root page"
@app.route("/greetings")
def hello():
return "Hello World from " + socket.gethostname() + "."
@app.route("/square")
def square():
x = request.args.get('x')
try:
x = int(x)
return str(x ** 2)
    except (TypeError, ValueError):
        return "Provide an integer via the x query parameter (e.g. /square?x=1)"
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
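# Quick checks once the server is up (default Flask port assumed):
#   curl 'http://localhost:5000/square?x=4'   -> 16
#   curl 'http://localhost:5000/healthz'      -> Healthy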
|
[
"carlosrv125@icloud.com"
] |
carlosrv125@icloud.com
|
db5043627d2d557ddbf8f268af315b97bae02f34
|
17c280ade4159d4d8d5a48d16ba3989470eb3f46
|
/18/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM2500_R_0-1.py
|
0f7c4e4db7ada38e47e4cb679cfa85d072b43dce
|
[] |
no_license
|
chengchen1993/run2_ntuple
|
798ff18489ff5185dadf3d1456a4462e1dbff429
|
c16c2b203c05a3eb77c769f63a0bcdf8b583708d
|
refs/heads/master
| 2021-06-25T18:27:08.534795
| 2021-03-15T06:08:01
| 2021-03-15T06:08:01
| 212,079,804
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
from WMCore.Configuration import Configuration
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M2500_R0-1_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Autumn18_V19_MC_L1FastJet_AK4PFchs.txt','Autumn18_V19_MC_L2Relative_AK4PFchs.txt','Autumn18_V19_MC_L3Absolute_AK4PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFchs.txt','Autumn18_V19_MC_L2Relative_AK8PFchs.txt','Autumn18_V19_MC_L3Absolute_AK8PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK8PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK8PFPuppi.txt','Autumn18_V19_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK4PFPuppi.txt' ]
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M2500-R0-1_TuneCP5_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase='/store/group/phys_b2g/chench/cc/'#chench/'# = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M2500_R0-1_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
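# Submitted via the CRAB3 client (assumed workflow): crab submit -c <this config>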
|
[
"c.chen@cern.ch"
] |
c.chen@cern.ch
|
97bb186f1525825173d22a5a7c27d401e39fa997
|
29619c1b1ee216fd40f6c13b1cb4612cc6ef79e5
|
/merge_two_sorted_list.py
|
6379cb54ca6829eb7ed3674e29982c7b323e4810
|
[] |
no_license
|
sacsachin/programing
|
9527c4340522159423120d203152c37dcf9f8931
|
377b25c317e3972d63ead7a34397458702f8accf
|
refs/heads/master
| 2022-12-14T00:20:35.232816
| 2020-09-26T12:36:32
| 2020-09-26T12:36:32
| 298,810,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
#!/usr/bin/python3
"""
https://www.interviewbit.com/problems/remove-duplicates-from-sorted-list/
Merge two sorted linked lists into a single sorted list.
"""
class Node:
def __init__(self, val=0, nxt=None):
self.val = val
self.next = nxt
class LinkList:
def __init__(self, nodes=None):
self.head = None
if nodes is not None:
node = Node(val=nodes.pop(0))
self.head = node
for elem in nodes:
node.next = Node(val=elem)
node = node.next
def solve(first, second):
ans = []
head = tail = None
while first and second:
if first.val <= second.val:
node = first
first = first.next
else:
node = second
second = second.next
        if head is None:
head = tail = node
else:
tail.next = node
tail = node
while first:
tail.next = first
tail = first
first = first.next
while second:
tail.next = second
tail = second
second = second.next
current = head
while current:
ans.append(current.val)
current = current.next
return ans
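# Example: merging [1, 3, 5] and [2, 4] yields [1, 2, 3, 4, 5]; the
# `first.val <= second.val` tie-break keeps nodes from `first` ahead of
# equal-valued nodes from `second`, so the merge is stable.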
if __name__ == "__main__":
    first = LinkList([int(x) for x in input().split()])
    second = LinkList([int(x) for x in input().split()])
print(solve(first.head, second.head))
|
[
"psachin@zeomega.com"
] |
psachin@zeomega.com
|
5fb84372f582c5ae87a1b79178eaa1d6403251aa
|
3f3044255b8430a9c73e02177e94c0fadc88ca9a
|
/archive/bullshitalign.py
|
654a0985d689389ac04f9e3e9689e2cfff95ae38
|
[] |
no_license
|
msgordon/optipol-reduc
|
263d95b5d0b1c7009d40c03a79d7038d3d26438e
|
37a0e778ea357ad1cb679a0364a31fe5d6dd6ff9
|
refs/heads/master
| 2020-05-17T13:26:07.974020
| 2017-10-11T00:31:24
| 2017-10-11T00:31:24
| 20,042,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
#! /usr/bin/env python
import numpy as np
import argparse
import pyfits
from scipy.ndimage.interpolation import shift as sc_shift
#from shift import shift as fft_shift
import os
from pyds9 import pydisplay
def shift(filename, xs, ys, refFile,noShift=False):
f = pyfits.open(filename)
header = f[0].header
header['REF_FILE'] = (os.path.basename(refFile),'Reference file')
header['PRE_FILE'] = (os.path.basename(filename),'Filename before shift')
header['XSHIFT'] = (xs,'X shift from ref_file')
header['YSHIFT'] = (ys,'Y shift from ref_file')
newName = os.path.splitext(filename)
newName = ''.join([newName[0],'_s',newName[1]])
#return newName
if noShift:
newDat = f[0].data
else:
#newDat = fft_shift(f[0].data,xs,ys)
newDat = sc_shift(f[0].data,[ys,xs])
    print('Writing to %s' % newName)
pyfits.writeto(newName,newDat,header=header,clobber=True)
return newName
def main():
parser = argparse.ArgumentParser(description='Shift images to align objects in input file')
parser.add_argument('file',help='Input file with coordinates')
args = parser.parse_args()
data = np.genfromtxt(args.file,names=['fname','x','y'],dtype=['a100','f8','f8'],autostrip=True)
# Copy reference file
checkMe = []
ref = data[0]
checkMe.append(shift(ref['fname'],0,0,ref['fname'],noShift=False))
for dat in data[1:]:
xs = ref['x'] - dat['x']
ys = ref['y'] - dat['y']
checkMe.append(shift(dat['fname'],xs,ys,ref['fname']))
#pydisplay(checkMe)
if __name__ == '__main__':
main()
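# Input file format: one whitespace-separated row per image, "fname x y".
# The first row is the reference; every other image is shifted by
# (ref_x - x, ref_y - y) so its object lands on the reference position.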
|
[
"msgordon.astro@gmail.com"
] |
msgordon.astro@gmail.com
|
9a34a1120fe9546f1836735b1d5af632a15b354f
|
2642ea15cb26372c616458233d3b94e3700e3eb7
|
/Module02/radixStable.py
|
a5a12db587c8984d1b4f16c1064dd12bf18ad12b
|
[] |
no_license
|
hdmcspadden/CS5012
|
96be4c1d09007fb365a1f2f3d4afb70f22d4ad7a
|
b856c5e780229dddd909691c2ddb4e454d377826
|
refs/heads/master
| 2023-04-18T08:12:07.733808
| 2021-05-02T21:48:29
| 2021-05-02T21:48:29
| 333,499,252
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
# Python program for implementation of Radix Sort
# A function to do counting sort of arr[] according to
# the digit represented by exp.
def countingSort(arr, exp1):
n = len(arr)
# The output array elements that will have sorted arr
output = [0] * (n)
# initialize count array as 0
# this is hard-coded for base 10.
count = [0] * (10)
# Store count of occurrences in count[]
for i in range(0, n):
        index = arr[i] // exp1  # integer division; true division breaks the digit math in Python 3
        count[index % 10] += 1  # increase the count of elements with this digit
# Change count[i] so that count[i] now contains actual
# position of this digit in output array
for i in range(1, 10):
count[i] += count[i - 1]
# Build the output array
i = n - 1
while i >= 0:
        index = arr[i] // exp1
        output[count[index % 10] - 1] = arr[i]
        count[index % 10] -= 1
i -= 1
# Copying the output array to arr[],
# so that arr now contains sorted numbers
i = 0
for i in range(0, len(arr)):
arr[i] = output[i]
# Method to do Radix Sort
def radixSort(arr):
# Find the maximum number to know number of digits
# knowing the max seems a cheat.
max1 = max(arr)
# Do counting sort for every digit. Note that instead
# of passing digit number, exp is passed. exp is 10^i
# where i is current digit number
exp = 1
    while max1 // exp > 0:  # integer division, so the loop stops after the last digit
countingSort(arr, exp)
exp *= 10
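# Worked example for [36, 1, 12, 25, 44, 11, 25] (stable LSD passes):
#   exp=1  (ones digit): [1, 11, 12, 44, 25, 25, 36]
#   exp=10 (tens digit): [1, 11, 12, 25, 25, 36, 44]
# Equal digits keep their relative order in each pass, which is what makes the
# overall radix sort stable.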
arr = [36,1,12,25,44,11,25]
print("Input array: {}".format(arr))
# Function Call
radixSort(arr)
print("Resulting array: {}".format(arr))
#for i in range(len(arr)):
# print(arr[i])
# This code is contributed by Mohit Kumra
# Edited by Patrick Gallagher
|
[
"hdmcspadden@gmail.com"
] |
hdmcspadden@gmail.com
|
84804ddd9a4668077b8d84017577df31b6b0775b
|
2d1dcf1d13e5aa5cbc60fd2730f39bbace5556e4
|
/quiz/settings/base.py
|
0f4da07aedf0f56b1eea9814e7b1505b448efd98
|
[] |
no_license
|
amalu-m/world-trivia
|
2c66c0f84c6cb87a459590cdd778c14cc0e575b7
|
ffb837231b40b0d357aef27edd45ff4e2f2a8e3a
|
refs/heads/master
| 2023-01-10T12:20:30.844418
| 2020-11-13T03:01:41
| 2020-11-13T03:01:41
| 305,027,675
| 0
| 0
| null | 2020-11-09T05:53:26
| 2020-10-18T05:14:51
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
import os
from dotenv import load_dotenv
load_dotenv()
SECRET_KEY = os.getenv("SECRET_KEY")
ENV_NAME = os.getenv("ENV_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
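# These are read from a local .env file by load_dotenv(), e.g. (hypothetical values):
#   SECRET_KEY=change-me
#   ENV_NAME=development
#   DB_USER=quiz
#   DB_PASSWORD=secret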
|
[
"amalulmathew@gmail.com"
] |
amalulmathew@gmail.com
|
8108385351aac8c2bdb972161149bfb87e009ee6
|
e50edc6f4b4bececb13ec4f8b3249418a1b36fb1
|
/็ฅ่ฏๅฏ่งๅ/urls.py
|
cd08ec2a6e27c62e174bd5a3a0f06ec0ed71885f
|
[] |
no_license
|
Fanyunjie/ML-Project
|
e9b0efb378deaa4bf1d96cad199835f3bbe0d8ee
|
fba6248f6c978e3add20eaa904107009cadf44cc
|
refs/heads/master
| 2022-11-30T08:51:12.319983
| 2020-08-14T04:35:44
| 2020-08-14T04:35:44
| 287,442,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
"""็ฅ่ฏๅฏ่งๅ URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from app01.views import *
from django.views.generic.base import TemplateView
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
path('knowledge', find_all_data.as_view()),
path('title_find_data', title_find_data.as_view()),
path('find_similar_title',find_similar_title.as_view()),
url('', TemplateView.as_view(template_name='index.html')),
# path('title_find_paper_data', title_find_paper_data.as_view()),
# path('title_find_news_data', title_find_news_data.as_view()),
# path('title_find_rumor_data', title_find_rumor_data.as_view()),
# path('find_similar_paper_titles', find_similar_paper_titles.as_view()),
# path('find_similar_news_titles', find_similar_news_titles.as_view()),
# path('find_similar_rumors_titles', find_similar_rumors_titles.as_view())
]
|
[
"843179781@qq.com"
] |
843179781@qq.com
|
eff067dc97f33395ef1ff5db167fdf8df5d64cba
|
ec1d4ce3fcbb8057281bee180a936be1767c3fae
|
/04.py
|
05d48ecfd651ccdb567cfb3a56625bbab081df0c
|
[] |
no_license
|
majatrepsic/advent-of-code-2017
|
fda71a48aee28ffb2e4e4b277655b4f9c4de0d77
|
c36eb3a7e82b282f39607988f622e4c67fafbd75
|
refs/heads/master
| 2021-08-24T11:03:22.033849
| 2017-12-09T11:51:25
| 2017-12-09T11:51:25
| 112,757,282
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import csv
# Part one
valid_inputs = 0
with open('inputs/04.txt', 'rb') as csvfile:
input_reader = csv.reader(csvfile, delimiter=' ')
for row in input_reader:
# print len(row)
# print len(set(row))
if len(row) == len(set(row)):
valid_inputs += 1
print valid_inputs
# Part two
def check_permutations(row):
for i in range(0, len(row)):
for j in range(i + 1, len(row)):
# print row[i] + ',' + row[j]
# print str(sorted(row[i])) + ',' + str(sorted(row[j]))
if (len(row[i]) == len(row[j])) \
and (sorted(row[i]) == sorted(row[j])):
return False
return True
valid_inputs_permutation = 0
with open('inputs/04.txt', 'rb') as csvfile:
input_reader = csv.reader(csvfile, delimiter=' ')
for row in input_reader:
if len(row) == len(set(row)):
# print row
if check_permutations(row):
valid_inputs_permutation += 1
print valid_inputs_permutation
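# Example: "aa bb cc" is valid in both parts; "ab ba" passes part one (no
# duplicate words) but fails part two, because "ab" and "ba" are anagrams.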
|
[
"maja.trepsic@amphinicy.com"
] |
maja.trepsic@amphinicy.com
|
50530eb0a7d5d0fe82c48791eaa79b4cc8fa29dd
|
759dd1e7aef4e54fe07c89d4e7289758b6fe18f3
|
/core/models.py
|
dc32b9b8dd74e7b56339ed6f083c12ee06c00190
|
[] |
no_license
|
igoo-Y/iemu11
|
059e8bcd51764bad6f5adeaca174b013defe35a4
|
0ed68ec8932ece7e4537d5cee601f5b86fae656d
|
refs/heads/main
| 2023-08-18T23:09:55.233882
| 2021-10-04T11:09:40
| 2021-10-04T11:09:40
| 351,639,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from django.db import models
class TimeStampedModel(models.Model):
"""Time Stamed Model Definition"""
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
class Meta:
abstract = True
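# Subclasses get created/updated for free while the abstract mixin itself has no
# table, e.g. (hypothetical model):
#   class Room(TimeStampedModel):
#       name = models.CharField(max_length=80)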
|
[
"79055280+igoo-Y@users.noreply.github.com"
] |
79055280+igoo-Y@users.noreply.github.com
|
67be5ae559d9805937ef23598331da812d5621e8
|
f41409cd5bff8ad9dbd2b96f49ff27a733472da5
|
/agentpool/agentcraw/xiciagent/xiciCraw.py
|
a19528f4ade6c4ab890d48dcd09f4e58e7333117
|
[] |
no_license
|
LittleStone8/SBagentpool
|
3f5229c1dde2d9d6f9ac29e428371b90dcc18c57
|
0a0302c3763069c11d0b3bf729d6460162bb51d5
|
refs/heads/master
| 2020-06-24T02:57:54.795022
| 2019-07-26T05:47:53
| 2019-07-26T05:47:53
| 198,829,367
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
from agentpool.agentcraw.xiciagent import xicicommon
from agentpool.agentcraw.xiciagent.xiciUrlmanager import xiciUrlmanager
from agentpool.agentcraw.xiciagent.xicidownloader import xicidownloader
from agentpool.agentcraw.xiciagent.xicioutputer import xicioutputer
from agentpool.agentcraw.xiciagent.xiciparser import xiciparser
from common import agentsrc
from framemodule.Craw import Craw
class xiciCraw(Craw):
def __init__(self,Scheduler):
self.urlmanager = xiciUrlmanager()
self.downloader = xicidownloader()
self.parser = xiciparser()
self.outputer = xicioutputer()
self.job_interval= xicicommon.xici_craw_interval
self.scheduler=Scheduler
pass
def docraw(self):
self.start_craw()
if self.job_interval!=0:
self.scheduler.timer.addtask(self.start_craw,self.job_interval)
pass
    def start_craw(self):
        print('Starting the xici proxy crawl task:')
        for url in self.urlmanager.new_urls:
            self.scheduler.run_in_main_threadpool(func=self.craw, args=(url,))

    def craw(self, url):
        print('Crawling: ' + url)
        html = self.downloader.download_by_myself_useagent(url)
        data = self.parser.parse(url, html)
        returndata = 0
        if data is not None:
            returndata = len(data)
        print(url + ' returned records: ' + str(returndata))
        storagesum = self.outputer.collect_data(data, url, agentsrc.xici)
        print(url + ' crawl finished, stored: ' + str(storagesum))
|
[
"262627160@qq.com"
] |
262627160@qq.com
|
7368fbf5331c7acbeec9c4c90da997d2e4fa59e8
|
d7d27f05de357cfa68f85c3529364409e488622c
|
/repo/repo
|
65fb2cde1f75f764db026cd4fcf4d29482857b21
|
[] |
no_license
|
yuanjianye/software_config
|
c82949d06f118690911f93bb46d9fee7def82161
|
a7314c8cef49fa83ede033df2b07b8766567be55
|
HEAD
| 2016-09-05T12:28:28.247501
| 2015-05-14T06:02:01
| 2015-05-14T06:02:01
| 35,592,603
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,140
|
#! /usr/bin/python
## repo default configuration
##
#REPO_URL = 'https://gerrit.googlesource.com/git-repo'
REPO_URL = 'root@home.yuanjianye.com:/.storage/git/git-repo'
REPO_REV = 'stable'
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# increment this whenever we make important changes to this script
VERSION = (1, 21)
# increment this if the MAINTAINER_KEYS block is modified
KEYRING_VERSION = (1, 2)
MAINTAINER_KEYS = """
Repo Maintainer <repo@android.kernel.org>
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.2.2 (GNU/Linux)
mQGiBEj3ugERBACrLJh/ZPyVSKeClMuznFIrsQ+hpNnmJGw1a9GXKYKk8qHPhAZf
WKtrBqAVMNRLhL85oSlekRz98u41H5si5zcuv+IXJDF5MJYcB8f22wAy15lUqPWi
VCkk1l8qqLiuW0fo+ZkPY5qOgrvc0HW1SmdH649uNwqCbcKb6CxaTxzhOwCgj3AP
xI1WfzLqdJjsm1Nq98L0cLcD/iNsILCuw44PRds3J75YP0pze7YF/6WFMB6QSFGu
aUX1FsTTztKNXGms8i5b2l1B8JaLRWq/jOnZzyl1zrUJhkc0JgyZW5oNLGyWGhKD
Fxp5YpHuIuMImopWEMFIRQNrvlg+YVK8t3FpdI1RY0LYqha8pPzANhEYgSfoVzOb
fbfbA/4ioOrxy8ifSoga7ITyZMA+XbW8bx33WXutO9N7SPKS/AK2JpasSEVLZcON
ae5hvAEGVXKxVPDjJBmIc2cOe7kOKSi3OxLzBqrjS2rnjiP4o0ekhZIe4+ocwVOg
e0PLlH5avCqihGRhpoqDRsmpzSHzJIxtoeb+GgGEX8KkUsVAhbQpUmVwbyBNYWlu
dGFpbmVyIDxyZXBvQGFuZHJvaWQua2VybmVsLm9yZz6IYAQTEQIAIAUCSPe6AQIb
AwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEBZTDV6SD1xl1GEAn0x/OKQpy7qI
6G73NJviU0IUMtftAKCFMUhGb/0bZvQ8Rm3QCUpWHyEIu7kEDQRI97ogEBAA2wI6
5fs9y/rMwD6dkD/vK9v4C9mOn1IL5JCPYMJBVSci+9ED4ChzYvfq7wOcj9qIvaE0
GwCt2ar7Q56me5J+byhSb32Rqsw/r3Vo5cZMH80N4cjesGuSXOGyEWTe4HYoxnHv
gF4EKI2LK7xfTUcxMtlyn52sUpkfKsCpUhFvdmbAiJE+jCkQZr1Z8u2KphV79Ou+
P1N5IXY/XWOlq48Qf4MWCYlJFrB07xjUjLKMPDNDnm58L5byDrP/eHysKexpbakL
xCmYyfT6DV1SWLblpd2hie0sL3YejdtuBMYMS2rI7Yxb8kGuqkz+9l1qhwJtei94
5MaretDy/d/JH/pRYkRf7L+ke7dpzrP+aJmcz9P1e6gq4NJsWejaALVASBiioqNf
QmtqSVzF1wkR5avZkFHuYvj6V/t1RrOZTXxkSk18KFMJRBZrdHFCWbc5qrVxUB6e
N5pja0NFIUCigLBV1c6I2DwiuboMNh18VtJJh+nwWeez/RueN4ig59gRTtkcc0PR
35tX2DR8+xCCFVW/NcJ4PSePYzCuuLvp1vEDHnj41R52Fz51hgddT4rBsp0nL+5I
socSOIIezw8T9vVzMY4ArCKFAVu2IVyBcahTfBS8q5EM63mONU6UVJEozfGljiMw
xuQ7JwKcw0AUEKTKG7aBgBaTAgT8TOevpvlw91cAAwUP/jRkyVi/0WAb0qlEaq/S
ouWxX1faR+vU3b+Y2/DGjtXQMzG0qpetaTHC/AxxHpgt/dCkWI6ljYDnxgPLwG0a
Oasm94BjZc6vZwf1opFZUKsjOAAxRxNZyjUJKe4UZVuMTk6zo27Nt3LMnc0FO47v
FcOjRyquvgNOS818irVHUf12waDx8gszKxQTTtFxU5/ePB2jZmhP6oXSe4K/LG5T
+WBRPDrHiGPhCzJRzm9BP0lTnGCAj3o9W90STZa65RK7IaYpC8TB35JTBEbrrNCp
w6lzd74LnNEp5eMlKDnXzUAgAH0yzCQeMl7t33QCdYx2hRs2wtTQSjGfAiNmj/WW
Vl5Jn+2jCDnRLenKHwVRFsBX2e0BiRWt/i9Y8fjorLCXVj4z+7yW6DawdLkJorEo
p3v5ILwfC7hVx4jHSnOgZ65L9s8EQdVr1ckN9243yta7rNgwfcqb60ILMFF1BRk/
0V7wCL+68UwwiQDvyMOQuqkysKLSDCLb7BFcyA7j6KG+5hpsREstFX2wK1yKeraz
5xGrFy8tfAaeBMIQ17gvFSp/suc9DYO0ICK2BISzq+F+ZiAKsjMYOBNdH/h0zobQ
HTHs37+/QLMomGEGKZMWi0dShU2J5mNRQu3Hhxl3hHDVbt5CeJBb26aQcQrFz69W
zE3GNvmJosh6leayjtI9P2A6iEkEGBECAAkFAkj3uiACGwwACgkQFlMNXpIPXGWp
TACbBS+Up3RpfYVfd63c1cDdlru13pQAn3NQy/SN858MkxN+zym86UBgOad2
=CMiZ
-----END PGP PUBLIC KEY BLOCK-----
Conley Owens <cco3@android.com>
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.11 (GNU/Linux)
mQENBFHRvc8BCADFg45Xx/y6QDC+T7Y/gGc7vx0ww7qfOwIKlAZ9xG3qKunMxo+S
hPCnzEl3cq+6I1Ww/ndop/HB3N3toPXRCoN8Vs4/Hc7by+SnaLFnacrm+tV5/OgT
V37Lzt8lhay1Kl+YfpFwHYYpIEBLFV9knyfRXS/428W2qhdzYfvB15/AasRmwmor
py4NIzSs8UD/SPr1ihqNCdZM76+MQyN5HMYXW/ALZXUFG0pwluHFA7hrfPG74i8C
zMiP7qvMWIl/r/jtzHioH1dRKgbod+LZsrDJ8mBaqsZaDmNJMhss9g76XvfMyLra
9DI9/iFuBpGzeqBv0hwOGQspLRrEoyTeR6n1ABEBAAG0H0NvbmxleSBPd2VucyA8
Y2NvM0BhbmRyb2lkLmNvbT6JATgEEwECACIFAlHRvc8CGwMGCwkIBwMCBhUIAgkK
CwQWAgMBAh4BAheAAAoJEGe35EhpKzgsP6AIAJKJmNtn4l7hkYHKHFSo3egb6RjQ
zEIP3MFTcu8HFX1kF1ZFbrp7xqurLaE53kEkKuAAvjJDAgI8mcZHP1JyplubqjQA
xvv84gK+OGP3Xk+QK1ZjUQSbjOpjEiSZpRhWcHci3dgOUH4blJfByHw25hlgHowd
a/2PrNKZVcJ92YienaxxGjcXEUcd0uYEG2+rwllQigFcnMFDhr9B71MfalRHjFKE
fmdoypqLrri61YBc59P88Rw2/WUpTQjgNubSqa3A2+CKdaRyaRw+2fdF4TdR0h8W
zbg+lbaPtJHsV+3mJC7fq26MiJDRJa5ZztpMn8su20gbLgi2ShBOaHAYDDi5AQ0E
UdG9zwEIAMoOBq+QLNozAhxOOl5GL3StTStGRgPRXINfmViTsihrqGCWBBUfXlUE
OytC0mYcrDUQev/8ToVoyqw+iGSwDkcSXkrEUCKFtHV/GECWtk1keyHgR10YKI1R
mquSXoubWGqPeG1PAI74XWaRx8UrL8uCXUtmD8Q5J7mDjKR5NpxaXrwlA0bKsf2E
Gp9tu1kKauuToZhWHMRMqYSOGikQJwWSFYKT1KdNcOXLQF6+bfoJ6sjVYdwfmNQL
Ixn8QVhoTDedcqClSWB17VDEFDFa7MmqXZz2qtM3X1R/MUMHqPtegQzBGNhRdnI2
V45+1Nnx/uuCxDbeI4RbHzujnxDiq70AEQEAAYkBHwQYAQIACQUCUdG9zwIbDAAK
CRBnt+RIaSs4LNVeB/0Y2pZ8I7gAAcEM0Xw8drr4omg2fUoK1J33ozlA/RxeA/lJ
I3KnyCDTpXuIeBKPGkdL8uMATC9Z8DnBBajRlftNDVZS3Hz4G09G9QpMojvJkFJV
By+01Flw/X+eeN8NpqSuLV4W+AjEO8at/VvgKr1AFvBRdZ7GkpI1o6DgPe7ZqX+1
dzQZt3e13W0rVBb/bUgx9iSLoeWP3aq/k+/GRGOR+S6F6BBSl0SQ2EF2+dIywb1x
JuinEP+AwLAUZ1Bsx9ISC0Agpk2VeHXPL3FGhroEmoMvBzO0kTFGyoeT7PR/BfKv
+H/g3HsL2LOB9uoIm8/5p2TTU5ttYCXMHhQZ81AY
=AUp4
-----END PGP PUBLIC KEY BLOCK-----
"""
GIT = 'git' # our git command
MIN_GIT_VERSION = (1, 7, 2) # minimum supported git version
repodir = '.repo' # name of repo's private directory
S_repo = 'repo' # special repo repository
S_manifests = 'manifests' # special manifest repository
REPO_MAIN = S_repo + '/main.py' # main script
MIN_PYTHON_VERSION = (2, 6) # minimum supported python version
import errno
import optparse
import os
import re
import stat
import subprocess
import sys
if sys.version_info[0] == 3:
import urllib.request
import urllib.error
else:
import imp
import urllib2
urllib = imp.new_module('urllib')
urllib.request = urllib2
urllib.error = urllib2
def _print(*objects, **kwargs):
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
out = kwargs.get('file', sys.stdout)
out.write(sep.join(objects) + end)
# Python version check
ver = sys.version_info
if ver[0] == 3:
_print('warning: Python 3 support is currently experimental. YMMV.\n'
'Please use Python 2.6 - 2.7 instead.',
file=sys.stderr)
if (ver[0], ver[1]) < MIN_PYTHON_VERSION:
_print('error: Python version %s unsupported.\n'
'Please use Python 2.6 - 2.7 instead.'
% sys.version.split(' ')[0], file=sys.stderr)
sys.exit(1)
home_dot_repo = os.path.expanduser('~/.repoconfig')
gpg_dir = os.path.join(home_dot_repo, 'gnupg')
extra_args = []
init_optparse = optparse.OptionParser(usage="repo init -u url [options]")
# Logging
group = init_optparse.add_option_group('Logging options')
group.add_option('-q', '--quiet',
dest="quiet", action="store_true", default=False,
help="be quiet")
# Manifest
group = init_optparse.add_option_group('Manifest options')
group.add_option('-u', '--manifest-url',
dest='manifest_url',
help='manifest repository location', metavar='URL')
group.add_option('-b', '--manifest-branch',
dest='manifest_branch',
help='manifest branch or revision', metavar='REVISION')
group.add_option('-m', '--manifest-name',
dest='manifest_name',
help='initial manifest file', metavar='NAME.xml')
group.add_option('--mirror',
dest='mirror', action='store_true',
help='create a replica of the remote repositories '
'rather than a client working directory')
group.add_option('--reference',
dest='reference',
help='location of mirror directory', metavar='DIR')
group.add_option('--depth', type='int', default=None,
dest='depth',
help='create a shallow clone with given depth; see git clone')
group.add_option('--archive',
dest='archive', action='store_true',
help='checkout an archive instead of a git repository for '
'each project. See git archive.')
group.add_option('-g', '--groups',
dest='groups', default='default',
help='restrict manifest projects to ones with specified '
'group(s) [default|all|G1,G2,G3|G4,-G5,-G6]',
metavar='GROUP')
group.add_option('-p', '--platform',
dest='platform', default="auto",
help='restrict manifest projects to ones with a specified '
'platform group [auto|all|none|linux|darwin|...]',
metavar='PLATFORM')
# Tool
group = init_optparse.add_option_group('repo Version options')
group.add_option('--repo-url',
dest='repo_url',
help='repo repository location', metavar='URL')
group.add_option('--repo-branch',
dest='repo_branch',
help='repo branch or revision', metavar='REVISION')
group.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
# Other
group = init_optparse.add_option_group('Other options')
group.add_option('--config-name',
dest='config_name', action="store_true", default=False,
help='Always prompt for name/e-mail')
class CloneFailure(Exception):
"""Indicate the remote clone of repo itself failed.
"""
def _Init(args):
"""Installs repo by cloning it over the network.
"""
opt, args = init_optparse.parse_args(args)
if args:
init_optparse.print_usage()
sys.exit(1)
url = opt.repo_url
if not url:
url = REPO_URL
extra_args.append('--repo-url=%s' % url)
branch = opt.repo_branch
if not branch:
branch = REPO_REV
extra_args.append('--repo-branch=%s' % branch)
if branch.startswith('refs/heads/'):
branch = branch[len('refs/heads/'):]
if branch.startswith('refs/'):
_print("fatal: invalid branch name '%s'" % branch, file=sys.stderr)
raise CloneFailure()
try:
os.mkdir(repodir)
except OSError as e:
if e.errno != errno.EEXIST:
_print('fatal: cannot make %s directory: %s'
% (repodir, e.strerror), file=sys.stderr)
# Don't raise CloneFailure; that would delete the
# name. Instead exit immediately.
#
sys.exit(1)
_CheckGitVersion()
try:
if NeedSetupGnuPG():
can_verify = SetupGnuPG(opt.quiet)
else:
can_verify = True
dst = os.path.abspath(os.path.join(repodir, S_repo))
_Clone(url, dst, opt.quiet)
if can_verify and not opt.no_repo_verify:
rev = _Verify(dst, branch, opt.quiet)
else:
rev = 'refs/remotes/origin/%s^0' % branch
_Checkout(dst, branch, rev, opt.quiet)
except CloneFailure:
if opt.quiet:
_print('fatal: repo init failed; run without --quiet to see why',
file=sys.stderr)
raise
def ParseGitVersion(ver_str):
if not ver_str.startswith('git version '):
return None
num_ver_str = ver_str[len('git version '):].strip().split('-')[0]
to_tuple = []
for num_str in num_ver_str.split('.')[:3]:
if num_str.isdigit():
to_tuple.append(int(num_str))
else:
to_tuple.append(0)
return tuple(to_tuple)
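# Example (hypothetical input): ParseGitVersion('git version 1.8.2.1-msysgit')
# returns (1, 8, 2); a string without the 'git version ' prefix returns None.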
def _CheckGitVersion():
cmd = [GIT, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError as e:
_print(file=sys.stderr)
_print("fatal: '%s' is not available" % GIT, file=sys.stderr)
_print('fatal: %s' % e, file=sys.stderr)
_print(file=sys.stderr)
_print('Please make sure %s is installed and in your path.' % GIT,
file=sys.stderr)
raise CloneFailure()
ver_str = proc.stdout.read().strip()
proc.stdout.close()
proc.wait()
ver_act = ParseGitVersion(ver_str)
if ver_act is None:
_print('error: "%s" unsupported' % ver_str, file=sys.stderr)
raise CloneFailure()
if ver_act < MIN_GIT_VERSION:
need = '.'.join(map(str, MIN_GIT_VERSION))
_print('fatal: git %s or later required' % need, file=sys.stderr)
raise CloneFailure()
def NeedSetupGnuPG():
if not os.path.isdir(home_dot_repo):
return True
kv = os.path.join(home_dot_repo, 'keyring-version')
if not os.path.exists(kv):
return True
kv = open(kv).read()
if not kv:
return True
kv = tuple(map(int, kv.split('.')))
if kv < KEYRING_VERSION:
return True
return False
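# Note: the KEYRING_VERSION check above relies on lexicographic tuple
# comparison, e.g. a stored '1.1' parses to (1, 1) < (1, 2) and triggers
# a re-import of the maintainer keys.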
def SetupGnuPG(quiet):
try:
os.mkdir(home_dot_repo)
except OSError as e:
if e.errno != errno.EEXIST:
_print('fatal: cannot make %s directory: %s'
% (home_dot_repo, e.strerror), file=sys.stderr)
sys.exit(1)
try:
os.mkdir(gpg_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
_print('fatal: cannot make %s directory: %s' % (gpg_dir, e.strerror),
file=sys.stderr)
sys.exit(1)
env = os.environ.copy()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = ['gpg', '--import']
try:
proc = subprocess.Popen(cmd,
env = env,
stdin = subprocess.PIPE)
except OSError as e:
if not quiet:
_print('warning: gpg (GnuPG) is not available.', file=sys.stderr)
_print('warning: Installing it is strongly encouraged.', file=sys.stderr)
_print(file=sys.stderr)
return False
proc.stdin.write(MAINTAINER_KEYS)
proc.stdin.close()
if proc.wait() != 0:
_print('fatal: registering repo maintainer keys failed', file=sys.stderr)
sys.exit(1)
_print()
fd = open(os.path.join(home_dot_repo, 'keyring-version'), 'w')
fd.write('.'.join(map(str, KEYRING_VERSION)) + '\n')
fd.close()
return True
def _SetConfig(local, name, value):
"""Set a git configuration option to the specified value.
"""
cmd = [GIT, 'config', name, value]
if subprocess.Popen(cmd, cwd = local).wait() != 0:
raise CloneFailure()
def _InitHttp():
handlers = []
mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
try:
import netrc
n = netrc.netrc()
for host in n.hosts:
p = n.hosts[host]
mgr.add_password(p[1], 'http://%s/' % host, p[0], p[2])
mgr.add_password(p[1], 'https://%s/' % host, p[0], p[2])
except:
pass
handlers.append(urllib.request.HTTPBasicAuthHandler(mgr))
handlers.append(urllib.request.HTTPDigestAuthHandler(mgr))
if 'http_proxy' in os.environ:
url = os.environ['http_proxy']
handlers.append(urllib.request.ProxyHandler({'http': url, 'https': url}))
if 'REPO_CURL_VERBOSE' in os.environ:
handlers.append(urllib.request.HTTPHandler(debuglevel=1))
handlers.append(urllib.request.HTTPSHandler(debuglevel=1))
urllib.request.install_opener(urllib.request.build_opener(*handlers))
def _Fetch(url, local, src, quiet):
if not quiet:
_print('Get %s' % url, file=sys.stderr)
cmd = [GIT, 'fetch']
if quiet:
cmd.append('--quiet')
err = subprocess.PIPE
else:
err = None
cmd.append(src)
cmd.append('+refs/heads/*:refs/remotes/origin/*')
cmd.append('refs/tags/*:refs/tags/*')
proc = subprocess.Popen(cmd, cwd = local, stderr = err)
if err:
proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
raise CloneFailure()
def _DownloadBundle(url, local, quiet):
if not url.endswith('/'):
url += '/'
url += 'clone.bundle'
proc = subprocess.Popen(
[GIT, 'config', '--get-regexp', 'url.*.insteadof'],
cwd = local,
stdout = subprocess.PIPE)
for line in proc.stdout:
m = re.compile(r'^url\.(.*)\.insteadof (.*)$').match(line)
if m:
new_url = m.group(1)
old_url = m.group(2)
if url.startswith(old_url):
url = new_url + url[len(old_url):]
break
proc.stdout.close()
proc.wait()
if not url.startswith('http:') and not url.startswith('https:'):
return False
dest = open(os.path.join(local, '.git', 'clone.bundle'), 'w+b')
try:
try:
r = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
if e.code in [403, 404]:
return False
_print('fatal: Cannot get %s' % url, file=sys.stderr)
_print('fatal: HTTP error %s' % e.code, file=sys.stderr)
raise CloneFailure()
except urllib.error.URLError as e:
_print('fatal: Cannot get %s' % url, file=sys.stderr)
_print('fatal: error %s' % e.reason, file=sys.stderr)
raise CloneFailure()
try:
if not quiet:
_print('Get %s' % url, file=sys.stderr)
while True:
buf = r.read(8192)
        if not buf:  # empty str (py2) or empty bytes (py3) both signal EOF
return True
dest.write(buf)
finally:
r.close()
finally:
dest.close()
def _ImportBundle(local):
path = os.path.join(local, '.git', 'clone.bundle')
try:
_Fetch(local, local, path, True)
finally:
os.remove(path)
def _Clone(url, local, quiet):
"""Clones a git repository to a new subdirectory of repodir
"""
try:
os.mkdir(local)
except OSError as e:
_print('fatal: cannot make %s directory: %s' % (local, e.strerror),
file=sys.stderr)
raise CloneFailure()
cmd = [GIT, 'init', '--quiet']
try:
proc = subprocess.Popen(cmd, cwd = local)
except OSError as e:
_print(file=sys.stderr)
_print("fatal: '%s' is not available" % GIT, file=sys.stderr)
_print('fatal: %s' % e, file=sys.stderr)
_print(file=sys.stderr)
_print('Please make sure %s is installed and in your path.' % GIT,
file=sys.stderr)
raise CloneFailure()
if proc.wait() != 0:
_print('fatal: could not create %s' % local, file=sys.stderr)
raise CloneFailure()
_InitHttp()
_SetConfig(local, 'remote.origin.url', url)
_SetConfig(local, 'remote.origin.fetch',
'+refs/heads/*:refs/remotes/origin/*')
if _DownloadBundle(url, local, quiet):
_ImportBundle(local)
else:
_Fetch(url, local, 'origin', quiet)
def _Verify(cwd, branch, quiet):
"""Verify the branch has been signed by a tag.
"""
cmd = [GIT, 'describe', 'origin/%s' % branch]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd = cwd)
cur = proc.stdout.read().strip()
proc.stdout.close()
proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0 or not cur:
_print(file=sys.stderr)
_print("fatal: branch '%s' has not been signed" % branch, file=sys.stderr)
raise CloneFailure()
m = re.compile(r'^(.*)-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur)
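  # Trims 'git describe' output back to the nearest signed tag, e.g. a
  # hypothetical 'v1.21-9-g1a2b3c4' becomes 'v1.21' before verification.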
if m:
cur = m.group(1)
if not quiet:
_print(file=sys.stderr)
_print("info: Ignoring branch '%s'; using tagged release '%s'"
% (branch, cur), file=sys.stderr)
_print(file=sys.stderr)
env = os.environ.copy()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = cwd,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
_print(file=sys.stderr)
_print(out, file=sys.stderr)
_print(err, file=sys.stderr)
_print(file=sys.stderr)
raise CloneFailure()
return '%s^0' % cur
def _Checkout(cwd, branch, rev, quiet):
"""Checkout an upstream branch into the repository and track it.
"""
cmd = [GIT, 'update-ref', 'refs/heads/default', rev]
if subprocess.Popen(cmd, cwd = cwd).wait() != 0:
raise CloneFailure()
_SetConfig(cwd, 'branch.default.remote', 'origin')
_SetConfig(cwd, 'branch.default.merge', 'refs/heads/%s' % branch)
cmd = [GIT, 'symbolic-ref', 'HEAD', 'refs/heads/default']
if subprocess.Popen(cmd, cwd = cwd).wait() != 0:
raise CloneFailure()
cmd = [GIT, 'read-tree', '--reset', '-u']
if not quiet:
cmd.append('-v')
cmd.append('HEAD')
if subprocess.Popen(cmd, cwd = cwd).wait() != 0:
raise CloneFailure()
def _FindRepo():
"""Look for a repo installation, starting at the current directory.
"""
curdir = os.getcwd()
repo = None
olddir = None
while curdir != '/' \
and curdir != olddir \
and not repo:
repo = os.path.join(curdir, repodir, REPO_MAIN)
if not os.path.isfile(repo):
repo = None
olddir = curdir
curdir = os.path.dirname(curdir)
return (repo, os.path.join(curdir, repodir))
class _Options:
help = False
def _ParseArguments(args):
cmd = None
opt = _Options()
arg = []
for i in range(len(args)):
a = args[i]
if a == '-h' or a == '--help':
opt.help = True
elif not a.startswith('-'):
cmd = a
arg = args[i + 1:]
break
return cmd, opt, arg
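# Example (hypothetical invocation): _ParseArguments(['init', '-u', 'URL'])
# returns ('init', opt, ['-u', 'URL']); options after the command are passed
# through untouched to the named command.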
def _Usage():
_print(
"""usage: repo COMMAND [ARGS]
repo is not yet installed. Use "repo init" to install it here.
The most commonly used repo commands are:
init Install repo in the current working directory
help Display detailed help on a command
For access to the full online help, install repo ("repo init").
""", file=sys.stderr)
sys.exit(1)
def _Help(args):
if args:
if args[0] == 'init':
init_optparse.print_help()
sys.exit(0)
else:
_print("error: '%s' is not a bootstrap command.\n"
' For access to online help, install repo ("repo init").'
% args[0], file=sys.stderr)
else:
_Usage()
sys.exit(1)
def _NotInstalled():
_print('error: repo is not installed. Use "repo init" to install it here.',
file=sys.stderr)
sys.exit(1)
def _NoCommands(cmd):
_print("""error: command '%s' requires repo to be installed first.
Use "repo init" to install it here.""" % cmd, file=sys.stderr)
sys.exit(1)
def _RunSelf(wrapper_path):
my_dir = os.path.dirname(wrapper_path)
my_main = os.path.join(my_dir, 'main.py')
my_git = os.path.join(my_dir, '.git')
if os.path.isfile(my_main) and os.path.isdir(my_git):
for name in ['git_config.py',
'project.py',
'subcmds']:
if not os.path.exists(os.path.join(my_dir, name)):
return None, None
return my_main, my_git
return None, None
def _SetDefaultsTo(gitdir):
global REPO_URL
global REPO_REV
REPO_URL = gitdir
proc = subprocess.Popen([GIT,
'--git-dir=%s' % gitdir,
'symbolic-ref',
'HEAD'],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
REPO_REV = proc.stdout.read().strip()
proc.stdout.close()
proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
_print('fatal: %s has no current branch' % gitdir, file=sys.stderr)
sys.exit(1)
def main(orig_args):
repo_main, rel_repo_dir = _FindRepo()
cmd, opt, args = _ParseArguments(orig_args)
wrapper_path = os.path.abspath(__file__)
my_main, my_git = _RunSelf(wrapper_path)
if not repo_main:
if opt.help:
_Usage()
if cmd == 'help':
_Help(args)
if not cmd:
_NotInstalled()
if cmd == 'init':
if my_git:
_SetDefaultsTo(my_git)
try:
_Init(args)
except CloneFailure:
for root, dirs, files in os.walk(repodir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(repodir)
sys.exit(1)
repo_main, rel_repo_dir = _FindRepo()
else:
_NoCommands(cmd)
if my_main:
repo_main = my_main
ver_str = '.'.join(map(str, VERSION))
me = [sys.executable, repo_main,
'--repo-dir=%s' % rel_repo_dir,
'--wrapper-version=%s' % ver_str,
'--wrapper-path=%s' % wrapper_path,
'--']
me.extend(orig_args)
me.extend(extra_args)
try:
os.execv(sys.executable, me)
except OSError as e:
_print("fatal: unable to start %s" % repo_main, file=sys.stderr)
_print("fatal: %s" % e, file=sys.stderr)
sys.exit(148)
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"yuanjianye@126.com"
] |
yuanjianye@126.com
|
|
399f348277fb3b7163ca3a4c76f76af5f25d109d
|
3366e2c70dd863951a3386aa41bf0a00c8b79178
|
/process_iemocap.py
|
d2f454b9a90b4d90a42fcc1713a1da822ba48c98
|
[] |
no_license
|
mia2mia/emotionrecognition
|
5ec5d9e220dbfe42f2ab926be035c40553f32951
|
d1915b00f316a2b516050e772e57436c27325e53
|
refs/heads/master
| 2022-07-16T16:38:17.816397
| 2018-08-16T20:02:07
| 2018-08-16T20:02:07
| 263,222,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,659
|
py
|
import argparse
import os
from sklearn.utils import shuffle
import numpy as np
#TODO: import the correct feature extraction function
from audio_features import extract_logmel as extract_feat
def parse_args():
"""Returns dictionary containing CLI arguments"""
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--data-path", required=True, help="Path to the IEMOCAP processed dataset")
ap.add_argument("-o", "--out-path", required=True, help="Path to the output directory")
args = vars(ap.parse_args())
return args
def parse_dialog_file(dialog_file_path, x, y):
"""Function to parse the Evaluation file per dialog
Evaluation file structure:
% [START_TIME - END_TIME] TURN_NAME EMOTION [V, A, D]
[6.2901 - 8.2357] Ses01F_impro01_F000 neu [2.5000, 2.5000, 2.5000]
C-E2: Neutral; ()
C-E3: Neutral; ()
C-E4: Neutral; ()
C-F1: Neutral; (curious)
A-E3: val 3; act 2; dom 2; ()
A-E4: val 2; act 3; dom 3; (mildly aggravated but staying polite, attitude)
A-F1: val 3; act 2; dom 1; ()
[10.0100 - 11.3925] Ses01F_impro01_F001 neu [2.5000, 2.5000, 2.5000]
...
"""
global target_emotions
dialog_wav_base_path = os.path.splitext(dialog_file_path)[0]
with open(dialog_file_path, 'r') as dialog_file:
# skip the headers
_ = dialog_file.readline()
_ = dialog_file.readline()
# read the line containing frame and emotion and skip till the next
line = dialog_file.readline().strip()
while line:
wavfile, emotion = line.split('\t')[1:3]
if emotion in target_emotions:
wav_path = os.path.join(dialog_wav_base_path, wavfile+'.wav')
print (wav_path, emotion)
# extract features and append it to the list
features = extract_feat(wav_path) # shape (M, 40)
if features is not None:
x.append(features)
y.append(target_emotions[emotion])
# skip all lines till the blank line
            while line and line != '\n':  # stop at the blank separator or at EOF
line = dialog_file.readline()
# read in the next line containing frame and emotion
line = dialog_file.readline().strip()
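# From the sample header in the docstring, a line like
# '[6.2901 - 8.2357]\tSes01F_impro01_F000\tneu\t[2.5000, ...]' yields
# wavfile='Ses01F_impro01_F000' and emotion='neu' after the tab split.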
def walk_through_sessions(x, y, data_path, out_path, sessions):
"""Function to iterate recursively over all session annotations,
extract features, and append the features/labels to the x,y lists.
"""
for session in sessions:
session_path = os.path.join(data_path, session)
for dirpath, dnames, fnames in os.walk(session_path):
for f in fnames:
if f.endswith(".txt"):
dialog_file_path = os.path.join(dirpath, f)
parse_dialog_file(dialog_file_path, x, y)
def compute_mean_std(x_train, y_train):
"""Function to compute mean and std of feature vectors from neutral emotions"""
# warning: not used
global mean, std, target_emotions
neutral_examples = np.vstack(x_train[np.where(y_train==target_emotions['neu'])])
mean = np.mean(neutral_examples, axis=0)
std = np.std(neutral_examples, axis=0)
print (mean.shape)
print (mean)
print (std.shape)
print (std)
def normalize_data(x, mean, std):
"""function to normalize X by subtracting mean and dividing by std"""
for i in range(len(x)):
x[i] = (x[i] - mean) / std
def create_training_set(data_path, out_path, sessions):
"""wrapper function to create training set"""
global mean, std
print ("Processing training set...")
x_train = []
y_train = []
walk_through_sessions(x_train, y_train, data_path, out_path, sessions)
print (len(x_train), len(y_train))
"""Lines to perform external normalization. Not used.
x_train = np.array(x_train)
y_train = np.array(y_train)
compute_mean_std(x_train, y_train)
print ("sample before normalization", x_train[0][0])
normalize_data(x_train, mean, std)
print ("sample after normalization", x_train[0][0])
"""
np.save(os.path.join(out_path,'x_train.npy'), x_train)
np.save(os.path.join(out_path,'y_train.npy'), y_train)
def create_testval_set(data_path, out_path, sessions):
global mean, std
print ("Processing test/val set...")
x = []
y = []
walk_through_sessions(x, y, data_path, out_path, sessions)
# normalize_data(x, mean, std)
n_val = int(len(y)/2)
x,y = shuffle(x,y)
x_val = x[:n_val]
y_val = y[:n_val]
x_test = x[n_val:]
y_test = y[n_val:]
print (len(x_val), len(y_val), len(x_test), len(y_test))
np.save(os.path.join(out_path,'x_val.npy'), x_val)
np.save(os.path.join(out_path,'y_val.npy'), y_val)
np.save(os.path.join(out_path,'x_test.npy'), x_test)
np.save(os.path.join(out_path,'y_test.npy'), y_test)
if __name__ == '__main__':
args = parse_args()
data_path = args["data_path"]
out_path = args["out_path"]
if not os.path.exists(data_path):
raise("Path to Dataset is incorrect or doesn't exist")
if not os.path.exists(out_path):
os.makedirs(out_path)
# sessions used for training and testing
sessions_train = ['Session1', 'Session2', 'Session3', 'Session4']
sessions_val = ['Session5']
target_emotions = { 'ang':0, 'hap':1, 'sad':2, 'neu':3, }
# target_emotions = {'ang':0, 'hap':1, 'exc':2, 'sad':3, 'fru':4, 'neu':5,}
mean = None
std = None
create_training_set(data_path, out_path, sessions_train)
create_testval_set(data_path, out_path, sessions_val)
|
[
"deepc94@gmail.com"
] |
deepc94@gmail.com
|
bacc0ed50288fc9e1318a109db7351883c306837
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/388510f379bf268c9fa7e135007fcf1295a5e82f-<read_sysctl_file>-fix.py
|
5944de126f95a653371ecd488b692d83c9600aee
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
def read_sysctl_file(self):
lines = []
if os.path.isfile(self.sysctl_file):
try:
f = open(self.sysctl_file, 'r')
lines = f.readlines()
f.close()
except IOError:
e = get_exception()
self.module.fail_json(msg=('Failed to open %s: %s' % (self.sysctl_file, str(e))))
for line in lines:
line = line.strip()
self.file_lines.append(line)
if ((not line) or line.startswith('#')):
continue
(k, v) = line.split('=', 1)
k = k.strip()
v = v.strip()
self.file_values[k] = v.strip()
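# e.g. a (hypothetical) line 'net.ipv4.ip_forward = 1' is stored as
# file_values['net.ipv4.ip_forward'] = '1'; blank lines and '#' comments
# are kept in file_lines but skipped for the key/value parse.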
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
603b373c5b40981b400793fb84c38b2fe0e80722
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_321/ch87_2020_05_06_17_53_40_771914.py
|
9e76ed669a24108340ce5d59e9ea300a2bd1138d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
with open('churras.txt', 'r') as texto:
    conteudo = texto.read()  # read() must be called; the bare attribute is not the text
n = conteudo.split(',')
print(n)
del n[0:3]  # assumed intent: drop the first three header fields before the value pairs
valor = 0
i = 0
while i < len(n) - 1:  # step through adjacent pairs without overrunning the list
    valor += float(n[i]) * float(n[i + 1])  # split() yields strings; convert before multiplying
    i += 2
print(valor)
|
[
"you@example.com"
] |
you@example.com
|
1c7c47ec1ae65cc25478853c5ec03cb378ba9ce6
|
3a17482f2229325dda7dfdf74f51b18a69faeb17
|
/setup.py
|
5f3688acdef48ff07d79ffb7decf00b496f6cf1d
|
[] |
no_license
|
Albertomanas/ApiRest_gildedRose
|
067b4031d455d40053488f713048147dec5edf14
|
430000567500fed4b6e7e745cf08caccf25c1c2b
|
refs/heads/master
| 2022-04-10T10:25:34.205277
| 2020-03-27T04:12:18
| 2020-03-27T04:12:18
| 247,736,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="API REST GildedRose",
version="0.0.1",
author="dfleta",
author_email="gelpiorama@gmail.com",
description="API REST Flask example",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dfleta/api-rest-gildedrose.git",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'aniso8601==8.0.0',
'astroid==2.3.3',
'Click==7.0',
'dnspython==1.16.0',
'Flask==1.1.1',
'Flask-Cors==3.0.8',
'flask-mongoengine==0.9.5',
'Flask-PyMongo==2.3.0',
'Flask-RESTful==0.3.7',
'Flask-WTF==0.14.2',
'isort==4.3.21',
'itsdangerous==1.1.0',
'Jinja2==2.11.1',
'lazy-object-proxy==1.4.3',
'MarkupSafe==1.1.1',
'mccabe==0.6.1',
'mongoengine==0.19.1',
'pycodestyle==2.5.0',
'pylint==2.4.4',
'pymongo==3.10.1',
'pytz==2019.3',
'six==1.14.0',
'typed-ast==1.4.0',
'Werkzeug==1.0.0',
'wrapt==1.11.2',
'WTForms==2.2.1',
],
)
|
[
"amanas@cifpfbmoll.eu"
] |
amanas@cifpfbmoll.eu
|
259d2cd92a32309fc550fed0d26e1561a84b8007
|
e6cd7bc1214ae7a44c3cb789a360e9a840eb8034
|
/week2/practical3_approximate_matching.py
|
fb11fdab32e9342cf35b834f063f2b84c27f9179
|
[] |
no_license
|
MariaTroj/DNA_sequencing_algo
|
3e93546645c9e96966aeeecacd36c5c06e9541bd
|
11a45bcdd4d5df3e96ab2ef245b613dca0aa0acf
|
refs/heads/master
| 2023-09-03T15:13:44.292655
| 2021-10-19T11:30:20
| 2021-10-19T11:30:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
from practical1_boyer_moore import *
def approximate_match(pattern: str, text: str, n: int):
segment_length = int(round(len(pattern) / (n + 1)))
all_matches = set()
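    # Pigeonhole: an occurrence with at most n mismatches must contain at
    # least one of the n+1 segments exactly, so each segment is located with
    # Boyer-Moore and the characters around each hit are verified below.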
for i in range(n+1):
start = i*segment_length
end = min((i+1) * segment_length, len(pattern))
p_bm = BoyerMoore(pattern[start:end], alphabet='ACGT')
matches = boyer_moore(pattern[start:end], p_bm, text)
# Extend matching segments to see if whole p matches
for m in matches:
if m < start or m-start+len(pattern) > len(text):
continue
mismatches = 0
for j in range(0, start):
if not pattern[j] == text[m - start + j]:
mismatches += 1
if mismatches > n:
break
for j in range(end, len(pattern)):
if not pattern[j] == text[m - start + j]:
mismatches += 1
if mismatches > n:
break
if mismatches <= n:
all_matches.add(m - start)
return list(all_matches)
if __name__ == '__main__':
pattern = 'AACTTG'
text = 'CACTTAATTTG'
print(approximate_match(pattern, text, 2))
|
[
"maria.a.mierzwa@gmail.com"
] |
maria.a.mierzwa@gmail.com
|
5dbf5f0adba3a36a978555d503f078233ec24a42
|
ec7359f3c1739d68ee4d18351ee048d7c42d9e5c
|
/regression/samples/overfitting.py
|
0c616ec001e19f84937dd3d00b0c0a63f52ef596
|
[] |
no_license
|
guilhermevarela/machinelearning
|
381e4db33f6500f07e3886754e94befed9610b84
|
3af1514c6f6916f51c242648ee185b417736a023
|
refs/heads/master
| 2020-12-02T19:33:52.426149
| 2019-04-02T21:45:12
| 2019-04-02T21:45:12
| 96,360,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,670
|
py
|
'''
Created on May 11, 2017
@author: Varela
'''
# notes for this course can be found at:
# https://deeplearningcourses.com/c/data-science-linear-regression-in-python
# https://www.udemy.com/data-science-linear-regression-in-python
import numpy as np
import matplotlib.pyplot as plt
import aux as ax
from regress import lin, r2
#make up some data and plot it
N = 100
X = np.linspace(0, 6*np.pi, N)
Y = np.sin(X)
plt.plot(X,Y)
plt.show()
# def fit(X,Y):
# return np.linalg.solve(X.T.dot(X), X.T.dot(Y))
def fit_and_display(X,Y, sample, deg):
N = len(X)
trainidx = np.random.choice(N, sample)
Xtrain = X[trainidx]
Ytrain = Y[trainidx]
plt.scatter(Xtrain, Ytrain)
plt.show()
#fit polynomial
# Xtrain_poly = ax.polyfy(Xtrain, deg)
X_poly = ax.polyfy(X, deg)
Xtrain_poly = X_poly[trainidx]
# w = fit(Xtrain_poly, Ytrain)
#Compute redisiduals
w, resid0 = lin(Xtrain_poly, Ytrain)
resid1 = r2(w, X_poly, Y)
print ("degree %d .: in-sample %.4f out sample %.4f" % (deg ,resid0, resid1))
#display the polynomial
# X_poly = ax.polyfy(X, deg)
Y_hat = X_poly.dot(w)
plt.plot(X,Y)
plt.plot(X,Y_hat)
plt.scatter(Xtrain, Ytrain)
plt.title("deg= %d" % deg)
plt.show()
for deg in (2,3,4,5,6,7,8,9):
fit_and_display(X, Y, 10, deg)
#LAZY programmers's
# import numpy as np
# import matplotlib.pyplot as plt
#
#
# def make_poly(X, deg):
# n = len(X)
# data = [np.ones(n)]
# for d in xrange(deg):
# data.append(X**(d+1))
# return np.vstack(data).T
#
#
# def fit(X, Y):
# return np.linalg.solve(X.T.dot(X), X.T.dot(Y))
#
#
# def fit_and_display(X, Y, sample, deg):
# N = len(X)
# train_idx = np.random.choice(N, sample)
# Xtrain = X[train_idx]
# Ytrain = Y[train_idx]
#
# plt.scatter(Xtrain, Ytrain)
# plt.show()
#
# # fit polynomial
# Xtrain_poly = make_poly(Xtrain, deg)
# w = fit(Xtrain_poly, Ytrain)
#
# # display the polynomial
# X_poly = make_poly(X, deg)
# Y_hat = X_poly.dot(w)
# plt.plot(X, Y)
# plt.plot(X, Y_hat)
# plt.scatter(Xtrain, Ytrain)
# plt.title("deg = %d" % deg)
# plt.show()
#
#
# def get_mse(Y, Yhat):
# d = Y - Yhat
# return d.dot(d) / len(d)
#
#
# def plot_train_vs_test_curves(X, Y, sample=20, max_deg=20):
# N = len(X)
# train_idx = np.random.choice(N, sample)
# Xtrain = X[train_idx]
# Ytrain = Y[train_idx]
#
# test_idx = [idx for idx in xrange(N) if idx not in train_idx]
# # test_idx = np.random.choice(N, sample)
# Xtest = X[test_idx]
# Ytest = Y[test_idx]
#
# mse_trains = []
# mse_tests = []
# for deg in xrange(max_deg+1):
# Xtrain_poly = make_poly(Xtrain, deg)
# w = fit(Xtrain_poly, Ytrain)
# Yhat_train = Xtrain_poly.dot(w)
# mse_train = get_mse(Ytrain, Yhat_train)
#
# Xtest_poly = make_poly(Xtest, deg)
# Yhat_test = Xtest_poly.dot(w)
# mse_test = get_mse(Ytest, Yhat_test)
#
# mse_trains.append(mse_train)
# mse_tests.append(mse_test)
#
# plt.plot(mse_trains, label="train mse")
# plt.plot(mse_tests, label="test mse")
# plt.legend()
# plt.show()
#
# plt.plot(mse_trains, label="train mse")
# plt.legend()
# plt.show()
#
# if __name__ == "__main__":
# # make up some data and plot it
# N = 100
# X = np.linspace(0, 6*np.pi, N)
# Y = np.sin(X)
#
# plt.plot(X, Y)
# plt.show()
#
# for deg in (5, 6, 7, 8, 9):
# fit_and_display(X, Y, 10, deg)
# plot_train_vs_test_curves(X, Y)
# '
|
[
"guilhermevarela@hotmail.com"
] |
guilhermevarela@hotmail.com
|
f75e0ea9b0d03abe0bb91340f6a38c7ef5450902
|
02f565644b729c496bb4d802dfc6cb3a5db68ff1
|
/problems/sum_of_root_to_leaf_binary_numbers.py
|
bf25156100ee3f75041e482b2c02950d01d394ed
|
[] |
no_license
|
saubhik/leetcode
|
99a854ad87272eb82b16f22408ee7314ba0db099
|
221f0cb3105e4ccaec40cd1d37b9d7d5e218c731
|
refs/heads/master
| 2023-04-27T03:11:03.565056
| 2021-05-17T07:55:22
| 2021-05-17T07:55:22
| 275,324,914
| 3
| 1
| null | 2020-10-03T07:06:17
| 2020-06-27T07:48:37
|
Python
|
UTF-8
|
Python
| false
| false
| 949
|
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
# Normal DFS.
    # Time: O(N), since every node is visited once.
# Space: O(H) = O(N) in worst case of degenerate tree, due to recursion call stack.
def sumRootToLeaf(self, root: TreeNode, running_sum: int = 0) -> int:
if root is None:
return 0
running_sum = running_sum * 2 + root.val
left_sum, right_sum = 0, 0
if root.left is not None:
left_sum = self.sumRootToLeaf(root=root.left, running_sum=running_sum)
if root.right is not None:
right_sum = self.sumRootToLeaf(root=root.right, running_sum=running_sum)
if root.left is None and root.right is None:
# leaf
return running_sum
return left_sum + right_sum
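# Minimal usage sketch (hypothetical tree encoding the binary numbers 101 and 100):
# root = TreeNode(1, TreeNode(0, TreeNode(1)), TreeNode(0, TreeNode(0)))
# Solution().sumRootToLeaf(root) # -> 0b101 + 0b100 = 5 + 4 = 9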
|
[
"saubhik.mukherjee@gmail.com"
] |
saubhik.mukherjee@gmail.com
|
d76eac3ec6e96bc0e958803f72c4ec807b4ce5fb
|
9afbf779eb64a925e379bcd005798ec04c1b7232
|
/WebScraper/WebScraper.py
|
4df72dea9228643b671e7dc2c2cfcd61782770ce
|
[] |
no_license
|
hoomanvhd/Broken_link_finder_in_a_page
|
1a1124f86163a66231b44bfa055ef6e0115dad68
|
d7a885021f33e2039057f7cd9d7c6267d3b1a87f
|
refs/heads/master
| 2021-05-17T04:14:16.126600
| 2020-03-27T18:49:49
| 2020-03-27T18:49:49
| 250,616,917
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
from bs4 import BeautifulSoup
import requests
from urllib.parse import urlparse, urljoin
import urllib.request
import colorama
colorama.init()
green = colorama.Fore.GREEN
yellow = colorama.Fore.YELLOW
gray = colorama.Fore.LIGHTBLACK_EX
red = colorama.Fore.RED
reset = colorama.Fore.RESET
internal_urls = set()
external_urls = set()
def Check(url):
try:
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
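        # Note: urlopen follows redirects and raises HTTPError for most
        # 4xx/5xx codes, so several of the statuses below are in practice
        # reported through the except branch instead.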
if response.status in [204, 301, 308, 400, 401, 404, 408, 410, 500, 501, 502]:
            print(red + str(response.status) + " - " + response.reason + " --> " + url)
else:
print(green + " no problem in --> " + url)
except Exception as exception:
print(yellow + " {}-{} ".format(exception, url))
pass
def is_valid(url):
    parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def get_links(url):
urls = set()
domain = urlparse(url).netloc
soup = BeautifulSoup(requests.get(url).content, "html.parser")
for a_tag in soup.findAll("a"):
href = a_tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(url, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + ("://") + parsed_href.netloc + parsed_href.path
if not is_valid(href):
continue
if href in internal_urls:
continue
if domain not in href:
if href not in external_urls:
Check(href)
external_urls.add(href)
continue
Check(href)
urls.add(href)
internal_urls.add(href)
return urls
total_urls = 0
def crawl(url, max_urls=30):
global total_urls
total_urls += 1
links = get_links(url)
for link in links:
if total_urls > max_urls:
break
crawl(link, max_urls=max_urls)
if __name__ == "__main__":
URL = input("please enter a url: ")
crawl(URL)
print("Total links: ", len(external_urls) + len(internal_urls))
|
[
"mahmood.vahedi.94@gmail.com"
] |
mahmood.vahedi.94@gmail.com
|
010571ff13652a4024b55546fc0ae60db508f76c
|
f781030f19b59d59817e4020b68f2bb1a293e629
|
/find_best_smoothing_value.py
|
99c7d5ce7dad4f0516cb4665822e306fcd245b6d
|
[] |
no_license
|
xchenlp/simplified-lp-nlu-v2
|
9885e7d32184d6a1910931f59abaf1c2785146ac
|
e0b00c13f4b08ec9bbd632e58fb4d7fd6f68f4ab
|
refs/heads/master
| 2023-01-31T20:27:53.087258
| 2020-12-09T01:04:31
| 2020-12-09T01:04:31
| 318,620,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,963
|
py
|
from model import Model
import os
import pandas
from sklearn.metrics import classification_report, f1_score, accuracy_score
import argparse
os.environ["CUDA_VISIBLE_DEVICES"] = '-1' # disable gpu. This is because on ca-gpu2, the cuDNN version is wrong for tensorflow
def main():
f1s, accs = [], []
es = [0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5]
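    # candidate label-smoothing values to sweep; the best one is picked at
    # the end by macro F1 and accuracy on the held-out test set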
for e in es:
model = Model(word2vec_pkl_path='/data/cb_nlu_v2/vectors/wiki-news-300d-1M.pkl', config_path='config.yml', label_smoothing=e)
test_model_path = f'/data/cb_nlu_test_model_early_stopping_with_label_smoothing'
if not os.path.exists(test_model_path):
os.makedirs(test_model_path)
tr_set_path = f'/data/starter_pack_datasets/telco/tr_100_per_class.json'
te_set_path = f'/data/starter_pack_datasets/telco/te.json'
print("start training")
######################### training #########################
model.train(tr_set_path, test_model_path)
######################### training ends #########################
print("start testing")
######################### loading #########################
model.load(test_model_path)
######################### loading ends #########################
######################### predicting #########################
df_te = pandas.read_json(te_set_path, lines=True)
output = model.predict(list(df_te.text))
######################### predicting ends #########################
######################### evaluating the prediction #########################
ground_truths = list(df_te.intent)
predictions = [x['label'] for x in output]
threshold_predictions = [x['label'] if x['highestProb'] >= 0.6 else 'undefined' for x in output]
df_te.loc[:, 'pred_intent'] = threshold_predictions
df_te.loc[:, 'pred_score'] = [x['highestProb'] for x in output]
df_te.loc[:, 'prob'] = [x['prob'] for x in output]
#df_te.to_json(f'/data/cb_nlu_results/telco/te_preds_xentroy_smoothing_{e}.json', orient='records', lines=True)
print(classification_report(y_true=ground_truths, y_pred=threshold_predictions))
f1s.append(f1_score(y_true=ground_truths, y_pred=threshold_predictions, average='macro'))
accs.append(accuracy_score(y_true=ground_truths, y_pred=threshold_predictions))
######################### evaluating the prediction ends #########################
max_f1 = max(f1s)
max_acc = max(accs)
max_f1_e = f1s.index(max_f1)
max_acc_e = accs.index(max_acc)
print(f'best macro f1 score {max_f1} is obtained at e={es[max_f1_e]}')
print(f'best accuracy score {max_acc} is obtained at e={es[max_acc_e]}')
if __name__ == '__main__':
#parser = argparse.ArgumentParser()
#parser.add_argument('--vertical', required=True, type=str)
#args = parser.parse_args()
# main(args)
main()
|
[
"xchen@liveperson.com"
] |
xchen@liveperson.com
|
6309417daf524ad0787d2defa32fb75f0a8a4672
|
3335d3c7b8eccaeb3a212a9247aa320de9306896
|
/arboretum/auditree/evidences/repo_metadata.py
|
f434980da977e6cbc785a2b2ba22413c0dc16ca8
|
[
"Apache-2.0"
] |
permissive
|
syllogy/auditree-arboretum
|
6dc3f3e254bc2124b9a1e7860f29040241afb3ce
|
ed3cdfc7eca3c44591e606178aff00744d3e4e1e
|
refs/heads/main
| 2023-03-01T19:34:55.819952
| 2021-02-15T17:53:31
| 2021-02-15T17:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository metadata evidence."""
import json
from arboretum.common.constants import IGNORE_REPO_METADATA
from compliance.evidence import RawEvidence
from compliance.utils.data_parse import format_json
class RepoMetadataEvidence(RawEvidence):
"""Repository metadata raw evidence class."""
@property
def repo_size(self):
"""Provide the repo size."""
if self.content:
rs_factory = {
'gh': self._get_gh_repo_size,
'gl': self._get_gl_repo_size,
'bb': self._get_bb_repo_size
}
if not hasattr(self, '_size'):
self._size = rs_factory[self.name[:2]]()
return self._size
@property
def filtered_content(self):
"""Provide evidence content minus the ignored fields as JSON."""
if self.content:
if not hasattr(self, '_filtered_content'):
metadata = json.loads(self.content)
for field in IGNORE_REPO_METADATA[self.name[:2]]:
try:
metadata.pop(field)
except KeyError:
pass
self._filtered_content = str(format_json(metadata))
return self._filtered_content
def _get_gh_repo_size(self):
return json.loads(self.content)['size']
def _get_gl_repo_size(self):
raise NotImplementedError('Support for Gitlab coming soon...')
def _get_bb_repo_size(self):
raise NotImplementedError('Support for Bitbucket coming soon...')
|
[
"noreply@github.com"
] |
syllogy.noreply@github.com
|
bfeef4f3238d108628c91eff7054ab03319a3c5d
|
d21f40ea0014950437e06c0ffb688b407e359a56
|
/utilities/config.py
|
7215bbcd7d43042448206ed0294f51985588258f
|
[] |
no_license
|
slimanihajar/flights-delta-lake
|
7b8d54544ba7ab77457deb26a01715c74750dd8a
|
2d2bc36bc508d6dd4dd76ac16cf47ab9008e66c9
|
refs/heads/main
| 2023-08-13T19:47:40.949018
| 2021-09-29T12:35:41
| 2021-09-29T12:35:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,051
|
py
|
#!/c/Users/aghar/anaconda3/envs/ds/python
# -*- coding: utf-8 -*-
#
# PROGRAMMER: Ahmed Gharib
# DATE CREATED: 20/06/2021
# REVISED DATE:
# PURPOSE: Configuration file to hold all the variables and import needed for DEND project
#
##
# Imports python modules
import os
import pandas as pd
import logging
from IPython.core.display import HTML
from utilities.flights_raw import FlightsRaw
print('importing libraries ....')
print(
"""
Libraries (
os, pandas as pd, logging, HTML, FlightsRaw
)
Are available now
"""
)
# Set pandas maximum column width to 400
pd.options.display.max_colwidth = 400
# Setting pandas to display all columns
pd.options.display.max_columns = None
print('pandas maximum column width is set to 400 and maximum number of columns to None')
logging.info('pandas maximum column width is set to 400 and maximum number of columns to None')
print('Setting up variables ....')
logging.info('Setting up variables ....')
# Setting the path for data source and delta lake
working_dir = os.getcwd()
data_source = os.path.join(working_dir, 'data_source', '')
delta_lake = os.path.join(working_dir, 'delta_lake', '')
flight_raw_path = os.path.join(delta_lake, 'flights_raw', '')
# Creating the flight raw directory if not exists
if not os.path.isdir(flight_raw_path):
os.mkdir(flight_raw_path)
flight_bronz_path = os.path.join(delta_lake, 'flight_bronz')
flight_silver_path = os.path.join(delta_lake, 'flight_silver')
flight_gold_path = os.path.join(delta_lake, 'flight_gold')
date_gold_path = os.path.join(delta_lake, 'date_gold')
checkpoints_path = os.path.join(working_dir, 'checkpoints')
flight_raw_checkpoint = os.path.join(checkpoints_path, "flight_raw", "")
flight_bronz_checkpoint = os.path.join(checkpoints_path, "flight_bronz")
flight_silver_checkpoint = os.path.join(checkpoints_path, "flight_silver")
flight_gold_checkpoint = os.path.join(checkpoints_path, "flight_gold")
flight_raw_data_path = os.path.join(data_source, 'flights_raw', '')
lookup_tables_path = os.path.join(data_source, 'LookupTables', '')
l_plane_path = os.path.join(lookup_tables_path, 'L_PLANE.csv')
l_airport_path = os.path.join(lookup_tables_path, 'L_AIRPORT.csv')
l_cancelation_path = os.path.join(lookup_tables_path, 'L_CANCELLATION.csv')
l_unique_carrier_path = os.path.join(lookup_tables_path, 'L_UNIQUE_CARRIERS.csv')
# Variables: a list of dictionaries used to organize and print the variables when loading the configuration
vars = [
{'Name': 'working_dir', 'Value': working_dir,
'Description': 'string path for current working directory'},
{'Name': 'data_source', 'Value': data_source,
'Description': 'string path for data source location'},
{'Name': 'delta_lake', 'Value': delta_lake,
'Description': 'string path for delta lake location'},
{'Name': 'flight_raw_path', 'Value': flight_raw_path,
'Description': 'string path for flight raw data'},
{'Name': 'flight_bronz_path', 'Value': flight_bronz_path,
'Description': 'string path for flight bronz data'},
{'Name': 'flight_silver_path', 'Value': flight_silver_path,
'Description': 'string path for flight silver data'},
{'Name': 'flight_gold_path', 'Value': flight_gold_path,
'Description': 'string path for flight gold data'},
{'Name': 'date_gold_path', 'Value': date_gold_path,
'Description': 'string path for date gold data'},
{'Name': 'checkpoints_path', 'Value': checkpoints_path,
'Description': 'string path for checkpoints directory'},
{'Name': 'flight_raw_checkpoint', 'Value': flight_raw_checkpoint,
'Description': 'string path for flight raw checkpoint'},
{'Name': 'flight_bronz_checkpoint', 'Value': flight_bronz_checkpoint,
'Description': 'string path for flight bronz checkpoint'},
{'Name': 'flight_silver_checkpoint', 'Value': flight_silver_checkpoint,
'Description': 'string path for flight silver checkpoint'},
{'Name': 'flight_gold_checkpoint', 'Value': flight_gold_checkpoint,
'Description': 'string path for flight gold checkpoint'},
{'Name': 'flight_raw_data_path', 'Value': flight_raw_data_path,
'Description': 'string path for flight raw data source'},
{'Name': 'lookup_tables_path', 'Value': lookup_tables_path,
'Description': 'string path for lookup tables directory'},
{'Name': 'l_plane_path', 'Value': l_plane_path,
'Description': 'string path for plane data csv file'},
{'Name': 'l_airport_path', 'Value': l_airport_path,
'Description': 'string path for airport data csv file'},
{'Name': 'l_cancelation_path', 'Value': l_cancelation_path,
'Description': 'string path for cancelation data csv file'},
{'Name': 'l_unique_carrier_path', 'Value': l_unique_carrier_path,
'Description': 'string path for unique carrier csv file'}
]
vars_df = pd.DataFrame(vars).to_html()
print('vars_df is available as HTML content to display simply run HTML(vars_df)')
logging.info('vars_df is available as HTML content to display simply run HTML(vars_df)')
|
[
"a.gharib89@yahoo.com"
] |
a.gharib89@yahoo.com
|
7de2176332b55861a0bb08f58d2b7caa91fa5460
|
77b3923245bd7b4a215471067548d1aeb9c8d8c0
|
/fboss/system_tests/test_server.py
|
09fd3e7fcf0fdec66bad1a6a34595c791128fe76
|
[
"BSD-3-Clause"
] |
permissive
|
nanokinetics/fboss
|
95682f29aa77bd465ee64f54cad9f41789c9f66b
|
2e4799aefaddd5ef5f82002c6d31538257b90460
|
refs/heads/master
| 2021-04-14T04:41:36.688789
| 2018-03-22T23:02:18
| 2018-03-22T23:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,250
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
A service to run on testing hosts, so that we can source traffic, sync
traffic, etc.
Modeled after pyserver example in open source
https://github.com/facebook/fbthrift/blob/master/thrift/tutorial/\
py/PythonServer.py
"""
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import THeaderProtocol
from thrift.server import TServer
from neteng.fboss.ttypes import FbossBaseError
from fboss.system_tests.test import TestService
from fboss.system_tests.test.ttypes import DeviceType
from fboss.system_tests.test.constants import DEFAULT_PORT
from os.path import isfile
import json
import logging
import pcapy # this library currently only supports python 2.x :-(
import signal
import socket
import subprocess
import time
import traceback
class TestServer(TestService.Iface):
SERVICE = TestService
    PCAP_READ_BATCHING = 100  # in milliseconds
DEVICE_TYPE_ENUM_TO_STRING = {
DeviceType.LOOPBACK: "dummy"
}
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
self.log.addHandler(logging.StreamHandler())
self.log.setLevel(logging.INFO)
self.pcap_captures = {}
self.pkt_captures = {}
self.log.debug("Log: debug enabled")
# Keep track of added_interfaces so users can't remove interfaces
# they didn't add
self.added_interfaces = set()
signal.signal(signal.SIGINT, self._cleanUpAddedInterfaces)
self.log.info("Log: info enabled")
self.log.warn("Log: warnings enabled")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._cleanUpAddedInterfaces()
def ping(self, ip, options=None):
""" @param ip : a string, e.g., "128.8.128.118" """
# Ping default options is to capture 1 packet
if not options:
options = ['-c', '1']
if ":" in ip:
options.append('-6')
# Using subprocess.call with shell=False should prevent
# any security concerns because this just calls exec() natively
# (assuming there are no commandline buffer overflows in ping)
command = ["ping"] + options + [ip]
self.log.debug("Ping: running `%s`" % " ".join(command))
with open("/dev/null") as devnull:
response = subprocess.call(command, stdout=devnull)
return response == 0
def get_interface_mtu(self, intf):
command = ["cat", "/sys/class/net/%s/mtu" % intf]
response = subprocess.check_output(command)
return int(response)
def get_v4_ip(self, intf):
command = ["ip", "-4", "addr", "show", intf]
response = subprocess.check_output(command)
return response
def get_v6_ip(self, intf):
command = ["ip", "-6", "addr", "show", intf]
response = subprocess.check_output(command)
return response
def status(self):
return True
def startPktCapture(self, interface_name, pcap_filter_str):
self.log.debug("startPktCapture(%s,filter=%s)" % (
interface_name, pcap_filter_str))
if interface_name in self.pcap_captures:
# close out any old captures
del self.pcap_captures[interface_name]
reader = pcapy.open_live(interface_name, 1500,
1, self.PCAP_READ_BATCHING)
reader.setnonblock(1)
reader.setfilter(pcap_filter_str)
self.pcap_captures[interface_name] = reader
self.pkt_captures[interface_name] = []
def _pkt_callback(self, interface_name, pkt_hdr, pkt_data):
pkts = self.pkt_captures[interface_name]
self.log.debug("_pkt_callback(%s): %d pkts captured" %
(interface_name, len(pkts)))
pkts.append((pkt_hdr, pkt_data))
def getPktCapture(self, interface_name, ms_timeout, maxPackets):
""" All documentation for this call is in thrift definition """
if interface_name not in self.pcap_captures:
raise FbossBaseError("No startPktCapture() for Interface " +
interface_name)
reader = self.pcap_captures[interface_name]
start = time.time()
intf = interface_name # horrible hack to fit in 80 chars below
self.log.debug("getPktCapture(%s,ms_timeout=%d,maxPackets=%d)" %
(interface_name, ms_timeout, maxPackets))
while time.time() < (start + (ms_timeout / 1000)):
# this reader is set to not block, so this will busy wait until
# packets show up or the timeout occurs. Given the note
# about the readtimeout param in open_live() not being widely
# supported, this is the best we can do
try:
reader.dispatch(
maxPackets,
lambda phdr, pdata: self._pkt_callback(intf, phdr, pdata))
except Exception as e:
traceback.print_exc()
print(str(e))
raise
pkts = self.pkt_captures[interface_name]
self.log.debug(" got %d packets" % len(pkts))
if len(pkts) >= maxPackets:
break
return_pkts = []
for _phdr, pdata in pkts:
capture = TestService.CapturedPacket()
capture.packet_data = pdata
capture.packet_length = _phdr.getlen()
return_pkts.append(capture)
return return_pkts
def stopPktCapture(self, interface_name):
if interface_name not in self.pcap_captures:
raise FbossBaseError("Calling stopPktCapture() without " +
" startPktCapture()?")
del self.pcap_captures[interface_name]
def sendPkt(self, interface_name, pkt):
""" To send a packet, we need an open_live call.
If one's already open, use that, if not, start and
stop one quickly """
self.log.warning("Doing sendPkt(%s,len(%s))" % (interface_name,
len(pkt)))
        ## NOTE: pcapy.reader.sendpacket() is not implemented, so fall back to a plain Python raw socket
raw = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
raw.bind((interface_name, 0))
raw.send(pkt)
@staticmethod
def check_output(cmd, **kwargs):
return subprocess.check_output(cmd.split(' '), **kwargs)
def kill_iperf3(self):
try:
self.check_output('pkill -9 iperf3')
except Exception:
pass
def iperf3_server(self, timeout, options):
''' initialize iperf3 server, with timeout to expire server if no
client request comes in
'''
def timeout_handler(signum, frame):
raise FbossBaseError("IPERF3 SERVER TIMEOUT")
self.kill_iperf3() # kill lingering iperf3 processes (server or client)
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(timeout)
iperf3_pid_fn = '/tmp/iperf3_thrift.pid'
server_options = '-I {fn} -J -1 -s '.format(fn=iperf3_pid_fn)
if options:
server_options += ' '.join(options)
command = "iperf3 {options}".format(options=server_options)
try:
response = self.check_output(command)
except Exception as e:
response = json.dumps({'error': repr(e)})
finally:
signal.alarm(0)
if isfile(iperf3_pid_fn):
with open(iperf3_pid_fn, 'r') as f:
pid = f.read().strip('\0')
self.check_output('kill -9 {pid}'.format(pid=pid))
return response
def iperf3_client(self, server_ip, client_retries=3):
''' @param ip : a string, e.g., "128.8.128.118"
@param client_retries: int, how many retries client attempts
'''
self.kill_iperf3() # kill lingering iperf3 processes (server or client)
is_ipv6 = '-6' if ':' in server_ip else ''
command = "iperf3 {} -J -t 1 -c {}".format(is_ipv6, server_ip)
client_loop_cnt = 0
while client_loop_cnt < client_retries:
try:
response = self.check_output(command)
break
except Exception:
client_loop_cnt += 1
error_msg = '{} retries to reach iperf3 server {}' \
.format(client_loop_cnt, server_ip)
response = json.dumps({'error': error_msg})
time.sleep(1)
return response
def flap_server_port(self, interface, numberOfFlaps, sleepDuration):
''' System test to verify port flap handling works and doesn't hang
the system.
'''
command_for_interface_down = 'ifconfig ' + interface + " down"
command_for_interface_up = 'ifup ' + interface
for _iteration in range(1, numberOfFlaps + 1):
# Ignore the SIGHUP signal to maintain session when the server port
# is turned down.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.log.debug("Flap iteration {}".format(_iteration))
self.check_output(command_for_interface_down)
time.sleep(sleepDuration)
self.check_output(command_for_interface_up)
# Change the SIGHUP settings back to original value.
signal.signal(signal.SIGHUP, old_handler)
def add_interface(self, ifName, deviceType):
if ifName in self.added_interfaces:
raise FbossBaseError(
"Device {} already exists".format(ifName))
strDeviceType = self.DEVICE_TYPE_ENUM_TO_STRING.get(deviceType)
if not strDeviceType:
raise FbossBaseError(
"DeviceType {} not found/supported (are you sure you used the enum?)"
.format(deviceType))
command = "ip link add name {} type {}".format(ifName, strDeviceType)
try:
self.check_output(command)
except Exception as e:
# Ignoring "File exists" error
if "exit status 2" not in str(e):
raise FbossBaseError("Error adding interface: {}".format(str(e)))
self.added_interfaces.add(ifName)
return True
def remove_interface(self, ifName):
if ifName not in self.added_interfaces:
raise FbossBaseError(
"User attempted to remove an interface they did not add. Ignoring.")
command = "ip link del dev {}".format(ifName)
try:
self.check_output(command)
except Exception as e:
raise FbossBaseError("Error deleting interface: {}".format(str(e)))
self.added_interfaces.remove(ifName)
return True
def add_address(self, address, ifName):
command = "ip addr add {} dev {}".format(address, ifName)
try:
self.check_output(command)
except Exception as e:
# Ignoring "File exists" error
if "exit status 2" not in str(e):
raise FbossBaseError("Error adding address: {}".format(str(e)))
return True
def _cleanUpAddedInterfaces(self):
for ifName in self.added_interfaces:
command = "ip link del dev {}".format(ifName)
try:
self.check_output(command)
except Exception as e:
self.log.info("Error deleting interface: {}".format(str(e)))
if __name__ == '__main__':
with TestServer() as handler:
transport = TSocket.TServerSocket(DEFAULT_PORT)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = THeaderProtocol.THeaderProtocolFactory()
server = TServer.TSimpleServer(handler, transport, tfactory, pfactory)
# You could do one of these for a multithreaded server
# server = TServer.TThreadedServer(handler, transport, tfactory, pfactory)
print('Starting the server...')
server.serve()
print('Done.')
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
9b94651beea06eff8912c9e6c4fa33218dddd01b
|
0bfb0c59bdaf3a8999f22abc9a177ba0560817cc
|
/fgiggle/manage.py
|
e58288544eb6dfd6b05827602c2a0384d52d93e5
|
[] |
no_license
|
CristiBalu/furry-giggle
|
0b51bf7d61ac4beb9d8ed7725c932dc09841418c
|
81521cc52f3b8fddcc2527dfb34867a21323a4b7
|
refs/heads/master
| 2020-04-19T04:05:32.567934
| 2016-09-07T22:05:44
| 2016-09-07T22:05:44
| 67,527,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fgiggle.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"sb720@exeter.ac.uk"
] |
sb720@exeter.ac.uk
|
991297ff8bc64fc31dcaf0117b5a96ea34df9c31
|
2579392850ca0534d98b391bae670b578792ff3f
|
/historical/tests/test_commons.py
|
37802929ba4e1335a852fe4644105623bbf7fce8
|
[
"Apache-2.0"
] |
permissive
|
shdobxr/historical
|
7bf4b6db357d78065b7ed31d77b7b237ee61d031
|
05a8473fb5fc00233fdbb0974c515110dc64ffbc
|
refs/heads/master
| 2020-03-24T09:10:19.330468
| 2018-07-26T20:31:18
| 2018-07-26T20:31:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,057
|
py
|
"""
.. module: historical.tests.test_commons
:platform: Unix
:copyright: (c) 2017 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. author:: Mike Grima <mgrima@netflix.com>
"""
import json
from datetime import datetime
import pytest
from historical.common.exceptions import DurableItemIsMissingException
from historical.s3.collector import process_update_records
from historical.tests.factories import CloudwatchEventFactory, DetailFactory, serialize, DynamoDBRecordFactory, \
DynamoDBDataFactory
S3_BUCKET = {
"arn": "arn:aws:s3:::testbucket1",
"principalId": "joe@example.com",
"userIdentity": {
"sessionContext": {
"userName": "oUEKDvMsBwpk",
"type": "Role",
"arn": "arn:aws:iam::123456789012:role/historical_poller",
"principalId": "AROAIKELBS2RNWG7KASDF",
"accountId": "123456789012"
},
"principalId": "AROAIKELBS2RNWG7KASDF:joe@example.com"
},
"accountId": "123456789012",
"eventTime": "2017-09-08T00:34:34Z",
"eventSource": "aws.s3",
"BucketName": "testbucket1",
"Region": "us-east-1",
"Tags": {},
"configuration": {
"Grants": {
"75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a": [
"FULL_CONTROL"
]
},
"Owner": {
"ID": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a"
},
"LifecycleRules": [
{
"Status": "Enabled",
"Prefix": None,
"Expiration": {
"Days": 7
},
"ID": "Some cleanup"
}
],
"Logging": {},
"Policy": None,
"Versioning": {},
"Website": None,
"Cors": [],
"Notifications": {},
"Acceleration": None,
"Replication": {},
"CreationDate": "2006-02-03T16:45:09Z",
"AnalyticsConfigurations": [],
"MetricsConfigurations": [],
"InventoryConfigurations": [],
"Name": "testbucket1",
"_version": 8
}
}
def test_deserialize_current_record_to_current_model(historical_role, current_s3_table, buckets):
from historical.common.dynamodb import deserialize_current_record_to_current_model
from historical.s3.models import CurrentS3Model
# Create the event to fetch the Current data from:
bucket = S3_BUCKET.copy()
bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': bucket['arn']
}),
eventName='INSERT'), default=serialize))
result = deserialize_current_record_to_current_model(ddb_record, CurrentS3Model)
assert result.configuration.attribute_values['Name'] == "testbucket1"
assert isinstance(result, CurrentS3Model)
# And for sns_too_big:
# Create the bucket in the current table:
now = datetime.utcnow().replace(tzinfo=None, microsecond=0)
create_event = json.loads(json.dumps(CloudwatchEventFactory(
detail=DetailFactory(
requestParameters={
"bucketName": "testbucket1"
},
eventSource="aws.s3",
eventName="CreateBucket",
eventTime=now
)
), default=serialize))
process_update_records([create_event])
del bucket['configuration']
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': bucket['arn']
}),
eventName='INSERT'), default=serialize))
ddb_record['sns_too_big'] = True
result = deserialize_current_record_to_current_model(ddb_record, CurrentS3Model)
assert result.configuration.attribute_values['Name'] == "testbucket1"
assert isinstance(result, CurrentS3Model)
# And if the object isn't in the current table:
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': 'arn:aws:s3:::notarealbucket'
}),
eventName='INSERT'), default=serialize))
ddb_record['sns_too_big'] = True
result = deserialize_current_record_to_current_model(ddb_record, CurrentS3Model)
assert not result
def test_deserialize_durable_record_to_durable_model(historical_role, durable_s3_table, buckets):
from historical.common.dynamodb import deserialize_durable_record_to_durable_model, \
deserialize_current_record_to_durable_model
from historical.s3.models import CurrentS3Model, DurableS3Model
# Create the event to fetch the Durable data from:
bucket = S3_BUCKET.copy()
del bucket['eventSource']
bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': bucket['arn']
}),
eventName='INSERT'), default=serialize))
result = deserialize_durable_record_to_durable_model(ddb_record, DurableS3Model)
assert result
assert result.configuration.attribute_values['Name'] == "testbucket1"
assert result.eventTime == bucket['eventTime']
assert isinstance(result, DurableS3Model)
# And for sns_too_big:
# Create the bucket in the durable table:
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': bucket['arn']
}),
eventName='INSERT'), default=serialize))
revision = deserialize_current_record_to_durable_model(ddb_record, CurrentS3Model, DurableS3Model)
revision.save()
ddb_record['sns_too_big'] = True
del bucket['configuration']
result = deserialize_durable_record_to_durable_model(ddb_record, DurableS3Model)
assert result
assert result.configuration.attribute_values['Name'] == "testbucket1"
assert result.eventTime == bucket['eventTime']
assert isinstance(result, DurableS3Model)
# And if the object isn't in the durable table:
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': 'arn:aws:s3:::notarealbucket'
}),
eventName='INSERT'), default=serialize))
ddb_record['sns_too_big'] = True
# Raises an exception:
with pytest.raises(DurableItemIsMissingException):
deserialize_durable_record_to_durable_model(ddb_record, DurableS3Model)
def test_deserialize_durable_record_to_current_model(historical_role, current_s3_table, buckets):
from historical.common.dynamodb import deserialize_durable_record_to_current_model
from historical.s3.models import CurrentS3Model
# Create the event to fetch the Current data from:
bucket = S3_BUCKET.copy()
del bucket['eventSource']
bucket['eventTime'] = datetime(year=2017, month=5, day=12, hour=10, minute=30, second=0).isoformat() + 'Z'
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': bucket['arn']
}),
eventName='INSERT'), default=serialize))
result = deserialize_durable_record_to_current_model(ddb_record, CurrentS3Model)
assert result.configuration.attribute_values['Name'] == "testbucket1"
assert isinstance(result, CurrentS3Model)
# And for sns_too_big:
# Create the bucket in the Current table:
now = datetime.utcnow().replace(tzinfo=None, microsecond=0)
create_event = json.loads(json.dumps(CloudwatchEventFactory(
detail=DetailFactory(
requestParameters={
"bucketName": "testbucket1"
},
eventSource="aws.s3",
eventName="CreateBucket",
eventTime=now
)
), default=serialize))
process_update_records([create_event])
del bucket['configuration']
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': bucket['arn']
}),
eventName='INSERT'), default=serialize))
ddb_record['sns_too_big'] = True
result = deserialize_durable_record_to_current_model(ddb_record, CurrentS3Model)
assert result
assert result.configuration.attribute_values['Name'] == "testbucket1"
assert isinstance(result, CurrentS3Model)
# And if the object isn't in the durable table:
ddb_record = json.loads(json.dumps(DynamoDBRecordFactory(dynamodb=DynamoDBDataFactory(
NewImage=bucket, Keys={
'arn': 'arn:aws:s3:::notarealbucket'
}),
eventName='INSERT'), default=serialize))
ddb_record['sns_too_big'] = True
result = deserialize_durable_record_to_current_model(ddb_record, CurrentS3Model)
assert not result
|
[
"mgrima@netflix.com"
] |
mgrima@netflix.com
|
9459684b423408f0e0e297ebd428202457793f54
|
bfd523ae160a49e24c9529dbbf6eb151c1790b2c
|
/localizacao/migrations/0001_initial.py
|
c406ff375662ebd56882ed26ca894e8712b6c5e7
|
[] |
no_license
|
FilipeBP/DjangoAPI
|
bad1d5b273fa0795e2ad1abf9c206998b46c0504
|
b05d9f46901597d95b1f2dd39bfd429bbdfbd0cc
|
refs/heads/master
| 2020-09-08T09:11:56.469188
| 2019-11-16T19:13:07
| 2019-11-16T19:13:07
| 221,089,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
# Generated by Django 2.2.7 on 2019-11-13 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Localization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=70)),
('state', models.CharField(max_length=70)),
('city', models.CharField(max_length=70)),
('row1', models.CharField(max_length=150)),
('row2', models.CharField(blank=True, max_length=150, null=True)),
('lat', models.DecimalField(decimal_places=6, max_digits=9)),
],
),
]
|
[
"filipebritoperez@hotmail.com"
] |
filipebritoperez@hotmail.com
|
a23aa3dc313e6e4ce01499017354a5d938c4d720
|
4c15467527b2526cb24e543868b4b4249949b4d4
|
/main.py
|
c606b82bbe05f5c085945c4adaf02f820f2365b9
|
[] |
no_license
|
reergymerej/chords
|
9cd3f4170a5361d715a9c04b2d0f62feb222f05a
|
878113d95823431b0a4217e386d8aa3f8c11ab5c
|
refs/heads/master
| 2021-09-03T11:56:11.743040
| 2018-01-08T21:56:22
| 2018-01-08T21:56:22
| 115,677,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
#!/usr/bin/env python3
print("Give me a root note.")
NOTES = [
'A',
'Bโญ',
'B',
'C',
'Cโฏ',
'D',
'Eโญ',
'E',
'F',
'Fโฏ',
'G',
'Gโฏ',
]
def validate_note(note):
return note.upper() in NOTES
def interpret_note_variant(note):
"""Convert shorthand and weird names to our NOTES."""
note = note.replace('+', 'โฏ')
note = note.replace('-', 'โญ')
note = note.upper()
weird_variations = {
'Aโญ': 'Gโฏ',
'Aโฏ': 'Bโญ',
'Dโญ': 'Cโฏ',
'Dโฏ': 'Eโญ',
'Gโญ': 'Fโฏ',
}
if (note in weird_variations):
return weird_variations[note]
return note
def ask_for_note():
while True:
note = interpret_note_variant(input("root note: "))
if (validate_note(note)):
return note
else:
print("I don't understand that note.")
def shift_list(the_list, item):
"""Reorganize list so item is at 0 index."""
index = the_list.index(item)
before = the_list[:index]
the_list = the_list[index:]
the_list.extend(before)
return the_list
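# For example (comment added for clarity), shift_list(NOTES, 'C') yields
# ['C', 'C♯', 'D', ..., 'B'], so the chord builders below can pick
# intervals by index: 4 semitones up is a major third, 7 a perfect fifth.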
def get_major_chord(root_note):
shifted_notes = shift_list(NOTES, root_note)
third = shifted_notes[4]
fifth = shifted_notes[7]
chord = '{:s}-{:s}-{:s}'.format(root_note, third, fifth)
return 'major', chord
def get_minor_chord(root_note):
shifted_notes = shift_list(NOTES, root_note)
third = shifted_notes[3]
fifth = shifted_notes[7]
chord = '{:s}-{:s}-{:s}'.format(root_note, third, fifth)
return 'minor', chord
note = ask_for_note()
chord_types = [
get_major_chord,
get_minor_chord,
]
for chord_type in chord_types:
chord_type_name, chord = chord_type(note)
print('The {:s} chord for the root {:s} is {:s}.'.format(chord_type_name, note, chord))
|
[
"jeremy.greer.atl@gmail.com"
] |
jeremy.greer.atl@gmail.com
|
2ae0490a8033275531c9922d8b3dbeb36687774c
|
2ed1b17c2bd710a1cb31d5e6e7956297a374a8f6
|
/04-Evr_List_Comprehensions/Solved/comprehensions.py
|
c45261007e8c2e6730273eefa277f6e2845eb86f
|
[] |
no_license
|
jzcapets/09PythonActivities2
|
cf5910dc1212dcb6195936f0cb0b36b08d7b30f0
|
7f2998cda57fa7f43a6ac6c610eb35f68abd8983
|
refs/heads/master
| 2020-11-27T12:41:40.663968
| 2019-12-21T15:11:43
| 2019-12-21T15:11:43
| 229,444,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
fish = "halibut"
# Loop through each letter in the string
# and push to an array
letters = []
for letter in fish:
letters.append(letter)
print(letters)
# List comprehensions provide concise syntax for creating lists
letters = [letter for letter in fish]
print(letters)
# We can manipulate each element as we go
capital_letters = []
for letter in fish:
capital_letters.append(letter.upper())
print(capital_letters)
# List Comprehension for the above
capital_letters = [letter.upper() for letter in fish]
print(capital_letters)
# We can also add conditional logic (if statements) to a list comprehension
july_temperatures = [87, 85, 92, 79, 106]
hot_days = []
for temperature in july_temperatures:
if temperature > 90:
hot_days.append(temperature)
print(hot_days)
# List Comprehension with conditional
hot_days = [temperature for temperature in july_temperatures if temperature > 90]
print(hot_days)
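# An added illustration (not part of the original activity): a single
# comprehension can transform and filter at the same time.
hot_days_celsius = [round((temperature - 32) * 5 / 9, 1)
                    for temperature in july_temperatures if temperature > 90]
print(hot_days_celsius)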
|
[
"noreply@github.com"
] |
jzcapets.noreply@github.com
|
bc3244f2e25658027cd449260e5ba51108e15589
|
2d56818b961354d1aed1f4884eeacfa5390a236f
|
/Day 9/day9_starting_code.py
|
aa4be2b5d8b2470e223af316bae473703566dbf3
|
[] |
no_license
|
bayramhayri/100DaysOfPython
|
675a6ece8cbb45b47af7d95e8f881b623877dd25
|
f3dae0882055f11f65f02ac3b86702ecd154d5d6
|
refs/heads/main
| 2023-01-23T16:30:34.561243
| 2020-12-05T14:23:08
| 2020-12-05T14:23:08
| 312,025,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
programming_dictionary = {
"Bug": "An error in a program that prevents the program from running as expected.",
"Function": "A piece of code that you can easily call over and over again."
}
print(programming_dictionary["Bug"])
# Adding new items to dictionary.
programming_dictionary["Loop"] = "The action of doing something over and over again."
print(programming_dictionary)
# Create an empty dictionary.
empty_dictionary = {}
# Wipe an existing dictionary.
#programming_dictionary = {}
# Edit an item in a dictionary.
programming_dictionary["Bug"] = "A moth in your computer."
print(programming_dictionary)
# Loop through a dictionary
for key in programming_dictionary:
print(key)
print(programming_dictionary[key])
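# An added illustration (not in the original lesson): .items() yields
# key/value pairs directly, avoiding the extra lookup inside the loop.
for key, value in programming_dictionary.items():
    print(f"{key}: {value}")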
|
[
"bayramhayri@gmail.com"
] |
bayramhayri@gmail.com
|
ec916814fa9524bc04235e34b57fdd9c3a848dc7
|
678864ef6b96ca59c5de29dab0047f6b7cc7463a
|
/libs/SmartMeshSDK/ApiDefinition/IpMgrDefinition.py
|
9dd1768799bb4cc3d972f6237ea4bfa323ccba15
|
[] |
no_license
|
ddidiy/smartmeshsdk
|
c880f894a7f45a38af15d2f427a742115d1b744d
|
947224c945e560854ff3e80d142a79077c267671
|
refs/heads/master
| 2021-01-12T19:24:18.464870
| 2016-08-04T19:05:02
| 2016-08-04T19:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77,802
|
py
|
#!/usr/bin/python
import ApiDefinition
import ByteArraySerializer
class IpMgrDefinition(ApiDefinition.ApiDefinition):
'''
\ingroup ApiDefinition
\brief API definition for the IP manager.
\note This class inherits from ApiDefinition. It redefines the attributes of
its parents class, but inherits the methods.
'''
STRING = ApiDefinition.FieldFormats.STRING
BOOL = ApiDefinition.FieldFormats.BOOL
INT = ApiDefinition.FieldFormats.INT
INTS = ApiDefinition.FieldFormats.INTS
HEXDATA = ApiDefinition.FieldFormats.HEXDATA
RC = ApiDefinition.ApiDefinition.RC
SUBID1 = ApiDefinition.ApiDefinition.SUBID1
SUBID2 = ApiDefinition.ApiDefinition.SUBID2
RC_OK = ApiDefinition.ApiDefinition.RC_OK
OPTIONAL = [
'pkLen_1',
'delay_1',
'pkLen_2',
'delay_2',
'pkLen_3',
'delay_3',
'pkLen_4',
'delay_4',
'pkLen_5',
'delay_5',
'pkLen_6',
'delay_6',
'pkLen_7',
'delay_7',
'pkLen_8',
'delay_8',
'pkLen_9',
'delay_9',
'pkLen_10',
'delay_10',
'stationId',
]
def __init__(self):
self.serializer = ByteArraySerializer.ByteArraySerializer(self)
def default_serializer(self,commandArray,fieldsToFill):
'''
\brief IpMgrDefinition-specific implementation of default serializer
\param commandArray An array of the form [commandName, subCommandname]
The array can be of any length, and is of length 1
if no subcommands are used.
\param fieldsToFill The list of fields to send, organized as a
dictionary of the form
<tt>fieldname:fieldvalue</tt>.
\returns id,byteString where id is the command ID and byteArray an array
of bytes
'''
return self.serializer.serialize(commandArray,fieldsToFill)
def deserialize(self,type,cmdId,byteArray):
'''
\brief IpMgrDefinition-specific implementation of deserializer
'''
return self.serializer.deserialize(type,cmdId,byteArray)
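# A hedged usage sketch of the serializer entry points above (hypothetical
# values; assumes the 'pingMote' command layout defined further below):
#   apidef = IpMgrDefinition()
#   cmd_id, byte_array = apidef.default_serializer(
#       ['pingMote'],                 # commandArray: no subcommands
#       {'macAddress': [0x00] * 8},   # fieldsToFill: fieldname -> value
#   )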
# We redefine this attribute inherited from ApiDefinition. See
# ApiDefinition for a full description of the structure of this field.
fieldOptions = {
# 'notificationTypes' : [
# [ 1, 'event', 'Event notification'],
# [ 2, 'log', 'Log notification'],
# [ 4, 'data', 'Data payload notification'],
# [ 5, 'ipData', '6LoWPAN packet notification'],
# [ 6, 'healthReport', 'Health report notification'],
# ],
# 'eventTypes' : [
# [ 0, 'moteReset', 'A mote reset'],
# [ 1, 'networkReset', 'The network was reset'],
# [ 2, 'commandFinish ', 'A command has completed execution'],
# [ 3, 'moteJoin', 'A mote joined the network'],
# [ 4, 'moteOperational', 'A new mote was configured and is now operational'],
# [ 5, 'moteLost', 'A mote is no longer communicating in the network'],
# [ 6, 'netTime', 'Contains the network uptime (in response to a getTime command)'],
# [ 7, 'pingResponse', 'A reply was received from a mote ping'],
# [10, 'pathCreate', 'A path was created'],
# [11, 'pathDelete', 'A path was deleted'],
# [12, 'packetSent', 'A packet was sent'],
# [13, 'moteCreate', 'A mote was created'],
# [14, 'moteDelete', 'A mote was deleted'],
# ],
RC : [
[ 0, 'RC_OK', 'The application layer has processed the command correctly'],
[ 1, 'RC_INVALID_COMMAND', 'Invalid command'],
[ 2, 'RC_INVALID_ARGUMENT', 'Invalid argument'],
[11, 'RC_END_OF_LIST', 'End of list is returned when an iteration reaches the end of the list of objects'],
[12, 'RC_NO_RESOURCES', 'Reached maximum number of items'],
[13, 'RC_IN_PROGRESS', 'Operation is in progress'],
[14, 'RC_NACK', 'Negative acknowledgment'],
[15, 'RC_WRITE_FAIL', 'Flash write failed'],
[16, 'RC_VALIDATION_ERROR', 'Parameter validation error'],
[17, 'RC_INV_STATE', 'Object has inappropriate state'],
[18, 'RC_NOT_FOUND', 'Object is not found'],
],
'frameProfile' : [
[ 1, 'Profile_01', 'Fast network build, medium speed network operation'],
],
'advertisementState' : [
[ 0, 'on', 'Advertisement is on'],
[ 1, 'off', 'Advertisement is off'],
],
'downstreamFrameMode' : [
[ 0, 'normal', 'Normal downstream bandwidth'],
[ 1, 'fast', 'Fast downstream bandwidth'],
],
'networkState' : [
[ 0, 'operational', 'Network is operating normally'],
[ 1, 'radiotest', 'Manager is in radiotest mode'],
[ 2, 'notStarted', 'Waiting for startNetwork API command'],
[ 3, 'errorStartup', 'Unexpected error occurred at startup'],
[ 4, 'errorConfig', 'Invalid or not licensed configuration found at startup'],
[ 5, 'errorLicense', 'Invalid license file found at startup'],
],
'moteState' : [
[ 0, 'lost', 'Mote is not currently part of the network'],
[ 1, 'negotiating', 'Mote is in the process of joining the network'],
[ 4, 'operational', 'Mote is operational'],
],
'resetType' : [
[ 0, 'resetSystem', 'Reset the system'],
[ 2, 'resetMote', 'Reset the mote'],
],
'backboneFrameMode' : [
[ 0, 'off', 'Backbone frame is off'],
[ 1, 'upstream', 'Backbone frame is activated for upstream frames'],
[ 2, 'bidirectional', 'Backbone frame is activated for both upstream and downstream frames'],
],
'pathFilter' : [
[ 0, 'all', 'All paths'],
[ 1, 'upstream', 'Upstream paths'],
],
'pathDirection' : [
[ 0, 'none', 'No path'],
[ 1, 'unused', 'Path is not used'],
[ 2, 'upstream', 'Upstream path'],
[ 3, 'downstream', 'Downstream path'],
],
'packetPriority': [
[0, 'Low', 'Default packet priority'],
[1, 'Medium', 'Higher packet priority'],
[2, 'High', 'Highest packet priority'],
],
'commandFinishedResult': [
[0, 'OK', 'Command completed successfully'],
[1, 'nack', 'Command not acknowledged'],
[2, 'commandTimeout', 'Command timed out'],
],
'ccaMode': [
[0, 'off', 'CCA disabled'],
[1, 'energy', 'Energy detect'],
[2, 'carrier', 'Carrier detect'],
[3, 'both', 'Energy detect and Carrier detect'],
],
'radiotestType': [
[0, 'packet', 'Transmit packets'],
[1, 'cm', 'Continuous modulation'],
[2, 'cw', 'Continuous wave'],
[3, 'pkcca', 'Packet test with clear channel assessment (CCA) enabled'],
],
'seqSize': [
[0, '0', ''],
[1, '1', ''],
[2, '2', ''],
[3, '3', ''],
[4, '4', ''],
[5, '5', ''],
[6, '6', ''],
[7, '7', ''],
[8, '8', ''],
[9, '9', ''],
[10, '10', ''],
],
#==================== misc ============================================
'successCode' : [
[ 0, 'success', ''],
[ 1, 'unsupported_version', ''],
[ 2, 'invalid_mode', ''],
],
'mode' : [
[ 0, 'legacy', ''],
],
'userRole' : [
[ 0, 'viewer', 'Viewer-role user has read-only access to non-sensitive network information'],
[ 1, 'user', 'User-role user has read-write privileges'],
],
}
# We redefine this attribute inherited from ApiDefinition. See
# ApiDefinition for a full description of the structure of this field.
commands = [
# command 'hello' (commandID 1) is handled by the Hdlc module
# command 'hello_response' (commandID 2) is handled by the Hdlc module
{
'id' : 1,
'name' : 'mux_hello',
'description': 'Sent by the manager to initiate a new session with a client.',
'request' : [
['version', INT, 1, None],
['secret', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['version', INT, 1, None],
],
},
},
{
'id' : 1,
'name' : 'hello',
'description': '',
'request' : [
['version', INT, 1, None],
['cliSeqNo', INT, 1, None],
['mode', INT, 1, True],
],
},
{
'id' : 2,
'name' : 'hello_response',
'description': '',
'request' : [], # there is no request
'response' : {
'FIELDS': [
['successCode', INT, 1, True],
['version', INT, 1, None],
['mgrSeqNo', INT, 1, None],
['cliSeqNo', INT, 1, None],
['mode', INT, 1, True],
],
},
},
{
'id' : 21,
'name' : 'reset',
'description': 'The reset command is used to reset various objects. The command argument is an object type, and if the object is a mote the MAC address must be specified (otherwise that argument is ignored).',
'request' : [
['type', INT, 1, 'resetType'],
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['macAddress', HEXDATA, 8, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Mote with specified MAC address is not found',
'RC_INV_STATE' : 'Mote is not in operational state',
'RC_NACK' : 'User commands queue is full (applies to mote reset)',
'RC_INVALID_ARGUMENT' : 'Invalid reset type value',
},
},
{
'id' : 22,
'name' : 'subscribe',
'description': "The subscribe command indicates that the manager should send the external application the specified notifications. It contains two filter fields:\n\n- filter is a bitmask of flags indicating the types of notifications that the client wants to receive\n- unackFilter allows the client to select which of the notifications selected in filter should be sent acknowledged. If a notification is sent as 'acknowledged', thesubsequent notification packets will be queued while waiting for response.\n\nEach subscription request overwrites the previous one. If an application is subscribed to data and then decides he also wants events he should send a subscribe command with both the data and event flags set. To clear all subscriptions, the client should send a subscribe command with the filter set to zero. When a session is initiated between the manager and a client, the subscription filter is initialized to zero.\n\nThe subscribe bitmap uses the values of the notification type enumeration. Some values are unused to provide backwards compatibility with earlier APIs.",
'request' : [
['filter', HEXDATA, 4, None],
['unackFilter', HEXDATA, 4, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_INVALID_ARGUMENT' : 'Invalid subscription filter value',
},
},
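# A hedged illustration of building the subscribe filter bitmask. It
# assumes, per the description above, that bit positions follow the
# notification type ids (e.g. event = 1, data = 4):
#   NOTIF_EVENT = 1 << 1   # 0x02
#   NOTIF_DATA = 1 << 4    # 0x10
#   filter_mask = NOTIF_EVENT | NOTIF_DATA  # receive events and data
#   unack_mask = 0x00000000                 # deliver both unacknowledged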
{
'id' : 23,
'name' : 'getTime',
'description': 'The getTime command returns the current manager UTC time and current absolute slot number (ASN). The time values returned by this command are delayed by queuing and transfer time over the serial connection. For additional precision, an external application should trigger the networkTime notification using the Time Pin.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['uptime', INT, 4, None],
['utcSecs', INT, 8, None],
['utcUsecs', INT, 4, None],
['asn', HEXDATA, 5, None],
['asnOffset', INT, 2, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 26,
'name' : 'setNetworkConfig',
'description': 'The setNetworkConfig command changes network configuration parameters. The response code indicates whether the changes were successfully applied. This change is persistent.\n\nGenerally, changes to network configuration will take effect when the manager reboots. Exceptions are detailed below:\n\n- Max Motes: The new maxMotes value is used as soon as new motes try to join the network, but motes are not removed from the network if the value is set to a number lower than numMotes.\n- Base Bandwidth: Changing baseBandwidth while the network is running does not reallocate bandwidth to Operational motes.',
'request' : [
['networkId', INT, 2, None],
['apTxPower', INTS, 1, None],
['frameProfile', INT, 1, True],
['maxMotes', INT, 2, None],
['baseBandwidth', INT, 2, None],
['downFrameMultVal', INT, 1, None],
['numParents', INT, 1, None],
['ccaMode', INT, 1, True],
['channelList', INT, 2, None],
['autoStartNetwork', BOOL, 1, None],
['locMode', INT, 1, None],
['bbMode', INT, 1, 'backboneFrameMode'],
['bbSize', INT, 1, None],
['isRadioTest', INT, 1, None],
['bwMult', INT, 2, None],
['oneChannel', INT, 1, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_INVALID_ARGUMENT' : 'Validation of the network parameters failed',
'RC_WRITE_FAIL' : 'Flash write error, cannot save new settings',
},
},
{
'id' : 31,
'name' : 'clearStatistics',
'description': 'The clearStatistics command clears the accumulated network statistics. The command does not clear path quality or mote statistics.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 33,
'name' : 'exchangeMoteJoinKey',
'description': 'The exchangeMoteJoinKey command triggers the manager to send a new join key to the specified mote and update the manager\'s ACL entry for the mote. The response contains a callbackId. A commandFinished event notification with this callbackId will be sent when the operation is complete. This change is persistent.',
'request' : [
['macAddress', HEXDATA, 8, None],
['key', HEXDATA, 16, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Mote with specified MAC address is not found',
'RC_INV_STATE' : 'Mote is not in operational state',
'RC_NACK' : 'User commands queue is full',
'RC_WRITE_FAIL' : 'Flash write error, can\'t save new settings',
},
},
{
'id' : 34,
'name' : 'exchangeNetworkId',
'description': 'The exchangeNetworkId command triggers the manager to distribute a new network ID to all the motes in the network. A callbackId is returned in the response. A commandFinished notification with this callbackId will be sent when the operation is complete. This change is persistent.',
'request' : [
['id', INT, 2, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command received',
'RC_IN_PROGRESS' : 'A command is still pending. Wait until a commandFinished notification is received for the previous command before retrying.',
'RC_NACK' : 'User commands queue is full',
'RC_WRITE_FAIL' : 'Flash write error; cannot save new settings',
},
},
{
'id' : 35,
'name' : 'radiotestTx',
'description': 'The radiotestTx command allows the user to initiate a radio transmission test. It may only be executed if the manager has been booted up in radiotest mode (see setNetworkConfig command). Three types of transmission tests are supported:\n\n- Packet transmission\n- Continuous modulation (CM)\n- Continuous wave, i.e. unmodulated signal (CW)\n\nIn a packet transmission test, the device generates a repeatCnt number of packet sequences. Each sequence consists of up to 10 packets with configurable size and delays. Each packet starts with a PHY preamble (5 bytes), followed by a PHY length field (1 byte), followed by data payload of up to 125 bytes, and finally a 2-byte 802.15.4 CRC at the end. Byte 0 of the payload contains the stationId of the sender. Bytes 1 and 2 contain the packet number (in big-endian format) that increments with every packet transmitted. Bytes 3..N contain a counter (from 0..N-2) that increments with every byte inside the payload. Transmissions occur on the set of channels defined by chanMask, selected in pseudo-random order.\n\nIn a continuous modulation test, the device generates a continuous pseudo-random modulated signal, centered at the specified channel. The test is stopped by resetting the device.\n\nIn a continuous wave test, the device generates an unmodulated tone, centered at the specified channel. The test tone is stopped by resetting the device.\n\nChannel numbering is 0-15, corresponding to IEEE 2.4 GHz channels 11-26.\n\nThe station ID is a user-selectable value. It is used in packet tests so that a receiver (see radiotestRx) can identify packets from this device in cases where there may be multiple tests running in the same radio space. This field is not used for CM or CW tests.',
'request' : [
['testType', INT, 1, 'radiotestType'],
['chanMask', HEXDATA, 2, None],
['repeatCnt', INT, 2, None],
['txPower', INTS, 1, None],
['seqSize', INT, 1, 'seqSize'],
['pkLen_1', INT, 1, None],
['delay_1', INT, 2, None],
['pkLen_2', INT, 1, None],
['delay_2', INT, 2, None],
['pkLen_3', INT, 1, None],
['delay_3', INT, 2, None],
['pkLen_4', INT, 1, None],
['delay_4', INT, 2, None],
['pkLen_5', INT, 1, None],
['delay_5', INT, 2, None],
['pkLen_6', INT, 1, None],
['delay_6', INT, 2, None],
['pkLen_7', INT, 1, None],
['delay_7', INT, 2, None],
['pkLen_8', INT, 1, None],
['delay_8', INT, 2, None],
['pkLen_9', INT, 1, None],
['delay_9', INT, 2, None],
['pkLen_10', INT, 1, None],
['delay_10', INT, 2, None],
['stationId', INT, 1, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_IN_PROGRESS' : 'Radiotest is in progress',
'RC_INVALID_ARGUMENT' : 'Invalid "channel" or "txPower" value',
},
'serializer' : 'serialize_radiotestTx',
},
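# A hedged sketch of the packet-test payload layout described above
# (illustration only, not part of the API definition): byte 0 is the
# stationId, bytes 1-2 the big-endian packet number, the rest a counter.
#   def build_test_payload(station_id, packet_num, length):
#       payload = [station_id, (packet_num >> 8) & 0xFF, packet_num & 0xFF]
#       payload += [i & 0xFF for i in range(length - 3)]
#       return payload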
{
'id' : 37,
'name' : 'radiotestRx',
'description': 'The radiotestRx command clears all previously collected statistics and initiates radio reception on the specified channel. It may only be executed if the manager has been booted up in radiotest mode (see setNetworkConfig command). During the test, the device keeps statistics about the number of packets received (with and without error). The test results may be retrieved using the getRadiotestStatistics command.\n\nThe station ID is a user selectable value. It must be set to match the station ID used by the transmitter. Station ID is used to isolate traffic if multiple tests are running in the same radio space.\n\n\n\nChannel numbering is 0-15, corresponding to IEEE 2.4 GHz channels 11-26.',
'request' : [
['mask', HEXDATA, 2, None],
['duration', INT, 2, None],
['stationId', INT, 1, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_IN_PROGRESS' : 'Radiotest is in progress',
'RC_INVALID_ARGUMENT' : 'Invalid mask value',
},
},
{
'id' : 38,
'name' : 'getRadiotestStatistics',
'description': 'This command retrieves statistics from a previously run radiotestRx command. It may only be executed if the manager has been booted up in radiotest mode (see setNetworkConfig command).',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['rxOk', INT, 2, None],
['rxFail', INT, 2, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_IN_PROGRESS' : 'Radiotest is in progress',
'RC_INVALID_COMMAND' : 'No radiotest was started',
},
},
{
'id' : 39,
'name' : 'setACLEntry',
'description': 'The setACLEntry command adds a new entry or updates an existing entry in the Access Control List (ACL). This change is persistent.',
'request' : [
['macAddress', HEXDATA, 8, None],
['joinKey', HEXDATA, 16, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NO_RESOURCES' : 'ACL is full (when adding a new entry)',
'RC_WRITE_FAIL' : 'Flash write error, can\'t save new settings',
},
},
{
'id' : 40,
'name' : 'getNextACLEntry',
'description': 'The getNextACLEntry command returns information about the next mote entry in the access control list (ACL). To begin a search (find the first mote in the ACL), a zero MAC address (0000000000000000) should be sent. There is no mechanism for reading the ACL entry of a specific mote. This call is an iterator. If you call getNextACLEntry with mote A as the argument, your response is the ACL entry for mote B, where B is the next mote in the ACL.',
'request' : [
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['macAddress', HEXDATA, 8, None],
['joinKey', HEXDATA, 16, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_END_OF_LIST' : 'End of ACL is reached',
'RC_NOT_FOUND' : 'No such mote in the ACL',
},
},
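# A hedged sketch of walking the ACL with getNextACLEntry, per the
# description above (assumes a connector exposing dn_getNextACLEntry,
# in the style of the SmartMesh SDK connectors):
#   mac = [0] * 8                  # the zero MAC starts the iteration
#   while True:
#       res = mgr.dn_getNextACLEntry(mac)
#       if res.RC != 0:            # RC_END_OF_LIST terminates the loop
#           break
#       mac = res.macAddress       # continue from the entry just returned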
{
'id' : 41,
'name' : 'deleteACLEntry',
'description': 'The deleteACLEntry command deletes the specified mote from the access control list (ACL). If the macAddress parameter is set to all 0xFFs or all 0x00s, the entire ACL is cleared. This change is persistent.',
'request' : [
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Specified mote is not found in ACL',
'RC_WRITE_FAIL' : 'Flash write error, can\'t save new settings',
},
},
{
'id' : 42,
'name' : 'pingMote',
'description': 'The pingMote command sends a ping (echo request) to the mote specified by MAC address. A unique callbackId is generated and returned with the response. When the response is received from the mote, the manager generates a pingResponse notification with the measured round trip delay and several other parameters. The request is sent using unacknowledged transport, so the mote is not guaranteed to receive the request.',
'request' : [
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Specified mote not found',
'RC_INV_STATE' : 'Mote is not in operational state',
'RC_NO_RESOURCES' : 'User commands queue is full',
'RC_IN_PROGRESS' : 'Previous echo request command is still pending for specified mote',
},
},
{
'id' : 43,
'name' : 'getLog',
'description': 'The getLog command retrieves diagnostic logs from the manager or a mote specified by MAC address.',
'request' : [
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Specified mote not found',
'RC_INV_STATE' : 'Mote is not in operational state',
},
},
{
'id' : 44,
'name' : 'sendData',
'description': "The sendData command sends a packet to a mote in the network. The response contains a callbackId. When the manager injects the packet into the network, it will generate a packetSent notification. It is the responsibility of the customer'sapplication layer at the mote to send a response. It is also the responsibility of thecustomer's application layer to timeout if no response is received at the manager if one is expected.\n\nThe sendData command should be used by applications that communicate directly with the manager. If end-to-end (application to mote) IP connectivity is required, the application should use the sendIP command. For a more comprehensive discussion of the distinction, see the SmartMesh IPNetwork User Guide.",
'request' : [
['macAddress', HEXDATA, 8, None],
['priority', INT, 1, 'packetPriority'],
['srcPort', INT, 2, None],
['dstPort', INT, 2, None],
['options', INT, 1, None],
['data', HEXDATA, None,None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Specified mote is not found',
'RC_INV_STATE' : 'Mote is not in operational state',
'RC_NACK' : 'User commands queue is full or couldn\'t allocate memory buffer for payload',
'RC_INVALID_ARGUMENT' : 'Payload size exceeds maximum allowed value',
},
},
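# A hedged sketch of correlating sendData with its packetSent notification
# (hypothetical application code; assumes a connector exposing dn_sendData
# and a notification listener provided elsewhere in the SDK):
#   res = mgr.dn_sendData(mac, 0, 0xF0B8, 0xF0B8, 0, [0x01, 0x02])
#   pending[res.callbackId] = time.time()
#   # later, on a packetSent notification, match notif.callbackId against
#   # `pending` to learn when the packet entered the wireless network.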
{
'id' : 45,
'name' : 'startNetwork',
'description': 'The startNetwork command tells the manager to allow the network to start forming (begin accepting join requests from devices). The external application must issue the startNetwork command if the autoStartNetwork flag is not set (see setNetworkConfig).\n\nThis command has been deprecated and should not be used in new designs.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_IN_PROGRESS' : 'The network is already started',
},
},
{
'id' : 46,
'name' : 'getSystemInfo',
'description': 'The getSystemInfo command returns system-level information about the hardware and software versions.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['macAddress', HEXDATA, 8, None],
['hwModel', INT, 1, None],
['hwRev', INT, 1, None],
['swMajor', INT, 1, None],
['swMinor', INT, 1, None],
['swPatch', INT, 1, None],
['swBuild', INT, 2, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 47,
'name' : 'getMoteConfig',
'description': 'The getMoteConfig command returns a single mote description as the response. The command takes two arguments, a MAC Address and a flag indicating whether the MAC Address refers to the requested mote or to the next mote in the manager\'s memory. This command may be used to iterate through all motes known by the manager by starting with the macAddress parameter set to 0 and next set to true, and then using the MAC Address of that response as the input to the next call.\n\nThe mote MAC address is used in all query commands, but space constraints require the neighbor health reports to use the Mote ID for identification. Therefore, both identifiers are present in the mote structure.',
'request' : [
['macAddress', HEXDATA, 8, None],
['next', BOOL, 1, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['macAddress', HEXDATA, 8, None],
['moteId', INT, 2, None],
['isAP', BOOL, 1, None],
['state', INT, 1, 'moteState'],
['reserved', INT, 1, None],
['isRouting', BOOL, 1, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'The specified mote doesn\'t exist',
'RC_END_OF_LIST' : 'Last mote in the list has been reached (next = true)',
},
},
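# A hedged sketch of iterating all motes with getMoteConfig, per the
# description above (assumes a connector exposing dn_getMoteConfig):
#   mac = [0] * 8                      # zero MAC + next=True -> first mote
#   while True:
#       res = mgr.dn_getMoteConfig(mac, True)
#       if res.RC != 0:                # RC_END_OF_LIST ends the walk
#           break
#       mac = res.macAddress           # feed back as the "previous" mote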
{
'id' : 48,
'name' : 'getPathInfo',
'description': 'The getPathInfo command returns the parameters of the requested path.',
'request' : [
['source', HEXDATA, 8, None],
['dest', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['source', HEXDATA, 8, None],
['dest', HEXDATA, 8, None],
['direction', INT, 1, 'pathDirection'],
['numLinks', INT, 1, None],
['quality', INT, 1, None],
['rssiSrcDest', INTS, 1, None],
['rssiDestSrc', INTS, 1, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'A path between the specified motes doesn\'t exist',
},
},
{
'id' : 49,
'name' : 'getNextPathInfo',
'description': 'The getNextPathInfo command allows iteration across paths connected to a particular mote. The pathId parameter indicates the previous value in the iteration. Setting pathId to 0 returns the first path. A pathId cannot be used as a unique identifier for a path. It is only valid when associated with a particular mote.',
'request' : [
['macAddress', HEXDATA, 8, None],
['filter', INT, 1, 'pathFilter'],
['pathId', INT, 2, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['pathId', INT, 2, None],
['source', HEXDATA, 8, None],
['dest', HEXDATA, 8, None],
['direction', INT, 1, 'pathDirection'],
['numLinks', INT, 1, None],
['quality', INT, 1, None],
['rssiSrcDest', INTS, 1, None],
['rssiDestSrc', INTS, 1, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'The specified path ID does not exist',
'RC_END_OF_LIST' : 'The specified pathId in the request is the end of the list',
},
},
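# A hedged sketch of listing a mote's paths with getNextPathInfo, per the
# description above (assumes a connector exposing dn_getNextPathInfo):
#   path_id = 0                            # 0 returns the first path
#   while True:
#       res = mgr.dn_getNextPathInfo(mac, 0, path_id)  # filter 0 = all
#       if res.RC != 0:                    # RC_END_OF_LIST ends iteration
#           break
#       path_id = res.pathId               # previous value for next call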
{
'id' : 50,
'name' : 'setAdvertising',
'description': 'The setAdvertising command tells the manager to activate or deactivate advertising. The response is a callbackId. A commandFinished notification with the callbackId is generated when the command propagation is complete.',
'request' : [
['activate', INT, 1, 'advertisementState'],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_IN_PROGRESS' : 'A command is still pending. Wait until a commandFinished notification is received for the previous command before retrying.',
},
},
{
'id' : 51,
'name' : 'setDownstreamFrameMode',
'description': 'The setDownstreamFrameMode command tells the manager to shorten or extend the downstream slotframe. The base slotframe length will be multiplied by the downFrameMultVal for "normal" speed. For "fast" speed the downstream slotframe is the base length. Once this command is executed, the manager switches to manual mode and no longer changes the slotframe size automatically. The response is a callbackId. A commandFinished notification with the callbackId is generated when the command propagation is complete.',
'request' : [
['frameMode', INT, 1, 'downstreamFrameMode'],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_IN_PROGRESS' : 'A command is still pending. Wait until a commandFinished notification is received for the previous command before retrying.',
'RC_INVALID_ARGUMENT' : 'The downFrameMultVal (as set by setNetworkConfig) is equal to 1, so changing the downstream frame mode would have no effect.',
},
},
{
'id' : 53,
'name' : 'getManagerStatistics',
'description': 'The getManagerStatistics command returns dynamic information and statistics about the manager API. The statistics counts are cleared together with all current statistics using clearStatistics.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['serTxCnt', INT, 2, None],
['serRxCnt', INT, 2, None],
['serRxCRCErr', INT, 2, None],
['serRxOverruns', INT, 2, None],
['apiEstabConn', INT, 2, None],
['apiDroppedConn', INT, 2, None],
['apiTxOk', INT, 2, None],
['apiTxErr', INT, 2, None],
['apiTxFail', INT, 2, None],
['apiRxOk', INT, 2, None],
['apiRxProtErr', INT, 2, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 54,
'name' : 'setTime',
'description': 'This command has been deprecated, and should not be used in new designs. When the Manager restarts, it will start counting from 20:00:00 UTC July 2, 2002.\n\nThe setTime command sets the UTC time on the manager. This command may only be executed when the network is not running. If the trigger flag is false, the manager sets the specified time as soon as it receives the setTime command. When the manager receives a Time Pin trigger, it temporarily stores the current time. If a setTime request is received within a short period of time following the trigger, the manager calculates the delay since the trigger and adjusts the time such that the trigger was received at the specified time value.',
'request' : [
['trigger', INT, 1, None],
['utcSecs', INT, 8, None],
['utcUsecs', INT, 4, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed. The manager is ready to set the time.',
'RC_INVALID_ARGUMENT' : 'One of the parameters was invalid',
'RC_VALIDATION_ERROR' : 'Network is running, setTime command is disabled.',
},
},
{
'id' : 55,
'name' : 'getLicense',
'description': 'The getLicense command has been deprecated in Manager >= 1.3.0. There is no need to use a license to enable > 32 mote networks.\n\nThe getLicense command returns the current license key.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['license', HEXDATA, 13, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 56,
'name' : 'setLicense',
'description': 'The setLicense command has been deprecated in Manager >= 1.3.0. There is no longer a need to use a license to enable > 32 mote networks.\n\nThe setLicense command validates and updates the software license key stored in flash. Features enabled or disabled by the license key change will take effect after the device is restarted. If the license parameter is set to all 0x0s, the manager restores the default license. This change is persistent.',
'request' : [
['license', HEXDATA, 13, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_VALIDATION_ERROR' : 'The license key is not valid',
'RC_WRITE_FAIL' : 'Flash write error, cannot save new settings',
},
},
{
'id' : 58,
'name' : 'setCLIUser',
'description': 'The setCLIUser command sets the password that must be used to log into the command line for a particular user role. The user roles are:\n\n- Viewer - read-only access to non-sensitive information\n- User - read-write access\n\nThis change is persistent.',
'request' : [
['role', INT, 1, 'userRole'],
['password', HEXDATA, 16, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_WRITE_FAIL' : 'Flash write error, can\'t save new settings',
},
},
{
'id' : 59,
'name' : 'sendIP',
'description': 'The sendIP command sends a 6LoWPAN packet to a mote in the network. The response contains a callbackId. When the manager injects the packet into the network, it will generate a packetSent notification with the callbackId. The application is responsible for constructing a valid 6LoWPAN packet. The packet is sent to the mote best-effort, so the application should deal with responses and timeouts, if any.\n\nThe sendIP command should be used by applications that require end-to-end IP connectivity. For applications that do not require end-to-end IP connectivity, the sendData command provides a simpler interface without requiring the application to understand 6LoWPAN encapsulation. For a more comprehensive discussion of the distinction, see the SmartMesh IP Network User Guide.',
'request' : [
['macAddress', HEXDATA, 8, None],
['priority', INT, 1, 'packetPriority'],
['options', INT, 1, None],
['encryptedOffset', INT, 1, None],
['data', HEXDATA, None,None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['callbackId', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Specified mote is not found',
'RC_INV_STATE' : 'Mote is not in operational state',
'RC_NACK' : 'User commands queue is full or could not allocate memory buffer for payload',
'RC_INVALID_ARGUMENT' : 'Payload size exceeds maximum allowed value or the 6LoWPAN packet is invalid',
},
},
{
'id' : 61,
'name' : 'restoreFactoryDefaults',
'description': 'The restoreFactoryDefaults command restores the default configuration and clears the ACL. This change is persistent.\n\nFor Manager versions <1.3.0 that required a license, the license used to enable optional features is preserved during a restore.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_WRITE_FAIL' : 'Flash write error; cannot save new settings',
},
},
{
'id' : 62,
'name' : 'getMoteInfo',
'description': 'The getMoteInfo command returns dynamic information for the specified mote.',
'request' : [
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['macAddress', HEXDATA, 8, None],
['state', INT, 1, 'moteState'],
['numNbrs', INT, 1, None],
['numGoodNbrs', INT, 1, None],
['requestedBw', INT, 4, None],
['totalNeededBw', INT, 4, None],
['assignedBw', INT, 4, None],
['packetsReceived', INT, 4, None],
['packetsLost', INT, 4, None],
['avgLatency', INT, 4, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'No such mote',
},
},
{
'id' : 63,
'name' : 'getNetworkConfig',
'description': 'The getNetworkConfig command returns general network configuration parameters, including the Network ID, bandwidth parameters and number of motes.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['networkId', INT, 2, None],
['apTxPower', INTS, 1, None],
['frameProfile', INT, 1, True],
['maxMotes', INT, 2, None],
['baseBandwidth', INT, 2, None],
['downFrameMultVal', INT, 1, None],
['numParents', INT, 1, None],
['ccaMode', INT, 1, True],
['channelList', INT, 2, None],
['autoStartNetwork', BOOL, 1, None],
['locMode', INT, 1, None],
['bbMode', INT, 1, 'backboneFrameMode'],
['bbSize', INT, 1, None],
['isRadioTest', INT, 1, None],
['bwMult', INT, 2, None],
['oneChannel', INT, 1, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 64,
'name' : 'getNetworkInfo',
'description': 'The getNetworkInfo command returns dynamic network information and statistics.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['numMotes', INT, 2, None],
['asnSize', INT, 2, None],
['advertisementState', INT, 1, True],
['downFrameState', INT, 1, 'downstreamFrameMode'],
['netReliability', INT, 1, None],
['netPathStability', INT, 1, None],
['netLatency', INT, 4, None],
['netState', INT, 1, 'networkState'],
['ipv6Address', HEXDATA, 16, None],
['numLostPackets', INT, 4, None],
['numArrivedPackets', INT, 8, None],
['maxNumbHops', INT, 1, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 65,
'name' : 'getMoteConfigById',
'description': 'The getMoteConfigById command returns a single mote description as the response. The command takes one argument, the short address of a mote (Mote ID). The command returns the same response structure as the getMoteConfig command.',
'request' : [
['moteId', INT, 2, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['macAddress', HEXDATA, 8, None],
['moteId', INT, 2, None],
['isAP', BOOL, 1, None],
['state', INT, 1, 'moteState'],
['reserved', INT, 1, None],
['isRouting', BOOL, 1, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'No such mote',
},
},
{
'id' : 66,
'name' : 'setCommonJoinKey',
'description': 'The setCommonJoinKey command will set a new value for the common join key. The common join key is used to decrypt join messages only if the ACL is empty.',
'request' : [
['key', HEXDATA, 16, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 67,
'name' : 'getIPConfig',
'description': 'The getIPConfig command returns the manager\'s IP configuration parameters, including the IPv6 address and mask.',
'request' : [
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['ipv6Address', HEXDATA, 16, None],
['mask', HEXDATA, 16, None],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
},
},
{
'id' : 68,
'name' : 'setIPConfig',
'description': 'The setIPConfig command sets the IPv6 prefix of the mesh network. Only the upper 8 bytes of the IPv6 address are relevant: the lower 8 bytes of the IPv6 address are ignored, and the lower 8 bytes of the mask field are reserved and should be set to 0. This change is persistent.',
'request' : [
['ipv6Address', HEXDATA, 16, None],
['mask', HEXDATA, 16, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_WRITE_FAIL' : 'Flash write error, can\'t save new settings',
},
},
{
'id' : 69,
'name' : 'deleteMote',
'description': 'The deleteMote command deletes a mote from the manager\'s list. A mote can only be deleted if it is in the Lost or Unknown state. This change is persistent.',
'request' : [
['macAddress', HEXDATA, 8, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
],
},
'responseCodes': {
'RC_OK' : 'Command successfully completed',
'RC_NOT_FOUND' : 'Specified mote is not found',
'RC_INV_STATE' : 'Mote state is not Lost or mote is access point',
'RC_WRITE_FAIL' : 'Flash write error, can\'t save new settings',
},
},
{
'id' : 70,
'name' : 'getMoteLinks',
'description': 'The getMoteLinks command returns information about links assigned to the mote. The response contains a list of links starting with the Nth link on the mote, where N is supplied as the idx parameter in the request. To retrieve all links on the device the user can call this command with an idx that increments by the number of links returned with the prior response, until the command returns the RC_END_OF_LIST response code. Note that links assigned to a mote may change between API calls.',
'request' : [
['macAddress', HEXDATA, 8, None],
['idx', INT, 2, None],
],
'response' : {
'FIELDS': [
[RC, INT, 1, True],
['idx', INT, 2, None],
['utilization', INT, 1, None],
['numLinks', INT, 1, None],
['frameId_1', INT, 1, None], # 1
['slot_1', INT, 4, None],
['channelOffset_1', INT, 1, None],
['moteId_1', INT, 2, None],
['flags_1', INT, 1, None],
['frameId_2', INT, 1, None], # 2
['slot_2', INT, 4, None],
['channelOffset_2', INT, 1, None],
['moteId_2', INT, 2, None],
['flags_2', INT, 1, None],
['frameId_3', INT, 1, None], # 3
['slot_3', INT, 4, None],
['channelOffset_3', INT, 1, None],
['moteId_3', INT, 2, None],
['flags_3', INT, 1, None],
['frameId_4', INT, 1, None], # 4
['slot_4', INT, 4, None],
['channelOffset_4', INT, 1, None],
['moteId_4', INT, 2, None],
['flags_4', INT, 1, None],
['frameId_5', INT, 1, None], # 5
['slot_5', INT, 4, None],
['channelOffset_5', INT, 1, None],
['moteId_5', INT, 2, None],
['flags_5', INT, 1, None],
['frameId_6', INT, 1, None], # 6
['slot_6', INT, 4, None],
['channelOffset_6', INT, 1, None],
['moteId_6', INT, 2, None],
['flags_6', INT, 1, None],
['frameId_7', INT, 1, None], # 7
['slot_7', INT, 4, None],
['channelOffset_7', INT, 1, None],
['moteId_7', INT, 2, None],
['flags_7', INT, 1, None],
['frameId_8', INT, 1, None], # 8
['slot_8', INT, 4, None],
['channelOffset_8', INT, 1, None],
['moteId_8', INT, 2, None],
['flags_8', INT, 1, None],
['frameId_9', INT, 1, None], # 9
['slot_9', INT, 4, None],
['channelOffset_9', INT, 1, None],
['moteId_9', INT, 2, None],
['flags_9', INT, 1, None],
['frameId_10', INT, 1, None], # 10
['slot_10', INT, 4, None],
['channelOffset_10', INT, 1, None],
['moteId_10', INT, 2, None],
['flags_10', INT, 1, None],
],
},
'responseCodes': {
'RC_NOT_FOUND' : 'No such mote.',
'RC_INV_STATE' : 'Mote is not in operational state',
'RC_END_OF_LIST' : 'The index requested is greater than number of links.',
},
},
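# A hedged sketch of paging through links with getMoteLinks, per the
# description above (assumes a connector exposing dn_getMoteLinks):
#   idx = 0
#   while True:
#       res = mgr.dn_getMoteLinks(mac, idx)
#       if res.RC != 0:                # RC_END_OF_LIST ends the paging
#           break
#       idx += res.numLinks            # advance by links just returned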
]
subCommandsEvents = [
{
'id' : 0,
'name' : 'eventMoteReset',
'description': 'This notification is sent when a user-initiated reset is executed by the manager.',
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
],
},
},
{
'id' : 1,
'name' : 'eventNetworkReset',
'description': 'This notification is sent when the manager starts the network. This event has no eventData fields.',
'response' : {
'FIELDS': [
],
},
},
{
'id' : 2,
'name' : 'eventCommandFinished',
'description': 'The commandFinished notification is sent when a command associated with the provided callback id finishes executing.',
'response' : {
'FIELDS': [
['callbackId', HEXDATA, 4, None],
['rc', INT, 1, 'commandFinishedResult'],
],
},
},
{
'id' : 3,
'name' : 'eventMoteJoin',
'description': 'This notification is sent when a mote joins the network.',
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
],
},
},
{
'id' : 4,
'name' : 'eventMoteOperational',
'description': 'This notification is sent when a mote that joins the network becomes operational.',
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
],
},
},
{
'id' : 5,
'name' : 'eventMoteLost',
'description': "This notification is sent when a mote's state changes to Lost , which indicates that the mote is not responding to downstream messages.",
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
],
},
},
{
'id' : 6,
'name' : 'eventNetworkTime',
'description': 'The time notification is triggered by the client asserting the TIME pin or by calling the getTime command. This notification contains the time when the TIME pin was asserted (or the getTime command was processed) expressed as:\n\n- ASN The absolute slot number (the number of timeslots since " 7/2/2002 8:00:00 PM PST" if UTC is set on manager, otherwise since Jan 1, 1970)\n\n\n- Uptime The number of seconds since the device was booted\n- Unixtime The number of seconds and microseconds since Jan 1, 1970 in UTC',
'response' : {
'FIELDS': [
['uptime', INT, 4, None],
['utcSecs', INT, 8, None],
['utcUsecs', INT, 4, None],
['asn', HEXDATA, 5, None],
['asnOffset', INT, 2, None],
],
},
},
{
'id' : 7,
'name' : 'eventPingResponse',
'description': 'This notification is sent when a reply is received from a mote ping.',
'response' : {
'FIELDS': [
['callbackId', INT, 4, None],
['macAddress', HEXDATA, 8, None],
['delay', INT, 4, None],
['voltage', INT, 2, None],
['temperature', INTS, 1, None],
],
},
},
{
'id' : 10,
'name' : 'eventPathCreate',
'description': 'This notification is sent when the manager creates a connection (path) between two motes.',
'response' : {
'FIELDS': [
['source', HEXDATA, 8, None],
['dest', HEXDATA, 8, None],
['direction', INT, 1, 'pathDirection'],
],
},
},
{
'id' : 11,
'name' : 'eventPathDelete',
'description': 'This notification is sent when the manager removes a connection (path) between two motes.',
'response' : {
'FIELDS': [
['source', HEXDATA, 8, None],
['dest', HEXDATA, 8, None],
['direction', INT, 1, 'pathDirection'],
],
},
},
{
'id' : 12,
'name' : 'eventPacketSent',
      'description': 'The packetSent notification is generated when the client\'s packet is removed from the manager\'s queue and sent into the wireless network.',
'response' : {
'FIELDS': [
['callbackId', INT, 4, None],
['rc', INT, 1, None],
],
},
},
{
'id' : 13,
'name' : 'eventMoteCreate',
'description': 'This event is sent when a mote joins the manager for the first time.',
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
['moteId', INT, 2, None],
],
},
},
{
'id' : 14,
'name' : 'eventMoteDelete',
      'description': 'This notification is sent when a mote is deleted as a result of the moteDelete command.',
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
['moteId', INT, 2, None],
],
},
},
]
subCommandsNotification = [
{
'id' : 1,
'name' : 'notifEvent',
'description': 'Event notification',
'response' : {
'FIELDS': [
['eventId', INT, 4, None],
[SUBID2, INT, 1, None],
],
},
'subCommands': subCommandsEvents,
},
{
'id' : 2,
'name' : 'notifLog',
      'description': 'A log notification is generated in response to the getLog command. Each log notification contains a message from the mote\'s log.',
'response' : {
'FIELDS': [
['macAddress', HEXDATA, 8, None],
['logMsg', HEXDATA, None,None],
],
},
},
{
'id' : 4,
'name' : 'notifData',
'description': 'The data notification contains a header and a variable length array of binary data. The length of the data is determined based on the length of the notification.\n\nThe manager forwards all packets received on its IP address and non-manager ports as data notifications.',
'response' : {
'FIELDS': [
['utcSecs', INT, 8, None],
['utcUsecs', INT, 4, None],
['macAddress', HEXDATA, 8, None],
['srcPort', INT, 2, None],
['dstPort', INT, 2, None],
['data', HEXDATA, None,None],
],
},
},
{
'id' : 5,
'name' : 'notifIpData',
'description': "The ipData notification contains full IP packet sent by the mote, including 6LoWPAN header, UDP header, and the UDP payload. Manager generates this notification when it receives packet from a mote with destination other than manager's own IP address. The size of the data field can be calculated by subtracting the fixed header size (up to macAddress) from the size of overall notification packet.",
'response' : {
        'FIELDS': [
['utcSecs', INT, 8, None],
['utcUsecs', INT, 4, None],
['macAddress', HEXDATA, 8, None],
['data', HEXDATA, None,None],
],
},
},
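# Worked example for the size rule in the notifIpData description above: the
# fixed header is utcSecs (8) + utcUsecs (4) + macAddress (8) = 20 bytes, so a
# 57-byte notification payload carries 57 - 20 = 37 bytes of IP data.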
{
'id' : 6,
'name' : 'notifHealthReport',
'description': 'The healthReport notifications include the raw payload of health reports received from devices. The payload contains one or more specific health report messages. Each message contains an identifier, length and variable-sized data. The individual healthReport message structures are defined below.',
'response' : {
        'FIELDS': [
['macAddress', HEXDATA, 8, None],
['payload', HEXDATA, None,None],
],
},
},
]
# We redefine this attribute inherited from ApiDefinition. See
# ApiDefinition for a full description of the structure of this field.
notifications = [
{
'id' : 3,
'name' : 'manager_hello',
      'description': 'Sent by the manager to initiate a new session with a client.',
'response' : {
'FIELDS': [
['version', INT, 1, None],
['mode', INT, 1, None],
],
},
},
{
'id' : 20,
'name' : 'notification',
'description': '',
'response' : {
'FIELDS': [
[SUBID1, INT, 1, None],
]
},
'subCommands': subCommandsNotification,
},
]
def serialize_radiotestTx(self, commandArray, cmd_params):
# start by serializing with complete API definition
returnVal = self.default_serializer(commandArray, cmd_params)
# remove the unused sequence definitions
startIndex = 7+cmd_params['seqSize']*3
numVals = 10-cmd_params['seqSize']
returnVal[1][startIndex:startIndex+numVals*3] = []
return returnVal
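# Worked example for the splice above (illustrative): with
# cmd_params['seqSize'] == 2 the used sequence bytes end at
# startIndex = 7 + 2*3 = 13, and numVals = 10 - 2 = 8 unused sequence
# definitions (8*3 = 24 bytes) are removed from the serialized payload.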
|
[
"twatteyne@linear.com"
] |
twatteyne@linear.com
|
361d332a36051cf2846855e434f8291687e94b7a
|
0c6332a4ab51a2b762d74c6a73b321838637fa06
|
/Haystome2/wsgi.py
|
b04a2d7141c334fa57e2376e242c5508d348ce4a
|
[] |
no_license
|
bopopescu/Haystome
|
58d097125506a35b4ded60f52d8ab9ff28060c25
|
9242f96a17fdd63696179e9015f54f7b1e000d9b
|
refs/heads/master
| 2022-11-20T01:33:20.999462
| 2019-12-05T01:31:41
| 2019-12-05T01:31:41
| 280,994,289
| 0
| 0
| null | 2020-07-20T02:33:35
| 2020-07-20T02:33:34
| null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for Haystome2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Haystome2.settings')
application = get_wsgi_application()
|
[
"tan.rathanah@gmail.com"
] |
tan.rathanah@gmail.com
|
fb86da0ccb95eb27bef39c4322166f68a0ee5aaa
|
4acab9fb77c551ee58b00df025431376f11106be
|
/1. PythonChallenges/reverse.py
|
b7621e2809df5f1ec1bf36231f485545e94ccaab
|
[] |
no_license
|
MakeSchool-17/twitter-bot-python-samlee405
|
e7404f278eeb07097577eefbe5f549e450087089
|
09a14af858af89292d4d636d91f7be2d35118cfe
|
refs/heads/master
| 2021-01-18T23:07:09.913608
| 2016-11-28T21:24:44
| 2016-11-28T21:24:44
| 72,684,235
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
import sys
def main():
argumentList = sys.argv
del argumentList[0]
inputString = " ".join(argumentList)
newString = ""
index = len(inputString)
for _ in range(0, index):
newString = newString + inputString[index - 1]
index = index - 1
print("The reverse of your string is: " + newString)
main()
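# For reference, Python's extended slice reverses a string in one step, which
# would make the index loop above unnecessary:
#   newString = inputString[::-1]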
|
[
"samlee405@yahoo.com"
] |
samlee405@yahoo.com
|
2c7925a442418e9717b78845dced5ee0d39b26da
|
d7775fa094ce48785c4a9abbc00ed1e3cd3a8ba8
|
/api/tests/test_hotel.py
|
7907bba8fd4281d2ecac2e1bec6f9efdce4c25b3
|
[] |
no_license
|
fabricioadenir/tourismAPI
|
75580ef45ef51f9123dc6879ef9ed9c1e9b956f1
|
8e65dcde7c03fc2fbf8d015b6db2ebc96fbcefdb
|
refs/heads/main
| 2023-02-17T19:16:01.613525
| 2021-01-19T16:50:31
| 2021-01-19T16:50:31
| 321,408,077
| 0
| 0
| null | 2021-01-19T16:50:32
| 2020-12-14T16:31:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from django.contrib.auth.models import User
from api.models import Hotel, Category, City, Country
from api.views import HotelViewSet
class HotelViewSetTestClass(TestCase):
"""
Responsible for testing the data structure.
"""
def setUp(self):
Category.objects.create(name="Hospedagem de Verรฃo")
City.objects.create(
name="Florianรณpolis",
state="SC"
)
Country.objects.create(
name="Brasil"
)
category = Category.objects.get(id=1)
city = City.objects.get(id=1)
country = Country.objects.get(id=1)
self.hotel = Hotel.objects.create(
hotel_name="Alameda Alegra",
image="http://tourism.com/images/alameda-alegra.jpg",
city=city,
country=country,
category=category,
price=3000
)
self.username = 'tourism'
self.password = 'tourism_pwd'
self.user = User.objects.create_superuser(
self.username, 'test@example.com', self.password)
def test_without_authentication(self):
"""
Test without authentication
"""
api_request = APIRequestFactory().get("")
detail_view = HotelViewSet.as_view({'get': 'retrieve'})
response = detail_view(api_request, pk=self.hotel.pk)
self.assertEqual(response.status_code, 403)
def test_with_authentication(self):
"""
Auth using force_authenticate
"""
factory = APIRequestFactory()
user = User.objects.get(username=self.username)
detail_view = HotelViewSet.as_view({'get': 'retrieve'})
api_request = factory.get('')
force_authenticate(api_request, user=user)
response = detail_view(api_request, pk=self.hotel.pk)
self.assertEqual(response.status_code, 200)
|
[
"fabricio.adenir@softplan.com.br"
] |
fabricio.adenir@softplan.com.br
|
a5cd54ab133af5c5678d2b51970ce91d246f1b8b
|
c4c63c0ee30b655ef335afa6bce361746be27d9f
|
/app/User/models.py
|
221e561bd86cb7f0a2713eea845b34d931fc93a4
|
[] |
no_license
|
neofit77/food-recipes-web-service
|
6227bec14627cf9c52af6203b40794c57eedfd52
|
e16faaf13b9dff933e0c0655f47ec00fafe256f1
|
refs/heads/main
| 2023-07-14T19:43:02.850531
| 2021-08-20T08:50:13
| 2021-08-20T08:50:13
| 396,765,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from rest_framework_simplejwt.tokens import RefreshToken
class UserManager(BaseUserManager):
"""Overide default methods for django User model"""
def create_user(self, username, email, password=None):
"""Overide create_user method"""
if username is None:
raise TypeError('Users should have a username')
user = self.model(username=username, email=self.normalize_email(email))
        user.set_password(password)  # set_password hashes internally; wrapping it in make_password would double-hash
user.save()
return user
def create_superuser(self, username, email, password):
"""Overide method that create superuser"""
if password is None:
            raise TypeError('Password should not be None')
user = self.create_user(username, email, password)
user.is_superuser = True
user.is_staff = True
user.save()
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Overide built django class User"""
username = models.CharField(max_length=40, unique=True)
email = models.EmailField(max_length=40, unique=True)
first_name = models.CharField(max_length=40)
last_name = models.CharField(max_length=40)
company_name = models.CharField(max_length=100, default='')
company_domain = models.CharField(max_length=50, default='')
city = models.CharField(max_length=60, default='')
is_staff = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now=True)
updated_at = models.DateTimeField(auto_now=True)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
def __str__(self):
return self.username
def tokens(self):
refresh = RefreshToken.for_user(self)
return {
'refresh': str(refresh),
'access': str(refresh.access_token)
}
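# Hedged usage sketch (assumes rest_framework_simplejwt is installed and
# configured in settings): issuing a token pair for an existing user.
#   user = User.objects.get(username='alice')
#   pair = user.tokens()   # {'refresh': '<jwt>', 'access': '<jwt>'}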
|
[
"aleksandar.petrovicvr@gmail.com"
] |
aleksandar.petrovicvr@gmail.com
|
97beaeedfc15447c373d82819beeb0e8631738aa
|
d4a891ea1d8fd87068daee7a8dfa2f0968add0e7
|
/MyDProject/album/migrations/0002_belongs.py
|
5bdc046e16da380927217a08d90683406b4505de
|
[] |
no_license
|
pravinkmrp/MyProjectRepo
|
f5a320ba0604b37ae69ee932444e283697910dfe
|
68ce63aed02cd7cbef7a07b01ba0553b256e8700
|
refs/heads/master
| 2021-01-22T02:34:22.026108
| 2020-09-17T11:48:43
| 2020-09-17T11:48:43
| 30,541,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('album', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Belongs',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('folder', models.ForeignKey(to='album.Folder')),
('parent', models.ForeignKey(related_name='parent', to='album.Folder')),
],
options={
},
bases=(models.Model,),
),
]
|
[
"pravinkmrp@gmail.com"
] |
pravinkmrp@gmail.com
|
950f014ebeca729c8a7fc11225d996b5ce09836c
|
ead3782e2d3cd07bf0b513ce5bf4b0a9f443e8e0
|
/classes_play.py
|
66933044fd66f5bfccf0c4e0f14a7e740a4f74a8
|
[] |
no_license
|
racinmat/kralupy_coding
|
013fa6209ef6de9c633563f5609fc6ef018c61b2
|
c61646c3fbe38c95d46900c398b4d34143d063d0
|
refs/heads/master
| 2020-07-06T03:27:54.614442
| 2019-08-25T09:52:32
| 2019-08-25T09:52:32
| 202,874,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,869
|
py
|
class Inventory:
my_items = []
def __init__(self, items) -> None:
super().__init__()
self.items = items
print('jรก jsem konstruktor')
def filled_size(self):
return len(self.items)
def __repr__(self) -> str:
return f'Inventory(items={self._items})'
def __contains__(self, item):
return item in self._items
def loot_inventory(self, another: 'Inventory'):
self.items += another.items
another.items = []
def __iter__(self):
return iter(self._items)
@property
def items(self):
return self._items.copy()
@items.setter
def items(self, other_items):
self._items = other_items
class LimitedInventory(Inventory):
def __init__(self, items, max_capacity) -> None:
self.max_capacity = max_capacity
super().__init__(items)
def loot_inventory(self, another: 'Inventory'):
free_space = self.max_capacity - self.filled_size()
if another.filled_size() > free_space:
self.items += another.items[:free_space]
another.items = another.items[free_space:]
else:
self.items += another.items
another.items = []
@property
def items(self):
return self._items.copy()
@items.setter
def items(self, other_items):
if len(other_items) > self.max_capacity:
raise Exception('inventory is too small')
self._items = other_items
def create_inventories():
    inventory_1 = Inventory([
        'small item', 'large item'])
    inventory_2 = LimitedInventory([
        'small item', 'medium item', 'large item'], 4)
return [inventory_1, inventory_2]
def main():
    inventory_1 = Inventory([
        'wooden spoon', 'sword of destiny'])
    inventory_2 = LimitedInventory([
        'wooden spoon', 'sword of destiny', '30 gold coins'], 4)
print(create_inventories())
# print(inventory_1.filled_size(), inventory_1.items)
# print(inventory_2.filled_size(), inventory_2.items)
# print(inventory_1)
# print('pลidรกvรกm louฤ')
# inventory_1.items.append('louฤ')
# print('pลidรกvรกm louฤ hackem')
# inventory_1._items.append('louฤ')
# # print(inventory_1)
# # inventory_1.items = []
# # print(inventory_1)
# # print('louฤ' in inventory_1)
# inventory_1.loot_inventory(inventory_2)
# print(inventory_1)
# print(inventory_2)
# inventory_2.loot_inventory(inventory_1)
# print(inventory_1)
# print(inventory_2)
item = 'mลฏj velkรฝ item'
print('item', item)
for item in inventory_1:
print(item)
print('item', item)
print(inventory_1.my_items)
print(inventory_2.my_items)
    inventory_1.my_items.append('your items')
print(inventory_1.my_items)
print(inventory_2.my_items)
if __name__ == '__main__':
main()
|
[
"racinmat@fel.cvut.cz"
] |
racinmat@fel.cvut.cz
|
4803c21ff84494043745787f4b9e8fcf6ed4cefc
|
54ba419e5bad49858f6a861472c98ef307cf25d3
|
/pages/views.py
|
0e93366dc22b857ae6c8bc64f812d9eb06f630d5
|
[] |
no_license
|
shuvo14051/real-estate51
|
84ccbd777e29598f50fd55efc2adc4ceebe51119
|
13987bcad3395cfe1a78decdda809ddf11a891da
|
refs/heads/master
| 2022-11-30T22:36:55.300239
| 2020-08-16T10:08:22
| 2020-08-16T10:08:22
| 287,917,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
from django.shortcuts import render
from listings.choice import price_choices, bedroom_choices, state_choices
from listings.models import Listing
from realtors.models import Realtor
def index(request):
listings = Listing.objects.order_by('-list_date').filter(is_published = True)[:3]
context = {
'listings':listings,
'state_choices':state_choices,
'bedroom_choices':bedroom_choices,
'price_choices':price_choices,
}
return render(request, 'pages/index.html', context)
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtors = Realtor.objects.all().filter(is_mvp = True)
context = {
'realtors':realtors,
'mvp_realtors': mvp_realtors,
}
return render(request, 'pages/about.html', context)
|
[
"shuvo1137017@gmail.com"
] |
shuvo1137017@gmail.com
|
e4436c2940f82a4b464c4040bf76269a52189500
|
286b6dc56323f982092ffafbfac8a32dbbaeb7ef
|
/training_assignments/Day_05/logging_1.py
|
ab38671c36e6f080f41b3a6e1bf5506b890b451a
|
[] |
no_license
|
learndevops19/pythonTraining-CalsoftInc
|
ccee0d90aadc00bfdb17f9578620f6bf92f80a4c
|
c5f61516b835339b394876edd1c6f62e7cc6f0c3
|
refs/heads/master
| 2021-02-05T04:27:17.590913
| 2019-11-20T17:27:06
| 2019-11-20T17:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def Add(a, b):
    logger.info("in Add function")
    return a+b
def Sub(a, b):
    logger.info("in sub function")
    return a-b
def Mul(a, b):
    logger.info("in mul function")
    return a*b
def Div(a, b):
logger.info("in divide function")
try:
c=a/b
return c
except ZeroDivisionError:
logger.error("0 as a second argument")
Add(1,2)
Mul(1,2)
Sub(1,2)
Div(1,0)
|
[
"rajpratik71@gmail.com"
] |
rajpratik71@gmail.com
|
393532f42e7a054d841d2b238db0d1525894c730
|
3a298c93b67386392d3dee243671f2c101decf01
|
/leetcode/patterns/08-tree-dfs/15_delete_nodes_and_return_forest.py
|
004f1730162a62940d860e9aa5c02e17c05792eb
|
[] |
no_license
|
Zahidsqldba07/coding-problems-2
|
ffbc8408e4408fc846c828af2ec50a9d72e799bc
|
020bffbd14ca9993f1e678181ee7df761f1533de
|
refs/heads/master
| 2023-06-26T11:05:34.089697
| 2021-07-21T15:16:10
| 2021-07-21T15:16:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
class Solution:
    def delNodes(self, root, to_delete):
        self.nodes = []
        self.toDelete = set(to_delete)
        self.delNodesRec(root)
        # the original root joins the forest unless it was itself deleted
        if root.val not in self.toDelete:
            self.nodes.append(root)
        return self.nodes
    def delNodesRec(self, node):
        # post-order pruning: children are processed first, so any subtree
        # promoted to the forest is already free of deleted nodes
        if node is None: return None
        node.left = self.delNodesRec(node.left)
        node.right = self.delNodesRec(node.right)
        if node.val in self.toDelete:
            # a deleted node's surviving children become new forest roots
            if node.left is not None:
                self.nodes.append(node.left)
            if node.right is not None:
                self.nodes.append(node.right)
            return None
        return node
|
[
"alvee.akand@outlook.com"
] |
alvee.akand@outlook.com
|
ae60e70aebbd79c68c1790edc4625e528d4ed8a9
|
6e8b920ddc4ac45c80796e4db08c12ce65058429
|
/Python/Black-Hat-Python/chapter 2/tcpsclient-gui.py
|
8c6526db1341042d08f8e14f1ae20088ae7a15df
|
[] |
no_license
|
arpbadger/Library
|
d4a892c77917b593a457ebda5ca40dacf1ec99ce
|
a078fe30f62160a82d798d86e1fa6224116da018
|
refs/heads/master
| 2020-06-07T20:45:36.143018
| 2019-07-23T00:46:44
| 2019-07-23T00:46:44
| 193,090,914
| 0
| 0
| null | 2019-07-23T00:46:45
| 2019-06-21T12:03:02
|
C
|
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
# tcpclient-badger-gui
# developed by arpbadger with assistance from Justin Seitz, "Black Hat Python"
# The GUI version of tcpclient-badger
from Tkinter import *
import socket
target_host = ""
target_port = 0
# Print title page
def title_sequence():
print ('''
__ _ _ ___ _
/\ \ \___| |___ _____ _ __| | __ / __\ __ _ __| | __ _ ___ _ __
/ \/ / _ \ __\ \ /\ / / _ \| '__| |/ / /__\/// _` |/ _` |/ _` |/ _ \ '__|
/ /\ / __/ |_ \ V V / (_) | | | < / \/ \ (_| | (_| | (_| | __/ |
\_\ \/ \___|\__| \_/\_/ \___/|_| |_|\_\ \_____/\__,_|\__,_|\__, |\___|_|
|___/
''')
return
# get domain or target IP and port from the user
def add_target():
global target_host
target_host = e.get()
def add_port():
global target_port
target_port = e1.get()
target_port = int(target_port)
def get_target():
return target_host,target_port
# Request http connection
def connect(target_host,target_port):
    # edit the target_host variable, i.e. trim its first two characters (intended to strip a "www"-style prefix)
ftarget_host = target_host[2:]
#create a socket object
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#connect the client
client.connect((target_host,target_port))
#send some data
client.send("GET / HTTP/1.1\r\nHost: "+ftarget_host+"\r\n\r\n")
#receive some data
response = client.recv(4096)
print(response)
# Main sequence
def main():
title_sequence()
get_target()
connect(target_host,target_port)
# Create GUI Interface
root = Tk()
root.title('TCPClient Badger')
# Create target entry field
e = Entry(root)
e.pack()
e.focus_set()
# Create 'add target' button
b = Button(root,text='Add target',command = add_target)
b.pack()
# Create port entry field
e1 = Entry(root)
e1.pack()
e1.focus_set()
# Create 'add port' button
c = Button(root,text='Add Port', command = add_port)
c.pack()
d = Button(root,text='Run Badger', command = main)
d.pack(side='bottom')
# run gui interface
root.mainloop()
|
[
"arpbadger@protonmail.com"
] |
arpbadger@protonmail.com
|
c546a6a6e6f1bac8f1c5d07cc5d4459195450daa
|
85c4417294b234ba3e3ef652d1cd5724706d77e1
|
/main.py
|
1b28738b21267e9c7370c316182d6fb723630add
|
[] |
no_license
|
iamabdil/workout-tracker
|
e9df8e0df4d7f9bdf3442f1056f8dcdb19d78b2d
|
e552de4246c14c3df9377015b6f4deea91d63bd2
|
refs/heads/master
| 2023-04-25T22:37:07.870057
| 2021-05-21T19:10:47
| 2021-05-21T19:10:47
| 369,631,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
import requests
from datetime import datetime
import os
GENDER = "Male"
WEIGHT_KG = 81
HEIGHT_CM = 179
AGE = 22
# APP_ID = "63c48190"
APP_ID = os.environ['APP_ID']
# API_KEY = "36924c0dc3e1dea5ddc559a75ad334e0"
API_KEY = os.environ['API_KEY']
USERNAME = ""
USERNAME = os.environ['USERNAME']
PROJECT_NAME = "Workout Tracking"
SHEET_NAME = ""
exercise_endpoint = "https://trackapi.nutritionix.com/v2/natural/exercise"
sheety_endpoint = "https://api.sheety.co/dca6ac72c87dc84251d90fc67ef39e87/workoutTracking/workouts"
exercise_text = input("Tell me which exercise you did!")
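# e.g. exercise_text = "ran 5 km and did 30 minutes of yoga" -- the
# natural/exercise endpoint parses free-form text like this (illustrative).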
headers = {
"x-app-id": APP_ID,
"x-app-key": API_KEY,
}
params = {
"query": exercise_text,
"gender": GENDER,
"weight_kg": WEIGHT_KG,
"height_cm": HEIGHT_CM,
"age": AGE,
}
response = requests.post(exercise_endpoint, json=params, headers=headers)
response.raise_for_status()
result = response.json()
today_date = datetime.now().strftime("%d/%m/%Y")
now_time = datetime.now().strftime("%X")
for exercise in result["exercises"]:
sheet_inputs = {
"workout": {
"date": today_date,
"time": now_time,
"exercise": exercise["name"].title(),
"duration": exercise["duration_min"],
"calories": exercise["nf_calories"],
}
}
sheety_response = requests.post(sheety_endpoint, json=sheet_inputs)
# Basic Authentication
sheety_response = requests.post(
sheety_endpoint,
json=sheet_inputs,
auth=(
USERNAME,
"abc123"
)
)
# Bearer Token Authentication
bearer_headers = {
"Authorization": "Bearer bnVsbDpudWxa"
}
sheety_response = requests.post(
sheety_endpoint,
json=sheet_inputs,
headers=bearer_headers,
)
print(sheety_response.text)
|
[
"51669829+iamabdil@users.noreply.github.com"
] |
51669829+iamabdil@users.noreply.github.com
|
c47940f8d078b632487c905152a5da7ba9ea90a7
|
98731548daa91f8f7c10e0bd41e6f5dc59f8cdf7
|
/zArchive/datastore-2014/intro_datastore.py
|
f576a6cec34cc6df1de48cf18b0411a18a3e1c77
|
[
"MIT"
] |
permissive
|
topquark28/CSSI-2016
|
3ae52686abfc208684cdfb9843e4e8bf51f6e493
|
42b82462fc58fd3f454e2529e860d5edc0548e7f
|
refs/heads/master
| 2020-12-25T10:59:12.837422
| 2016-07-20T05:58:47
| 2016-07-20T05:58:47
| 63,488,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
from google.appengine.ext import ndb
class Student(ndb.Model):
name = ndb.StringProperty(required=True)
university = ndb.StringProperty(required=True)
birthday = ndb.DateProperty(required=False)
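# Hedged usage sketch (requires a running App Engine / NDB context):
#   student = Student(name='Ada Lovelace', university='CSSI')
#   key = student.put()   # writes the entity to the Datastore
#   fetched = key.get()   # reads it back by key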
|
[
"reimerc@gmail.com"
] |
reimerc@gmail.com
|
ce175b4964ee84d489102bcbf273165ae4966f89
|
1023fbd328d001782fdc42a9f2b81d2ac715704a
|
/01-Neural_Networks_and_Deep_Learning/week4/py-project2/dnn_app_utils_v2.py
|
057ed658f1d64bf98464b00feb95597d3cc2b5ae
|
[] |
no_license
|
Qu-rixin/deeplearning.ai-notes
|
a7868ce6b4fc87f266a41bccf3887fa95705bc67
|
fb217a0fd4be79846be1e069c9baea2221689fa5
|
refs/heads/master
| 2020-09-02T14:56:16.851256
| 2019-11-03T12:01:02
| 2019-11-03T12:01:02
| 209,943,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,199
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def load_data():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(1)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1]) #*0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
Z = W.dot(A) + b
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_sigmoid_forward() (there is one, indexed L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = "relu")
caches.append(cache)
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = "sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = 1./m * np.dot(dZ,A_prev.T)
db = 1./m * np.sum(dZ, axis = 1, keepdims = True)
dA_prev = np.dot(W.T,dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (there are (L-1) or them, indexes from 0 to L-2)
the cache of linear_activation_forward() with "sigmoid" (there is one, index L-1)
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
current_cache = caches[L-1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation = "relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l+1)]
return parameters
def predict(X, y, parameters):
"""
This function is used to predict the results of a L-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
n = len(parameters) // 2 # number of layers in the neural network
p = np.zeros((1,m))
# Forward propagation
probas, caches = L_model_forward(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, probas.shape[1]):
if probas[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
#print results
#print ("predictions: " + str(p))
#print ("true labels: " + str(y))
print("Accuracy: " + str(np.sum((p == y)/m)))
return p
def print_mislabeled_images(classes, X, y, p):
"""
Plots images where predictions and truth were different.
X -- dataset
y -- true labels
p -- predictions
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
plt.subplot(2, num_images, i + 1)
plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
plt.axis('off')
plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization.
parameters = initialize_parameters_deep(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
AL, caches = L_model_forward(X, parameters)
# Compute cost.
cost = compute_cost(AL, Y)
# Backward propagation.
grads = L_model_backward(AL, Y, caches)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate=learning_rate)
        # Print the cost every 100 training examples
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
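# Illustrative end-to-end usage sketch (not part of the original helpers).
# Assumes the cat/non-cat .h5 files referenced in load_data() exist on disk;
# 12288 = 64*64*3 flattened pixels, and the hidden sizes are arbitrary choices.
if __name__ == '__main__':
    train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
    train_x = train_x_orig.reshape(train_x_orig.shape[0], -1).T / 255.   # flatten and scale
    test_x = test_x_orig.reshape(test_x_orig.shape[0], -1).T / 255.
    parameters = L_layer_model(train_x, train_y, [12288, 20, 7, 5, 1],
                               num_iterations=2500, print_cost=True)
    predictions_test = predict(test_x, test_y, parameters)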
|
[
"noreply@github.com"
] |
Qu-rixin.noreply@github.com
|
1641dbf439c23db22eef588cd46047788da9152a
|
6968d37d97727685af9e413d34fd8cd9004c836e
|
/blog/migrations/0002_auto_20200630_2248.py
|
74544862a11349c0d0a8d5191e6e5a1e8eef507e
|
[] |
no_license
|
eunjungleo/0715lecnote
|
6ef8c7992c6c82fd49a2f8059c1a78f7e9ade3e9
|
a6b93279c1ad806e1a9297cc3be325b8a237bc3e
|
refs/heads/master
| 2022-11-17T12:55:08.289124
| 2020-07-15T11:26:35
| 2020-07-15T11:26:35
| 279,613,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
# Generated by Django 3.0.7 on 2020-06-30 13:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='post',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='post_id', to='blog.Post'),
),
migrations.AlterField(
model_name='post',
name='comment',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.DO_NOTHING, related_name='comment_id', to='blog.Comment'),
),
]
|
[
"asaprocky123@likelion.org"
] |
asaprocky123@likelion.org
|
d000a16a23df4f4f7a6039f2585d22f6f582f620
|
d829c090d178a1bca976a4299014c46e0fbac24b
|
/push/push.py
|
10c6881e00019cf8d126cece53945f586d4bbf33
|
[
"MIT"
] |
permissive
|
quannv108/redis_fifo_clients
|
9bdab30daad86caff8b5b37f8e77d8282f7bb175
|
880c2ea1c7e49f39dc74e16d3b2dd96e9ff1c9e4
|
refs/heads/master
| 2022-02-24T04:16:01.095022
| 2019-10-04T07:17:14
| 2019-10-04T07:17:14
| 212,522,124
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
import redis
import os
import time
from datetime import datetime
if __name__ == '__main__':
print('push starting...')
redis_conn = redis.Redis(host=os.getenv('HOST'), port=6379, db=0)
count = 0
while True:
time.sleep(3)
now = datetime.now()
print('will push value {}'.format(now))
redis_conn.lpush('abc', now.strftime('%Y-%m-%d %H:%M:%S.%f'))
count += 1
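# Hedged companion sketch (the pop side presumably runs in a separate
# process/container): LPUSH here plus BRPOP on the other end yields FIFO order.
#   import redis, os
#   conn = redis.Redis(host=os.getenv('HOST'), port=6379, db=0)
#   while True:
#       _key, value = conn.brpop('abc')   # blocks until a value is available
#       print('popped', value.decode())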
|
[
"quannv@d-soft.com.vn"
] |
quannv@d-soft.com.vn
|
fc347e03429f8460b8f3300c15f04c370a519892
|
c61a28aba19f7cdf9a5127e8a782bf115c265e70
|
/env/bin/pygmentize
|
71647a426bbc992d8f9a844ab9330a2cd73d7788
|
[] |
no_license
|
sharmilaviji/RecruitPRO-NEW
|
fa72c8fc00f469a41798b1047c11dcc470fbc495
|
dcfaedebe56b45acd6ddcab7e24c939b853a2c8c
|
refs/heads/master
| 2021-05-26T12:14:12.611154
| 2020-04-27T04:40:50
| 2020-04-27T04:40:50
| 254,125,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
#!/home/sharmila/frappe-bench/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"sharmiviji1997@gmail.com"
] |
sharmiviji1997@gmail.com
|
|
23c3506556a0d679d371d7221560b44fc26c5655
|
6514c3c286e8637e9d73fe140df61e260394e655
|
/vault/forms.py
|
84d2c5d438d66e6290da87d0f7aff7f2afef02c7
|
[] |
no_license
|
Duncan-Stout/your_vault
|
3b8ceb785388c6c5f7b783d072dc137907b5ee85
|
4cf079dbef98a04d28498c7ec4fa7556e561f638
|
refs/heads/main
| 2023-02-12T05:50:43.776694
| 2020-11-28T09:39:11
| 2020-11-28T09:39:11
| 312,155,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django import forms
class UploadfileForm(forms.Form):
title = forms.CharField(max_length=50)
file = forms.FileField()
|
[
"destout1983@yahoo.com"
] |
destout1983@yahoo.com
|
7a8e55bc4d5539029e8c24e20c1d25272b887f74
|
62f271853955906d3c2898f357c65945a0b38998
|
/py_dj_blog/wsgi.py
|
6d936bdf523dcd65ba59226562454833795d077a
|
[] |
no_license
|
shootie007/py_dj_blog
|
46bd9cc65cf98a4e9065223c9132ee9930a95245
|
9e018201e8550dd92bd1c0d275f46c4498de55a0
|
refs/heads/master
| 2022-12-04T11:10:57.086828
| 2020-08-28T07:50:07
| 2020-08-28T07:50:07
| 285,501,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for py_dj_blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'py_dj_blog.settings')
application = get_wsgi_application()
|
[
"shoot@Shootie-MacBook.local"
] |
shoot@Shootie-MacBook.local
|
2dbb562e94ee2bfe30d7a41946ff1b5bca578a55
|
11fc58b24edc43f00cba7bf029fe498b7beb493d
|
/lab project/mysite/mysite/urls.py
|
12dfe9c0586bc409f5b73c24bf7f18b563bd452b
|
[] |
no_license
|
AndrewZhou924/Bootstrap_Django_website
|
ab460a2f45b75d0bf3cce9167def5571cf872d87
|
7f565ef56a5859092535b953f90f3a4b6169233b
|
refs/heads/master
| 2022-12-04T18:14:31.799855
| 2019-07-01T07:12:24
| 2019-07-01T07:12:24
| 193,053,240
| 0
| 0
| null | 2022-11-22T03:15:07
| 2019-06-21T07:35:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
from login import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^index/', views.index),
url(r'^login/', views.login),
url(r'^register/', views.register),
url(r'^logout/', views.logout),
url(r'^captcha', include('captcha.urls'))
]
|
[
"andrewzhou924@gmail.com"
] |
andrewzhou924@gmail.com
|
a545ac2c798829459b6312574645abf1a5576465
|
94d065f032359c0c7f18f5c8cba933dde14d3f95
|
/train.py
|
1616e35044629e87e7b0b512a1a8a8bf23415f2f
|
[] |
no_license
|
smartprobe/HGNN
|
d57350e19274a9364ec50093af1d1c2e402f2153
|
6c68c42fff55057ec5e14185e51b7449293a4583
|
refs/heads/main
| 2023-01-25T05:21:46.183436
| 2020-12-10T12:15:05
| 2020-12-10T12:15:05
| 320,261,591
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,421
|
py
|
from torchtools import *
from data import MiniImagenetLoader,TieredImagenetLoader
from model import EmbeddingImagenet, Unet,Unet2
import shutil
import os
import random
import time
class ModelTrainer(object):
def __init__(self,
enc_module,
unet_module,
data_loader):
# set encoder and unet
self.enc_module = enc_module.to(tt.arg.device)
self.unet_module = unet_module.to(tt.arg.device)
if tt.arg.num_gpus > 1:
print('Construct multi-gpu model ...')
self.enc_module = nn.DataParallel(self.enc_module, device_ids=[2, 3], dim=0)
self.unet_module = nn.DataParallel(self.unet_module, device_ids=[2, 3], dim=0)
print('done!\n')
# get data loader
self.data_loader = data_loader
# set module parameters
self.module_params = list(self.enc_module.parameters()) + list(self.unet_module.parameters())
# set optimizer
self.optimizer = optim.Adam(params=self.module_params,
lr=tt.arg.lr,
weight_decay=tt.arg.weight_decay)
# set loss
self.node_loss = nn.NLLLoss()
self.global_step = 0
self.val_acc = 0
self.test_acc = 0
def train(self):
val_acc = self.val_acc
# set edge mask (to distinguish support and query edges)
num_supports = tt.arg.num_ways * tt.arg.num_shots
num_queries = tt.arg.num_queries
num_samples = num_supports + num_queries
time_start=time.time()
# for each iteration
for iter in range(self.global_step + 1, tt.arg.train_iteration + 1):
# init grad
self.optimizer.zero_grad()
# set current step
self.global_step = iter
# load task data list
[support_data,
support_label,
query_data,
query_label] = self.data_loader['train'].get_task_batch(num_tasks=tt.arg.meta_batch_size,
num_ways=tt.arg.num_ways,
num_shots=tt.arg.num_shots,
num_queries=int(tt.arg.num_queries /tt.arg.num_ways),
seed=iter + tt.arg.seed)
# set as single data
full_data = torch.cat([support_data, query_data], 1)
full_label = torch.cat([support_label, query_label], 1)
full_edge = self.label2edge(full_label)
# set init edge
init_edge = full_edge.clone() # batch_size x 2 x num_samples x num_samples
init_edge[:, num_supports:, :] = 0.5
init_edge[:, :, num_supports:] = 0.5
for i in range(num_queries):
init_edge[:, num_supports + i, num_supports + i] = 1.0
# set as train mode
self.enc_module.train()
self.unet_module.train()
# (1) encode data
full_data = [self.enc_module(data.squeeze(1)) for data in full_data.chunk(full_data.size(1), dim=1)]
full_data = torch.stack(full_data, dim=1) # batch_size x num_samples x featdim
one_hot_label = self.one_hot_encode(tt.arg.num_ways, support_label.long())
query_padding = (1 / tt.arg.num_ways) * torch.ones([full_data.shape[0]] + [num_queries] + [tt.arg.num_ways],
device=one_hot_label.device)
one_hot_label = torch.cat([one_hot_label, query_padding], dim=1)
full_data = torch.cat([full_data, one_hot_label], dim=-1)
if tt.arg.transductive == True:
# transduction
full_node_out = self.unet_module(init_edge, full_data)
else:
# non-transduction
support_data = full_data[:, :num_supports] # batch_size x num_support x featdim
query_data = full_data[:, num_supports:] # batch_size x num_query x featdim
support_data_tiled = support_data.unsqueeze(1).repeat(1, num_queries, 1,
1) # batch_size x num_queries x num_support x featdim
support_data_tiled = support_data_tiled.view(tt.arg.meta_batch_size * num_queries, num_supports,
-1) # (batch_size x num_queries) x num_support x featdim
query_data_reshaped = query_data.contiguous().view(tt.arg.meta_batch_size * num_queries, -1).unsqueeze(
1) # (batch_size x num_queries) x 1 x featdim
input_node_feat = torch.cat([support_data_tiled, query_data_reshaped],
1) # (batch_size x num_queries) x (num_support + 1) x featdim
input_edge_feat = 0.5 * torch.ones(tt.arg.meta_batch_size, num_supports + 1, num_supports + 1).to(
tt.arg.device) # batch_size x (num_support + 1) x (num_support + 1)
input_edge_feat[:, :num_supports, :num_supports] = init_edge[:, :num_supports,
:num_supports] # batch_size x (num_support + 1) x (num_support + 1)
input_edge_feat = input_edge_feat.repeat(num_queries, 1,
1) # (batch_size x num_queries) x (num_support + 1) x (num_support + 1)
# 2. unet
node_out = self.unet_module(input_edge_feat,
input_node_feat) # (batch_size x num_queries) x (num_support + 1) x num_classes
node_out = node_out.view(tt.arg.meta_batch_size, num_queries, num_supports + 1,
tt.arg.num_ways) # batch_size x num_queries x (num_support + 1) x num_classes
full_node_out = torch.zeros(tt.arg.meta_batch_size, num_samples, tt.arg.num_ways).to(tt.arg.device)
full_node_out[:, :num_supports, :] = node_out[:, :, :num_supports, :].mean(1)
full_node_out[:, num_supports:, :] = node_out[:, :, num_supports:, :].squeeze(2)
# 3. compute loss
query_node_out = full_node_out[:,num_supports:]
node_pred = torch.argmax(query_node_out, dim=-1)
node_accr = torch.sum(torch.eq(node_pred, full_label[:, num_supports:].long())).float() \
/ node_pred.size(0) / num_queries
node_loss = [self.node_loss(data.squeeze(1), label.squeeze(1).long()) for (data, label) in
zip(query_node_out.chunk(query_node_out.size(1), dim=1), full_label[:, num_supports:].chunk(full_label[:, num_supports:].size(1), dim=1))]
node_loss = torch.stack(node_loss, dim=0)
node_loss = torch.mean(node_loss)
node_loss.backward()
self.optimizer.step()
# adjust learning rate
self.adjust_learning_rate(optimizers=[self.optimizer],
lr=tt.arg.lr,
iter=self.global_step)
# logging
tt.log_scalar('train/loss', node_loss, self.global_step)
tt.log_scalar('train/node_accr', node_accr, self.global_step)
tt.log_scalar('train/time', time.time()-time_start, self.global_step)
# evaluation
if self.global_step % tt.arg.test_interval == 0:
val_acc = self.eval(partition='val')
is_best = 0
if val_acc >= self.val_acc:
self.val_acc = val_acc
is_best = 1
tt.log_scalar('val/best_accr', self.val_acc, self.global_step)
self.save_checkpoint({
'iteration': self.global_step,
'enc_module_state_dict': self.enc_module.state_dict(),
'unet_module_state_dict': self.unet_module.state_dict(),
'val_acc': val_acc,
'optimizer': self.optimizer.state_dict(),
}, is_best)
tt.log_step(global_step=self.global_step)
def eval(self,partition='test', log_flag=True):
best_acc = 0
# set edge mask (to distinguish support and query edges)
num_supports = tt.arg.num_ways * tt.arg.num_shots
num_queries = tt.arg.num_queries
num_samples = num_supports + num_queries
query_node_accrs = []
time_start_eval=time.time()
# for each iteration
for iter in range(tt.arg.test_iteration // tt.arg.test_batch_size):
# load task data list
[support_data,
support_label,
query_data,
query_label] = self.data_loader[partition].get_task_batch(num_tasks=tt.arg.test_batch_size,
num_ways=tt.arg.num_ways,
num_shots=tt.arg.num_shots,
num_queries=int(tt.arg.num_queries /tt.arg.num_ways),
seed=iter)
'''
q0 = query_data[:,0,:].clone()
q1 = query_data[:,1,:].clone()
query_data[:, 1, :] = q0
query_data[:, 0, :] = q1
ql0 = query_label[:,0].clone()
ql1 = query_label[:,1].clone()
query_label[:, 1] = ql0
query_label[:, 0] = ql1
'''
# set as single data
full_data = torch.cat([support_data, query_data], 1)
full_label = torch.cat([support_label, query_label], 1)
full_edge = self.label2edge(full_label)
# set init edge
init_edge = full_edge.clone()
init_edge[:, num_supports:, :] = 0.5
init_edge[:, :, num_supports:] = 0.5
for i in range(num_queries):
init_edge[:, num_supports + i, num_supports + i] = 1.0
# set as eval mode
self.enc_module.eval()
self.unet_module.eval()
# (1) encode data
full_data = [self.enc_module(data.squeeze(1)) for data in full_data.chunk(full_data.size(1), dim=1)]
full_data = torch.stack(full_data, dim=1) # batch_size x num_samples x featdim
one_hot_label = self.one_hot_encode(tt.arg.num_ways, support_label.long())
query_padding = (1 / tt.arg.num_ways) * torch.ones([full_data.shape[0]] + [num_queries] + [tt.arg.num_ways],
device=one_hot_label.device)
one_hot_label = torch.cat([one_hot_label, query_padding], dim=1)
full_data = torch.cat([full_data, one_hot_label], dim=-1)
if tt.arg.transductive == True:
# transduction
full_node_out = self.unet_module(init_edge, full_data)
else:
# non-transduction
support_data = full_data[:, :num_supports] # batch_size x num_support x featdim
query_data = full_data[:, num_supports:] # batch_size x num_query x featdim
support_data_tiled = support_data.unsqueeze(1).repeat(1, num_queries, 1,
1) # batch_size x num_queries x num_support x featdim
support_data_tiled = support_data_tiled.view(tt.arg.test_batch_size * num_queries, num_supports,
-1) # (batch_size x num_queries) x num_support x featdim
query_data_reshaped = query_data.contiguous().view(tt.arg.test_batch_size * num_queries, -1).unsqueeze(
1) # (batch_size x num_queries) x 1 x featdim
input_node_feat = torch.cat([support_data_tiled, query_data_reshaped],
1) # (batch_size x num_queries) x (num_support + 1) x featdim
input_edge_feat = 0.5 * torch.ones(tt.arg.test_batch_size, num_supports + 1, num_supports + 1).to(
tt.arg.device) # batch_size x (num_support + 1) x (num_support + 1)
input_edge_feat[:, :num_supports, :num_supports] = init_edge[:, :num_supports,
:num_supports] # batch_size x (num_support + 1) x (num_support + 1)
input_edge_feat = input_edge_feat.repeat(num_queries, 1,
1) # (batch_size x num_queries) x (num_support + 1) x (num_support + 1)
# 2. unet
node_out = self.unet_module(input_edge_feat,
input_node_feat) # (batch_size x num_queries) x (num_support + 1) x num_classes
node_out = node_out.view(tt.arg.test_batch_size, num_queries, num_supports + 1,
tt.arg.num_ways) # batch_size x num_queries x (num_support + 1) x num_classes
full_node_out = torch.zeros(tt.arg.test_batch_size, num_samples, tt.arg.num_ways).to(tt.arg.device)
full_node_out[:, :num_supports, :] = node_out[:, :, :num_supports, :].mean(1)
full_node_out[:, num_supports:, :] = node_out[:, :, num_supports:, :].squeeze(2)
# 3. compute loss
query_node_out = full_node_out[:, num_supports:]
node_pred = torch.argmax(query_node_out, dim=-1)
node_accr = torch.sum(torch.eq(node_pred, full_label[:, num_supports:].long())).float() \
/ node_pred.size(0) / num_queries
query_node_accrs += [node_accr.item()]
print('time cost',time.time()-time_start_eval,'s')
# logging
if log_flag:
tt.log('---------------------------')
tt.log_scalar('{}/node_accr'.format(partition), np.array(query_node_accrs).mean(), self.global_step)
tt.log('evaluation: total_count=%d, accuracy: mean=%.2f%%, std=%.2f%%, ci95=%.2f%%' %
(iter,
np.array(query_node_accrs).mean() * 100,
np.array(query_node_accrs).std() * 100,
1.96 * np.array(query_node_accrs).std() / np.sqrt(
float(len(np.array(query_node_accrs)))) * 100))
tt.log('---------------------------')
return np.array(query_node_accrs).mean()
def adjust_learning_rate(self, optimizers, lr, iter):
new_lr = lr * (0.5 ** (int(iter / tt.arg.dec_lr)))
for optimizer in optimizers:
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def label2edge(self, label):
# get size
num_samples = label.size(1)
# reshape
label_i = label.unsqueeze(-1).repeat(1, 1, num_samples)
label_j = label_i.transpose(1, 2)
# compute edge
edge = torch.eq(label_i, label_j).float().to(tt.arg.device)
return edge
def one_hot_encode(self, num_classes, class_idx):
return torch.eye(num_classes)[class_idx].to(tt.arg.device)
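    # Shape sketch for the two helpers above: for labels [[0, 1, 0]],
    # label2edge returns [[1, 0, 1], [0, 1, 0], [1, 0, 1]] (1 where labels match),
    # and one_hot_encode(2, ...) indexes rows of torch.eye(2) per label index.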
def save_checkpoint(self, state, is_best):
torch.save(state, 'asset/checkpoints/{}/'.format(tt.arg.experiment) + 'checkpoint.pth.tar')
if is_best:
shutil.copyfile('asset/checkpoints/{}/'.format(tt.arg.experiment) + 'checkpoint.pth.tar',
'asset/checkpoints/{}/'.format(tt.arg.experiment) + 'model_best.pth.tar')
def set_exp_name():
exp_name = 'D-{}'.format(tt.arg.dataset)
    exp_name += '_N-{}_K-{}_Q-{}'.format(tt.arg.num_ways, tt.arg.num_shots, tt.arg.num_queries)
    exp_name += '_B-{}_T-{}'.format(tt.arg.meta_batch_size, tt.arg.transductive)
    exp_name += '_P-{}_Un-{}'.format(tt.arg.pool_mode, tt.arg.unet_mode)
exp_name += '_SEED-{}_2'.format(tt.arg.seed)
return exp_name
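# e.g. set_exp_name() yields something like
# 'D-mini_N-5_K-5_Q-5_B-40_T-True_P-kn_Un-addold_SEED-222_2' (illustrative values)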
if __name__ == '__main__':
tt.arg.device = 'cuda:0' if tt.arg.device is None else tt.arg.device
tt.arg.dataset_root = 'dataset'
tt.arg.dataset = 'mini' if tt.arg.dataset is None else tt.arg.dataset
tt.arg.num_ways = 5 if tt.arg.num_ways is None else tt.arg.num_ways
tt.arg.num_shots = 5 if tt.arg.num_shots is None else tt.arg.num_shots
    tt.arg.num_queries = tt.arg.num_ways * 1
    tt.arg.num_supports = tt.arg.num_ways * tt.arg.num_shots
tt.arg.transductive = True if tt.arg.transductive is None else tt.arg.transductive
    if not tt.arg.transductive:
tt.arg.meta_batch_size = 20
else:
tt.arg.meta_batch_size = 40
tt.arg.seed = 222 if tt.arg.seed is None else tt.arg.seed
tt.arg.num_gpus = 1
# model parameter related
tt.arg.emb_size = 128
tt.arg.in_dim = tt.arg.emb_size + tt.arg.num_ways
tt.arg.pool_mode = 'kn' if tt.arg.pool_mode is None else tt.arg.pool_mode # 'way'/'support'/'kn'
tt.arg.unet_mode = 'addold' if tt.arg.unet_mode is None else tt.arg.unet_mode # 'addold'/'noold'
    unet2_flag = False  # flag indicating whether the two-stage Unet2 module is used
# confirm ks
    if tt.arg.num_shots == 1 and not tt.arg.transductive:
if tt.arg.pool_mode == 'support': # 'support': pooling on support
tt.arg.ks = [0.6, 0.5] # 5->3->1
elif tt.arg.pool_mode == 'kn': # left close support node
tt.arg.ks = [0.6, 0.5] # 5->3->1
else:
print('wrong mode setting!!!')
raise NameError('wrong mode setting!!!')
    elif tt.arg.num_shots == 5 and not tt.arg.transductive:
if tt.arg.pool_mode == 'way': # 'way' pooling on support by way
tt.arg.ks_1 = [0.6, 0.5] # 5->3->1
mode_1 = 'way'
tt.arg.ks_2 = [0.6, 0.5] # 5->3->1 # supplementary pooling for fair comparing
mode_2 = 'support'
unet2_flag = True
elif tt.arg.pool_mode == 'kn':
tt.arg.ks_1 = [0.6, 0.5] # 5->3->1
mode_1 = 'way&kn'
tt.arg.ks_2 = [0.6, 0.5] # 5->3->1 # supplementary pooling for fair comparing
mode_2 = 'kn'
unet2_flag = True
else:
print('wrong mode setting!!!')
raise NameError('wrong mode setting!!!')
    elif tt.arg.num_shots == 1 and tt.arg.transductive:
if tt.arg.pool_mode == 'support': # 'support': pooling on support
tt.arg.ks = [0.6, 0.5] # 5->3->1
elif tt.arg.pool_mode == 'kn': # left close support node
tt.arg.ks = [0.6, 0.5] # 5->3->1
else:
print('wrong mode setting!!!')
raise NameError('wrong mode setting!!!')
    elif tt.arg.num_shots == 5 and tt.arg.transductive:
if tt.arg.pool_mode == 'way': # 'way' pooling on support by way
tt.arg.ks_1 = [0.6, 0.5] # 5->3->1
mode_1 = 'way'
tt.arg.ks_2 = [0.6, 0.5] # 5->3->1 # supplementary pooling for fair comparing
mode_2 = 'support'
unet2_flag = True
elif tt.arg.pool_mode == 'kn':
tt.arg.ks_1 = [0.2] # 5->1
mode_1 = 'way&kn'
tt.arg.ks_2 = [0.2] # 5->1 # supplementary pooling for fair comparing
mode_2 = 'kn'
unet2_flag = True
else:
print('wrong mode setting!!!')
raise NameError('wrong mode setting!!!')
else:
print('wrong shot and T settings!!!')
raise NameError('wrong shot and T settings!!!')
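    # Note: each ks entry is the fraction of nodes kept at a pooling layer, so
    # [0.6, 0.5] over 5 support nodes gives 5 -> 3 -> 1 (as the inline comments note).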
# train, test parameters
tt.arg.train_iteration = 100000 if tt.arg.dataset == 'mini' else 200000
tt.arg.test_iteration = 10000
tt.arg.test_interval = 20 # 5000
tt.arg.test_batch_size = 10
tt.arg.log_step = 10
tt.arg.lr = 1e-3
tt.arg.grad_clip = 5
tt.arg.weight_decay = 1e-6
tt.arg.dec_lr = 10000 if tt.arg.dataset == 'mini' else 20000
tt.arg.dropout = 0.1 if tt.arg.dataset == 'mini' else 0.0
tt.arg.experiment = set_exp_name() if tt.arg.experiment is None else tt.arg.experiment
print(set_exp_name())
# set random seed
np.random.seed(tt.arg.seed)
torch.manual_seed(tt.arg.seed)
torch.cuda.manual_seed_all(tt.arg.seed)
random.seed(tt.arg.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
tt.arg.log_dir_user = tt.arg.log_dir if tt.arg.log_dir_user is None else tt.arg.log_dir_user
tt.arg.log_dir = tt.arg.log_dir_user
if not os.path.exists('asset/checkpoints'):
os.makedirs('asset/checkpoints')
if not os.path.exists('asset/checkpoints/' + tt.arg.experiment):
os.makedirs('asset/checkpoints/' + tt.arg.experiment)
enc_module = EmbeddingImagenet(emb_size=tt.arg.emb_size)
    if not tt.arg.transductive:
        if not unet2_flag:
            unet_module = Unet(tt.arg.ks, tt.arg.in_dim, tt.arg.num_ways, 1)
        else:
            unet_module = Unet2(tt.arg.ks_1, tt.arg.ks_2, mode_1, mode_2, tt.arg.in_dim, tt.arg.num_ways, 1)
    else:
        if not unet2_flag:
            unet_module = Unet(tt.arg.ks, tt.arg.in_dim, tt.arg.num_ways, tt.arg.num_queries)
        else:
            unet_module = Unet2(tt.arg.ks_1, tt.arg.ks_2, mode_1, mode_2, tt.arg.in_dim, tt.arg.num_ways, tt.arg.num_queries)
if tt.arg.dataset == 'mini':
train_loader = MiniImagenetLoader(root=tt.arg.dataset_root, partition='train')
valid_loader = MiniImagenetLoader(root=tt.arg.dataset_root, partition='val')
elif tt.arg.dataset == 'tiered':
train_loader = TieredImagenetLoader(root=tt.arg.dataset_root, partition='train')
valid_loader = TieredImagenetLoader(root=tt.arg.dataset_root, partition='val')
else:
print('Unknown dataset!')
raise NameError('Unknown dataset!!!')
data_loader = {'train': train_loader,
'val': valid_loader
}
# create trainer
trainer = ModelTrainer(enc_module=enc_module,
unet_module=unet_module,
data_loader=data_loader)
trainer.train()
|
[
"noreply@github.com"
] |
smartprobe.noreply@github.com
|
fbec8e03c0fe27c466c4d49ca9f37570fa8f45b3
|
d28df0e0ec46aa47d75273e55dad5bba34162919
|
/InitUser/wsgi.py
|
8c8f8275895d5ab3237d295335142d525a115d9e
|
[] |
no_license
|
sarudalf3/Django_register
|
0a85e7215f809a3d0bdc4392b3c576018b08c87f
|
f5d2c61e15949ce22337f992a8c17769991299c5
|
refs/heads/master
| 2023-07-17T13:44:30.502860
| 2021-09-02T00:52:28
| 2021-09-02T00:52:28
| 402,244,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for InitUser project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'InitUser.settings')
application = get_wsgi_application()
|
[
"ruben.miranda.f@gmail.com"
] |
ruben.miranda.f@gmail.com
|
059f8ab0f37b5e7f40886a1a96270d144121d038
|
f320256bf4cd65db7a8bc0cd6fe61131cd1903d7
|
/pondpi/ui/views/index.py
|
3ae374badc357aabeea3e6a4ad96712ca9a488a8
|
[
"MIT"
] |
permissive
|
meinjens/pondpi
|
6bc1a9648a42bc25135b7a99058cdb6ddb0b5c46
|
f6830d0d834b685bff2729943188171096f31c93
|
refs/heads/main
| 2023-05-13T22:28:49.098092
| 2023-01-28T14:19:50
| 2023-01-28T14:19:50
| 354,050,540
| 0
| 0
|
MIT
| 2023-05-01T22:44:37
| 2021-04-02T14:56:55
|
Jinja
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
import flask
from pondpi.ui import ui
@ui.route('/', methods=["GET"])
def index():
return flask.render_template('start.html')
@ui.route('/dashboard', methods=["GET"])
def dashboard():
return flask.render_template('dashboard.html')
@ui.route('/control-station', methods=["GET"])
def control_station():
return flask.render_template('control-station.html')
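# These view functions assume the `ui` blueprint is registered on a Flask app
# elsewhere in the package, e.g. app.register_blueprint(ui) (a sketch; the actual
# wiring lives in pondpi's app setup).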
|
[
"1739138+meinjens@users.noreply.github.com"
] |
1739138+meinjens@users.noreply.github.com
|
c09fbb5af4bd07204b3f6165d002d80a474d3891
|
9fb8ba88d50ffba4e9ad4f3fb4cfd7c5d13f3591
|
/new/read_xml.py
|
93b34d9bb50bfb872bb4f5b66487d06f7f93449b
|
[] |
no_license
|
joshsomma/python_webdata
|
b35e4903ca63b8f17493de32e8d24222107f6401
|
017673bd89ca2ec7af52a3dc214f06f1464d2831
|
refs/heads/master
| 2016-08-10T15:31:13.071254
| 2016-02-07T00:26:18
| 2016-02-07T00:26:18
| 45,772,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# import libs
import urllib
import xml.etree.ElementTree as ET
# prompt to enter data location
address = raw_input('Enter location: ')
print 'Retrieving', address
# open and store data from location
uh = urllib.urlopen(address)
data = uh.read()
# parse data in object
tree = ET.fromstring(data)
total = 0
for num in tree.iter('count'):
#print num.text
total += int(num.text)
print "The total is: ", total
|
[
"jsomma@squiz.com"
] |
jsomma@squiz.com
|
db876c2995e00f27fc4d14a58b98efde7091f4a1
|
3b5f7fe204a3f2f8d0534e7f02bc4aa517e12d70
|
/wargaming/games/wot/__init__.py
|
7a957b218e13abcfe313079ae9bc77ed6c89ccba
|
[
"MIT"
] |
permissive
|
cstrutton/python-wargaming
|
1916d45cad1ca5a490342253a2f6f41427afeb50
|
8a446b5c34a55025dd961438d6b67a3a0a186791
|
refs/heads/master
| 2020-12-29T18:52:23.411208
| 2014-11-26T15:12:01
| 2014-11-26T15:12:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
# -*- coding: utf-8 -*-
import six
from wargaming import settings
from wargaming.api import BaseAPI, MetaAPI
from wargaming.games.wot.accounts import Accounts
from wargaming.games.wot.auth import Auth
from wargaming.games.wot.clans import Clans
from wargaming.games.wot.globalwar import GlobalWar
from wargaming.games.wot.encyclopedia import Encyclopedia
from wargaming.games.wot.ratings import Ratings
from wargaming.games.wot.clan_ratings import ClanRatings
from wargaming.games.wot.tanks import Tanks
__all__ = ('API', )
@six.add_metaclass(MetaAPI)
class API(BaseAPI):
"""World of Tanks API client"""
def __init__(self, application_id, language=settings.DEFAULT_LANGUAGE):
"""
:param application_id: Your application ID from the https://wargaming.net/developers/applications/
:type application_id: str
:param language: Language for the requests (defaults to English)
:type language: str
"""
super(API, self).__init__(application_id, language,
base_url='https://api.worldoftanks.com/wot/')
accounts = Accounts()
auth = Auth()
clans = Clans()
globalwar = GlobalWar()
encyclopedia = Encyclopedia()
ratings = Ratings()
clan_ratings = ClanRatings()
tanks = Tanks()
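# Usage sketch (the application ID below is a placeholder, not a real key):
#   api = API(application_id='demo')
# api.accounts, api.clans, ... are then available; presumably the MetaAPI
# metaclass wires the class attributes above into per-endpoint clients.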
|
[
"self@svartalf.info"
] |
self@svartalf.info
|
4b4a3d00e0c59dfe5920fc6020a70ade72ea8065
|
0e26592078429d8b0b3da25e797128f72938b311
|
/ๆฐๅญไฟกๅทๅค็ๅฎ้ช/2.2.py
|
c0aa3bac0692a05600e274f97181bdfcc0d8f226
|
[] |
no_license
|
wrh2048470785/2
|
e67c144243c9a64cc3dc5935e46ebd820ddfdc62
|
de15509e3d32bcbdd79a2d8a2a7c75eb3bf09240
|
refs/heads/main
| 2023-06-08T03:48:25.054502
| 2021-06-30T07:51:15
| 2021-06-30T07:51:15
| 366,720,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
from thinkdsp import Sinusoid
from thinkdsp import normalize, unbias
import numpy as np
import matplotlib.pyplot as plt
from thinkdsp import SquareSignal
from thinkdsp import TriangleSignal
class SawtoothSignal(Sinusoid):
    '''A sawtooth wave class.'''
    def evaluate(self, ts):
        # cycles is the number of periods elapsed since the start time
        cycles = self.freq * ts + self.offset / np.pi / 2
        frac, _ = np.modf(cycles)
        # np.modf() splits a number into its fractional and integer parts
        ys = normalize(unbias(frac), self.amp)
        # frac ranges from 0 to 1
        # unbias shifts the waveform so it is centered at 0
        # normalize scales the waveform to the given amplitude (amp)
        return ys
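# Worked example (a sketch): with the default freq=440 Hz and ts = [0, 1/880],
# cycles = [0.0, 0.5] and frac = [0.0, 0.5]; unbias/normalize then turn the
# fractional ramp into a sawtooth centered at 0 with amplitude amp.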
plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render CJK text in figures
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font
#plt.subplot(511)  # first plot of a 5-row, 1-column grid
#plt.title(u"Spectrum of the sawtooth wave")
sawtooth = SawtoothSignal().make_wave(duration=0.5, framerate=40000)
# build a 0.5 s waveform at 40000 frames per second
#sawtooth.make_spectrum().plot()
#plt.subplot(513)
#plt.title(u"Spectra of the sawtooth (gray) & square waves")
#sawtooth.make_spectrum().plot(color='gray')  # draw the sawtooth in gray for easier comparison
#square = SquareSignal(amp=0.5).make_wave(duration=0.5, framerate=40000)
# set the square wave's amplitude to 0.5 for a fair comparison
#square.make_spectrum().plot()
#plt.subplot(515)
plt.title(u"Spectra of the sawtooth (gray) & triangle waves")
sawtooth.make_spectrum().plot(color='gray')
triangle = TriangleSignal(amp=0.79).make_wave(duration=0.5, framerate=40000)
triangle.make_spectrum().plot()
plt.show()
|
[
"noreply@github.com"
] |
wrh2048470785.noreply@github.com
|
37e661602813c679895aa41f210f591c32611488
|
d4ea1f4955863aa2a013c6f3ae206675cf437f51
|
/code-examples/mapreduce/wc_mapper.py
|
dc4fac9cfe07f66eda851d2ff279f984a56f6f1e
|
[] |
no_license
|
SolGirouard/css
|
b20cec0df4c011603ec717512bbcd00016442cf1
|
fcf6fde063f290cb6a2fa7eedb288da810046dc9
|
refs/heads/master
| 2021-01-19T13:22:49.670095
| 2017-05-05T22:50:31
| 2017-05-05T22:50:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
#!/usr/bin/env python2
"""Example mapper module for counting words via map-reduce.
This file is saved as wc_mapper.py with execute permission
(chmod +x wc_mapper.py)"""
import sys
def main():
"""Take lines from stdin and emit each word with count 1.
This is for illustration purposes, treating any string separated by
whitespace as a 'word'. Additional cleaning (e.g., removing punctuation)
could be added if necessary."""
for line in sys.stdin:
words = line.strip().split()
for word in words:
print word + '\t' + '1'
if __name__ == "__main__":
main()
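# Usage sketch (shell):
#   echo "the cat and the hat" | python2 wc_mapper.py
# emits one "<word>\t1" line per token; a downstream reducer (not shown here)
# sums the counts per word in the usual word-count pattern.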
|
[
"jongbin@stanford.edu"
] |
jongbin@stanford.edu
|
7dc97045f49c5a4732e198885b55b6eca8057611
|
b92adbd59161b701be466b3dbeab34e2b2aaf488
|
/.c9/metadata/environment/fb_post_learning/fb_post/utils/__init__.py
|
da90613ab5a9ef0bd8b13998516cf28468ee3e78
|
[] |
no_license
|
R151865/cloud_9_files
|
7486fede7af4db4572f1b8033990a0f07f8749e8
|
a468c44e9aee4a37dea3c8c9188c6c06e91cc0c4
|
refs/heads/master
| 2022-11-22T10:45:39.439033
| 2020-07-23T09:31:52
| 2020-07-23T09:31:52
| 281,904,416
| 0
| 1
| null | 2022-11-20T00:47:10
| 2020-07-23T09:08:48
|
Python
|
UTF-8
|
Python
| false
| false
| 24,199
|
py
|
{"filter":false,"title":"__init__.py","tooltip":"/fb_post_learning/fb_post/utils/__init__.py","undoManager":{"mark":93,"position":93,"stack":[[{"start":{"row":0,"column":0},"end":{"row":30,"column":12},"action":"insert","lines":["from .create_comment import create_comment","from .create_post import create_post","from .get_post import get_post","from .get_replies_for_comment import get_replies_for_comment","from .get_user_posts import get_user_posts","\"\"\"","from .other_functions import get_total_reaction_count, get_reaction_metrics","from .other_functions import delete_post, get_posts_with_more_positive_reactions","from .other_functions import get_posts_reacted_by_user, get_reactions_to_post","\"\"\"","from .react_to_comment import react_to_comment","from .react_to_post import react_to_post","from .reply_to_comment import reply_to_comment","","__all__ = ['create_comment',"," 'create_post',"," 'get_post',"," 'get_replies_for_comment',"," 'get_user_posts',"," 'react_to_comment',"," 'react_to_post',"," 'reply_to_comment'"," ]","","sb=['get_total_reaction_count',"," 'get_reaction_metrics',"," 'delete_post',"," 'get_posts_with_more_positive_reactions',"," 'get_posts_reacted_by_user',"," 'get_reactions_to_post',"," ]"],"id":1}],[{"start":{"row":21,"column":29},"end":{"row":21,"column":30},"action":"insert","lines":[","],"id":2}],[{"start":{"row":21,"column":30},"end":{"row":22,"column":0},"action":"insert","lines":["",""],"id":3},{"start":{"row":22,"column":0},"end":{"row":22,"column":11},"action":"insert","lines":[" "]}],[{"start":{"row":22,"column":11},"end":{"row":22,"column":35},"action":"insert","lines":["get_total_reaction_count"],"id":4}],[{"start":{"row":5,"column":0},"end":{"row":10,"column":0},"action":"remove","lines":["\"\"\"","from .other_functions import get_total_reaction_count, get_reaction_metrics","from .other_functions import delete_post, get_posts_with_more_positive_reactions","from .other_functions import get_posts_reacted_by_user, 
get_reactions_to_post","\"\"\"",""],"id":5}],[{"start":{"row":5,"column":0},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":6}],[{"start":{"row":18,"column":35},"end":{"row":18,"column":36},"action":"insert","lines":["\""],"id":7}],[{"start":{"row":18,"column":11},"end":{"row":18,"column":12},"action":"insert","lines":["'"],"id":8}],[{"start":{"row":18,"column":36},"end":{"row":18,"column":37},"action":"remove","lines":["\""],"id":9}],[{"start":{"row":18,"column":36},"end":{"row":18,"column":37},"action":"insert","lines":["'"],"id":10},{"start":{"row":18,"column":37},"end":{"row":18,"column":38},"action":"insert","lines":[","]}],[{"start":{"row":9,"column":0},"end":{"row":10,"column":0},"action":"insert","lines":["",""],"id":11}],[{"start":{"row":10,"column":0},"end":{"row":10,"column":1},"action":"insert","lines":["F"],"id":12},{"start":{"row":10,"column":1},"end":{"row":10,"column":2},"action":"insert","lines":["R"]},{"start":{"row":10,"column":2},"end":{"row":10,"column":3},"action":"insert","lines":["O"]},{"start":{"row":10,"column":3},"end":{"row":10,"column":4},"action":"insert","lines":["M"]}],[{"start":{"row":10,"column":3},"end":{"row":10,"column":4},"action":"remove","lines":["M"],"id":13},{"start":{"row":10,"column":2},"end":{"row":10,"column":3},"action":"remove","lines":["O"]},{"start":{"row":10,"column":1},"end":{"row":10,"column":2},"action":"remove","lines":["R"]},{"start":{"row":10,"column":0},"end":{"row":10,"column":1},"action":"remove","lines":["F"]}],[{"start":{"row":10,"column":0},"end":{"row":10,"column":1},"action":"insert","lines":["f"],"id":14},{"start":{"row":10,"column":1},"end":{"row":10,"column":2},"action":"insert","lines":["r"]},{"start":{"row":10,"column":2},"end":{"row":10,"column":3},"action":"insert","lines":["o"]},{"start":{"row":10,"column":3},"end":{"row":10,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":10,"column":4},"end":{"row":10,"column":5},"action":"insert","lines":[" "],"id":15},{"start":{"row":10,"column":5},"end":{"row":10,"column":6},"action":"insert","lines":["."]}],[{"start":{"row":10,"column":6},"end":{"row":10,"column":7},"action":"insert","lines":["g"],"id":16},{"start":{"row":10,"column":7},"end":{"row":10,"column":8},"action":"insert","lines":["e"]},{"start":{"row":10,"column":8},"end":{"row":10,"column":9},"action":"insert","lines":["t"]},{"start":{"row":10,"column":9},"end":{"row":10,"column":10},"action":"insert","lines":["_"]},{"start":{"row":10,"column":10},"end":{"row":10,"column":11},"action":"insert","lines":["t"]},{"start":{"row":10,"column":11},"end":{"row":10,"column":12},"action":"insert","lines":["o"]}],[{"start":{"row":10,"column":6},"end":{"row":10,"column":12},"action":"remove","lines":["get_to"],"id":17},{"start":{"row":10,"column":6},"end":{"row":10,"column":30},"action":"insert","lines":["get_total_reaction_count"]}],[{"start":{"row":10,"column":30},"end":{"row":10,"column":31},"action":"insert","lines":[" 
"],"id":18},{"start":{"row":10,"column":31},"end":{"row":10,"column":32},"action":"insert","lines":["i"]},{"start":{"row":10,"column":32},"end":{"row":10,"column":33},"action":"insert","lines":["m"]},{"start":{"row":10,"column":33},"end":{"row":10,"column":34},"action":"insert","lines":["p"]},{"start":{"row":10,"column":34},"end":{"row":10,"column":35},"action":"insert","lines":["o"]},{"start":{"row":10,"column":35},"end":{"row":10,"column":36},"action":"insert","lines":["r"]},{"start":{"row":10,"column":36},"end":{"row":10,"column":37},"action":"insert","lines":["t"]}],[{"start":{"row":10,"column":37},"end":{"row":10,"column":38},"action":"insert","lines":[" "],"id":19},{"start":{"row":10,"column":38},"end":{"row":10,"column":39},"action":"insert","lines":["g"]},{"start":{"row":10,"column":39},"end":{"row":10,"column":40},"action":"insert","lines":["e"]},{"start":{"row":10,"column":40},"end":{"row":10,"column":41},"action":"insert","lines":["t"]}],[{"start":{"row":10,"column":38},"end":{"row":10,"column":41},"action":"remove","lines":["get"],"id":20},{"start":{"row":10,"column":38},"end":{"row":10,"column":62},"action":"insert","lines":["get_total_reaction_count"]}],[{"start":{"row":10,"column":62},"end":{"row":11,"column":0},"action":"insert","lines":["",""],"id":21},{"start":{"row":11,"column":0},"end":{"row":11,"column":1},"action":"insert","lines":["f"]},{"start":{"row":11,"column":1},"end":{"row":11,"column":2},"action":"insert","lines":["r"]},{"start":{"row":11,"column":2},"end":{"row":11,"column":3},"action":"insert","lines":["o"]},{"start":{"row":11,"column":3},"end":{"row":11,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":11,"column":3},"end":{"row":11,"column":4},"action":"remove","lines":["m"],"id":22},{"start":{"row":11,"column":2},"end":{"row":11,"column":3},"action":"remove","lines":["o"]},{"start":{"row":11,"column":1},"end":{"row":11,"column":2},"action":"remove","lines":["r"]},{"start":{"row":11,"column":0},"end":{"row":11,"column":1},"action":"remove","lines":["f"]}],[{"start":{"row":11,"column":0},"end":{"row":11,"column":1},"action":"insert","lines":["f"],"id":23},{"start":{"row":11,"column":1},"end":{"row":11,"column":2},"action":"insert","lines":["r"]},{"start":{"row":11,"column":2},"end":{"row":11,"column":3},"action":"insert","lines":["o"]},{"start":{"row":11,"column":3},"end":{"row":11,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":11,"column":4},"end":{"row":11,"column":5},"action":"insert","lines":[" 
"],"id":24},{"start":{"row":11,"column":5},"end":{"row":11,"column":6},"action":"insert","lines":["."]},{"start":{"row":11,"column":6},"end":{"row":11,"column":7},"action":"insert","lines":["e"]}],[{"start":{"row":11,"column":6},"end":{"row":11,"column":7},"action":"remove","lines":["e"],"id":25}],[{"start":{"row":11,"column":6},"end":{"row":11,"column":7},"action":"insert","lines":["g"],"id":26},{"start":{"row":11,"column":7},"end":{"row":11,"column":8},"action":"insert","lines":["e"]}],[{"start":{"row":11,"column":8},"end":{"row":11,"column":9},"action":"insert","lines":["t"],"id":27}],[{"start":{"row":11,"column":9},"end":{"row":11,"column":10},"action":"insert","lines":["r"],"id":28},{"start":{"row":11,"column":10},"end":{"row":11,"column":11},"action":"insert","lines":["e"]},{"start":{"row":11,"column":11},"end":{"row":11,"column":12},"action":"insert","lines":["a"]}],[{"start":{"row":11,"column":11},"end":{"row":11,"column":12},"action":"remove","lines":["a"],"id":29},{"start":{"row":11,"column":10},"end":{"row":11,"column":11},"action":"remove","lines":["e"]},{"start":{"row":11,"column":9},"end":{"row":11,"column":10},"action":"remove","lines":["r"]}],[{"start":{"row":11,"column":9},"end":{"row":11,"column":10},"action":"insert","lines":["_"],"id":30},{"start":{"row":11,"column":10},"end":{"row":11,"column":11},"action":"insert","lines":["r"]},{"start":{"row":11,"column":11},"end":{"row":11,"column":12},"action":"insert","lines":["e"]},{"start":{"row":11,"column":12},"end":{"row":11,"column":13},"action":"insert","lines":["a"]}],[{"start":{"row":11,"column":6},"end":{"row":11,"column":13},"action":"remove","lines":["get_rea"],"id":31},{"start":{"row":11,"column":6},"end":{"row":11,"column":26},"action":"insert","lines":["get_reaction_metrics"]}],[{"start":{"row":11,"column":26},"end":{"row":11,"column":27},"action":"insert","lines":[" "],"id":32},{"start":{"row":11,"column":27},"end":{"row":11,"column":28},"action":"insert","lines":["i"]},{"start":{"row":11,"column":28},"end":{"row":11,"column":29},"action":"insert","lines":["m"]},{"start":{"row":11,"column":29},"end":{"row":11,"column":30},"action":"insert","lines":["p"]},{"start":{"row":11,"column":30},"end":{"row":11,"column":31},"action":"insert","lines":["o"]},{"start":{"row":11,"column":31},"end":{"row":11,"column":32},"action":"insert","lines":["r"]},{"start":{"row":11,"column":32},"end":{"row":11,"column":33},"action":"insert","lines":["t"]}],[{"start":{"row":11,"column":33},"end":{"row":11,"column":34},"action":"insert","lines":[" 
"],"id":33},{"start":{"row":11,"column":34},"end":{"row":11,"column":35},"action":"insert","lines":["r"]},{"start":{"row":11,"column":35},"end":{"row":11,"column":36},"action":"insert","lines":["e"]},{"start":{"row":11,"column":36},"end":{"row":11,"column":37},"action":"insert","lines":["a"]}],[{"start":{"row":11,"column":36},"end":{"row":11,"column":37},"action":"remove","lines":["a"],"id":34},{"start":{"row":11,"column":35},"end":{"row":11,"column":36},"action":"remove","lines":["e"]},{"start":{"row":11,"column":34},"end":{"row":11,"column":35},"action":"remove","lines":["r"]}],[{"start":{"row":11,"column":34},"end":{"row":11,"column":35},"action":"insert","lines":["g"],"id":35},{"start":{"row":11,"column":35},"end":{"row":11,"column":36},"action":"insert","lines":["e"]},{"start":{"row":11,"column":36},"end":{"row":11,"column":37},"action":"insert","lines":["t"]},{"start":{"row":11,"column":37},"end":{"row":11,"column":38},"action":"insert","lines":["_"]}],[{"start":{"row":11,"column":38},"end":{"row":11,"column":39},"action":"insert","lines":["r"],"id":36},{"start":{"row":11,"column":39},"end":{"row":11,"column":40},"action":"insert","lines":["e"]},{"start":{"row":11,"column":40},"end":{"row":11,"column":41},"action":"insert","lines":["a"]}],[{"start":{"row":11,"column":34},"end":{"row":11,"column":41},"action":"remove","lines":["get_rea"],"id":37},{"start":{"row":11,"column":34},"end":{"row":11,"column":54},"action":"insert","lines":["get_reaction_metrics"]}],[{"start":{"row":20,"column":38},"end":{"row":21,"column":0},"action":"insert","lines":["",""],"id":38},{"start":{"row":21,"column":0},"end":{"row":21,"column":11},"action":"insert","lines":[" "]}],[{"start":{"row":21,"column":11},"end":{"row":21,"column":13},"action":"insert","lines":["''"],"id":39}],[{"start":{"row":21,"column":12},"end":{"row":21,"column":32},"action":"insert","lines":["get_reaction_metrics"],"id":40}],[{"start":{"row":21,"column":33},"end":{"row":21,"column":34},"action":"insert","lines":[","],"id":41}],[{"start":{"row":21,"column":34},"end":{"row":22,"column":0},"action":"insert","lines":["",""],"id":42},{"start":{"row":22,"column":0},"end":{"row":22,"column":11},"action":"insert","lines":[" "]}],[{"start":{"row":11,"column":54},"end":{"row":12,"column":0},"action":"insert","lines":["",""],"id":43},{"start":{"row":12,"column":0},"end":{"row":12,"column":1},"action":"insert","lines":["f"]},{"start":{"row":12,"column":1},"end":{"row":12,"column":2},"action":"insert","lines":["r"]},{"start":{"row":12,"column":2},"end":{"row":12,"column":3},"action":"insert","lines":["o"]},{"start":{"row":12,"column":3},"end":{"row":12,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":12,"column":4},"end":{"row":12,"column":5},"action":"insert","lines":[" "],"id":44},{"start":{"row":12,"column":5},"end":{"row":12,"column":6},"action":"insert","lines":["."]},{"start":{"row":12,"column":6},"end":{"row":12,"column":7},"action":"insert","lines":["d"]},{"start":{"row":12,"column":7},"end":{"row":12,"column":8},"action":"insert","lines":["e"]},{"start":{"row":12,"column":8},"end":{"row":12,"column":9},"action":"insert","lines":["l"]}],[{"start":{"row":12,"column":6},"end":{"row":12,"column":9},"action":"remove","lines":["del"],"id":45},{"start":{"row":12,"column":6},"end":{"row":12,"column":17},"action":"insert","lines":["delete_post"]}],[{"start":{"row":12,"column":17},"end":{"row":12,"column":18},"action":"insert","lines":[" 
"],"id":46},{"start":{"row":12,"column":18},"end":{"row":12,"column":19},"action":"insert","lines":["i"]},{"start":{"row":12,"column":19},"end":{"row":12,"column":20},"action":"insert","lines":["m"]},{"start":{"row":12,"column":20},"end":{"row":12,"column":21},"action":"insert","lines":["p"]},{"start":{"row":12,"column":21},"end":{"row":12,"column":22},"action":"insert","lines":["o"]},{"start":{"row":12,"column":22},"end":{"row":12,"column":23},"action":"insert","lines":["r"]},{"start":{"row":12,"column":23},"end":{"row":12,"column":24},"action":"insert","lines":["t"]}],[{"start":{"row":12,"column":24},"end":{"row":12,"column":25},"action":"insert","lines":[" "],"id":47},{"start":{"row":12,"column":25},"end":{"row":12,"column":26},"action":"insert","lines":["d"]},{"start":{"row":12,"column":26},"end":{"row":12,"column":27},"action":"insert","lines":["e"]}],[{"start":{"row":12,"column":25},"end":{"row":12,"column":27},"action":"remove","lines":["de"],"id":48},{"start":{"row":12,"column":25},"end":{"row":12,"column":36},"action":"insert","lines":["delete_post"]}],[{"start":{"row":22,"column":34},"end":{"row":23,"column":0},"action":"insert","lines":["",""],"id":49},{"start":{"row":23,"column":0},"end":{"row":23,"column":11},"action":"insert","lines":[" "]}],[{"start":{"row":23,"column":11},"end":{"row":23,"column":13},"action":"insert","lines":["\"\""],"id":50}],[{"start":{"row":23,"column":12},"end":{"row":23,"column":23},"action":"insert","lines":["delete_post"],"id":51}],[{"start":{"row":23,"column":23},"end":{"row":23,"column":24},"action":"remove","lines":["\""],"id":52}],[{"start":{"row":23,"column":11},"end":{"row":23,"column":12},"action":"remove","lines":["\""],"id":53}],[{"start":{"row":23,"column":11},"end":{"row":23,"column":12},"action":"insert","lines":["'"],"id":54}],[{"start":{"row":23,"column":23},"end":{"row":23,"column":24},"action":"insert","lines":["'"],"id":55},{"start":{"row":23,"column":24},"end":{"row":23,"column":25},"action":"insert","lines":[","]}],[{"start":{"row":23,"column":25},"end":{"row":24,"column":0},"action":"insert","lines":["",""],"id":56},{"start":{"row":24,"column":0},"end":{"row":24,"column":11},"action":"insert","lines":[" "]}],[{"start":{"row":24,"column":11},"end":{"row":24,"column":13},"action":"insert","lines":["''"],"id":57}],[{"start":{"row":12,"column":36},"end":{"row":13,"column":0},"action":"insert","lines":["",""],"id":58}],[{"start":{"row":13,"column":0},"end":{"row":14,"column":0},"action":"insert","lines":["",""],"id":59}],[{"start":{"row":14,"column":0},"end":{"row":15,"column":0},"action":"insert","lines":["from .delete_post import delete_post",""],"id":60}],[{"start":{"row":27,"column":12},"end":{"row":27,"column":50},"action":"insert","lines":["get_posts_with_more_positive_reactions"],"id":61}],[{"start":{"row":27,"column":51},"end":{"row":27,"column":52},"action":"insert","lines":[","],"id":62}],[{"start":{"row":27,"column":52},"end":{"row":28,"column":0},"action":"insert","lines":["",""],"id":63},{"start":{"row":28,"column":0},"end":{"row":28,"column":11},"action":"insert","lines":[" 
"]}],[{"start":{"row":14,"column":6},"end":{"row":14,"column":17},"action":"remove","lines":["delete_post"],"id":64},{"start":{"row":14,"column":6},"end":{"row":14,"column":44},"action":"insert","lines":["get_posts_with_more_positive_reactions"]}],[{"start":{"row":14,"column":52},"end":{"row":14,"column":63},"action":"remove","lines":["delete_post"],"id":65},{"start":{"row":14,"column":52},"end":{"row":14,"column":90},"action":"insert","lines":["get_posts_with_more_positive_reactions"]}],[{"start":{"row":28,"column":11},"end":{"row":28,"column":13},"action":"insert","lines":["''"],"id":66}],[{"start":{"row":28,"column":12},"end":{"row":28,"column":37},"action":"insert","lines":["get_posts_reacted_by_user"],"id":67}],[{"start":{"row":28,"column":38},"end":{"row":28,"column":39},"action":"insert","lines":[","],"id":68}],[{"start":{"row":28,"column":39},"end":{"row":29,"column":0},"action":"insert","lines":["",""],"id":69},{"start":{"row":29,"column":0},"end":{"row":29,"column":11},"action":"insert","lines":[" "]}],[{"start":{"row":15,"column":0},"end":{"row":15,"column":1},"action":"insert","lines":["f"],"id":70},{"start":{"row":15,"column":1},"end":{"row":15,"column":2},"action":"insert","lines":["r"]},{"start":{"row":15,"column":2},"end":{"row":15,"column":3},"action":"insert","lines":["o"]},{"start":{"row":15,"column":3},"end":{"row":15,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":15,"column":4},"end":{"row":15,"column":5},"action":"insert","lines":[" "],"id":71},{"start":{"row":15,"column":5},"end":{"row":15,"column":6},"action":"insert","lines":["."]}],[{"start":{"row":15,"column":6},"end":{"row":15,"column":31},"action":"insert","lines":["get_posts_reacted_by_user"],"id":72}],[{"start":{"row":15,"column":31},"end":{"row":15,"column":32},"action":"insert","lines":[" "],"id":73},{"start":{"row":15,"column":32},"end":{"row":15,"column":33},"action":"insert","lines":["i"]},{"start":{"row":15,"column":33},"end":{"row":15,"column":34},"action":"insert","lines":["m"]},{"start":{"row":15,"column":34},"end":{"row":15,"column":35},"action":"insert","lines":["p"]},{"start":{"row":15,"column":35},"end":{"row":15,"column":36},"action":"insert","lines":["r"]},{"start":{"row":15,"column":36},"end":{"row":15,"column":37},"action":"insert","lines":["t"]}],[{"start":{"row":15,"column":36},"end":{"row":15,"column":37},"action":"remove","lines":["t"],"id":74},{"start":{"row":15,"column":35},"end":{"row":15,"column":36},"action":"remove","lines":["r"]}],[{"start":{"row":15,"column":35},"end":{"row":15,"column":36},"action":"insert","lines":["o"],"id":75},{"start":{"row":15,"column":36},"end":{"row":15,"column":37},"action":"insert","lines":["r"]},{"start":{"row":15,"column":37},"end":{"row":15,"column":38},"action":"insert","lines":["t"]}],[{"start":{"row":15,"column":38},"end":{"row":15,"column":39},"action":"insert","lines":[" 
"],"id":76}],[{"start":{"row":15,"column":39},"end":{"row":15,"column":64},"action":"insert","lines":["get_posts_reacted_by_user"],"id":77}],[{"start":{"row":15,"column":64},"end":{"row":16,"column":0},"action":"insert","lines":["",""],"id":80},{"start":{"row":16,"column":0},"end":{"row":16,"column":1},"action":"insert","lines":["f"]},{"start":{"row":16,"column":1},"end":{"row":16,"column":2},"action":"insert","lines":["o"]},{"start":{"row":16,"column":2},"end":{"row":16,"column":3},"action":"insert","lines":["m"]}],[{"start":{"row":16,"column":2},"end":{"row":16,"column":3},"action":"remove","lines":["m"],"id":81},{"start":{"row":16,"column":1},"end":{"row":16,"column":2},"action":"remove","lines":["o"]}],[{"start":{"row":16,"column":1},"end":{"row":16,"column":2},"action":"insert","lines":["r"],"id":82},{"start":{"row":16,"column":2},"end":{"row":16,"column":3},"action":"insert","lines":["o"]},{"start":{"row":16,"column":3},"end":{"row":16,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":16,"column":4},"end":{"row":16,"column":5},"action":"insert","lines":[" "],"id":83},{"start":{"row":16,"column":5},"end":{"row":16,"column":6},"action":"insert","lines":["."]}],[{"start":{"row":30,"column":11},"end":{"row":30,"column":13},"action":"insert","lines":["''"],"id":84}],[{"start":{"row":30,"column":12},"end":{"row":30,"column":33},"action":"insert","lines":["get_reactions_to_post"],"id":85}],[{"start":{"row":16,"column":6},"end":{"row":16,"column":27},"action":"insert","lines":["get_reactions_to_post"],"id":86}],[{"start":{"row":16,"column":27},"end":{"row":16,"column":28},"action":"insert","lines":[" "],"id":87},{"start":{"row":16,"column":28},"end":{"row":16,"column":29},"action":"insert","lines":["i"]},{"start":{"row":16,"column":29},"end":{"row":16,"column":30},"action":"insert","lines":["m"]},{"start":{"row":16,"column":30},"end":{"row":16,"column":31},"action":"insert","lines":["p"]},{"start":{"row":16,"column":31},"end":{"row":16,"column":32},"action":"insert","lines":["o"]},{"start":{"row":16,"column":32},"end":{"row":16,"column":33},"action":"insert","lines":["r"]},{"start":{"row":16,"column":33},"end":{"row":16,"column":34},"action":"insert","lines":["t"]}],[{"start":{"row":16,"column":34},"end":{"row":16,"column":35},"action":"insert","lines":[" "],"id":88}],[{"start":{"row":16,"column":35},"end":{"row":16,"column":56},"action":"insert","lines":["get_reactions_to_post"],"id":89}],[{"start":{"row":16,"column":56},"end":{"row":17,"column":0},"action":"insert","lines":["",""],"id":90},{"start":{"row":17,"column":0},"end":{"row":18,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":36,"column":0},"end":{"row":42,"column":12},"action":"remove","lines":["sb=['get_total_reaction_count',"," 'get_reaction_metrics',"," 'delete_post',"," 'get_posts_with_more_positive_reactions',"," 'get_posts_reacted_by_user',"," 'get_reactions_to_post',"," ]"],"id":91},{"start":{"row":35,"column":0},"end":{"row":36,"column":0},"action":"remove","lines":["",""]},{"start":{"row":34,"column":12},"end":{"row":35,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":33,"column":10},"end":{"row":33,"column":11},"action":"remove","lines":[" "],"id":92},{"start":{"row":33,"column":9},"end":{"row":33,"column":10},"action":"remove","lines":[" "]},{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"remove","lines":[" "]},{"start":{"row":33,"column":4},"end":{"row":33,"column":8},"action":"remove","lines":[" 
"]},{"start":{"row":33,"column":0},"end":{"row":33,"column":4},"action":"remove","lines":[" "]},{"start":{"row":32,"column":34},"end":{"row":33,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":14,"column":51},"end":{"row":14,"column":52},"action":"remove","lines":[" "],"id":93}],[{"start":{"row":14,"column":51},"end":{"row":14,"column":52},"action":"insert","lines":["\\"],"id":94}],[{"start":{"row":14,"column":52},"end":{"row":15,"column":0},"action":"insert","lines":["",""],"id":95}],[{"start":{"row":15,"column":0},"end":{"row":15,"column":11},"action":"insert","lines":[" "],"id":96}]]},"ace":{"folds":[],"scrolltop":80.6879657657598,"scrollleft":0,"selection":{"start":{"row":15,"column":11},"end":{"row":15,"column":11},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":3,"state":"start","mode":"ace/mode/python"}},"timestamp":1588912940229,"hash":"aebebb4973aa457238f537967f37636bcaf98c26"}
|
[
"r151865@rguktrkv.ac.in"
] |
r151865@rguktrkv.ac.in
|
0a2ef5825e62d7df2aea484feabf55d8d106bc18
|
225a5b040c0fdb758b5077fe779315339c46d828
|
/detect/lib/prepare_training_data/txt2voc.py
|
a9d2a5dc0bc735793aa56d09e83c83c2b6458e7c
|
[] |
no_license
|
RobotWithCV/ocrui
|
b9b37cf238f97cddd70024bc27946754f476bbd2
|
459c82e8e31831533c4b60ac3e90b90ce68b9a9e
|
refs/heads/master
| 2020-05-19T18:31:33.281564
| 2018-08-15T12:34:18
| 2018-08-15T12:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,584
|
py
|
import os
import numpy as np
import math
import cv2 as cv
root = '/home/deeple/project/ctpn/text-detection-ctpn/data/0730/'
path = root + 'errdata'
gt_path = root + 'errlabel_txt'
out_path = 're_image'
if not os.path.exists(out_path):
os.makedirs(out_path)
files = os.listdir(path)
files.sort()
#files=files[:100]
for file in files:
_, basename = os.path.split(file)
if basename.lower().split('.')[-1] not in ['jpg', 'png']:
continue
stem, ext = os.path.splitext(basename)
    gt_file = os.path.join(gt_path, stem + '.txt')
img_path = os.path.join(path, file)
print(img_path)
img = cv.imread(img_path)
img_size = img.shape
im_size_min = np.min(img_size[0:2])
im_size_max = np.max(img_size[0:2])
# im_scale = float(600) / float(im_size_min)
# if np.round(im_scale * im_size_max) > 1200:
# im_scale = float(1200) / float(im_size_max)
# re_im = cv.resize(img, None, None, fx=im_scale, fy=im_scale, interpolation=cv.INTER_LINEAR)
# re_size = re_im.shape
cv.imwrite(os.path.join(out_path, stem) + '.jpg', img)
with open(gt_file, 'r') as f:
lines = f.readlines()
for line in lines:
tmp = line.strip('\n').split(',')
xmin, ymin, xmax, ymax = [int(i) for i in tmp]
width = xmax - xmin
height = ymax - ymin
# reimplement
step = 16.0
x_left = []
x_right = []
x_left.append(xmin)
x_left_start = int(math.ceil(xmin / 16.0) * 16.0)
if x_left_start == xmin:
x_left_start = xmin + 16
for i in np.arange(x_left_start, xmax, 16):
x_left.append(i)
x_left = np.array(x_left)
x_right.append(x_left_start - 1)
for i in range(1, len(x_left) - 1):
x_right.append(x_left[i] + 15)
x_right.append(xmax)
x_right = np.array(x_right)
idx = np.where(x_left == x_right)
x_left = np.delete(x_left, idx, axis=0)
x_right = np.delete(x_right, idx, axis=0)
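            # e.g. a box with x in [10, 50] becomes strips (10,15) (16,31) (32,47) (48,50):
            # 16-px bins aligned to the CTPN anchor grid, keeping the partial end strips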
if not os.path.exists('label_tmp'):
os.makedirs('label_tmp')
with open(os.path.join('label_tmp', stem) + '.txt', 'a') as f:
for i in range(len(x_left)):
f.writelines("text\t")
f.writelines(str(int(x_left[i])))
f.writelines("\t")
f.writelines(str(int(ymin)))
f.writelines("\t")
f.writelines(str(int(x_right[i])))
f.writelines("\t")
f.writelines(str(int(ymax)))
f.writelines("\n")
|
[
"651238355@qq.com"
] |
651238355@qq.com
|
67e65c9303e2f35244f9a50b2a85e1d3d3956dbb
|
82b69271d5473d538fb9230c0cc316d5cd5bd5f3
|
/tests/settings.py
|
b123d7fec0420117c939a67c94b21990a38fb8a7
|
[
"MIT"
] |
permissive
|
aaloy/django-simple-options
|
3ade7369eaebd15bc4d0af7f1d1ba458b15b7c22
|
3199faf5da34e6cb05fc7dab4890a6f32b1f5217
|
refs/heads/master
| 2020-06-12T02:33:45.489231
| 2019-06-28T15:43:43
| 2019-06-28T15:43:43
| 194,169,633
| 0
| 0
|
MIT
| 2019-06-27T22:12:48
| 2019-06-27T22:12:48
| null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
SECRET_KEY = "dummy"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"options",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
|
[
"marcosgabarda@gmail.com"
] |
marcosgabarda@gmail.com
|
814fba12e0e1c92414fe77f4e1ff2cfc2fc371a4
|
5f2103b1083b088aed3f3be145d01a770465c762
|
/187. Repeated DNA Sequences.py
|
6a64da5e4c341cebe26e62b83c60533052f79460
|
[] |
no_license
|
supersj/LeetCode
|
5605c9bcb5ddcaa83625de2ad9e06c3485220019
|
690adf05774a1c500d6c9160223dab7bcc38ccc1
|
refs/heads/master
| 2021-01-17T17:23:39.585738
| 2017-02-27T15:08:42
| 2017-02-27T15:08:42
| 65,526,089
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
# All DNA is composed of a series of nucleotides abbreviated as A, C, G, and T, for example: "ACGAATTCCG". When studying DNA, it is sometimes useful to identify repeated sequences within the DNA.
#
# Write a function to find all the 10-letter-long sequences (substrings) that occur more than once in a DNA molecule.
#
# For example, given s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT",
# return: ["AAAAACCCCC", "CCCCCAAAAA"].
class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
result = {}
hashmap = {}
slen = len(s)
if slen < 10:
return []
        for i in range(slen - 10 + 1):
            tmp = s[i:i+10]
            # dict membership is O(1); avoid scanning the .keys() lists Python 2 builds
            if tmp in hashmap and tmp not in result:
                result[tmp] = 1
            else:
                hashmap[tmp] = 1
return result.keys()
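# Example (Python 2):
#   Solution().findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT")
# returns ['AAAAACCCCC', 'CCCCCAAAAA'] (dict-backed, so ordering may vary).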
|
[
"ml@ml.ml"
] |
ml@ml.ml
|
df205bbcb23d0a5a2204480913e333ac31bcc619
|
e9dda48b620484a50d5685c8f7f17284bb42a086
|
/rig/zbw_shapeClash.py
|
9c63afcc20a48886d47eccaf4884bf3e2965d2a2
|
[] |
no_license
|
zethwillie/zTools
|
5b7b1903c18e4e619b07f1eb92c4c3f24c0f1a54
|
04bc49af814561264f08b2af2b2a503a0d63440c
|
refs/heads/master
| 2022-08-26T19:47:32.844348
| 2022-07-18T15:12:48
| 2022-07-18T15:12:48
| 53,161,975
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
"""zbw_shapeClash"""
import maya.cmds as cmds
import zTools.resources.zbw_clash as clash
reload(clash)
# get all shape node clashes
def shapeClash():
clashes = clash.detectShapeClashes(fixClashes = False)
print("in Shape Clashes:", clashes)
# put them all into a list
# button to fix the shape nodes for each clash incident
|
[
"zethwillie@gmail.com"
] |
zethwillie@gmail.com
|
c39c950b162896d3455479b623469d98e24be1df
|
830e2b774c2884252fed369ba38f11636890d33d
|
/www/cgi-bin/register.py
|
a9d93da51e56045f272bb4ec7324c26c9d624201
|
[] |
no_license
|
AckerStewie/todo
|
419fddc9bedc6bf9a268ce7f28b69e24aa5882e7
|
7d0582bd8d02e5a3b633bad7b109448310ffa544
|
refs/heads/master
| 2020-06-28T03:56:18.175884
| 2019-08-02T00:32:27
| 2019-08-02T00:32:27
| 197,731,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 980
|
py
|
import psycopg2
#ใใผใฟใใผใน ๆฅ็ถ
conn = psycopg2.connect("dbname=postgres user=postgres password=password")
#ใซใผใฝใซๅๅพ
cur = conn.cursor()
#selectๆใฎๅฎ่ก
cur.execute("SELECT * FROM priorites;")
ERA = []
#for row in cur:
# ERA.append((row[1].***,row[0))
print("""
<!DOCTYPE html>
<html lang="ja">
<head>
<meta charset="UTF-8" />
<title>ๆฐ่ฆ็ป้ฒ</title>
<link rel="stylesheet" href="../css/register.css" />
</head>
<body>
<form action="login.py" method="post">
<h1>ๆฐ่ฆ็ป้ฒ</h1>
<p>ใฆใผใถใผID</p>
<input type="text" placeholder="ใฆใผใถใผIDใๅ
ฅๅใใฆใใ ใใใ" name="user_id"/>
<p>ใในใฏใผใ</p>
<input type="text" placeholder="ใในใฏใผใใๅ
ฅๅใใฆใใ ใใใ" name="password"/>
<p>ใในใฏใผใๅๅ
ฅๅ</p>
<input type="text" placeholder="ๅๅบฆใในใฏใผใใๅ
ฅๅใใฆใใ ใใใ" name="re_password"/>
<button>ๆฑบๅฎ</button>
</form>
</body>
</html>
""")
#try:
#except:
|
[
"fkiseexz5@i.softbank.jp"
] |
fkiseexz5@i.softbank.jp
|
330b4fce52616fc3aeac0ed2bbdef3bd4b5793c4
|
d725745f5c6b4ad99399aa50f368db39f5046f81
|
/angr_platforms/tricore/bo_instr.py
|
9402711fd0eabc663d5ad0d234880096523d9d7f
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr-platforms
|
6816d777ea4696af05290613a490e91b8daa79ea
|
06db4e6a594af47aaeb0a5071f2cdb9a8c30f7f5
|
refs/heads/master
| 2023-03-05T10:15:20.783462
| 2023-02-20T18:38:12
| 2023-02-20T18:38:12
| 86,003,468
| 60
| 28
|
BSD-2-Clause
| 2023-08-31T19:50:46
| 2017-03-23T22:28:04
|
Python
|
UTF-8
|
Python
| false
| false
| 57,764
|
py
|
#!/usr/bin/env python3
""" bo_instr.py
Implementation of tricore BO format instructions.
"""
import sys
from pyvex.lifting.util import Type, Instruction
import bitstring
from .rtl import reverse16, extend_to_32_bits
from .logger import log_this
class BO_LD_09_Instructions(Instruction):
""" A class for LOAD instruction with OP=09 """
name = 'BO_LD_09_Instructions ...'
op = "{0}{1}".format(bin(0)[2:].zfill(4), bin(9)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
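        # BO-format fields are not contiguous in the instruction word: off10 and
        # op2 are reassembled below from the scattered nibbles e/f/c/d before decoding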
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
off10 = bitstring.BitArray(bin="{0}{1}".format(tmp[0:4].bin,
tmp[10:16].bin).zfill(12))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:10]))
op2 = int(op2.bin, 2)
b = tmp[16:20]
a = tmp[20:]
if op2 == 0x0:
self.name = 'BO_LD.B_PostInc'
elif op2 == 0x1:
self.name = 'BO_LD.BU_PostInc'
elif op2 == 0x2:
self.name = 'BO_LD.H_PostInc'
elif op2 == 0x3:
self.name = 'BO_LD.HU_PostInc'
elif op2 == 0x4:
self.name = 'BO_LD.W_PostInc'
elif op2 == 0x5:
self.name = 'BO_LD.D_PostInc'
elif op2 == 0x6:
self.name = 'BO_LD.A_PostInc'
elif op2 == 0x7:
self.name = 'BO_LD.DA_PostInc'
elif op2 == 0x8:
self.name = 'BO_LD.Q_PostInc'
elif op2 == 0x10:
self.name = 'BO_LD.B_PreInc'
elif op2 == 0x11:
self.name = 'BO_LD.BU_PreInc'
elif op2 == 0x12:
self.name = 'BO_LD.H_PreInc'
elif op2 == 0x13:
self.name = 'BO_LD.HU_PreInc'
elif op2 == 0x14:
self.name = 'BO_LD.W_PreInc'
elif op2 == 0x15:
self.name = 'BO_LD.D_PreInc'
elif op2 == 0x16:
self.name = 'BO_LD.A_PreInc'
elif op2 == 0x17:
self.name = 'BO_LD.DA_PreInc'
elif op2 == 0x18:
self.name = 'BO_LD.Q_PreInc'
elif op2 == 0x20:
self.name = 'BO_LD.B_BaseShortOffset'
elif op2 == 0x21:
self.name = 'BO_LD.BU_BaseShortOffset'
elif op2 == 0x22:
self.name = 'BO_LD.H_BaseShortOffset'
elif op2 == 0x23:
self.name = 'BO_LD.HU_BaseShortOffset'
elif op2 == 0x24:
self.name = 'BO_LD.W_BaseShortOffset'
elif op2 == 0x25:
self.name = 'BO_LD.D_BaseShortOffset'
elif op2 == 0x26:
self.name = 'BO_LD.A_BaseShortOffset'
elif op2 == 0x27:
self.name = 'BO_LD.DA_BaseShortOffset'
elif op2 == 0x28:
self.name = 'BO_LD.Q_BaseShortOffset'
else:
self.name = "UNKNOWN"
data = {"a": int(a.hex, 16),
"b": int(b.hex, 16),
"off10": int(off10.hex, 16),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_sign_ext_offset(self):
return self.constant(self.data['off10'], Type.int_10).cast_to(Type.int_32, signed=True)
def get_a_b(self):
return self.get("a{0}".format(self.data['b']), Type.int_32)
def fetch_operands(self):
return [self.get_a_b(), self.get_sign_ext_offset()]
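    # Addressing modes dispatched below (per the op2 decode in parse):
    #   PostInc:  load from A[b], then A[b] += sign_ext(off10)
    #   PreInc:   A[b] += sign_ext(off10) first, then load from the new address
    #   BaseShortOffset: load from A[b] + sign_ext(off10); A[b] is unchanged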
def compute_result(self, *args):
a_b = args[0]
sign_ext_offset = args[1]
result = ""
op2 = self.data['op2']
if op2 == 0x0: # BO_LD.B_PostInc
ea = a_b
result = self.load(ea, Type.int_8).cast_to(Type.int_32, signed=True)
self.put(result, "d{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x1: # BO_LD.BU_PostInc
ea = a_b
result = self.load(ea, Type.int_8).cast_to(Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x2: # BO_LD.H_PostInc
ea = a_b
result = self.load(ea, Type.int_16).cast_to(Type.int_32, signed=True)
self.put(result, "d{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x3: # BO_LD.HU_PostInc
ea = a_b
result = self.load(ea, Type.int_16).cast_to(Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x4: # BO_LD.W_PostInc
ea = a_b
result = self.load(ea, Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x5: # BO_LD.D_PostInc
ea = a_b
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "d{0}".format(self.data['a']))
self.put(result_1, "d{0}".format(self.data['a']+1))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x6: # BO_LD.A_PostInc
ea = a_b
result = self.load(ea, Type.int_32)
self.put(result, "a{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x7: # BO_LD.DA_PostInc
ea = a_b
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "a{0}".format(self.data['a']))
self.put(result_1, "a{0}".format(self.data['a']+1))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x8: # BO_LD.Q_PostInc
ea = a_b
result = self.load(ea, Type.int_16).cast_to(Type.int_32) << 16
self.put(result, "d{0}".format(self.data['a']))
result_2 = ea + sign_ext_offset # increment
self.put(result_2, "a{0}".format(self.data['b']))
elif op2 == 0x10: # BO_LD.B_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_8).cast_to(Type.int_32, signed=True)
self.put(result, "d{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x11: # BO_LD.BU_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_8).cast_to(Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x12: # BO_LD.H_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_16).cast_to(Type.int_32, signed=True)
self.put(result, "d{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x13: # BO_LD.HU_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_16).cast_to(Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x14: # BO_LD.W_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x15: # BO_LD.D_PreInc
ea = a_b + sign_ext_offset
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "d{0}".format(self.data['a']))
self.put(result_1, "d{0}".format(self.data['a']+1))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x16: # BO_LD.A_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_32)
self.put(result, "a{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x17: # BO_LD.DA_PreInc
ea = a_b + sign_ext_offset
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "a{0}".format(self.data['a']))
self.put(result_1, "a{0}".format(self.data['a']+1))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x18: # BO_LD.Q_PreInc
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_16).cast_to(Type.int_32) << 16
self.put(result, "d{0}".format(self.data['a']))
self.put(ea, "a{0}".format(self.data['b']))
elif op2 == 0x20: # BO_LD.B_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_8).cast_to(Type.int_32, signed=True)
self.put(result, "d{0}".format(self.data['a']))
elif op2 == 0x21: # BO_LD.BU_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_8).cast_to(Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
elif op2 == 0x22: # BO_LD.H_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_16).cast_to(Type.int_32, signed=True)
self.put(result, "d{0}".format(self.data['a']))
elif op2 == 0x23: # BO_LD.HU_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_16).cast_to(Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
elif op2 == 0x24: # BO_LD.W_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_32)
self.put(result, "d{0}".format(self.data['a']))
elif op2 == 0x25: # BO_LD.D_BaseShortOffset
ea = a_b + sign_ext_offset
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "d{0}".format(self.data['a']))
self.put(result_1, "d{0}".format(self.data['a']+1))
elif op2 == 0x26: # BO_LD.A_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_32)
self.put(result, "a{0}".format(self.data['a']))
elif op2 == 0x27: # BO_LD.DA_BaseShortOffset
ea = a_b + sign_ext_offset
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "a{0}".format(self.data['a']))
self.put(result_1, "a{0}".format(self.data['a']+1))
elif op2 == 0x28: # BO_LD.Q_BaseShortOffset
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_16).cast_to(Type.int_32) << 16
self.put(result, "d{0}".format(self.data['a']))
class BO_LD_29_Instructions(Instruction):
""" A class for LOAD instructions with OP=29 """
name = 'BO_LD_29_Instructions ...'
op = "{0}{1}".format(bin(2)[2:].zfill(4), bin(9)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
off10 = bitstring.BitArray(bin="{0}{1}".format(tmp[0:4].bin,
tmp[10:16].bin))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:10]))
op2 = int(op2.bin, 2)
b = tmp[16:20]
a = tmp[20:]
if op2 == 0x0:
self.name = 'BO_LD.B_BitRev'
elif op2 == 0x1:
self.name = 'BO_LD.BU_BitRev'
elif op2 == 0x2:
self.name = 'BO_LD.H_BitRev'
elif op2 == 0x3:
self.name = 'BO_LD.HU_BitRev'
elif op2 == 0x4:
self.name = 'BO_LD.W_BitRev'
elif op2 == 0x5:
self.name = 'BO_LD.D_BitRev'
elif op2 == 0x6:
self.name = 'BO_LD.A_BitRev'
elif op2 == 0x7:
self.name = 'BO_LD.DA_BitRev'
elif op2 == 0x8:
self.name = 'BO_LD.Q_BitRev'
elif op2 == 0x10:
self.name = 'BO_LD.B_Circ'
elif op2 == 0x11:
self.name = 'BO_LD.BU_Circ'
elif op2 == 0x12:
self.name = 'BO_LD.H_Circ'
elif op2 == 0x13:
self.name = 'BO_LD.HU_Circ'
elif op2 == 0x14:
self.name = 'BO_LD.W_Circ'
elif op2 == 0x15:
self.name = 'BO_LD.D_Circ'
elif op2 == 0x16:
self.name = 'BO_LD.A_Circ'
elif op2 == 0x17:
self.name = 'BO_LD.DA_Circ'
elif op2 == 0x18:
self.name = 'BO_LD.Q_Circ'
else:
self.name = "UNKNOWN"
data = {"a": int(a.hex, 16),
"b": int(b.hex, 16),
"off10": int(off10.bin, 2),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_dst_reg(self):
if self.data['op2'] in ["0x6", "0x16"]:
return "a{0}".format(self.data['a'])
return "d{0}".format(self.data['a'])
def get_sign_ext_offset(self):
return self.constant(self.data['off10'], Type.int_10).cast_to(Type.int_32, signed=True)
def get_a_b_1(self):
return self.get("a{0}".format(self.data['b']+1), Type.int_32)
def get_a_b(self):
return self.get("a{0}".format(self.data['b']), Type.int_32)
def fetch_operands(self):
return self.get_a_b(), self.get_a_b_1(), self.get_sign_ext_offset()
def compute_result(self, *args):
a_b = args[0]
a_b_1 = args[1]
sign_ext_offset = args[2]
result = ""
op2 = self.data['op2']
if op2 == 0x0: # BO_LD.B_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_8).cast_to(Type.int_32, signed=True)
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
        elif op2 == 0x1: # BO_LD.BU_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_8)
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x2: # BO_LD.H_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_16).cast_to(Type.int_32, signed=True)
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x3: # BO_LD.HU_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_16)
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x4: # BO_LD.W_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_32)
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x5: # BO_LD.D_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "d{0}".format(self.data['a']))
self.put(result_1, "d{0}".format(self.data['a']+1))
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x6: # BO_LD.A_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_32)
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x7: # BO_LD.DA_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result_0 = self.load(ea, Type.int_32)
result_1 = self.load(ea+4, Type.int_32)
self.put(result_0, "a{0}".format(self.data['a']))
self.put(result_1, "a{0}".format(self.data['a']+1))
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x8: # BO_LD.Q_BitRev
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_16).cast_to(Type.int_32) << 16
new_index = reverse16(reverse16(index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x10: # BO_LD.B_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_8).cast_to(Type.int_32, signed=True)
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x11: # BO_LD.BU_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_8)
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x12: # BO_LD.H_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_16).cast_to(Type.int_32, signed=True)
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x13: # BO_LD.HU_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_16)
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x14: # BO_LD.W_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea_0 = a_b + index
            ea_2 = a_b + (index + 2) % length
result = self.load(ea_2, Type.int_16).cast_to(Type.int_32) << 16 | \
self.load(ea_0, Type.int_16).cast_to(Type.int_32)
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x15: # BO_LD.D_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea_0 = a_b + index
ea_2 = a_b + (index + 2) % length
ea_4 = a_b + (index + 4) % length
ea_6 = a_b + (index + 6) % length
result_hw0 = self.load(ea_0, Type.int_16).cast_to(Type.int_32)
result_hw1 = self.load(ea_2, Type.int_16).cast_to(Type.int_32)
result_hw2 = self.load(ea_4, Type.int_16).cast_to(Type.int_32)
result_hw3 = self.load(ea_6, Type.int_16).cast_to(Type.int_32)
result_0 = (result_hw1 << 16) | result_hw0
result_1 = (result_hw3 << 16) | result_hw2
self.put(result_0, "d{0}".format(self.data['a']))
self.put(result_1, "d{0}".format(self.data['a']+1))
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x16: # BO_LD.A_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_32)
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x17: # BO_LD.DA_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea_0 = a_b + index
ea_4 = a_b + (index + 4) % length
result_0 = self.load(ea_0, Type.int_32)
result_1 = self.load(ea_4, Type.int_32)
self.put(result_0, "a{0}".format(self.data['a']))
self.put(result_1, "a{0}".format(self.data['a']+1))
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x18: # BO_LD.Q_Circ
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_16).cast_to(Type.int_32) << 16
new_index = index + sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + length) & cond_new_index_neg) | \
((new_index % length) & (cond_new_index_neg^0xffffffff))
result_2 = ((length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
return result
def commit_result(self, res):
self.put(res, self.get_dst_reg())
class BO_49_Instructions(Instruction):
""" A class for instructions with OP=49 """
name = 'BO_49_Instructions ...'
op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(9)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
off10 = bitstring.BitArray(bin="{0}{1}".format(tmp[0:4].bin,
tmp[10:16].bin).zfill(12))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:10]))
op2 = int(op2.bin, 2)
b = tmp[16:20]
a = tmp[20:]
if op2 == 0x0:
self.name = 'BO_SWAP.W (Post-increment Addressing Mode)'
elif op2 == 0x1:
self.name = 'BO_LDMST (Post-increment Addressing Mode)'
elif op2 == 0x10:
self.name = 'BO_SWAP.W (Pre-increment Addressing Mode)'
elif op2 == 0x11:
self.name = 'BO_LDMST (Pre-increment Addressing Mode)'
elif op2 == 0x20:
self.name = 'BO_SWAP.W (Base + Short Offset Addressing Mode)'
elif op2 == 0x21:
self.name = 'BO_LDMST (Base + Short Offset Addressing Mode)'
elif op2 == 0x24:
self.name = 'BO_LDLCX'
elif op2 == 0x25:
self.name = 'BO_LDUCX'
elif op2 == 0x26:
self.name = 'BO_STLCX'
elif op2 == 0x27:
self.name = 'BO_STUCX'
elif op2 == 0x28:
self.name = 'BO_LEA'
else:
self.name = "UNKNOWN"
data = {"a": int(a.hex, 16),
"b": int(b.hex, 16),
"off10": int(off10.hex, 16),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_sign_ext_offset(self):
return self.constant(self.data['off10'], Type.int_10).cast_to(Type.int_32, signed=True)
def get_a_b(self):
return self.get("a{0}".format(self.data['b']), Type.int_32)
def fetch_operands(self):
return [self.get_a_b(), self.get_sign_ext_offset()]
def compute_result(self, *args):
a_b = args[0]
sign_ext_offset = args[1]
result = ""
op2 = self.data['op2']
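        # SWAP.W atomically exchanges D[a] with the word at EA; LDMST is a
        # load-modify-store that merges D[a] (value) into memory under the
        # mask held in D[a+1]: M(EA) = (M(EA) & ~mask) | (value & mask).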
if op2 == 0x0: # SWAP.W (Post-increment Addressing Mode)
ea = a_b
tmp = self.load(ea, Type.int_32)
self.store(self.get("d{0}".format(self.data['a']), Type.int_32), ea)
self.put(tmp, "d{0}".format(self.data['a']))
self.put(ea + sign_ext_offset, "a{0}".format(self.data['b']))
elif op2 == 0x1: # LDMST (Post-increment Addressing Mode)
ea = a_b
result = self.load(ea, Type.int_32)
e_a_1 = self.get("d{0}".format(self.data['a']), Type.int_32) # E[a][31:0]
e_a_2 = self.get("d{0}".format(self.data['a']+1), Type.int_32) # E[a][63:32]
result = (result & ~e_a_2) | (e_a_1 & e_a_2)
self.store(result, ea)
self.put(ea + sign_ext_offset, "a{0}".format(self.data['b']))
        elif op2 == 0x10: # SWAP.W (Pre-increment Addressing Mode)
            ea = a_b + sign_ext_offset
            tmp = self.load(ea, Type.int_32)
            self.store(self.get("d{0}".format(self.data['a']), Type.int_32), ea)
            self.put(tmp, "d{0}".format(self.data['a']))
            self.put(ea, "a{0}".format(self.data['b']))
        elif op2 == 0x11: # LDMST (Pre-increment Addressing Mode)
            ea = a_b + sign_ext_offset
            result = self.load(ea, Type.int_32)
            e_a_1 = self.get("d{0}".format(self.data['a']), Type.int_32)  # E[a][31:0]
            e_a_2 = self.get("d{0}".format(self.data['a']+1), Type.int_32)  # E[a][63:32]
            result = (result & ~e_a_2) | (e_a_1 & e_a_2)
            self.store(result, ea)
            self.put(ea, "a{0}".format(self.data['b']))
        elif op2 == 0x20: # SWAP.W (Base + Short Offset Addressing Mode)
            ea = a_b + sign_ext_offset
            tmp = self.load(ea, Type.int_32)
            self.store(self.get("d{0}".format(self.data['a']), Type.int_32), ea)
            self.put(tmp, "d{0}".format(self.data['a']))
elif op2 == 0x21: # LDMST (Base + Short Offset Addressing Mode)
ea = a_b + sign_ext_offset
result = self.load(ea, Type.int_32)
e_a_1 = self.get("d{0}".format(self.data['a']), Type.int_32) # E[a][31:0]
e_a_2 = self.get("d{0}".format(self.data['a']+1), Type.int_32) # E[a][63:32]
result = (result & ~e_a_2) | (e_a_1 & e_a_2)
self.store(result, ea)
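        # LDLCX/LDUCX restore and STLCX/STUCX save a 16-word context block
        # at EA: two leading words (PCXI and A11 or PSW; skipped as dummies
        # on load) followed by the lower (a2-a7, d0-d7) or upper
        # (a10-a15, d8-d15) context registers.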
        elif op2 == 0x24: # LDLCX
            ea = a_b + sign_ext_offset
#dummy = self.load(ea, Type.int_32)
#dummy = self.load(ea+4, Type.int_32)
self.put(self.load(ea+8, Type.int_32), "a2")
self.put(self.load(ea+12, Type.int_32), "a3")
self.put(self.load(ea+16, Type.int_32), "d0")
self.put(self.load(ea+20, Type.int_32), "d1")
self.put(self.load(ea+24, Type.int_32), "d2")
self.put(self.load(ea+28, Type.int_32), "d3")
self.put(self.load(ea+32, Type.int_32), "a4")
self.put(self.load(ea+36, Type.int_32), "a5")
self.put(self.load(ea+40, Type.int_32), "a6")
self.put(self.load(ea+44, Type.int_32), "a7")
self.put(self.load(ea+48, Type.int_32), "d4")
self.put(self.load(ea+52, Type.int_32), "d5")
self.put(self.load(ea+56, Type.int_32), "d6")
self.put(self.load(ea+60, Type.int_32), "d7")
        elif op2 == 0x25: # LDUCX
            ea = a_b + sign_ext_offset
#dummy = self.load(ea, Type.int_32)
#dummy = self.load(ea+4, Type.int_32)
self.put(self.load(ea+8, Type.int_32), "a10")
self.put(self.load(ea+12, Type.int_32), "a11")
self.put(self.load(ea+16, Type.int_32), "d8")
self.put(self.load(ea+20, Type.int_32), "d9")
self.put(self.load(ea+24, Type.int_32), "d10")
self.put(self.load(ea+28, Type.int_32), "d11")
self.put(self.load(ea+32, Type.int_32), "a12")
self.put(self.load(ea+36, Type.int_32), "a13")
self.put(self.load(ea+40, Type.int_32), "a14")
self.put(self.load(ea+44, Type.int_32), "a15")
self.put(self.load(ea+48, Type.int_32), "d12")
self.put(self.load(ea+52, Type.int_32), "d13")
self.put(self.load(ea+56, Type.int_32), "d14")
self.put(self.load(ea+60, Type.int_32), "d15")
        elif op2 == 0x26: # STLCX
            ea = a_b + sign_ext_offset
self.store(self.get("pcxi", Type.int_32), ea)
self.store(self.get("a11", Type.int_32), ea+4)
self.store(self.get("a2", Type.int_32), ea+8)
self.store(self.get("a3", Type.int_32), ea+12)
self.store(self.get("d0", Type.int_32), ea+16)
self.store(self.get("d1", Type.int_32), ea+20)
self.store(self.get("d2", Type.int_32), ea+24)
self.store(self.get("d3", Type.int_32), ea+28)
self.store(self.get("a4", Type.int_32), ea+32)
self.store(self.get("a5", Type.int_32), ea+36)
self.store(self.get("a6", Type.int_32), ea+40)
self.store(self.get("a7", Type.int_32), ea+44)
self.store(self.get("d4", Type.int_32), ea+48)
self.store(self.get("d5", Type.int_32), ea+52)
self.store(self.get("d6", Type.int_32), ea+56)
self.store(self.get("d7", Type.int_32), ea+60)
        elif op2 == 0x27: # STUCX
            ea = a_b + sign_ext_offset
self.store(self.get("pcxi", Type.int_32), ea)
self.store(self.get("psw", Type.int_32), ea+4)
self.store(self.get("a10", Type.int_32), ea+8)
self.store(self.get("a11", Type.int_32), ea+12)
self.store(self.get("d8", Type.int_32), ea+16)
self.store(self.get("d9", Type.int_32), ea+20)
self.store(self.get("d10", Type.int_32), ea+24)
self.store(self.get("d11", Type.int_32), ea+28)
self.store(self.get("a12", Type.int_32), ea+32)
self.store(self.get("a13", Type.int_32), ea+36)
self.store(self.get("a14", Type.int_32), ea+40)
self.store(self.get("a15", Type.int_32), ea+44)
self.store(self.get("d12", Type.int_32), ea+48)
self.store(self.get("d13", Type.int_32), ea+52)
self.store(self.get("d14", Type.int_32), ea+56)
self.store(self.get("d15", Type.int_32), ea+60)
elif op2 == 0x28: # LEA
ea = a_b + sign_ext_offset
self.put(ea, "a{0}".format(self.data["a"]))
else:
print("Error: Unknown OP2 '{0}'!".format(self.data['op2']))
print("BO instruction OP=49, OP2=Unknown")
sys.exit(1)
class BO_69_Instructions(Instruction):
""" A class for instructions with OP=69 """
name = 'BO_69_Instructions ...'
op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(9)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
off10 = bitstring.BitArray(bin="{0}{1}".format(tmp[0:4].bin,
tmp[10:16].bin).zfill(12))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:10]))
op2 = int(op2.bin, 2)
b = tmp[16:20]
a = tmp[20:]
if op2 == 0x0:
self.name = 'BO_SWAP.W (Bit-reverse Addressing Mode)'
elif op2 == 0x1:
self.name = 'BO_LDMST (Bit-reverse Addressing Mode)'
elif op2 == 0x10:
self.name = 'BO_SWAP.W (Circular Addressing Mode)'
elif op2 == 0x11:
self.name = 'BO_LDMST (Circular Addressing Mode)'
else:
self.name = "UNKNOWN"
data = {"a": int(a.hex, 16),
"b": int(b.hex, 16),
"off10": int(off10.hex, 16),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_sign_ext_offset(self):
return self.constant(self.data['off10'], Type.int_10).cast_to(Type.int_32, signed=True)
def get_a_b(self):
return self.get("a{0}".format(self.data['b']), Type.int_32)
def get_a_b_1(self):
return self.get("a{0}".format(self.data['b']+1), Type.int_32)
def fetch_operands(self):
return [self.get_a_b(), self.get_a_b_1(), self.get_sign_ext_offset()]
def compute_result(self, *args):
a_b = args[0]
a_b_1 = args[1]
sign_ext_offset = args[2]
result = ""
op2 = self.data['op2']
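        # Same SWAP.W/LDMST semantics as OP=49, but with bit-reverse and
        # circular addressing: A[b+1] carries {incr, index} or
        # {length, index} and is updated after each access.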
if op2 == 0x0: # SWAP.W (Bit-reverse Addressing Mode)
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
tmp = self.load(ea, Type.int_32)
self.store("d{0}".format(self.data['a']), ea)
self.put(tmp, "d{0}".format(self.data['a']))
new_index = reverse16(reverse16(index) + reverse16(incr))
self.put(((incr & 0xffff) << 16) | (new_index & 0xffff), "a{0}".format(self.data['b']+1))
elif op2 == 0x1: # LDMST (Bit-reverse Addressing Mode)
index = a_b_1 & 0xffff
incr = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_32)
e_a_1 = self.get("d{0}".format(self.data['a']), Type.int_32) # E[a][31:0]
e_a_2 = self.get("d{0}".format(self.data['a']+1), Type.int_32) # E[a][63:32]
result = (result & ~e_a_2) | (e_a_1 & e_a_2)
self.store(result, ea)
new_index = reverse16(reverse16(index) + reverse16(incr))
self.put(((incr & 0xffff) << 16) | (new_index & 0xffff), "a{0}".format(self.data['b']+1))
elif op2 == 0x10: # SWAP.W (Circular Addressing Mode)
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
tmp = self.load(ea, Type.int_32)
self.store("d{0}".format(self.data['a']), ea)
self.put(tmp, "d{0}".format(self.data['a']))
new_index = index + sign_ext_offset
new_index = (new_index < 0).ite(
new_index + length,
new_index % length
)
self.put(((length & 0xffff) << 16) | (new_index & 0xffff), "a{0}".format(self.data['b']+1))
elif op2 == 0x11: # LDMST (Circular Addressing Mode)
index = a_b_1 & 0xffff
length = a_b_1 >> 16
ea = a_b + index
result = self.load(ea, Type.int_32)
e_a_1 = self.get("d{0}".format(self.data['a']), Type.int_32) # E[a][31:0]
e_a_2 = self.get("d{0}".format(self.data['a']+1), Type.int_32) # E[a][63:32]
result = (result & ~e_a_2) | (e_a_1 & e_a_2)
self.store(result, ea)
new_index = index + sign_ext_offset
new_index = (new_index < 0).ite(
new_index + length,
new_index % length
)
self.put(((length & 0xffff) << 16) | (new_index & 0xffff), "a{0}".format(self.data['b']+1))
else:
print("Error: Unknown OP2 '{0}'!".format(self.data['op2']))
print("BO instruction OP=69, OP2=Unknown")
sys.exit(1)
class BO_ST_89_Instructions(Instruction):
""" A class for STORE instructions with OP=89 """
name = 'BO_ST_89_Instructions ...'
op = "{0}{1}".format(bin(8)[2:].zfill(4), bin(9)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
off10 = bitstring.BitArray(bin="{0}{1}".format(tmp[0:4].bin,
tmp[10:16].bin))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:10]))
op2 = int(op2.bin, 2)
b = tmp[16:20]
a = tmp[20:]
if op2 == 0x6:
self.name = 'BO_ST.A_PostInc'
elif op2 == 0x16:
self.name = 'BO_ST.A_PreInc'
elif op2 == 0x26:
self.name = 'BO_ST.A_BaseShortOffset'
elif op2 == 0x0:
self.name = 'BO_ST.B_PostInc'
elif op2 == 0x10:
self.name = 'BO_ST.B_PreInc'
elif op2 == 0x20:
self.name = 'BO_ST.B_BaseShortOffset'
elif op2 == 0x2:
self.name = 'BO_ST.H_PostInc'
elif op2 == 0x12:
self.name = 'BO_ST.H_PreInc'
elif op2 == 0x22:
self.name = 'BO_ST.H_BaseShortOffset'
elif op2 == 0x4:
self.name = 'BO_ST.W_PostInc'
elif op2 == 0x14:
self.name = 'BO_ST.W_PreInc'
elif op2 == 0x24:
self.name = 'BO_ST.W_BaseShortOffset'
elif op2 == 0x5:
self.name = 'BO_ST.D_PostInc'
elif op2 == 0x15:
self.name = 'BO_ST.D_PreInc'
elif op2 == 0x25:
self.name = 'BO_ST.D_BaseShortOffset'
elif op2 == 0x7:
self.name = 'BO_ST.DA_PostInc'
elif op2 == 0x17:
self.name = 'BO_ST.DA_PreInc'
elif op2 == 0x27:
self.name = 'BO_ST.DA_BaseShortOffset'
elif op2 == 0x8:
self.name = 'BO_ST.Q_PostInc'
elif op2 == 0x18:
self.name = 'BO_ST.Q_PreInc'
elif op2 == 0x28:
self.name = 'BO_ST.Q_BaseShortOffset'
else:
self.name = "UNKNOWN"
data = {"a": int(a.bin, 2),
"b": int(b.bin, 2),
"off10": int(off10.bin, 2),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
@property
def ea(self):
""" Return A[b]+ off10. """
return self.a_b + self.sign_ext_offset
@property
def sign_ext_offset(self):
return self.constant(self.data['off10'], Type.int_10).cast_to(Type.int_32, signed=True)
@property
def a_b(self):
""" Return A[b] register. """
return self.get("a{0}".format(self.data['b']), Type.int_32)
@property
def a_a_1(self):
""" Return A[a]+1 register. """
return self.get("a{0}".format(self.data['a']+1), Type.int_32)
@property
def a_a(self):
""" Return A[a] register. """
return self.get("a{0}".format(self.data['a']), Type.int_32)
@property
def d_a_1(self):
""" Return D[a]+1 register. """
return self.get("d{0}".format(self.data['a']+1), Type.int_32)
@property
def d_a(self):
""" Return D[a] register """
return self.get("d{0}".format(self.data['a']), Type.int_32)
def compute_result(self, *args):
op2 = self.data['op2']
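        # Store addressing modes: post-increment stores at A[b] and then
        # adds off10 to A[b]; pre-increment stores at A[b] + off10 and
        # writes the new address back; base + short offset stores at
        # A[b] + off10 without updating A[b].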
if op2 == 0x6: # BO_ST.A_PostInc
self.store(self.a_a, self.a_b)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x16: # BO_ST.A_PreInc
self.store(self.a_a, self.ea)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x26: # BO_ST.A_BaseShortOffset
self.store(self.a_a, self.ea)
elif op2 == 0x0: # BO_ST.B_PostInc
val = self.d_a & 0xff
self.store(val, self.a_b)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x10: # BO_ST.B_PreInc
val = self.d_a & 0xff
self.store(val, self.ea)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x20: # BO_ST.B_BaseShortOffset
val = self.d_a & 0xff
self.store(val, self.ea)
elif op2 == 0x2: # BO_ST.H_PostInc
val = self.d_a & 0xffff
self.store(val, self.a_b)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x12: # BO_ST.H_PreInc
val = self.d_a & 0xffff
self.store(val, self.ea)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x22: # BO_ST.H_BaseShortOffset
val = self.d_a & 0xffff
self.store(val, self.ea)
elif op2 == 0x4: # BO_ST.W_PostInc
val = self.d_a
self.store(val, self.a_b)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x14: # BO_ST.W_PreInc
self.store(self.d_a, self.ea)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x24: # BO_ST.W_BaseShortOffset
self.store(self.d_a, self.ea)
elif op2 == 0x5: # BO_ST.D_PostInc
self.store(self.d_a, self.a_b)
self.store(self.d_a_1, self.a_b+4)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x15: # BO_ST.D_PreInc
self.store(self.d_a, self.ea)
self.store(self.d_a_1, self.ea+4)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x25: # BO_ST.D_BaseShortOffset
self.store(self.d_a, self.ea)
self.store(self.d_a_1, self.ea+4)
elif op2 == 0x7: # BO_ST.DA_PostInc
self.store(self.a_a, self.a_b)
self.store(self.a_a_1, self.a_b+4)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x17: # BO_ST.DA_PreInc
self.store(self.a_a, self.ea)
self.store(self.a_a_1, self.ea+4)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x27: # BO_ST.DA_BaseShortOffset
self.store(self.a_a, self.ea)
self.store(self.a_a_1, self.ea+4)
elif op2 == 0x8: # BO_ST.Q_PostInc
val = self.d_a >> 16
self.store(val, self.a_b)
inc = self.a_b + self.sign_ext_offset
self.put(inc, "a{0}".format(self.data['b']))
elif op2 == 0x18: # BO_ST.Q_PreInc
val = self.d_a >> 16
self.store(val, self.ea)
self.put(self.ea, "a{0}".format(self.data['b']))
elif op2 == 0x28: # BO_ST.Q_BaseShortOffset
val = self.d_a >> 16
self.store(val, self.ea)
else:
print("Error: Unknown OP2 '{0}'!".format(op2))
print("BO instruction OP=89, OP2=Unknown")
sys.exit(1)
class BO_ST_A9_Instructions(Instruction):
""" A class for STORE instructions with OP=A9 """
name = 'BO_ST_A9_Instructions ...'
op = "{0}{1}".format(bin(0xa)[2:].zfill(4), bin(9)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
off10 = bitstring.BitArray(bin="{0}{1}".format(tmp[0:4].bin,
tmp[10:16].bin))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:10]))
op2 = int(op2.bin, 2)
b = tmp[16:20]
a = tmp[20:]
if op2 == 0x0:
self.name = 'BO_ST.B_BitRev'
elif op2 == 0x10:
self.name = 'BO_ST.B_Circ'
elif op2 == 0x5:
self.name = 'BO_ST.D_BitRev'
elif op2 == 0x15:
self.name = 'BO_ST.D_Circ'
elif op2 == 0x6:
self.name = 'BO_ST.A_BitRev'
elif op2 == 0x16:
self.name = 'BO_ST.A_Circ'
elif op2 == 0x7:
self.name = 'BO_ST.DA_BitRev'
elif op2 == 0x17:
self.name = 'BO_ST.DA_Circ'
elif op2 == 0x2:
self.name = 'BO_ST.H_BitRev'
elif op2 == 0x12:
self.name = 'BO_ST.H_Circ'
elif op2 == 0x8:
self.name = 'BO_ST.Q_BitRev'
elif op2 == 0x18:
self.name = 'BO_ST.Q_Circ'
elif op2 == 0x4:
self.name = 'BO_ST.W_BitRev'
elif op2 == 0x14:
self.name = 'BO_ST.W_Circ'
else:
self.name = "UNKNOWN"
data = {"a": int(a.hex, 16),
"b": int(b.hex, 16),
"off10": int(off10.bin, 2),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
@property
def index(self):
return self.a_b_1 & 0xffff
@property
def length(self):
return self.a_b_1 >> 16
@property
def ea_6(self):
return self.a_b + ((self.index+6) % self.length)
@property
def ea_4(self):
return self.a_b + ((self.index+4) % self.length)
@property
def ea_2(self):
return self.a_b + ((self.index+2) % self.length)
@property
def ea_0(self):
return self.a_b + self.index
@property
def sign_ext_offset(self):
return self.constant(self.data['off10'], Type.int_10).cast_to(Type.int_32, signed=True)
@property
def a_b_1(self):
return self.get("a{0}".format(self.data['b']+1), Type.int_32)
@property
def a_b(self):
return self.get("a{0}".format(self.data['b']), Type.int_32)
@property
def a_a_1(self):
return self.get("a{0}".format(self.data['a']+1), Type.int_32)
@property
def a_a(self):
return self.get("a{0}".format(self.data['a']), Type.int_32)
@property
def d_a_1(self):
return self.get("d{0}".format(self.data['a']+1), Type.int_32)
@property
def d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def compute_result(self, *args):
op2 = self.data['op2']
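        # ea_0 is A[b] + index; for the circular modes ea_2/ea_4/ea_6 wrap
        # (index + n) modulo the buffer length held in the upper half of
        # A[b+1].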
if op2 == 0x0: # BO_ST.B_BitRev
incr = self.a_b_1 >> 16
self.store(self.d_a & 0xff, self.ea_0)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x10: # BO_ST.B_Circ
self.store(self.d_a & 0xff, self.ea_0)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
elif op2 == 0x5: # BO_ST.D_BitRev
incr = self.a_b_1 >> 16
self.store(self.d_a, self.ea_0)
self.store(self.d_a_1, self.ea_0+4)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x15: # BO_ST.D_Circ
self.store(self.d_a & 0xffff, self.ea_0)
self.store(self.d_a >> 16, self.ea_2)
self.store(self.d_a_1 & 0xffff, self.ea_4)
self.store(self.d_a_1 >> 16, self.ea_6)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
elif op2 == 0x6: # BO_ST.A_BitRev
incr = self.a_b_1 >> 16
self.store(self.a_a, self.ea_0)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x16: # BO_ST.A_Circ
self.store(self.a_a, self.ea_0)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
elif op2 == 0x7: # BO_ST.DA_BitRev
incr = self.a_b_1 >> 16
self.store(self.a_a, self.ea_0)
self.store(self.a_a_1, self.ea_0+4)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x17: # BO_ST.DA_Circ
self.store(self.a_a, self.ea_0)
self.store(self.a_a_1, self.ea_4)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
elif op2 == 0x2: # BO_ST.H_BitRev
incr = self.a_b_1 >> 16
self.store(self.d_a & 0xffff, self.ea_0)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x12: # BO_ST.H_Circ
self.store(self.d_a & 0xffff, self.ea_0)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
elif op2 == 0x8: # BO_ST.Q_BitRev
incr = self.a_b_1 >> 16
self.store(self.d_a >> 16, self.ea_0)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x18: # BO_ST.Q_Circ
self.store(self.d_a >> 16, self.ea_0)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
elif op2 == 0x4: # BO_ST.W_BitRev
incr = self.a_b_1 >> 16
self.store(self.d_a, self.ea_0)
new_index = reverse16(reverse16(self.index) + reverse16(incr))
result_2 = ((incr & 0xffff) << 16) | (new_index & 0xffff)
self.put(result_2, "a{0}".format(self.data['b']+1))
elif op2 == 0x14: # BO_ST.W_Circ
self.store(self.d_a, self.ea_0)
new_index = self.index + self.sign_ext_offset
cond_new_index_neg = extend_to_32_bits(new_index & 0x80000000 == 0x80000000)
new_index = ((new_index + self.length) & cond_new_index_neg) | \
((new_index % self.length) & (cond_new_index_neg^0xffffffff))
result = ((self.length & 0xffff) << 16) | (new_index & 0xffff)
self.put(result, "a{0}".format(self.data['b']+1))
else:
print("Error: Unknown OP2 '{0}'!".format(op2))
print("BO instruction OP=A9, OP2=Unknown")
sys.exit(1)
|
[
"noreply@github.com"
] |
angr.noreply@github.com
|
9d2eb5e7101f25c69133388b5298f21e6c156b36
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/3edeb844795ba685a7cb6811051e176c0111319ceb300f8bdfc98281d9d62383/win32inet.py
|
f6bb75423847caf8b877cb3111a35d2eae311d40
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,530
|
py
|
# encoding: utf-8
# module win32inet
# from C:\Users\Doly\Anaconda3\lib\site-packages\win32\win32inet.pyd
# by generator 1.147
# no doc
# imports
from pywintypes import error
# Variables with simple values
UNICODE = 1
# functions
def CommitUrlCacheEntry(*args, **kwargs): # real signature unknown
pass
def CreateUrlCacheEntry(*args, **kwargs): # real signature unknown
pass
def CreateUrlCacheGroup(*args, **kwargs): # real signature unknown
pass
def DeleteUrlCacheEntry(*args, **kwargs): # real signature unknown
pass
def DeleteUrlCacheGroup(*args, **kwargs): # real signature unknown
pass
def FindCloseUrlCache(*args, **kwargs): # real signature unknown
pass
def FindFirstUrlCacheEntry(*args, **kwargs): # real signature unknown
pass
def FindFirstUrlCacheEntryEx(*args, **kwargs): # real signature unknown
pass
def FindFirstUrlCacheGroup(*args, **kwargs): # real signature unknown
pass
def FindNextUrlCacheEntry(*args, **kwargs): # real signature unknown
pass
def FindNextUrlCacheEntryEx(*args, **kwargs): # real signature unknown
pass
def FindNextUrlCacheGroup(*args, **kwargs): # real signature unknown
pass
def FtpCommand(*args, **kwargs): # real signature unknown
pass
def FtpOpenFile(*args, **kwargs): # real signature unknown
pass
def GetUrlCacheEntryInfo(*args, **kwargs): # real signature unknown
pass
def GetUrlCacheGroupAttribute(*args, **kwargs): # real signature unknown
pass
def InternetAttemptConnect(*args, **kwargs): # real signature unknown
pass
def InternetCanonicalizeUrl(*args, **kwargs): # real signature unknown
pass
def InternetCheckConnection(*args, **kwargs): # real signature unknown
pass
def InternetCloseHandle(*args, **kwargs): # real signature unknown
pass
def InternetConnect(*args, **kwargs): # real signature unknown
pass
def InternetGetCookie(*args, **kwargs): # real signature unknown
pass
def InternetGetLastResponseInfo(*args, **kwargs): # real signature unknown
pass
def InternetGoOnline(*args, **kwargs): # real signature unknown
pass
def InternetOpen(*args, **kwargs): # real signature unknown
pass
def InternetOpenUrl(*args, **kwargs): # real signature unknown
pass
def InternetQueryOption(*args, **kwargs): # real signature unknown
pass
def InternetReadFile(*args, **kwargs): # real signature unknown
pass
def InternetSetCookie(*args, **kwargs): # real signature unknown
pass
def InternetSetOption(*args, **kwargs): # real signature unknown
pass
def InternetWriteFile(*args, **kwargs): # real signature unknown
pass
def SetUrlCacheEntryGroup(*args, **kwargs): # real signature unknown
pass
def SetUrlCacheGroupAttribute(*args, **kwargs): # real signature unknown
pass
def WinHttpGetDefaultProxyConfiguration(*args, **kwargs): # real signature unknown
pass
def WinHttpGetIEProxyConfigForCurrentUser(*args, **kwargs): # real signature unknown
pass
def WinHttpGetProxyForUrl(*args, **kwargs): # real signature unknown
pass
def WinHttpOpen(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x00000214F80354A8>'
__spec__ = None # (!) real value is "ModuleSpec(name='win32inet', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x00000214F80354A8>, origin='C:\\\\Users\\\\Doly\\\\Anaconda3\\\\lib\\\\site-packages\\\\win32\\\\win32inet.pyd')"
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
5599fddb1996441c6feec6e05de8788be34929d3
|
ff811849b246e78df4a4054594e4ff31dc52532a
|
/interception_efficiency.py
|
8a0076cb2cc7cdd80d8238778bd85651166a6c95
|
[] |
no_license
|
zhan3870/git_filtration
|
8e4bee8f00a435c55ec3d0389c3d8d989433b0a5
|
4fffcc9b4290955444c42bea9c813b20d595e06c
|
refs/heads/master
| 2021-07-23T08:29:00.489477
| 2017-10-31T06:26:40
| 2017-10-31T06:26:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# coding: utf-8
# In[ ]:
def interception_efficiency(alpha,Ku,N_R):
import numpy as np
eta_R = (1+N_R)/(2*Ku)*(2*np.log(1+N_R)-1+alpha+(1/(1+N_R))**2*(1-alpha/2)-alpha/2*(1+N_R)**2)
return eta_R
|
[
"zhan3870@umn.edu"
] |
zhan3870@umn.edu
|
92bd2b77c3e3fe6df3bc537ad30e4e44ab32392e
|
91e23b6db1d756ea0fdfd3d7c77350c839226dbf
|
/home/migrations/0019_auto_20200524_0053.py
|
46d7ed3782447d84ef5d43a884c0b92314dbf712
|
[] |
no_license
|
hasretbicak/Python-Django-Havaliman-TransferSistemi
|
c02103f0ea9b611351e0f704e922b83b7d43cd1b
|
7700e5383c62394c1b6b4ab98aa4f2d16faea7b7
|
refs/heads/master
| 2022-10-09T13:13:13.047622
| 2020-05-23T23:12:54
| 2020-05-23T23:12:54
| 252,548,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Generated by Django 3.0.5 on 2020-05-23 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0018_auto_20200524_0035'),
]
operations = [
migrations.AlterField(
model_name='faq',
name='status',
field=models.CharField(choices=[('False', 'False'), ('True', 'True')], max_length=10),
),
]
|
[
"hsrt.bck002@gmail.com"
] |
hsrt.bck002@gmail.com
|
12b427b03bb9491d4cf68afa4d66488fac655eb4
|
3d1aacb0ce641a1d96cb4a4b1363b0d03bc3f87c
|
/netbox/virtualization/constants.py
|
307921e0ea6176e2d2a5e69b705dfdff8fbf7863
|
[
"Apache-2.0"
] |
permissive
|
MarcelRaschke/netbox
|
242e30909c8cdcc6cbfb1e5beb7fc29752d5025e
|
8206e46991498616bd9fdc5f40b57e66067b674a
|
refs/heads/develop
| 2023-09-05T18:59:32.540609
| 2022-08-12T02:26:58
| 2022-08-12T02:26:58
| 160,838,955
| 1
| 1
|
Apache-2.0
| 2023-04-30T00:40:43
| 2018-12-07T15:08:25
|
Python
|
UTF-8
|
Python
| false
| false
| 466
|
py
|
from __future__ import unicode_literals
from dcim.constants import DEVICE_STATUS_ACTIVE, DEVICE_STATUS_OFFLINE, DEVICE_STATUS_STAGED
# VirtualMachine statuses (replicated from Device statuses)
VM_STATUS_CHOICES = [
[DEVICE_STATUS_ACTIVE, 'Active'],
[DEVICE_STATUS_OFFLINE, 'Offline'],
[DEVICE_STATUS_STAGED, 'Staged'],
]
# Bootstrap CSS classes for VirtualMachine statuses
VM_STATUS_CLASSES = {
0: 'warning',
1: 'success',
3: 'primary',
}
|
[
"jstretch@digitalocean.com"
] |
jstretch@digitalocean.com
|
d5011dbad6c73ea2ce5cdc2afc4b6923897018b1
|
2dbccf2272e391c9c177478881a474ba84f80eac
|
/example/data_pb2_grpc.py
|
3677703a5790f6104a1bf68c86207ad8dc6baeb9
|
[] |
no_license
|
killer933/grpc_python
|
703725b212f5ffccabf77d9e2ec87eec9b55f196
|
27d91b6b358ca0d3ad760d09aa77688632d849eb
|
refs/heads/master
| 2020-06-17T12:39:58.542032
| 2019-07-09T03:44:36
| 2019-07-09T03:44:36
| 195,927,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import sys
sys.path.append('C:/Users/Administrator/PycharmProjects/testkeras')
from example import data_pb2 as data__pb2
class GreeterStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SayHello = channel.unary_unary(
'/example.Greeter/SayHello',
request_serializer=data__pb2.HelloRequest.SerializeToString,
response_deserializer=data__pb2.HelloReply.FromString,
)
class GreeterServicer(object):
# missing associated documentation comment in .proto file
pass
def SayHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(
servicer.SayHello,
request_deserializer=data__pb2.HelloRequest.FromString,
response_serializer=data__pb2.HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'example.Greeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
[
"china.sunzhi@gmail.com"
] |
china.sunzhi@gmail.com
|
8e78c55ebf8c6ff4d594d23acba62782228ce564
|
b8cb10a3c99961f44ac758b3683523627d032680
|
/runoschool/runo/migrations/0016_auto_20201022_2208.py
|
7895a86854c2633520c818682c2444f93ede56d4
|
[] |
no_license
|
samuelatuma1/runoschool
|
4a2183be4a7e856723fc5368c90edcb79d6ed29e
|
ed75fb4077cf5ff86b7d546d3346fc4625bee97e
|
refs/heads/master
| 2023-01-29T20:22:25.160805
| 2020-12-14T08:33:13
| 2020-12-14T08:33:13
| 312,167,155
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
# Generated by Django 3.1.1 on 2020-10-22 21:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('runo', '0015_auto_20201022_1707'),
]
operations = [
migrations.AddField(
model_name='welcometab',
name='img1L',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='welcLead', to='runo.welcleaders'),
),
migrations.AddField(
model_name='welcometab2',
name='img2L',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='welcLead2', to='runo.welcleaders'),
),
]
|
[
"atumasaake@gmail.com"
] |
atumasaake@gmail.com
|
0e7053df72c502c35c80129ff5ba7ca61f4884f4
|
ea0bcd33f1bb1fa9fde281508c5446fa07cf0c27
|
/real_estate/migrations/0010_auto_20201228_1250.py
|
6cd099028d08a5ba450258e9c8bb1fad84fc64f2
|
[] |
no_license
|
Pranali2510/Real_Estate
|
1acf0d56dc7a0e7b3cc11f3a54d909a8f51f28c5
|
65b3167abf14243a75bb5bc306bd290a65d5f558
|
refs/heads/master
| 2023-04-07T16:35:47.141109
| 2021-04-08T12:10:04
| 2021-04-08T12:10:04
| 355,891,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 3.0.7 on 2020-12-28 07:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('real_estate', '0009_auto_20201227_1814'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='a_image',
field=models.FileField(blank=True, default='', null=True, upload_to='images/'),
),
]
|
[
"pranalimahajan2510@gmail.com"
] |
pranalimahajan2510@gmail.com
|
91d6a853e70ce8e64013e96c27438efa6310f2a0
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/grpc/src/tools/codegen/core/gen_stats_data.py
|
85e22146f76ff03b993c5760ec1a582e6e86e0c8
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 18,146
|
py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import ctypes
import json
import math
import sys
import yaml
with open('src/core/lib/debug/stats_data.yaml') as f:
attrs = yaml.load(f.read(), Loader=yaml.Loader)
REQUIRED_FIELDS = ['name', 'doc']
def make_type(name, fields):
return (collections.namedtuple(
name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
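# c_str renders a Python/bytes string as a C string literal, octal-escaping
# anything outside printable ASCII plus backslash and double quote.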
def c_str(s, encoding='ascii'):
if isinstance(s, str):
s = s.encode(encoding)
result = ''
for c in s:
c = chr(c) if isinstance(c, int) else c
if not (32 <= ord(c) < 127) or c in ('\\', '"'):
result += '\\%03o' % ord(c)
else:
result += c
return '"' + result + '"'
types = (
make_type('Counter', []),
make_type('Histogram', ['max', 'buckets']),
)
Shape = collections.namedtuple('Shape', 'max buckets')
inst_map = dict((t[0].__name__, t[1]) for t in types)
stats = []
for attr in attrs:
found = False
for t, lst in types:
t_name = t.__name__.lower()
if t_name in attr:
name = attr[t_name]
del attr[t_name]
lst.append(t(name=name, **attr))
found = True
break
assert found, "Bad decl: %s" % attr
def dbl2u64(d):
return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
def u642dbl(d):
return ctypes.c_double.from_buffer(ctypes.c_ulonglong(d)).value
def shift_works_until(mapped_bounds, shift_bits):
for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
a, b = ab
if (a >> shift_bits) == (b >> shift_bits):
return i
return len(mapped_bounds)
def find_ideal_shift(mapped_bounds, max_size):
best = None
for shift_bits in reversed(list(range(0, 64))):
n = shift_works_until(mapped_bounds, shift_bits)
if n == 0:
continue
table_size = mapped_bounds[n - 1] >> shift_bits
if table_size > max_size:
continue
if best is None:
best = (shift_bits, n, table_size)
elif best[1] < n:
best = (shift_bits, n, table_size)
return best
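# gen_map_table builds the lookup table used at runtime: every mapped bound
# is right-shifted by the chosen shift, and the table maps each shifted
# value to the index of the first bucket bound that is >= it.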
def gen_map_table(mapped_bounds, shift_data):
#print("gen_map_table(%s, %s)" % (mapped_bounds, shift_data))
tbl = []
cur = 0
mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
for i in range(0, mapped_bounds[shift_data[1] - 1]):
while i > mapped_bounds[cur]:
cur += 1
tbl.append(cur)
return tbl
static_tables = []
def decl_static_table(values, type):
global static_tables
v = (type, values)
for i, vp in enumerate(static_tables):
if v == vp:
return i
r = len(static_tables)
static_tables.append(v)
return r
def type_for_uint_table(table):
mv = max(table)
if mv < 2**8:
return 'uint8_t'
elif mv < 2**16:
return 'uint16_t'
elif mv < 2**32:
return 'uint32_t'
else:
return 'uint64_t'
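# merge_cases emits a balanced if/else tree over the (upper_bound, code)
# cases, giving O(log n) comparisons in the generated BucketFor function.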
def merge_cases(cases):
l = len(cases)
if l == 1:
return cases[0][1]
left_len = l // 2
left = cases[0:left_len]
right = cases[left_len:]
return 'if (value < %d) {\n%s\n} else {\n%s\n}' % (
left[-1][0], merge_cases(left), merge_cases(right))
def gen_bucket_code(shape):
bounds = [0, 1]
done_trivial = False
done_unmapped = False
first_nontrivial = None
first_unmapped = None
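    # Bucket bounds grow roughly geometrically from 1 toward shape.max, so
    # small values get unit-width buckets while large values share coarse ones.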
while len(bounds) < shape.buckets + 1:
if len(bounds) == shape.buckets:
nextb = int(shape.max)
else:
mul = math.pow(
float(shape.max) / bounds[-1],
1.0 / (shape.buckets + 1 - len(bounds)))
nextb = int(math.ceil(bounds[-1] * mul))
if nextb <= bounds[-1] + 1:
nextb = bounds[-1] + 1
elif not done_trivial:
done_trivial = True
first_nontrivial = len(bounds)
bounds.append(nextb)
bounds_idx = decl_static_table(bounds, 'int')
#print first_nontrivial, shift_data, bounds
#if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
if first_nontrivial is None:
return ('return grpc_core::Clamp(value, 0, %d);\n' % shape.max,
bounds_idx)
cases = [(0, 'return 0;'), (first_nontrivial, 'return value;')]
if done_trivial:
first_nontrivial_code = dbl2u64(first_nontrivial)
last_code = first_nontrivial_code
while True:
code = ''
first_nontrivial = u642dbl(first_nontrivial_code)
code_bounds_index = None
for i, b in enumerate(bounds):
if b > first_nontrivial:
code_bounds_index = i
break
code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
shift_data = find_ideal_shift(code_bounds[code_bounds_index:],
65536)
if not shift_data:
break
map_table = gen_map_table(code_bounds[code_bounds_index:],
shift_data)
if not map_table:
break
if map_table[-1] < 5:
break
map_table_idx = decl_static_table(
[x + code_bounds_index for x in map_table],
type_for_uint_table(map_table))
last_code = (
(len(map_table) - 1) << shift_data[0]) + first_nontrivial_code
code += 'DblUint val;\n'
code += 'val.dbl = value;\n'
code += 'const int bucket = '
code += 'kStatsTable%d[((val.uint - %dull) >> %d)];\n' % (
map_table_idx, first_nontrivial_code, shift_data[0])
code += 'return bucket - (value < kStatsTable%d[bucket]);' % bounds_idx
cases.append((int(u642dbl(last_code)) + 1, code))
first_nontrivial_code = last_code
last = u642dbl(last_code) + 1
for i, b in enumerate(bounds[:-2]):
if bounds[i + 1] < last:
continue
cases.append((bounds[i + 1], 'return %d;' % i))
cases.append((None, 'return %d;' % (len(bounds) - 2)))
return (merge_cases(cases), bounds_idx)
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
for line in banner:
print('// %s' % line, file=f)
print(file=f)
shapes = set()
for histogram in inst_map['Histogram']:
shapes.add(Shape(max=histogram.max, buckets=histogram.buckets))
def snake_to_pascal(name):
return ''.join([x.capitalize() for x in name.split('_')])
with open('src/core/lib/debug/stats_data.h', 'w') as H:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#':
break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H], [line[2:].rstrip() for line in copyright])
put_banner(
[H],
["Automatically generated by tools/codegen/core/gen_stats_data.py"])
print("#ifndef GRPC_SRC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
print("#define GRPC_SRC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print("#include <atomic>", file=H)
print("#include <memory>", file=H)
print("#include <stdint.h>", file=H)
print("#include \"src/core/lib/debug/histogram_view.h\"", file=H)
print("#include \"absl/strings/string_view.h\"", file=H)
print("#include \"src/core/lib/gprpp/per_cpu.h\"", file=H)
print(file=H)
print("namespace grpc_core {", file=H)
for shape in shapes:
print("class HistogramCollector_%d_%d;" % (shape.max, shape.buckets),
file=H)
print("class Histogram_%d_%d {" % (shape.max, shape.buckets), file=H)
print(" public:", file=H)
print(" static int BucketFor(int value);", file=H)
print(" const uint64_t* buckets() const { return buckets_; }", file=H)
print(
" friend Histogram_%d_%d operator-(const Histogram_%d_%d& left, const Histogram_%d_%d& right);"
% (shape.max, shape.buckets, shape.max, shape.buckets, shape.max,
shape.buckets),
file=H)
print(" private:", file=H)
print(" friend class HistogramCollector_%d_%d;" %
(shape.max, shape.buckets),
file=H)
print(" uint64_t buckets_[%d]{};" % shape.buckets, file=H)
print("};", file=H)
print("class HistogramCollector_%d_%d {" % (shape.max, shape.buckets),
file=H)
print(" public:", file=H)
print(" void Increment(int value) {", file=H)
print(" buckets_[Histogram_%d_%d::BucketFor(value)]" %
(shape.max, shape.buckets),
file=H)
print(" .fetch_add(1, std::memory_order_relaxed);", file=H)
print(" }", file=H)
print(" void Collect(Histogram_%d_%d* result) const;" %
(shape.max, shape.buckets),
file=H)
print(" private:", file=H)
print(" std::atomic<uint64_t> buckets_[%d]{};" % shape.buckets, file=H)
print("};", file=H)
print("struct GlobalStats {", file=H)
print(" enum class Counter {", file=H)
for ctr in inst_map['Counter']:
print(" k%s," % snake_to_pascal(ctr.name), file=H)
print(" COUNT", file=H)
print(" };", file=H)
print(" enum class Histogram {", file=H)
for ctr in inst_map['Histogram']:
print(" k%s," % snake_to_pascal(ctr.name), file=H)
print(" COUNT", file=H)
print(" };", file=H)
print(" GlobalStats();", file=H)
print(
" static const absl::string_view counter_name[static_cast<int>(Counter::COUNT)];",
file=H)
print(
" static const absl::string_view histogram_name[static_cast<int>(Histogram::COUNT)];",
file=H)
print(
" static const absl::string_view counter_doc[static_cast<int>(Counter::COUNT)];",
file=H)
print(
" static const absl::string_view histogram_doc[static_cast<int>(Histogram::COUNT)];",
file=H)
print(" union {", file=H)
print(" struct {", file=H)
for ctr in inst_map['Counter']:
print(" uint64_t %s;" % ctr.name, file=H)
print(" };", file=H)
print(" uint64_t counters[static_cast<int>(Counter::COUNT)];", file=H)
print(" };", file=H)
for ctr in inst_map['Histogram']:
print(" Histogram_%d_%d %s;" % (ctr.max, ctr.buckets, ctr.name),
file=H)
print(" HistogramView histogram(Histogram which) const;", file=H)
print(
" std::unique_ptr<GlobalStats> Diff(const GlobalStats& other) const;",
file=H)
print("};", file=H)
print("class GlobalStatsCollector {", file=H)
print(" public:", file=H)
print(" std::unique_ptr<GlobalStats> Collect() const;", file=H)
for ctr in inst_map['Counter']:
print(
" void Increment%s() { data_.this_cpu().%s.fetch_add(1, std::memory_order_relaxed); }"
% (snake_to_pascal(ctr.name), ctr.name),
file=H)
for ctr in inst_map['Histogram']:
print(
" void Increment%s(int value) { data_.this_cpu().%s.Increment(value); }"
% (snake_to_pascal(ctr.name), ctr.name),
file=H)
print(" private:", file=H)
print(" struct Data {", file=H)
for ctr in inst_map['Counter']:
print(" std::atomic<uint64_t> %s{0};" % ctr.name, file=H)
for ctr in inst_map['Histogram']:
print(" HistogramCollector_%d_%d %s;" %
(ctr.max, ctr.buckets, ctr.name),
file=H)
print(" };", file=H)
print(" PerCpu<Data> data_;", file=H)
print("};", file=H)
print("}", file=H)
print(file=H)
print("#endif // GRPC_SRC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
with open('src/core/lib/debug/stats_data.cc', 'w') as C:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#':
break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([C], [line[2:].rstrip() for line in copyright])
put_banner(
[C],
["Automatically generated by tools/codegen/core/gen_stats_data.py"])
print("#include <grpc/support/port_platform.h>", file=C)
print(file=C)
print("#include \"src/core/lib/debug/stats_data.h\"", file=C)
print("#include <stdint.h>", file=C)
print(file=C)
histo_code = []
histo_bucket_boundaries = {}
for shape in shapes:
code, bounds_idx = gen_bucket_code(shape)
histo_bucket_boundaries[shape] = bounds_idx
histo_code.append(code)
print("namespace grpc_core {", file=C)
print("namespace { union DblUint { double dbl; uint64_t uint; }; }", file=C)
for shape in shapes:
print(
"void HistogramCollector_%d_%d::Collect(Histogram_%d_%d* result) const {"
% (shape.max, shape.buckets, shape.max, shape.buckets),
file=C)
print(" for (int i=0; i<%d; i++) {" % shape.buckets, file=C)
print(
" result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);",
file=C)
print(" }", file=C)
print("}", file=C)
print(
"Histogram_%d_%d operator-(const Histogram_%d_%d& left, const Histogram_%d_%d& right) {"
% (shape.max, shape.buckets, shape.max, shape.buckets, shape.max,
shape.buckets),
file=C)
print(" Histogram_%d_%d result;" % (shape.max, shape.buckets), file=C)
print(" for (int i=0; i<%d; i++) {" % shape.buckets, file=C)
print(" result.buckets_[i] = left.buckets_[i] - right.buckets_[i];",
file=C)
print(" }", file=C)
print(" return result;", file=C)
print("}", file=C)
for typename, instances in sorted(inst_map.items()):
print(
"const absl::string_view GlobalStats::%s_name[static_cast<int>(%s::COUNT)] = {"
% (typename.lower(), typename),
file=C)
for inst in instances:
print(" %s," % c_str(inst.name), file=C)
print("};", file=C)
print(
"const absl::string_view GlobalStats::%s_doc[static_cast<int>(%s::COUNT)] = {"
% (typename.lower(), typename),
file=C)
for inst in instances:
print(" %s," % c_str(inst.doc), file=C)
print("};", file=C)
print("namespace {", file=C)
for i, tbl in enumerate(static_tables):
print("const %s kStatsTable%d[%d] = {%s};" %
(tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1])),
file=C)
print("} // namespace", file=C)
for shape, code in zip(shapes, histo_code):
print(("int Histogram_%d_%d::BucketFor(int value) {%s}") %
(shape.max, shape.buckets, code),
file=C)
print("GlobalStats::GlobalStats() : %s {}" %
",".join("%s{0}" % ctr.name for ctr in inst_map['Counter']),
file=C)
print("HistogramView GlobalStats::histogram(Histogram which) const {",
file=C)
print(" switch (which) {", file=C)
print(" default: GPR_UNREACHABLE_CODE(return HistogramView());", file=C)
for inst in inst_map['Histogram']:
print(" case Histogram::k%s:" % snake_to_pascal(inst.name), file=C)
print(
" return HistogramView{&Histogram_%d_%d::BucketFor, kStatsTable%d, %d, %s.buckets()};"
% (inst.max, inst.buckets, histo_bucket_boundaries[Shape(
inst.max, inst.buckets)], inst.buckets, inst.name),
file=C)
print(" }", file=C)
print("}", file=C)
print(
"std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {",
file=C)
print(" auto result = std::make_unique<GlobalStats>();", file=C)
print(" for (const auto& data : data_) {", file=C)
for ctr in inst_map['Counter']:
print(" result->%s += data.%s.load(std::memory_order_relaxed);" %
(ctr.name, ctr.name),
file=C)
for h in inst_map['Histogram']:
print(" data.%s.Collect(&result->%s);" % (h.name, h.name), file=C)
print(" }", file=C)
print(" return result;", file=C)
print("}", file=C)
print(
"std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {",
file=C)
print(" auto result = std::make_unique<GlobalStats>();", file=C)
for ctr in inst_map['Counter']:
print(" result->%s = %s - other.%s;" % (ctr.name, ctr.name, ctr.name),
file=C)
for h in inst_map['Histogram']:
print(" result->%s = %s - other.%s;" % (h.name, h.name, h.name),
file=C)
print(" return result;", file=C)
print("}", file=C)
print("}", file=C)
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
75c9694b5b9779f317f22559caee48de01e93160
|
501ceb6d503b749fa6cb86961fdc825a516a7c22
|
/heroes4Hire/urls.py
|
c46411081e0ef7e7f1e3a39c2c9cb74c466b552e
|
[] |
no_license
|
prasanthkp89/Heroes4Hire
|
07de9d7542e9aa18f3a8fe06d8ade2414d0f3a9b
|
689128ccb8328c08e113466cf7b2a5ce18f4d08b
|
refs/heads/master
| 2021-06-11T14:20:32.081560
| 2017-02-14T07:52:28
| 2017-02-14T07:52:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
"""heroes4Hire URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from heroes import views as heroes_views
urlpatterns = [
url('^accounts/profile/$', heroes_views.profile, name='profile'),
url('^accounts/', include('django.contrib.auth.urls')),
url(r'^admin/', admin.site.urls),
url(r'^heroes/', include('heroes.urls', namespace='heroes')),
url(r'^training/', include('training.urls', namespace='trainings')),
url(r'^missions/', include('missions.urls', namespace='missions')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"wmmufunde@yahoocom"
] |
wmmufunde@yahoocom
|
420c886aade9e0be50a6e79d828ce906825b4ff5
|
549188abd5cf84ab4ec24445184ac76b2e54eec4
|
/circle_magic.py
|
91f5c26dced00a4073b14d8159f05bed1a72455f
|
[] |
no_license
|
BuakawRed/Circles_Are_Magical
|
970bfa1eadb6bb94ddd2935a98eb9fcc82b7e40c
|
084bbe71278241cb19ef4910664d3ff0ca1b3de3
|
refs/heads/master
| 2020-04-21T18:32:38.143237
| 2019-04-03T05:54:30
| 2019-04-03T05:54:30
| 169,773,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import numpy as np
import math
def Pi_tester(N):
    # sample N points uniformly in the square [-1, 1] x [-1, 1]
    x = np.random.uniform(-1, 1, N)
    y = np.random.uniform(-1, 1, N)
    sum_sqr = x * x + y * y
    # keep the points that land inside the unit circle
    a = []
    for val in sum_sqr:
        if math.sqrt(val) <= 1:
            a.append(val)
    M = len(a)
    # hits/total estimates the area ratio pi/4
    return (M / N) * 4
print(Pi_tester(999))
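# Illustrative sketch (not part of the original file): the same estimate without
# the Python loop; sqrt(v) <= 1 iff v <= 1, so the test can be vectorized.
def Pi_tester_vectorized(N):
    x = np.random.uniform(-1, 1, N)
    y = np.random.uniform(-1, 1, N)
    return 4 * np.count_nonzero(x * x + y * y <= 1) / N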
|
[
"noreply@github.com"
] |
BuakawRed.noreply@github.com
|
ef8cfc42b48088eae6cdd117fce499e597517b00
|
7e12fef37575c7a3bfc352a30c8937b7dcdf590a
|
/mysite/myapp/migrations/0008_auto_20190215_1306.py
|
a0cd0f8ff402432709d6edbfe722d5074efb0dcc
|
[] |
no_license
|
shree7796/webapp
|
37945a29a41d03a3c406635fc60bb484423d3a0d
|
42a8294f5448ff41fe47810b6593a215a27a7786
|
refs/heads/master
| 2021-06-11T22:47:48.287237
| 2021-04-18T14:19:15
| 2021-04-18T14:19:15
| 179,231,439
| 1
| 0
| null | 2021-04-16T16:14:53
| 2019-04-03T07:09:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,813
|
py
|
# Generated by Django 2.0.10 on 2019-02-15 07:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0007_auto_20190215_1302'),
]
operations = [
migrations.AlterField(
model_name='personaleducationdetails',
name='year_of_graduation',
field=models.DateField(choices=[(1970, 1970), (1971, 1971), (1972, 1972), (1973, 1973), (1974, 1974), (1975, 1975), (1976, 1976), (1977, 1977), (1978, 1978), (1979, 1979), (1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017)], null=True),
),
migrations.AlterField(
model_name='personaleducationdetails',
name='year_passing_10th',
field=models.DateField(choices=[(1970, 1970), (1971, 1971), (1972, 1972), (1973, 1973), (1974, 1974), (1975, 1975), (1976, 1976), (1977, 1977), (1978, 1978), (1979, 1979), (1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017)], null=True),
),
migrations.AlterField(
model_name='personaleducationdetails',
name='year_passing_12th',
field=models.DateField(choices=[(1970, 1970), (1971, 1971), (1972, 1972), (1973, 1973), (1974, 1974), (1975, 1975), (1976, 1976), (1977, 1977), (1978, 1978), (1979, 1979), (1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017)], null=True),
),
]
|
[
"shivamshrivastava470@gmail.com"
] |
shivamshrivastava470@gmail.com
|
01659e7f6238a105a77b183b7982dfceadda8e86
|
761c39ed62047c0f2fc6675b17e7477cc635741e
|
/com/test/ConnectSqlite.py
|
b8d948587bf090cc6e7d29766fd7d7c45c299458
|
[] |
no_license
|
open-pythons/lottedfs
|
e48d9a121a9a1158067c6351fcf35d98575f3574
|
a17422231802503527ee4f31a90da0ae3878718e
|
refs/heads/master
| 2022-03-16T20:54:10.205219
| 2019-11-08T15:53:56
| 2019-11-08T15:53:56
| 220,019,179
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,839
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: W_H_J
@license: Apache Licence
@contact: 415900617@qq.com
@software: PyCharm
@file: SQLite3Config.py
@time: 2019/5/17 14:22
@describe: sqlite3 operation helper
"""
import sys
import os
import sqlite3
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
class ConnectSqlite:
def __init__(self, dbName="./sqlite3Test.db"):
"""
ๅๅงๅ่ฟๆฅ--ไฝฟ็จๅฎ่ฎฐๅพๅ
ณ้ญ่ฟๆฅ
:param dbName: ่ฟๆฅๅบๅๅญ๏ผๆณจๆ๏ผไปฅ'.db'็ปๅฐพ
"""
self._conn = sqlite3.connect(dbName)
self._cur = self._conn.cursor()
self._time_now = "[" + sqlite3.datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') + "]"
def close_con(self):
"""
ๅ
ณ้ญ่ฟๆฅๅฏน่ฑก--ไธปๅจ่ฐ็จ
:return:
"""
self._cur.close()
self._conn.close()
    def create_table(self, sql):
        """
        Create a table (initialization)
        :param sql: CREATE TABLE statement
        :return: True if the statement succeeded
        """
try:
self._cur.execute(sql)
self._conn.commit()
return True
except Exception as e:
print(self._time_now, "[CREATE TABLE ERROR]", e)
return False
def drop_table(self, table_name):
"""
ๅ ้ค่กจ
:param table_name: ่กจๅ
:return:
"""
try:
self._cur.execute('DROP TABLE {0}'.format(table_name))
self._conn.commit()
return True
except Exception as e:
print(self._time_now, "[DROP TABLE ERROR]", e)
return False
def delete_table(self, sql):
"""
ๅ ้ค่กจ่ฎฐๅฝ
:param sql:
:return: True or False
"""
try:
if 'DELETE' in sql.upper():
self._cur.execute(sql)
self._conn.commit()
return True
else:
print(self._time_now, "[EXECUTE SQL IS NOT DELETE]")
return False
except Exception as e:
print(self._time_now, "[DELETE TABLE ERROR]", e)
return False
def fetchall_table(self, sql, limit_flag=True):
"""
ๆฅ่ฏขๆๆๆฐๆฎ
:param sql:
:param limit_flag: ๆฅ่ฏขๆกๆฐ้ๆฉ๏ผFalse ๆฅ่ฏขไธๆก๏ผTrue ๅ
จ้จๆฅ่ฏข
:return:
"""
try:
self._cur.execute(sql)
war_msg = self._time_now + ' The [{}] is empty or equal None!'.format(sql)
if limit_flag is True:
r = self._cur.fetchall()
return r if len(r) > 0 else war_msg
elif limit_flag is False:
r = self._cur.fetchone()
                return r if r is not None else war_msg  # fetchone() returns None when no row matches
except Exception as e:
print(self._time_now, "[SELECT TABLE ERROR]", e)
def insert_update_table(self, sql):
"""
ๆๅ
ฅ/ๆดๆฐ่กจ่ฎฐๅฝ
:param sql:
:return:
"""
try:
self._cur.execute(sql)
self._conn.commit()
return True
except Exception as e:
print(self._time_now, "[INSERT/UPDATE TABLE ERROR]", e)
return False
def insert_table_many(self, sql, value):
"""
ๆๅ
ฅๅคๆก่ฎฐๅฝ
:param sql:
:param value: list:[(),()]
:return:
"""
try:
self._cur.executemany(sql, value)
self._conn.commit()
return True
except Exception as e:
print(self._time_now, "[INSERT MANY TABLE ERROR]", e)
return False
class conTest:
"""ๆต่ฏ็ฑป"""
def __init__(self):
self.con = ConnectSqlite("./sqlite3Test.db")
def create_table_test(self):
sql = '''CREATE TABLE `mytest` (
`id` DATETIME DEFAULT NULL,
`user` VARCHAR(12) DEFAULT NULL,
`name` VARCHAR(12) DEFAULT NULL,
`number` VARCHAR(12) DEFAULT NULL
)'''
        print(self.con.create_table(sql))
def drop_table_test(self):
print(self.con.drop_table("mytest"))
def fetchall_table_test(self):
sql = "SELECT * from mytest WHERE user='1003';"
sql_all = "SELECT * from mytest;"
print("ๅ
จ้จ่ฎฐๅฝ", self.con.fetchall_table(sql_all))
print("ๅๆก่ฎฐๅฝ", self.con.fetchall_table(sql_all, False))
print("ๆกไปถๆฅ่ฏข", self.con.fetchall_table(sql))
def delete_table_test(self):
sql = "DELETE FROM mytest WHERE user='1003';"
print(self.con.delete_table(sql))
def update_table_test(self):
        sql_update = "UPDATE mytest SET id={0},user={1},name={2},number={3} WHERE number={4}".format(1, 1002, "'Wang Er'",
1002,
1002)
print(self.con.insert_update_table(sql_update))
def insert_table_test_one(self):
sql = """INSERT INTO mytest VALUES (3, 1003, "็ไบ", 1003);"""
print(self.con.insert_update_table(sql))
def insert_table_test_many(self):
sql = """INSERT INTO mytest VALUES (?, ?, ?, ?)"""
        value = [(2, 1004, "Zhao Liu", 1004), (4, 1005, "Wu Yi", 1005)]
print(self.con.insert_table_many(sql, value))
def close_con(self):
self.con.close_con()
if __name__ == '__main__':
contest = conTest()
contest.create_table_test()
contest.insert_table_test_many()
contest.fetchall_table_test()
contest.insert_table_test_one()
contest.fetchall_table_test()
contest.update_table_test()
contest.drop_table_test()
contest.close_con()
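# Illustrative sketch (not part of the original file): a single row can also be
# inserted through the public API above with a parameterized statement, which
# avoids hand-building SQL strings (and the injection risk that comes with it).
def insert_one_parameterized(con, row):
    return con.insert_table_many("INSERT INTO mytest VALUES (?, ?, ?, ?)", [row])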
|
[
"longzinziyan@gmail.com"
] |
longzinziyan@gmail.com
|
4dc67dc8f16d162464e5fa5448a7a507a8d151ba
|
e4b39086d0dd1cda8f97582b263377587aa7383b
|
/Connect4/connect4/strategy/strategy.py
|
58bbe980ef1d587229a0289575b0c447e16d2450
|
[] |
no_license
|
jercanioana/Fundamentals-of-Programming
|
f88207027e86bf1f1c088fc63992b47e34a50524
|
2da42da4029401705c6bfa832898266f25c9e41e
|
refs/heads/master
| 2022-07-04T09:57:14.903444
| 2020-05-05T12:22:48
| 2020-05-05T12:22:48
| 261,451,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from abc import abstractmethod
from connect4.board.cell import Cell
class Strategy:
@abstractmethod
def move(self, *args) -> Cell:
pass
|
[
"ioana.jercan02@gmail.com"
] |
ioana.jercan02@gmail.com
|
3b2a70bdb3ab23a408a21d4cc1d8bc2d9154cca2
|
c1125e2835829c6ed0f06d641142fb7b4e179132
|
/project/simpleLogisticRegression.py
|
888686ad40bb890089e46f6c67cdc2f0281d5c97
|
[] |
no_license
|
chien-lung/DataScience_2018
|
b3b41e9cd9a5deaadd17e8507fcb0b80dd576022
|
dbb2a6ac9b695aafe6e2b9ba5a892387574b2514
|
refs/heads/master
| 2020-04-16T19:55:20.242523
| 2019-01-17T10:08:44
| 2019-01-17T10:08:44
| 165,879,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,880
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 00:03:35 2019
@author: Lung
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
def deleteWC(wc_data, normal_not_exist_teams):
wc_not_data = wc_data.loc[wc_data['team'].isin(normal_not_exist_teams)]
wc_use_data = wc_data.loc[~wc_data['gameid'].isin(np.unique(wc_not_data['gameid']))]
return wc_use_data
filename = '2016 complete match data OraclesElixir 2018-12-18.xlsx'
stat_list = ['player','side','team','champion','position']
data = pd.read_excel(filename)
normal_data = data.loc[~data['league'].isin(['WC'])]
wc_data = data.loc[data['league'].isin(['WC'])]
normal_not_exist_teams = set(wc_data['team']).difference(set(normal_data['team']))#['Albus NoX Luna','EDward Gaming','Royal Never Give Up','I MAY']
wc_use_data = deleteWC(wc_data, normal_not_exist_teams)
b = np.array(normal_data.loc[normal_data['playerid'].isin([100])]['team'])
b_r = np.array(normal_data.loc[normal_data['playerid'].isin([100])]['result'])
r = np.array(normal_data.loc[normal_data['playerid'].isin([200])]['team'])
r_r = np.array(normal_data.loc[normal_data['playerid'].isin([200])]['result'])
team_stat = pd.DataFrame({'B_team':b,'R_team':r})
team_X = pd.get_dummies(team_stat, columns=['B_team','R_team'])
team_y = b_r
wc_b = np.array(wc_use_data.loc[wc_use_data['playerid'].isin([100])]['team'])
wc_b_r = np.array(wc_use_data.loc[wc_use_data['playerid'].isin([100])]['result'])
wc_r = np.array(wc_use_data.loc[wc_use_data['playerid'].isin([200])]['team'])
wc_r_r = np.array(wc_use_data.loc[wc_use_data['playerid'].isin([200])]['result'])
wc_stat = pd.DataFrame({'B_team':wc_b,'R_team':wc_r})
X_test = pd.get_dummies(wc_stat, columns=['B_team','R_team'])
y_test = wc_b_r
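# Note (editor's addition): pd.get_dummies is applied to the train and test frames
# separately. deleteWC() guarantees every WC team also appears in the regular-season
# data, but not the reverse, so X_test can end up with fewer columns than team_X;
# X_test = X_test.reindex(columns=team_X.columns, fill_value=0) would align them.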
X_train, X_val, y_train, y_val = train_test_split(team_X, team_y, test_size=0.3, random_state=36)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
score = logreg.score(X_train, y_train)
print("Training set accuracy: ", '%.3f'%(score))
score = logreg.score(X_val, y_val)
print("Validation set accuracy: ", '%.3f'%(score))
score = logreg.score(X_test, y_test)
print("WC set accuracy: ", '%.3f'%(score))
'''
team_stat = pd.DataFrame({'B_team':np.concatenate((b,r)), 'R_team':np.concatenate((r,b))})
team_X = pd.get_dummies(team_stat, columns=['B_team','R_team'])
team_y = np.concatenate((b_r,r_r))
X_train, X_val, y_train, y_val = train_test_split(team_X, team_y, test_size=0.3, random_state=36)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Training set accuracy: ", '%.3f'%(score))
score = logreg.score(X_val, y_val)
print("Validation set accuracy: ", '%.3f'%(score))
#score = logreg.score(X_test, y_test)
#print("WC set accuracy: ", '%.3f'%(score))
'''
'''
for plrid in range(1,6):
plr = data.loc[data['playerid'].isin([plrid, plrid+5])]['player']
plr_arr = np.array(plr)
team_stat['player{}'.format(plrid)] = pd.Series(plr_arr, index=team_stat.index)
#Team
team_X = pd.get_dummies(team_stat)
X_train, X_test, y_train, y_test = train_test_split(team_X, team_result, test_size=0.25, random_state=36)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
score = logreg.score(X_train, y_train)
score2 = logreg.score(X_test, y_test)
print("Training set accuracy: ", '%.3f'%(score))
print("Test set accuracy: ", '%.3f'%(score2))
#Players
plr_X = pd.get_dummies(plr_stat, prefix=stat_list, columns=stat_list)
X_train, X_test, y_train, y_test = train_test_split(plr_X, plr_result, test_size=0.3, random_state=1)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
score = logreg.score(X_train, y_train)
score2 = logreg.score(X_test, y_test)
print("Training set accuracy: ", '%.3f'%(score))
print("Test set accuracy: ", '%.3f'%(score2))
'''
|
[
"e125313530@gmail.com"
] |
e125313530@gmail.com
|
0280e212179474e2cef2943f05464f14fa6fb089
|
e2cb1cc3ebf462300b519bfec5841c326dccb095
|
/pyscandl/modules/Pyscandl.py
|
b3e204b69561604583a039eadc9b607c6042296d
|
[
"BSD-3-Clause"
] |
permissive
|
Annwan/pyscandl
|
eb9fcd60bca49f277d96ec9968f1e9b57e69b72a
|
afec7b410699f1bb6289a8653fc7cfa5b0d03406
|
refs/heads/master
| 2023-03-14T09:05:36.478222
| 2021-02-23T18:24:33
| 2021-02-23T18:24:33
| 344,026,261
| 0
| 0
|
BSD-3-Clause
| 2021-03-03T06:32:19
| 2021-03-03T06:32:19
| null |
UTF-8
|
Python
| false
| false
| 12,594
|
py
|
import contextlib
from .excepts import DryNoSauceHere, TooManySauce, EmptyChapter, DelayedRelease
from .fetchers.fetcher import StandaloneFetcher
import requests
import os
from re import sub as re_sub
from sys import stderr
from wand.image import Image
class Pyscandl:
"""
	The main object of the program. It is responsible for the downloads and controls the fetchers for you.
"""
def __init__(self, fetcher, chapstart=1, output:str=".", pdf:bool=True, keep:bool=False, image:bool=False, all:bool=False, link:str=None, manga:str=None, download_number:int=1, chapend=0, quiet:bool=False, skip:int=0, tiny:bool=False):
"""
		Initialize this instance of the pyscandl downloader; it needs either manga or link to work.
:param fetcher: fetcher object related to the download
:param chapstart: first chapter to be downloaded
:type chapstart: int/float/str
:param output: output folder
:type output: str
:param pdf: tell if the result should be kept as a pdf
:type pdf: bool
:param keep: tell if the result should be kept as a pdf and as a collection of images
:type keep: bool
:param image: tell if the result should be kept as a collection of images
:type image: bool
:param all: download all the chapters that are available after chapstart
:type all: bool
:param link: link of the manga to download
:type link: str
:param manga: identification tag of the manga *(see every fetcher for their different variations)*
:type manga: str
:param download_number: number of chapters to download
:type download_number: int
		:param chapend: chapter to end the download on; if it does not exist, the download stops once the next chapter number to download is greater than it
:type chapend: int/float/str
:param quiet: should the program not output any information about what it is doing in the console
:type quiet: bool
:param skip: number of images to skip on the first chapter being downloaded *(useful if running in image mode)*
:type skip: int
:param tiny: should the name of every downloaded scan be minified and only include the chapter number and the chapter title
:type tiny: bool
:raises DryNoSauceHere: neither link or manga was specified
:raises TooManySauce: both link and manga were specified
"""
		if (link is not None and manga is None) or (link is None and manga is not None):
if issubclass(fetcher, StandaloneFetcher):
self.fetcher = fetcher(link=link, manga=manga)
else:
self.fetcher = fetcher(link=link, manga=manga, chapstart=chapstart)
elif link is None and manga is None:
raise DryNoSauceHere
else:
raise TooManySauce
# in case windows is the os, remove the banned characters
if os.name == "nt":
manga_name = re_sub(r'[\\/*?:"<>|]', u"โ", self.fetcher.manga_name)
else:
manga_name = self.fetcher.manga_name
# creating output folder
		self._output = (output if output.endswith("/") else output + "/") + manga_name + "/"
if not os.path.exists(self._output):
os.makedirs(self._output)
self._header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
"Set-Cookie": f"domain={self.fetcher.domain}"}
self._header.update(self.fetcher.headers)
self._nskip = skip
self._quiet = quiet
# select download mode
self._pdf = pdf
self._keep = keep
self._image = image
self._all = all
self._download_number = download_number
self._chapend = float(chapend) if "." in str(chapend) else int(chapend)
self._path = f"{self._output}ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}/" # save path for images
self._img_bin_list = []
self._tiny = tiny
# in case windows is the os, remove the banned characters
if os.name == "nt":
chapter_name = re_sub(r'[\\/*?:"<>|]', u"โ", self.fetcher.chapter_name)
else:
chapter_name = self.fetcher.chapter_name
if self._tiny:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
else:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{manga_name} - {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}{manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
self._banlist = []
ban_path = f"{os.path.dirname(os.path.abspath(__file__))}/../banlist"
for img in os.listdir(ban_path):
with open(f"{ban_path}/{img}", "rb") as img_bin:
self._banlist.append(img_bin.read())
def _dl_image(self):
"""
Downloads the currently selected image.
"""
if not os.path.exists(self._path):
os.makedirs(self._path)
with open(f"{self._path}{self.fetcher.npage}.{self.fetcher.ext}", "wb") as img:
img.write(requests.get(self.fetcher.image, headers=self._header).content)
if not self._quiet:
print(".", end="", flush=True)
def full_chapter(self):
"""
Fetching all the images of the chapter and storing them in RAM.
"""
if not self._quiet:
if isinstance(self.fetcher, StandaloneFetcher):
print(f"fetching: {self.fetcher.chapter_name}")
else:
print(f"fetching: ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}")
while not self.fetcher.is_last_image():
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
if not self._quiet:
print(".", end="", flush=True)
self.fetcher.next_image()
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
if not self._quiet:
print(".", end="", flush=True)
def keep_full_chapter(self):
"""
Downloading all the images of the chapters and storing them where the output was specified.
"""
if not self._quiet:
if isinstance(self.fetcher, StandaloneFetcher):
print(f"downloading: {self.fetcher.chapter_name}")
else:
print(f"downloading: ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}")
while not self.fetcher.is_last_image():
if self._keep:
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
self._dl_image()
self.fetcher.next_image()
if self._keep:
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
self._dl_image()
if not self._quiet and self._image:
print("")
def _skip(self):
"""
Skips the images as asked with the skip parameter.
"""
for loop in range(self._nskip):
self.fetcher.next_image()
def create_pdf(self):
"""
Creates the pdf at the output location with the fetched or the downloaded images of the current chapter.
:raises EmptyChapter: the images of the current chapter were all blacklisted images and the pdf was empty
"""
print("Warning: the pdf creation engine will be changed for Wand in the next major release (3.0.0). Please do not forget to install imagemagick at that time", file=stderr)
if not self._quiet:
print("\nconverting...", end=" ")
# loading the downloaded images if keep mode
# removing the images found in the banlist
self._img_bin_list = [img for img in self._img_bin_list if img not in self._banlist]
if len(self._img_bin_list) > 0:
# creating the pdf
with Image() as pdf:
for img_bin in self._img_bin_list:
with contextlib.redirect_stderr(None): # to mute alpha channel and ICC warnings as wand processes the image well anyway
with Image(blob=img_bin) as img:
pdf.sequence.append(img)
pdf.save(filename=self._pdf_path)
with open(self._pdf_path, "rb") as file:
pdf = file.read()
with open(self._pdf_path, "wb") as file:
file.write(pdf.replace(b"/Producer (https://imagemagick.org)", b"/Producer (https://pypi.org/project/pyscandl/)")
.replace(
b"/CreationDate", b"/Author <feff"+self.fetcher.author.encode("utf-16_be").hex().encode()+
b">\n/Keywords <feff"+self.fetcher.manga_name.encode("utf-16_be").hex().encode()+b">\n/CreationDate")
) # manually adding the missing metadate from the pdf creation
if not self._quiet:
print("converted")
else:
raise EmptyChapter(self.fetcher.manga_name, self.fetcher.chapter_number)
def go_to_chapter(self, chap_num):
"""
Make Pyscandl go to the asked chapter.
:param chap_num: chapter number that was asked for
:type chap_num: int/str/float
"""
		self.fetcher.go_to_chapter(chap_num)
		# in case windows is the os, remove the banned characters
		# (the name must be taken after the chapter change, otherwise the path
		# would still use the previous chapter's name)
		if os.name == "nt":
			chapter_name = re_sub(r'[\\/*?:"<>|]', u"โ", self.fetcher.chapter_name)
		else:
			chapter_name = self.fetcher.chapter_name
		self._path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}/"
self._img_bin_list = []
# prepares the next pdf path and name
if self._tiny:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
else:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
def next_chapter(self):
"""
Goes to the next chapter
"""
self.fetcher.next_chapter()
# in case windows is the os, remove the banned characters
if os.name == "nt":
chapter_name = re_sub(r'[\\/*?:"<>|]', u"โ", self.fetcher.chapter_name)
else:
chapter_name = self.fetcher.chapter_name
self._path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}/"
self._img_bin_list = []
# prepares the next pdf path and name
if self._tiny:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
else:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
def full_download(self):
"""
Does the full download process with what is specified when initializing the Pyscandl object
"""
try:
# emulating a do while
self._skip()
counter = 1
try:
if self._keep or self._image:
self.keep_full_chapter()
else:
self.full_chapter()
if not self._image:
try:
self.create_pdf()
except EmptyChapter:
if not self._quiet:
print("empty")
except DelayedRelease as e:
if not self._quiet:
print(e)
while not isinstance(self.fetcher, StandaloneFetcher) and not self.fetcher.is_last_chapter() and (self._all or counter < self._download_number or float(self.fetcher.chapter_number) < self._chapend):
self.next_chapter()
try:
if self._keep or self._image:
self.keep_full_chapter()
else:
self.full_chapter()
if not self._image:
try:
self.create_pdf()
except EmptyChapter:
if not self._quiet:
print("empty")
except DelayedRelease as e:
if not self._quiet:
print(e)
counter += 1
except KeyboardInterrupt:
if not self._quiet:
print("\nmanual interruption")
finally:
self.fetcher.quit()
if not self._quiet:
print("end of the download")
|
[
"thomas99.montero@gmail.com"
] |
thomas99.montero@gmail.com
|
de75a9910004d2d0ddb1e10339b6493c821b0881
|
ce814bdc76513d1a03245a292a9b1194a287b152
|
/CEM/frozen_lake/preprocessing.py
|
9de50b6eb619024de295666c81a2f5673ddb58ef
|
[] |
no_license
|
gowtham1997/Reinforcement-Learning-Algorithms
|
b34678bbdcfee2634beef2a687758337b4fd09d2
|
44f52d5d6ffbf684c790752966dcf9b1bd4514cf
|
refs/heads/master
| 2022-08-14T18:12:51.835771
| 2019-09-28T14:40:31
| 2019-09-28T14:40:31
| 160,662,125
| 0
| 0
| null | 2022-06-21T21:36:16
| 2018-12-06T11:00:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
import torch
import numpy as np
from collections import namedtuple
import gym
import torch.nn as nn
class DiscrteOneHotWrapper(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
# converting discrete to Box space
self.observation_space = gym.spaces.Box(0.0, 1.0,
(env.observation_space.n, ),
np.float32)
def observation(self, obs):
# one hot conversion of discrete obs
new_obs = np.copy(self.observation_space.low)
new_obs[obs] = 1.0
return new_obs
def iterate_batches(env, net, batch_size):
np.random.seed(12345)
obs = env.reset()
Episode_Step = namedtuple('Episode_Step', field_names=[
'observation', 'action'])
Episode = namedtuple('Episode', field_names=['reward', 'steps'])
episode_steps = []
batch = []
episode_reward = 0.0
softmax = nn.Softmax(dim=1)
while True:
obs_tensor = torch.FloatTensor([obs])
logits = net(obs_tensor)
action_prob_tensor = softmax(logits)
action_probs = action_prob_tensor.data.numpy()[0]
action = np.random.choice(len(action_probs), p=action_probs)
new_obs, reward, is_done, _ = env.step(action)
episode_reward += reward
episode_steps.append(Episode_Step(obs, action))
if is_done:
batch.append(Episode(episode_reward, episode_steps))
new_obs = env.reset()
episode_reward = 0.0
episode_steps = []
if len(batch) == batch_size:
yield batch
batch = []
obs = new_obs
# print(action_probs)
def filter_batches(batch, percentile, gamma):
    # discount rewards so episodes that win in fewer steps are valued higher
disc_rewards = list(
map(lambda s: s.reward * (gamma ** len(s.steps)), batch))
reward_boundary = np.percentile(disc_rewards, percentile)
# store elite examples and pass them on in the next iteration.
elite_batch = []
train_obs = []
train_actions = []
for element, disc_reward in zip(batch, disc_rewards):
if disc_reward > reward_boundary:
train_actions.extend(map(lambda s: s.action, element.steps))
train_obs.extend(map(lambda s: s.observation, element.steps))
elite_batch.append(element)
return elite_batch, train_obs, train_actions, reward_boundary
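# Illustrative sketch (not part of the original file): wiring the helpers above
# together on FrozenLake; the single-linear-layer net is an assumption made purely
# for demonstration.
def demo_one_batch(batch_size=16):
    env = DiscrteOneHotWrapper(gym.make("FrozenLake-v0"))
    net = nn.Sequential(nn.Linear(env.observation_space.shape[0], env.action_space.n))
    batch = next(iterate_batches(env, net, batch_size))
    elite, obs, actions, boundary = filter_batches(batch, percentile=30, gamma=0.9)
    return len(elite), boundary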
|
[
"gowtham.ramesh1@gmail.com"
] |
gowtham.ramesh1@gmail.com
|
4e7f38e9a24f9b722097e1408a4fbf5462cce724
|
9fb581010facc08374cd636b98951dae0f2c0a2d
|
/kineticmodels/views.py
|
1abddc074f8e12b5ce97cb79e6db90ce5bcf6090
|
[] |
no_license
|
bslakman/kineticmodelssite
|
e4f74378ca3663647b9de9eeeb88af8035279780
|
0bb3aa9dff2795442d1c451b2b2d2e27742e90c5
|
refs/heads/master
| 2020-12-25T13:23:46.677652
| 2015-10-30T14:00:02
| 2015-10-30T14:00:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
def index(request):
# template=loader.get_template('kineticmodels/index.html')
return HttpResponse("This is the kinetic models index!")
|
[
"david.ma.green@gmail.com"
] |
david.ma.green@gmail.com
|
16a6a02aabedb0b303922512e57ad926b4e9f14d
|
0bc81c8742e6b7cd4bd3a804ac41cedee637c921
|
/portalweb/tests/__init__.py
|
c16c21fb2eadf9f4ff591414edaa544253c1dcfe
|
[] |
no_license
|
TPAC-MARVL/portal
|
c7ff9445ea340774aaa1890e2b847001e6564379
|
b9660d7b771f105360c814e1a861fb16dc036c2b
|
refs/heads/master
| 2016-09-16T11:25:25.742221
| 2014-11-07T04:44:19
| 2014-11-07T04:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from openstackmoduletest import *
#from usermanagertest import *
#from instancemanagertest import *
#from configmanagertest import *
#from instanceservicetest import *
|
[
"fxmzb123@gmail.com"
] |
fxmzb123@gmail.com
|
c53395dc36f9d60f7b1508497f163495d634a4fb
|
d1667b512038e21045ec35d85e307d85a396dd46
|
/news/views.py
|
ba4c7aedbb6e094b83f10e6ab53a8868e1181c5f
|
[] |
no_license
|
yashmoda/CodeUtsava_HackStrangely_AgricultureServer
|
556ff334d52cffa87d330b106a0640380e763f11
|
03b73ce4c8d9cb712ed2e061746768d4df0991e5
|
refs/heads/master
| 2021-05-16T16:46:30.949722
| 2018-02-21T09:45:47
| 2018-02-21T09:45:47
| 120,063,987
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http.response import JsonResponse
from django.shortcuts import render
import urllib2
# Create your views here.
import keys
def show_news(request):
response_json = {keys.NEWS: []}
try:
if request.method == 'GET':
news_url = 'http://eventregistry.org/json/article?query=%7B%22%24query%22%3A%7B%22%24and%22%3A%5B%7B%22conceptUri%22%3A%7B%22%24and%22%3A%5B%22http%3A%2F%2Fen.wikipedia.org%2Fwiki%2FAgriculture%22%5D%7D%7D%2C%7B%22categoryUri%22%3A%7B%22%24and%22%3A%5B%22dmoz%2FBusiness%2FAgriculture_and_Forestry%2FField_Crops%22%2C%22dmoz%2FScience%2FAgriculture%22%5D%7D%7D%2C%7B%22%24or%22%3A%5B%7B%22sourceUri%22%3A%22timesofindia.indiatimes.com%22%7D%2C%7B%22sourceUri%22%3A%22thehindu.com%22%7D%2C%7B%22sourceUri%22%3A%22bhaskar.com%22%7D%2C%7B%22sourceUri%22%3A%22economictimes.indiatimes.com%22%7D%2C%7B%22sourceUri%22%3A%22indianexpress.com%22%7D%2C%7B%22sourceUri%22%3A%22in.reuters.com%22%7D%2C%7B%22sourceUri%22%3A%22dnaindia.com%22%7D%5D%7D%2C%7B%22lang%22%3A%22eng%22%7D%5D%7D%7D&action=getArticles&resultType=articles&articlesSortBy=rel&articlesCount=10&articlesArticleBodyLen=-1&apiKey=1d149793-9d64-4ebe-bd70-9d82cc8126aa&callback=JSON_CALLBACK'
response_json[keys.NEWS] = urllib2.urlopen(news_url).read()
response_json[keys.SUCCESS] = True
response_json[keys.MESSAGE] = "All news shown."
return JsonResponse(response_json)
except Exception as e:
print(str(e))
response_json[keys.SUCCESS] = False
response_json[keys.MESSAGE] = "Please try again later."
return JsonResponse(response_json)
|
[
"yashmoda07@gmail.com"
] |
yashmoda07@gmail.com
|
9ea39910d1888a5c1b4e29019173a17c18c082ce
|
19f96fdd86f1a29f4eec3d18d9e513865f1f512f
|
/webWork_backup/webflask/test_py3/lib/python3.5/weakref.py
|
318c642b1c2955967e39c3fcdbb4b83861e7c44a
|
[] |
no_license
|
Cattmonkey/openWork
|
e133376eeea3bbf5279f9199b18aa8cf44f21637
|
3e1f21f0c6bce7ec114cc8d6311746c4a2cc426b
|
refs/heads/master
| 2020-03-14T00:58:41.610424
| 2018-05-04T09:59:03
| 2018-05-04T09:59:03
| 131,368,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
/root/anaconda3/lib/python3.5/weakref.py
|
[
"yyxdyx01@163.com"
] |
yyxdyx01@163.com
|
0358b4f0bbab7000ade46e616a5d61fd12016333
|
823e659ad9aad1a0717cecfdd6f8c3eab4310fd1
|
/to_do_list/todo_router.py
|
0aaa26e31be6b99be20c84ad6d54243d5cf0ba48
|
[] |
no_license
|
gaoqingloon/best_wishes
|
e3d708264d58da52239fc3ebd18c4678a7a1e253
|
cc0ffd49b96b98edfd706ddcaf1f6529a7afb443
|
refs/heads/main
| 2023-03-22T04:25:50.496289
| 2021-03-13T04:33:59
| 2021-03-13T04:33:59
| 332,776,305
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
from utils.valid_util import module_select
def todo_return_button():
print("ไปปๆ้ฎ่ฟๅ\n", end="")
input("Please input: ")
todo_router()
def todo_router():
function_select_parse = \
"+" + "-" * 58 + "+\n" \
"| <1> Our plan\t\t\t\t\t\t |\n" + \
"| <2> Secret\t\t\t\t\t\t |\n" + \
"| <3> First time for us\t\t\t\t\t |\n" + \
"| <0> Return\t\t\t\t\t\t |\n" + \
"+" + "-" * 58 + "+\n" + \
"Please select: "
print()
select_num = input(function_select_parse)
if select_num != "0829" and select_num != "papapa":
select_num = module_select(select_num, function_select_parse, 0, 3)
if select_num == "papapa":
from to_do_list.papapa import our_papapa
our_papapa()
todo_return_button()
elif select_num == "0829":
from to_do_list.todo_list import first_time
first_time()
todo_return_button()
elif int(select_num) == 1:
from to_do_list.todo_list import todo_list
todo_list()
todo_return_button()
elif int(select_num) == 2:
print(":) ๅฟๅฟ๏ผๆๅทไธๅฏนๅฆ~\n")
todo_return_button()
elif int(select_num) == 3:
print(":) qinglong.gao record, secret...\n")
todo_return_button()
else:
from best_wishes import select_route
select_route()
if __name__ == '__main__':
todo_router()
|
[
"gordon_ml@163.com"
] |
gordon_ml@163.com
|
20f5aa7402083c520f00ed6fee80516b0dce609b
|
d8adb204f43471bf868b44d4cd552103ab9b114f
|
/Lab3.py
|
5ffa9f575a491ef1c29be6d5be9a1794235886b7
|
[] |
no_license
|
ytwog/tchmkProject3
|
bfbadcb09ed2df657948cd59aa186188bc808713
|
d91a5a95ae4ebc75a9d8c02f5d76ccdec6bf2a80
|
refs/heads/master
| 2022-09-15T16:50:18.911887
| 2020-05-30T07:08:20
| 2020-05-30T07:08:20
| 268,026,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import math
import random
N = 10 ** 100
M = 10 ** 4
res = 2 - (2 * math.log(2))
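# For b drawn uniformly from [1, N] with N large, P(N mod b < b/2) tends to
# 2 - 2*ln(2) ~= 0.6137; `res` holds that theoretical value for comparison below.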
def func(M):
num = 0
for x in range(M):
b = random.randint(1, N)
r = N % b
if r < b / 2:
num += 1
print('Probability: ', num / M)
    print('Theoretical: ', res)
func(M)
|
[
"ytwog@mail.ru"
] |
ytwog@mail.ru
|
73585b7971de472d714d021bd438be989c8d4e19
|
86ea59142d252b1f11421a9771e7ec66d4b374d7
|
/project/script/LR_model.py
|
ac09d0be0c71f0c334df669bffb37f0c3ecb0c4f
|
[] |
no_license
|
Loielaine/NLP
|
145c6883852d0fdd5713830fbeba3030ecc860c3
|
7c45ee6f98045cb22af6fede48ae61bc76c89802
|
refs/heads/master
| 2020-12-29T10:18:19.850359
| 2020-04-28T18:25:46
| 2020-04-28T18:25:46
| 238,570,520
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
from torch import nn
import torch.nn.functional as F
class LogisticRegression(nn.Module):
def __init__(self, input_size, num_classes):
super(LogisticRegression, self).__init__()
self.linear = nn.Linear(input_size, 200)
self.output = nn.Linear(200, num_classes)
# Initialize the weights of both layers
self.init_weights()
def init_weights(self):
initrange = 0.1
self.linear.weight.data.uniform_(-1 * initrange, initrange)
self.linear.bias.data.zero_()
self.output.weight.data.uniform_(-1 * initrange, initrange)
self.output.bias.data.zero_()
    def forward(self, x):
        # flatten everything except the batch dimension
        x = x.view(x.size()[0], -1)
        hidden = self.linear(x)  # renamed from `input` to avoid shadowing the builtin
        output = self.output(hidden)
        out = F.softmax(output, dim=1)  # explicit dim avoids the deprecation warning
        return out
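# Illustrative sketch (not part of the original file): a forward pass on random
# data; the 28x28 input size and batch size are assumptions purely for demonstration.
if __name__ == "__main__":
    import torch
    model = LogisticRegression(input_size=28 * 28, num_classes=10)
    probs = model(torch.randn(4, 28, 28))  # flattened to (4, 784) inside forward()
    print(probs.shape)  # torch.Size([4, 10])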
|
[
"noreply@github.com"
] |
Loielaine.noreply@github.com
|
cb6db7eb37364ebd18c2e62d7b3e88d6bad7b233
|
719c658585b2e585ee16d7a9421d0ca28b30158e
|
/code/sulheim2020/consensusModel/fix_SBO_terms.py
|
a373c47ee1e6302f60ac5d9e6f3403665fea746a
|
[
"CC-BY-4.0"
] |
permissive
|
matinnuhamunada/Sco-GEM
|
896046f0ef2b4a15f408eab75959dd1cf0c1a5a3
|
418c1a5942cd22b89a9d118c01f72506f2c80e16
|
refs/heads/master
| 2023-05-04T16:22:27.119213
| 2021-04-04T19:57:58
| 2021-04-04T19:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
Author: Snorre Sulheim / Eduard Kerkhoven
# Description
This file adds SBO terms to reactions and metabolites. This is kind of a
copy of a MATLAB script Eduard used to do the same.
"""
import cobra
import logging
"""
Reactions SBO terms
===================
SBO term | name
------------------------------------
SBO:0000627 | exchange reaction
SBO:0000628 | demand reaction
SBO:0000629 | biomass production
SBO:0000630 | ATP maintenance
SBO:0000395 | encapsulating process
SBO:0000655 | transport reaction
SBO:0000176 | biochemical reaction
SBO:0000632 | sink reaction
SBO:0000631 | pseudoreaction
Metabolite SBO terms
====================
SBO term | name
------------------------------
SBO:0000649 | biomass
SBO:0000247 | simple chemical
Genes
======
All genes SBO:0000243
"""
METABOLITE_SBO_TERMS = {
"biomass": "SBO:0000649",
"simple chemical": "SBO:0000247",
}
REACTION_SBO_TERMS = {
"exchange reaction": "SBO:0000627",
"demand reaction": "SBO:0000628",
"biomass production": "SBO:0000629",
"ATP mainteinance": "SBO:0000630",
"encapsulating process": "SBO:0000395",
"transport reaction": "SBO:0000655",
"biochemical reaction": "SBO:0000176",
"sink reaction": "SBO:0000632",
"pseudoreaction": "SBO:0000631",
}
GENE_SBO_TERM = "SBO:0000243"
def add_SBO(scoGEM):
# Metabolites
for m in scoGEM.metabolites:
if m.name in ["biomass", "DNA", "RNA", "protein", "carbohydrate", "cell wall", "lipid"]:
m.annotation["SBO"] = METABOLITE_SBO_TERMS["biomass"]
else:
m.annotation["SBO"] = METABOLITE_SBO_TERMS["simple chemical"]
# Reactions
all_reactions = [r.id for r in scoGEM.reactions]
for r in scoGEM.exchanges:
r.annotation["SBO"] = REACTION_SBO_TERMS["exchange reaction"]
all_reactions.remove(r.id)
for r in scoGEM.demands:
r.annotation["SBO"] = REACTION_SBO_TERMS["demand reaction"]
all_reactions.remove(r.id)
for r in scoGEM.sinks:
r.annotation["SBO"] = REACTION_SBO_TERMS["sink reaction"]
all_reactions.remove(r.id)
scoGEM.reactions.ATPM.annotation["SBO"] = REACTION_SBO_TERMS["ATP mainteinance"]
all_reactions.remove("ATPM")
for r_id in all_reactions:
r = scoGEM.reactions.get_by_id(r_id)
if "BIOMASS_SCO" in r.id:
r.annotation["SBO"] = REACTION_SBO_TERMS["biomass production"]
elif r.id in ["PSEUDO_DONOR_NADH", "PSEUDO_DONOR_NADPH", "PSEUDO_ACCEPTOR_NAD", "PSEUDO_ACCEPTOR_NADP"]:
r.annotation["SBO"] = REACTION_SBO_TERMS["pseudoreaction"]
elif "pseudoreaction" in r.name.lower():
r.annotation["SBO"] = REACTION_SBO_TERMS["encapsulating process"]
else:
if len(r.compartments) == 2:
r.annotation["SBO"] = REACTION_SBO_TERMS["transport reaction"]
else:
r.annotation["SBO"] = REACTION_SBO_TERMS["biochemical reaction"]
# Genes
for g in scoGEM.genes:
g.annotation["SBO"] = GENE_SBO_TERM
logging.info("Added SBO terms to genes, reactions and metabolites")
print("Added SBO terms")
if __name__ == '__main__':
scoGEM_FN = "../../ModelFiles/xml/scoGEM.xml"
scoGEM = cobra.io.read_sbml_model(scoGEM_FN)
add_SBO(scoGEM)
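    # Editor's note (not in the original script): to persist the annotations, the
    # modified model can be written back with cobra's SBML writer, e.g.
    #     cobra.io.write_sbml_model(scoGEM, scoGEM_FN)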
|
[
"ssulheim@gmail.com"
] |
ssulheim@gmail.com
|
95e284a91a19a3a14eaf0b3cdecba1fa3bbdd1e9
|
6c4a0877fd924ccb2ef276a6214e9dc0440e1a65
|
/PairwiseCorrelation_3.py
|
4de4a2fc068319ded1f9921fcf146a600ee064ee
|
[] |
no_license
|
GboladeAI/hipppocampal-Neuro-modelling
|
907ccbde3a7fe541aabfb25b04fc906bfdd3a764
|
a9061830d0e6267813b40193a67586b379e67fa9
|
refs/heads/master
| 2021-06-29T15:20:56.170384
| 2017-09-14T14:24:28
| 2017-09-14T14:24:28
| 103,421,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,640
|
py
|
import math
import numpy as numpy
from pandas import *
import pylab as pltlab
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import bisect
import numpy as np
import pandas as pd
import scipy.io as spio
from matplotlib import cm
import sys
from math import exp, factorial, pow, log
from decimal import Decimal
import mysql.connector
import os
#orig_stdout = sys.stdout
#f = open('outputPairwiseCorrelation.txt', 'w')
#sys.stdout = f
cwd = os.getcwd()
import datetime
print (datetime.datetime.now())
def getSpikeDatPoint(NueronID):
conn = mysql.connector.connect(user='root', password='AnuIfe2014$', database='NueroSCi')
curr = conn.cursor()
selectstmt = " SELECT "
selectstmt = selectstmt + " d.id, d.AnimalName, d.time_at_location, a.TimeSpike, d.x_axis, d.y_axis, d.mesh, a.CrossValidation "
selectstmt = selectstmt + " from NueroSCi.tblNeuronspikeI a, NueroSci.tblMAP "
selectstmt = selectstmt + " b, NueroSCi.tblLOCATIONDATAI d "
selectstmt = selectstmt + " where "
selectstmt = selectstmt + " a.id = b.id_tblNEURONSPIKE and b.distance < 1 "
selectstmt = selectstmt + " and b.id_tblLOCATIONDATA = d.id and a.TimeSpike >= 5285 and a.TimeSpike<=7353 "
selectstmt = selectstmt + " and a.NeuronID = " + str(NueronID) + " "
selectstmt = selectstmt + " order by "
selectstmt = selectstmt + " 3 asc "
#print selectstmt
curr.execute(selectstmt)
data = curr.fetchall()
varTimeSpike = []
varTimeSpikeDiff = []
Neuron =[]
k = 0
summ = 0.00
for row in data:
varTimeSpike.append(float(row[2]))
Neuron.append(float(row[2]) * 1000)
if k == 0:
varTimeSpikeDiff.append(float("0.00"))
else:
if abs((float(varTimeSpike[k]) - float(varTimeSpike[k - 1]))) < 6.00:
varTimeSpike.append(float(row[2]))
Neuron.append(float(row[2])* 1000)
k = k + 1
curr.close()
conn.close()
return Neuron
def getDistinctNueron():
conn = mysql.connector.connect(user='root', password='AnuIfe2014$', database='NueroSCi')
    curr = conn.cursor()
selectstmt = " SELECT "
selectstmt = selectstmt + " distinct a.NeuronID as NeuronID "
selectstmt = selectstmt + " from NueroSCi.tblNeuronspikeI a, NueroSci.tblMAP "
selectstmt = selectstmt + " b, NueroSCi.tblLOCATIONDATAI d "
selectstmt = selectstmt + " where "
selectstmt = selectstmt + " a.id = b.id_tblNEURONSPIKE and b.distance < 1 "
selectstmt = selectstmt + " and b.id_tblLOCATIONDATA = d.id and a.TimeSpike >= 5285 and a.TimeSpike<=7353 "
selectstmt = selectstmt + " order by 1 asc "
#print selectstmt
curr.execute(selectstmt)
data = curr.fetchall()
NeuronIDs = []
for row in data:
NeuronIDs.append(row[0])
curr.close()
conn.close()
return NeuronIDs
N=getDistinctNueron()
sizeN = len(N)
positivecorrcoef = []
positivepairnueron = []
negativecorrcoef = []
negativepairnueron = []
zeropairnueron =[]
zerocorrcoef = []
lp=0
ln=0
lz=0
ticksp=[]
ticksn=[]
ticksz=[]
n_pos = 0
n_neg = 0
n_zero = 0
DtTAB=10
Dt=(DtTAB/10.0) * 1000 # 0.1 seconds
for k in range(0, sizeN ):
nuerona=N[k]
Nueron_A =getSpikeDatPoint(nuerona)
# get the spike count for Nueron A based on Dt time bin
spikecountNoise_A, binsA, patchesA = plt.hist(Nueron_A, bins=int(Dt), histtype='bar', color='blue')
#plt.savefig(str(cwd) + '/test.png')
print " nuerona : " + str(nuerona)
for j in range(k+1, sizeN ):
nueronb = N[j]
Nueron_B = getSpikeDatPoint(nueronb)
# get the spike count for Nueron B based on Dt time bin
spikecountNoise_B, binsB, patchesB = plt.hist(Nueron_B, bins=int(Dt), histtype='bar', color='blue')
#plt.savefig(str(cwd)+'/test.png')
        # compute the Pearson correlation coefficient: numpy.corrcoef returns the
        # 2x2 correlation matrix, whose first row is [1.0, r], so b is the pairwise r
        a, b = numpy.corrcoef(spikecountNoise_A, spikecountNoise_B)[0]
cc = str(nuerona) +'_'+ str(nueronb)
if b>0.00:
positivecorrcoef.append(float(b))
positivepairnueron.append(str(cc))
            ticksp.append(n_pos)
            n_pos = n_pos + 1
if b < 0.00:
negativecorrcoef.append(float(b))
negativepairnueron.append(str(cc))
            ticksn.append(n_neg)
            n_neg = n_neg + 1
if b==0.00:
zerocorrcoef.append(float(b))
zeropairnueron.append(str(cc))
            ticksz.append(n_zero)
            n_zero = n_zero + 1
dfpositive = DataFrame({'PairNueron':positivepairnueron, 'CorrCoef': positivecorrcoef})
#df=df.sort([ 'CorrCoef'], ascending=[ False])
dfpositive.sort_values('CorrCoef', axis=0, ascending=False, inplace=True, kind='quicksort', na_position='last')
dfnegative = DataFrame({'PairNueron':negativepairnueron, 'CorrCoef': negativecorrcoef})
#df=df.sort([ 'CorrCoef'], ascending=[ False])
dfnegative.sort_values('CorrCoef', axis=0, ascending=False, inplace=True, kind='quicksort', na_position='last')
dfzero = DataFrame({'PairNueron':zeropairnueron, 'CorrCoef': zerocorrcoef})
print("Positive Pairwise Correlation")
print("**************************************************")
conn = mysql.connector.connect(user='root', password='AnuIfe2014$', database='NueroSCi')
curr = conn.cursor()
for row in dfpositive.iterrows():
index, data = row
    print(data['PairNueron'], data['CorrCoef'])
INSERTstmt = " Insert into PairwiseCorrelation"+str(DtTAB)+"00 VALUES( 'Positive','I', "+str(data['PairNueron']).split("_")[0] +", "
INSERTstmt=INSERTstmt +str(data['PairNueron']).split("_")[1]+", "+ str(data['CorrCoef']) +" ) "
try:
curr.execute(INSERTstmt)
curr.execute("commit")
except mysql.connector.ProgrammingError:
print( "The following query failed:")
print (INSERTstmt)
conn.close()
curr.close()
print("Negative Pairwise Correlation")
print("**************************************************")
conn = mysql.connector.connect(user='root', password='AnuIfe2014$', database='NueroSCi')
curr = conn.cursor()
for row in dfnegative.iterrows():
index, data = row
#print (data['PairNueron']), (data['CorrCoef'])
INSERTstmt = " Insert into PairwiseCorrelation"+str(DtTAB)+"00 VALUES( 'Negative','I', "+str(data['PairNueron']).split("_")[0] +", "
INSERTstmt=INSERTstmt +str(data['PairNueron']).split("_")[1]+", "+ str(data['CorrCoef']) +" ) "
try:
curr.execute(INSERTstmt)
curr.execute("commit")
except mysql.connector.ProgrammingError:
print( "The following query failed:")
print (INSERTstmt)
conn.close()
curr.close()
print("")
print("")
print("")
print("Zero Pairwise Correlation")
print("**************************************************")
conn = mysql.connector.connect(user='root', password='AnuIfe2014$', database='NueroSCi')
curr = conn.cursor()
for row in dfzero.iterrows():
index, data = row
#print (data['PairNueron']), (data['CorrCoef'])
INSERTstmt = " Insert into PairwiseCorrelation"+str(DtTAB)+"00 VALUES( 'Zero','I', "+str(data['PairNueron']).split("_")[0] +", "
INSERTstmt=INSERTstmt +str(data['PairNueron']).split("_")[1]+", "+ str(data['CorrCoef']) +" ) "
try:
curr.execute(INSERTstmt)
curr.execute("commit")
except mysql.connector.ProgrammingError:
print( "The following query failed:")
print (INSERTstmt)
conn.close()
curr.close()
print("")
print("")
print("")
#sys.stdout = orig_stdout
#f.close()
print (datetime.datetime.now())
|
[
"noreply@github.com"
] |
GboladeAI.noreply@github.com
|
3612f1f79bd273655750d177b8935d7343cae48a
|
3b66632458e2463db62a800f9a0cf9e13c71a47e
|
/tests/test_iterators/test_tf_batches.py
|
a458cf5d88bfb93a210c6d1e36e2c94cdc9f33ff
|
[
"MIT"
] |
permissive
|
pesser/edflow
|
eddb6d9341b861670946c157363933e9add52288
|
317cb1b61bf810a68004788d08418a5352653264
|
refs/heads/dev
| 2022-12-09T05:19:35.850173
| 2020-07-21T16:29:15
| 2020-07-21T16:29:15
| 146,750,121
| 27
| 15
|
MIT
| 2022-12-07T20:55:50
| 2018-08-30T12:59:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
import pytest
import numpy as np
from edflow.iterators import tf_batches as batches
def test_tf_batch_to_canvas():
import tensorflow as tf
tf.enable_eager_execution()
x = np.ones((9, 100, 100, 3))
x = tf.convert_to_tensor(x)
canvas = batches.tf_batch_to_canvas(x)
assert canvas.shape == (1, 300, 300, 3)
canvas = batches.tf_batch_to_canvas(x, cols=5)
assert canvas.shape == (1, 200, 500, 3)
canvas = batches.tf_batch_to_canvas(x, cols=1)
assert canvas.shape == (1, 900, 100, 3)
canvas = batches.tf_batch_to_canvas(x, cols=0)
assert canvas.shape == (1, 900, 100, 3)
canvas = batches.tf_batch_to_canvas(x, cols=None)
assert canvas.shape == (1, 300, 300, 3)
x = np.ones((9, 100, 100, 1))
x = tf.convert_to_tensor(x)
canvas = batches.tf_batch_to_canvas(x)
assert canvas.shape == (1, 300, 300, 1)
x = np.ones((9, 100, 100, 1, 1))
x = tf.convert_to_tensor(x)
with pytest.raises(ValueError, match="input tensor has more than 4 dimensions."):
canvas = batches.tf_batch_to_canvas(x)
|
[
"supermario94123@gmail.com"
] |
supermario94123@gmail.com
|
a3612f2f4a46e893399fd2040db726e8602be19d
|
f203c94b9bc0e65dff7d955fc96e581cbed3edff
|
/desafio 20.py
|
046a0c1bbcce6376cd43b3e8872fca758a61a199
|
[] |
no_license
|
Woimarina/mundo-1---python-curso-em-video
|
45521012613c69a045d341855b619e8d59dd7811
|
09e2b127d2ee85e70807a83a7bd9880992748bee
|
refs/heads/main
| 2023-09-05T15:57:28.274525
| 2021-11-01T18:50:24
| 2021-11-01T18:50:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
import random
aluno1 = input('enter the name of the first student: ')
aluno2 = input('enter the name of the second student: ')
aluno3 = input('enter the name of the third student: ')
aluno4 = input('enter the name of the fourth student: ')
lista = [aluno1, aluno2, aluno3, aluno4]
random.shuffle(lista)
print('the presentation order will be: {}'.format(lista))
|
[
"noreply@github.com"
] |
Woimarina.noreply@github.com
|
c50bc8addf50e1d761c36d106a08f8f01960d815
|
040e29f30d17743a67562867640b73ae19834524
|
/incentive.py
|
8de5910986cb9d0c8b94d72df90e7303903b2e26
|
[
"Apache-2.0"
] |
permissive
|
AIandSocialGoodLab/securitygamewithinformants
|
0f7ee58471ac9ccd542b73e2c60290796447b6e9
|
6244e8891d1244fd6162aea79ff259a34d0f3c66
|
refs/heads/master
| 2023-04-03T00:33:51.256758
| 2023-03-19T20:52:27
| 2023-03-19T20:52:27
| 260,015,755
| 0
| 3
| null | 2020-04-29T19:01:13
| 2020-04-29T18:54:54
| null |
UTF-8
|
Python
| false
| false
| 5,113
|
py
|
# This is the code used to generate the data for Figure 4(b) in the paper.
import gurobipy as gp
from gurobipy import GRB
import numpy as np
import pandas
# from scipy.special import softmax  # (a local softmax is defined below instead)
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
def printGraph(X, Y, Y1, name):
plt.figure('Draw')
plt.plot(X, Y)
plt.plot(X, Y1)
legend = []
legend.append('defense utility gain')
legend.append('expected additional payoff')
plt.legend(legend, loc='lower right')
# plt.scatter(X, Y, color = 'r', marker='.')
plt.xlabel('addtional reward')
plt.draw()
plt.savefig(name + ".pdf")
plt.close()
print("print figure finish: " + name)
def lp_calc(pw, pa, ra, pd, rd, uc, uu, p, r, t):
ilp_model = gp.Model('utility_optimization')
x = ilp_model.addVars(len(pa), len(pa) + 2, name='x')
m = np.zeros((len(pa), len(p)))
for i in range(len(p)):
for j in range(len(pa)):
if uc[j, i] < uu[j, i]:
m[j, i] = len(pa) + 1
else:
m[j, i] = j + 1
ilp_model.setObjective(gp.quicksum(
(p[i] * ((1 - pw) * (x[t, 0] * rd[t] + (1 - x[t, 0]) * pd[t]) + pw * (x[t, m[t, i]] * rd[t] + (1 - x[t, m[t, i]]) * pd[t])))
for i in range(len(p))), GRB.MAXIMIZE)
ilp_model.addConstrs(gp.quicksum(
(p[i] * ((1 - pw) * (x[t, 0] * pa[t] + (1 - x[t, 0]) * ra[t]) + pw * (x[t, m[t, i]] * pa[t] + (1 - x[t, m[t, i]]) * ra[t])))
for i in range(len(p)))
>= gp.quicksum(
(p[i] * ((1 - pw) * (x[j, 0] * pa[j] + (1 - x[j, 0]) * ra[j]) + pw * (x[j, m[j, i]] * pa[j] + (1 - x[j, m[j, i]]) * ra[j])))
for i in range(len(p))) for j in range(len(pa)))
ilp_model.addConstrs(x[t1, i] >= x[t1, len(pa) + 1] for i in range(len(pa) + 2) for t1 in range(len(pa)))
ilp_model.addConstrs(x[t1, i] <= x[t1, t1 + 1] for i in range(len(pa) + 2) for t1 in range(len(pa)))
ilp_model.addConstrs(x[t1, i] >= 0 for t1 in range(len(pa)) for i in range(len(pa) + 2))
ilp_model.addConstrs(x[t1, i] <= 1 for t1 in range(len(pa)) for i in range(len(pa) + 2))
ilp_model.addConstrs(gp.quicksum(x[t, i] for t in range(len(pa))) <= r for i in range(len(pa) + 2))
ilp_model.optimize()
try:
print(ilp_model.objVal)
except Exception as e:
print(e)
return -np.inf, -np.inf, 0
x1 = ilp_model.getAttr('X', x)
attacker_val = sum((p[i] * ((1 - pw) * (x1[t, 0] * pa[t] + (1 - x1[t, 0]) * ra[t]) + pw * (
x1[t, m[t, i]] * pa[t] + (1 - x1[t, m[t, i]]) * ra[t])))
for i in range(len(p)))
print(attacker_val)
return ilp_model.objVal, attacker_val, sum(p[i] * x1[t, m[t, i]] * pw for i in range(len(p)))
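# Editor's note: lp_calc returns (defender objective, attacker expected utility,
# expected coverage mass sum_i p_i * pw * x[t, m[t, i]]); the caller multiplies the
# last value by the additional reward `up` to obtain the expected additional payoff.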
def softmax(x):
return np.exp(x) / sum(np.exp(x))
import pickle
def save(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
type_number = 2
target_numbers = [10, 30, 100, 200, 300, 400, 500, 600]
target_number = 10
alpha = 0.3
eps = 1e-8
res = []
payoff_res = []
target_us = []
for num_instances in range(50):
# print(p.shape)
r = 1
pw = 0.3
maxv = -np.inf
list_a = []
list_result = []
list_pay = []
scale = 0.1
pa = -np.random.rand(target_number)
ra = np.random.rand(target_number)
uc = np.random.rand(target_number, type_number)
pd = -np.random.rand(target_number)
rd = np.random.rand(target_number)
uu = np.random.rand(target_number, type_number)
uc *= scale
uu *= scale
p = np.random.rand(type_number)
p = p / np.sum(p)
r = 1
target_u = -np.inf
for i in range(0, target_number):
cur_utility, _, _ = lp_calc(pw, pa, ra, pd, rd, uc, uu, p, r, i)
target_u = max(target_u, cur_utility)
target_us.append(target_u)
for up in np.arange(0, 0.1, 0.001):
            uc = uc + up  # note: uc accumulates across the loop, so each step adds a further `up` on top of the earlier increments
maxv = -np.inf
payoff = 0
for i in range(0, target_number):
cur_utility, _ , xt= lp_calc(pw, pa, ra, pd, rd, uc, uu, p, r, i)
if cur_utility > maxv:
maxv = cur_utility
payoff = xt * up
list_result.append(maxv)
list_pay.append(payoff)
res.append(list_result)
payoff_res.append(list_pay)
print(res)
print_x = np.arange(0, 0.1, 0.001)
res = np.array(res)
payoff_res = np.array(payoff_res)
print(res)
save(res, 'tmp/incentive_n.pickle')
save(payoff_res, 'tmp/incentive_payoff.pickle')
# The following part is to draw a draft with the data, not exactly painting the figure in the paper
mean_res = np.mean(res, axis=0)
print(mean_res.shape)
mean_res = mean_res.reshape(-1)
mean_payoff = np.mean(payoff_res, axis=0)
mean_payoff = mean_payoff.reshape(-1)
printGraph(print_x, mean_res, mean_payoff, 'incentive')
|
[
"chenweizhe1999@126.com"
] |
chenweizhe1999@126.com
|
93fc5ec316577bd629f8918a2d20178df6b74d41
|
f15e1bf95382348c8ed758bf136bf86d2435b9b4
|
/apputils/stock_analysis.py
|
55a5308e20168c7e45544acfc69a4046e49ddb20
|
[
"Apache-2.0"
] |
permissive
|
VaidherviKansal/Delta-lake_demo
|
40e56913e631b3b22bda963bd1a240307c29cf96
|
fca694b1b89959fb29932c90a0f1784a779dd22c
|
refs/heads/master
| 2021-05-17T21:47:49.874987
| 2020-03-31T07:14:29
| 2020-03-31T07:14:29
| 250,966,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,181
|
py
|
import datetime
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
# read from local system
spark = SparkSession.builder.appName('abc').getOrCreate()
dfBasePrice = spark.read.format("csv").option("header", "true").load("/home/vaidhervi/Downloads/stocksDailyPrices.csv")
dfBaseFund = spark.read.format("csv").option("header", "true").load("/home/vaidhervi/Downloads/stocksFundamentals.csv")
# CSV columns load as strings; cast price_date to a real date so the range logic below works
dfBasePrice = dfBasePrice.withColumn("price_date", F.to_date("price_date"))
dfBaseFund = dfBaseFund.withColumn("price_date", F.to_date("price_date"))
dfBaseFund.show()
dfBasePrice.show()
# write
# Create Fundamental Data (Databricks Delta table)
print("delta table")
dfBaseFund.write.mode('overwrite').format("delta").save("file:///home/vaidhervi/delta/stockF")
# Create Price Data (Databricks Delta table)
dfBasePrice.write.mode('overwrite').format("delta").save("file:///home/vaidhervi/delta/stockP")
print("transformations")
row = dfBasePrice.agg(F.max("price_date").alias("maxDate"),
                      F.min("price_date").alias("minDate")).collect()[0]
startDate = row["minDate"]
endDate = row["maxDate"]
# Define our date range function
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + datetime.timedelta(n)
# Define combinePriceAndFund information by date
def combinePriceAndFund(theDate):
dfFund = dfBaseFund.where(
dfBaseFund.price_date == theDate)
dfPrice = dfBasePrice.where(
        dfBasePrice.price_date == theDate).drop('price_date')  # keep a DataFrame (no collect) for the join below
# Drop the updated column
dfPriceWFund = dfPrice.join(dfFund, on=["ticker"], how='inner').drop("updated")
# Save data to DBFS
dfPriceWFund.write.format("delta").mode("append").save("file:///home/vaidhervi/delta/stocksDailyPricesWFund")
# Loop through dates to complete fundamentals
# + price ETL process
for single_date in daterange(startDate, (endDate + datetime.timedelta(days=1))):
start = datetime.datetime.now()
combinePriceAndFund(single_date)
end = datetime.datetime.now()
dfPriceWithFundamentals = spark.read.format("delta").load("file:///home/vaidhervi/delta/stocksDailyPricesWFund")
dfPriceWithFundamentals.show()
# Create temporary view of the data
dfPriceWithFundamentals.createOrReplaceTempView("priceWithFundamentals")
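# Illustrative sketch (not part of the original file): with the temp view registered,
# the combined data can be queried with Spark SQL, for example:
spark.sql("SELECT ticker, COUNT(*) AS n_days FROM priceWithFundamentals GROUP BY ticker").show()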
|
[
"vaidhervi.kansal@teamcomputers.com"
] |
vaidhervi.kansal@teamcomputers.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.