blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
532643ec7785b8e9f57df629a1c947434c7fcbcd
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-2705.py
|
da1acce1fdca02af865dc610333ef8e807a4d3b9
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,755
|
py
|
# A resizable list of integers
class Vector(object):
    items: [int] = None
    size: int = 0

    def __init__(self:"Vector"):
        # Start with a single zeroed slot; logical size is still 0.
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector.
    # FIX: out-of-range indexes are now ignored entirely. Previously an
    # idx >= size skipped the shift loop but still decremented size,
    # silently dropping the last element.
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers
# (generated benchmark variant: the *2 members mirror the Vector API;
# the extra parameters and the items2/size2 fields are never used)
class Vector2(object):
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Returns current capacity (duplicate of capacity)
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
# (generated benchmark variant: the *2/*3 members mirror the Vector API;
# the extra parameters and the itemsN/sizeN fields are never used)
class Vector3(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0

    def __init__(self:"Vector3"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored)
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
# (generated benchmark variant: the *2/*3/*4 members mirror the Vector API;
# the extra parameters and the itemsN/sizeN fields are never used)
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0

    def __init__(self:"Vector4"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity2(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity3(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    # FIX: the original annotation was the unexpanded template placeholder
    # "$IDSTRING" — a syntax error; it must be the class name "Vector4".
    def capacity4(self:"Vector4") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2..item4 are ignored)
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2..new_items4 are ignored)
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored)
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..idx4 are ignored)
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..idx4 are ignored)
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
# (generated benchmark variant: the *2..*5 members mirror the Vector API;
# the extra parameters and the itemsN/sizeN fields are never used)
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0

    def __init__(self:"Vector5"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity2(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity3(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity4(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity (duplicate)
    def capacity5(self:"Vector5") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element (duplicate)
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2..item4 are ignored)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2..item5 are ignored)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2..new_items4 are ignored)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2..new_items5 are ignored)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored)
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored)
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..idx4 are ignored)
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2..idx5 are ignored)
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..idx4 are ignored)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2..idx5 are ignored)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length2(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length3(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length4(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector (duplicate)
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000

    # Overriding to do fewer resizes: double the backing list until
    # doubling_limit is reached, then grow one slot at a time.
    # NOTE: doubling duplicates the current contents; slots past `size`
    # hold stale copies, which is fine since append overwrites items[size].
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (generated variant: increase_capacity2 and doubling_limit2 are unused duplicates)
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate of the above)
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (generated variant: the *2/*3 members are unused duplicates)
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (generated variant: the *2..*4 members are unused duplicates)
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (generated variant: the *2..*5 members are unused duplicates)
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes (generated duplicate)
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
# Returns a DoublingVector holding i, i+1, ..., j-1 (empty when i >= j).
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Generated variant of vrange: i2/j2 and v2 are never used.
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Generated variant of vrange: only i and j are used.
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Generated variant of vrange: only i and j are used.
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Generated variant of vrange: only i and j are used.
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
# In-place filter: for each element, removes every later element that is
# divisible by it, leaving only elements with no earlier divisor in v.
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                # Only advance when nothing was removed: removal shifts
                # the next candidate into position j.
                j = j + 1
        i = i + 1
# Generated variant of sieve: v2 and the *2 locals are never used.
def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Generated variant of sieve: only v is used.
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Generated variant of sieve: only v is used.
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Generated variant of sieve: only v is used.
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch: build [2, n) five times, but only sieve and print the first
# vector — the others exist purely to inflate the benchmark workload.
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print the surviving elements (primes in [2, n)), one per line
while i < v.length():
    print(v.get(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
2f147c1641f843f833516ed9c68321409fb72dac
|
84c4474a88a59da1e72d86b33b5326003f578271
|
/saleor/graphql/checkout/mutations/checkout_language_code_update.py
|
2da8f5d51c6bdb4c3a5fd72b4babc3f0f2d1e657
|
[
"BSD-3-Clause"
] |
permissive
|
vineetb/saleor
|
052bd416d067699db774f06453d942cb36c5a4b7
|
b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9
|
refs/heads/main
| 2023-07-20T02:01:28.338748
| 2023-07-17T06:05:36
| 2023-07-17T06:05:36
| 309,911,573
| 0
| 0
|
NOASSERTION
| 2020-11-04T06:32:55
| 2020-11-04T06:32:55
| null |
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
import graphene
from saleor.webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_34, DEPRECATED_IN_3X_INPUT
from ...core.doc_category import DOC_CATEGORY_CHECKOUT
from ...core.enums import LanguageCodeEnum
from ...core.mutations import BaseMutation
from ...core.scalars import UUID
from ...core.types import CheckoutError
from ...core.utils import WebhookEventInfo
from ...plugins.dataloaders import get_plugin_manager_promise
from ..types import Checkout
from .utils import get_checkout
class CheckoutLanguageCodeUpdate(BaseMutation):
    """GraphQL mutation that updates the language code of an existing checkout."""

    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        # `id` is the preferred identifier; `token` and `checkout_id`
        # are kept only for backward compatibility (deprecated).
        id = graphene.ID(
            description="The checkout's ID." + ADDED_IN_34,
            required=False,
        )
        token = UUID(
            description=f"Checkout token.{DEPRECATED_IN_3X_INPUT} Use `id` instead.",
            required=False,
        )
        checkout_id = graphene.ID(
            required=False,
            description=(
                f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead."
            ),
        )
        language_code = graphene.Argument(
            LanguageCodeEnum, required=True, description="New language code."
        )

    class Meta:
        description = "Update language code in the existing checkout."
        doc_category = DOC_CATEGORY_CHECKOUT
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
        webhook_events_info = [
            WebhookEventInfo(
                type=WebhookEventAsyncType.CHECKOUT_UPDATED,
                description="A checkout was updated.",
            )
        ]

    @classmethod
    def perform_mutation(  # type: ignore[override]
        cls,
        _root,
        info: ResolveInfo,
        /,
        *,
        checkout_id=None,
        id=None,
        language_code,
        token=None
    ):
        """Resolve the checkout from any of the three identifiers, persist
        the new language code, and fire the checkout_updated plugin event.
        """
        checkout = get_checkout(cls, info, checkout_id=checkout_id, token=token, id=id)
        checkout.language_code = language_code
        # NOTE(review): "last_change" is included in update_fields —
        # presumably auto-updated on save; confirm against the model.
        checkout.save(update_fields=["language_code", "last_change"])
        manager = get_plugin_manager_promise(info.context).get()
        cls.call_event(manager.checkout_updated, checkout)
        return CheckoutLanguageCodeUpdate(checkout=checkout)
|
[
"noreply@github.com"
] |
vineetb.noreply@github.com
|
a99ce7753905122bebfe5cb66c00c6ed7573a622
|
b5b0a24c9006b65b33b82c954705ada878f846f9
|
/main.py
|
9fcf9f4b74636543668bd2dc1bae6fb982b0a0ee
|
[] |
no_license
|
himanshuc3/FSND-item_catalog
|
068c831a110b9ceb64388c6550b5e29ed02a22c6
|
71bc80ef6b902c08693422b5d58f9fbfbdf02676
|
refs/heads/master
| 2021-07-15T16:22:29.154023
| 2018-11-11T17:24:05
| 2018-11-11T17:24:05
| 149,265,241
| 0
| 0
| null | 2020-05-20T16:38:50
| 2018-09-18T09:48:20
|
Python
|
UTF-8
|
Python
| false
| false
| 15,022
|
py
|
from flask import Flask, render_template, \
request, url_for, flash, redirect, Blueprint, \
session as login_session, make_response, jsonify
# For applying bootstrap integration into flask jinja templates
from flask_bootstrap import Bootstrap
# For login management
from flask_login import LoginManager, login_required, login_user, logout_user
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
# For creating restFUL API
from flask_restful import Resource, Api
# For CRUD operations from database
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from database_setup import Base, Items, User
from flask_dance.contrib.google import make_google_blueprint, google
import random
import string
from form import LoginForm, RegistrationForm, NewItemForm, DeleteForm
import os
import json
import httplib2
import requests
from config import config
# Flask instance created
app = Flask(__name__)
# Making the app bootstrap flavoured
bootstrap = Bootstrap(app)
# Initializing api instance
api = Api(app)
# NOTE(review): reads client_secrets.json at import time and never closes
# the file handle; a missing file aborts the import — consider `with open(...)`.
CLIENT_ID = json.loads(
    open('client_secrets.json', 'r').read())['web']['client_id']
# Configuration for querying the database;
# the session is scoped (one session per thread/request)
engine = create_engine('sqlite:///item_catalog.db')
Base.metadata.bind = engine
db_session = scoped_session(sessionmaker(bind=engine))
# Removes the scoped session at the end of every request so each
# request (view function) gets a fresh SQLAlchemy session.
@app.teardown_request
def remove_session(ex=None):
    db_session.remove()
# Flask-Login setup: unauthenticated users are redirected to the 'login' view.
login_manager = LoginManager(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
return db_session.query(User).get(int(user_id))
# Configuring google oauth blueprint
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# #Configure to accept http oauth request instead of https
# google_blueprint = make_google_blueprint(
# client_id = data['google_client_id'],
# client_secret = data['google_client_secret'],
# scope=[
# "https://www.googleapis.com/auth/plus.me",
# "https://www.googleapis.com/auth/userinfo.email"
# ]
# )
# app.register_blueprint(google_blueprint, url_prefix="/google_login")
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'])
db_session.add(newUser)
db_session.commit()
user = db_session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserInfo(user_id):
user = db_session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = db_session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# Default home route
@app.route('/', methods=['GET'])
def Home():
latest_items = db_session.query(Items).order_by(Items.id.desc()).limit(5)
return render_template(
'index.html',
latest_items=latest_items,
login_session=login_session
)
# Login route
@app.route('/login', methods=['GET', 'POST'])
def Login():
login_form = LoginForm()
if login_form.submit1.data and login_form.validate_on_submit():
user = (
db_session
.query(User)
.filter_by(email=login_form.email.data)
.first()
)
if user is not None and user.verify_password(login_form.password.data):
login_user(user, login_form.remember_me.data)
login_form.email.data = ''
login_form.password.data = ''
return redirect(url_for('Home'))
flash('Invalid email or password')
return render_template(
'login.html',
form=login_form,
login_session=login_session
)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print("Token's client ID does not match app's.")
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(
json.dumps('Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# see if user exists, if it doesn't make a new one
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
return redirect(url_for('Home'))
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
if access_token is None:
print('Access Token is None')
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
print('In gdisconnect access token is %s', access_token)
print('User name is: ')
print(login_session['username'])
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % \
login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print('result is ')
print(result)
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
return redirect(url_for('Home'))
else:
response = make_response(
json.dumps('Failed to revoke token for given user.', 400))
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/register', methods=['GET', 'POST'])
def Register():
registration_form = RegistrationForm()
if registration_form.submit2.data and \
registration_form.validate_on_submit():
user = (
db_session
.query(User)
.filter_by(email=registration_form.email.data)
.first()
)
if user is None:
new_user = User(
name=registration_form.name.data,
email=registration_form.email.data
)
new_user.password = registration_form.password.data
db_session.add(new_user)
db_session.commit()
flash('Registration successful. \
Please Log in with the credentials')
return redirect(url_for('Home'))
flash('User already exists. Please log in')
return render_template('register.html', form=registration_form)
@app.route('/google_login')
def GoogleLogin():
# Create anti-forgery state token
state = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(32))
login_session['state'] = state
return render_template('google_login.html', STATE=state)
# Logging out
# @app.route('/logout')
# @login_required
# def Logout():
# logout_user()
# flash('You have been logged out')
# return redirect(url_for('Home'))
# Show all items when clicked on the catalog
@app.route('/catalog/<string:category_name>/items')
def ItemsByCategory(category_name):
items_by_category = (
db_session
.query(Items)
.filter_by(category=category_name)
.all()
)
return render_template(
'items_by_category.html',
category_name=category_name,
items=items_by_category,
login_session=login_session
)
# Show a specific item
@app.route('/catalog/<string:category_name>/<string:item_name>')
def SpecificItem(category_name, item_name):
item = (
db_session
.query(Items)
.filter_by(title=item_name, category=category_name)
.first()
)
print(item.title, item.description)
return render_template(
'item_page.html',
item=item,
login_session=login_session
)
# Adding item route
@app.route('/catalog/new', methods=['GET', 'POST'])
def NewItem():
if 'username' not in login_session:
return redirect(url_for('Home'))
new_item_form = NewItemForm()
if request.method == 'POST' and \
new_item_form.validate_on_submit() and \
'username' in login_session:
new_item = Items(
title=new_item_form.title.data,
description=new_item_form.description.data,
category=new_item_form.category.data,
user_id=login_session['user_id']
)
db_session.add(new_item)
db_session.commit()
return redirect(url_for('Home'))
return render_template(
'new_item.html',
new_item_form=new_item_form,
login_session=login_session
)
# Editing item
@app.route(
'/catalog/<string:category_name>/<string:item_name>/edit',
methods=['GET', 'POST']
)
# @login_required
def EditItem(category_name, item_name):
if 'username' not in login_session:
redirect(url_for('Home'))
item_to_edit = (
db_session
.query(Items)
.filter_by(title=item_name, category=category_name)
.first()
)
if item_to_edit and \
'username' in login_session and \
item_to_edit.user_id == login_session['user_id']:
if request.method == 'POST':
title = request.form.get('title')
description = request.form.get('description')
category = request.form.get('category')
item_to_edit.title = title
item_to_edit.description = description
category = str(category)
db_session.add(item_to_edit)
db_session.commit()
return redirect(url_for('Home'))
return render_template(
'edit_item.html',
item=item_to_edit,
login_session=login_session
)
return redirect(url_for('Home'))
# Deleting item route
# @login_required
@app.route(
'/catalog/<string:category_name>/<string:item_name>/delete',
methods=['GET', 'POST']
)
def DeleteItem(category_name, item_name):
if 'username' not in login_session:
return redirect(url_for('Home'))
# Checking if item exists in table
item_to_delete = (
db_session
.query(Items)
.filter_by(title=item_name, category=category_name)
.first()
)
form = DeleteForm()
if item_to_delete and \
'username' in login_session and \
item_to_delete.user_id == login_session['user_id']:
if request.method == 'POST':
db_session.delete(item_to_delete)
db_session.commit()
return redirect(url_for('Home'))
return render_template(
'delete_item.html',
delete_form=form,
login_session=login_session
)
return redirect(url_for('Home'))
class Item(Resource):
def get(self):
count = db_session.query(Items).count()
items = db_session.query(Items).all()
arbitrary_item = items[random.randint(0, count-1)]
return json.dumps({
'id': arbitrary_item.id,
'title': arbitrary_item.title,
'description': arbitrary_item.description,
'category': arbitrary_item.category
})
api.add_resource(Item, '/v1/random_catalog/json')
@app.route('/v1/catalog/json')
def allItemsJSON():
items = db_session.query(Items).all()
item_dict = [
{
'id': i.id,
'title': i.title,
'description': i.description,
'category': i.category
} for i in items]
return jsonify(items=item_dict)
# Route doesn't exist
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
if __name__ == '__main__':
app.secret_key = config['secret_key']
# Enables reloader and debugger
app.debug = True
# Starting flask application at localhost:5000
app.run(host='0.0.0.0', port=4000)
|
[
"himichhabra14@gmail.com"
] |
himichhabra14@gmail.com
|
6f3ba5a679a0d4a845b7bd6df1fb487818530f60
|
01245f879c22722773c9671f7d67fb29509570a8
|
/stackLinked.py
|
d0fe86934d1e9a2c82dc5bb6b697044447130b6b
|
[] |
no_license
|
vishnu1729/Data-Structures-and-Algorithms
|
9a64b746c305e5467e82277806539fd14c06b26c
|
884bdb80b7da0aba16e7b976b4255c31e2baec74
|
refs/heads/master
| 2021-01-17T20:25:23.928216
| 2016-08-12T17:23:22
| 2016-08-12T17:23:22
| 65,432,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
"""definition of linked list implementation of s stack ADT
the different memeber functions are described beside in the comments
the class LinkedStack uses a private nested class Node that holds the units of the LinkedList"""
"""Author: Vishnu Muralidharan"""
class LinkedStack:
#non-public nested class Node
class _Node:
def __init__(self,element,next): #constructor to initialize element and next pointer
self._element = element
self._next = next
def __init__(self): #consturctor to LinkedStack class
#initialize empty stack
self._head = None
self._size = 0
def __len__(self): #length of the stack
return self._size
def isempty(self): #function to check if the stack is empty
return self._size == 0
def push(self,e): #push is similar inserting node at the beginnign of the list
#the new element is set to head of the list
#the new head node's element is set to e and the new head's next filed points to the previous head
self._head = self._Node(e,self._head)
self._size += 1
def top(self):
if isempty():
print("The stack is empty")
#return top element of stack
return self._head._element
def pop(self): #pop is similar to removing elemennet from head of linked list
if isempty():
print("Stack is empty")
answer = self._head._element
self._head = self._head._next #set the head node to the next node after old head node
self._size -= 1
return answer
|
[
"noreply@github.com"
] |
vishnu1729.noreply@github.com
|
e3968b5a6ee4acfc5472f3331048077d2290fe32
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_203/78.py
|
29bc91b6b0a4fa993c5a99a015c4bc7188f4154e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
#!/usr/bin/env python2
import itertools
def solve_row(prev_row, row):
prev_row = list(prev_row)
prev_chars = set(prev_row)
#print prev_row, row
for i, row_i in enumerate(row):
if row_i == '?':
continue
min_i = i
max_i = i
while max_i + 1 < len(prev_row) and prev_row[max_i] == prev_row[max_i + 1]:
max_i += 1
while min_i - 1 >= 0 and prev_row[min_i] == prev_row[min_i - 1] and prev_row[min_i] in prev_chars:
min_i -= 1
prev_row[min_i:max_i+1] = row_i * (max_i + 1 - min_i)
return prev_row
def solve(r, c, a):
ans = []
prev_row = ['?' for _ in a[0]]
for row in a:
if any(row_i != '?' for row_i in row):
prev_row = solve_row(prev_row, row)
break
for row in a:
prev_row = solve_row(prev_row, row)
ans.append(prev_row)
assert '?' not in prev_row
return ans
def _iter_tuples(a):
for i, row in enumerate(a):
for j, row_j in enumerate(row):
yield i, j, row_j
def _to_tuples(a):
return list(_iter_tuples(a))
def check(r, c, a, ans):
a = _to_tuples(a)
ans = _to_tuples(ans)
for (i, j, char) in a:
if char != '?':
assert (i, j, char) in ans
ptslen = 0
for char in {char for (i, j, char) in a}:
if char == '?':
continue
pts = {(i, j) for (i, j, char2) in ans if char2 == char}
ptslen += len(pts)
i_min = min(i for i, j in pts)
i_max = max(i for i, j in pts)
j_min = min(j for i, j in pts)
j_max = max(j for i, j in pts)
pts2 = {(i, j) for i in xrange(i_min, 1 + i_max) for j in xrange(j_min, 1 + j_max)}
assert pts == pts2, (char, pts2 - pts)
assert ptslen == r * c
def main():
for t in xrange(1, 1 + int(raw_input())):
print 'Case #%d:' % t
r, c = map(int, raw_input().split())
a = [list(raw_input().strip()) for _ in xrange(r)]
ans = solve(r, c, a)
check(r, c, a, ans)
for row in ans:
print ''.join(row)
if __name__ == '__main__':
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ec3819ed601e8ab6a68677955f102bd18ca71960
|
9952b458bc4c0335d46f2eff3e07b02b4cfb16d2
|
/bdo_gestor_guilda/core/forms/guerras.py
|
8eda11a7fa0a88de02a198d7a77008aab671172e
|
[] |
no_license
|
diegoMasin/game-guild
|
01652d5e1266f5dd80a39a614f255a502b67fa89
|
7f72b57cce575d987ba70f557f6b472f093fb4fb
|
refs/heads/master
| 2023-05-30T21:06:53.424421
| 2021-04-22T19:49:00
| 2021-04-22T19:49:00
| 182,143,786
| 0
| 0
| null | 2021-06-10T23:47:11
| 2019-04-18T19:12:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
from django import forms
from bdo_gestor_guilda.core.models.guerras import Guerras
from bdo_gestor_guilda.usuario.models.user_avancado import UserAvancado
class GuerrasForm(forms.ModelForm):
call = forms.ModelChoiceField(
label='Call de ...',
queryset=UserAvancado.objects.filter(ativo=True),
required=False,
widget=forms.Select(attrs={'class': 'form-control select2', 'required': False})
)
class Meta:
model = Guerras
fields = ['tipo', 'modelo', 'data_inicio', 'call', 'pt_fixa', 'quantidade_players', 'servidor', 'node']
labels = {
'tipo': 'Guerra',
'modelo': 'Estilo de Guerra',
'data_inicio': 'Dia da Guerra',
'pt_fixa': 'PT Fixa',
'quantidade_players': 'Nº Máximo de Players',
'servidor': 'Qual Servidor',
'node': 'Nome do Node da Guerra',
}
widgets = {
'tipo': forms.Select(attrs={'class': 'form-control', 'required': True}),
'modelo': forms.Select(attrs={'class': 'form-control', 'required': True}),
'data_inicio': forms.TextInput(
attrs={'class': 'form-control date-picker-default', 'placeholder': 'dd/mm/yyyy', 'required': True}),
'pt_fixa': forms.Select(choices=((True, 'Sim'), (False, 'Não')), attrs={'class': 'form-control'}),
'quantidade_players': forms.TextInput(attrs={'class': 'form-control', 'type': 'number'}),
'servidor': forms.TextInput(attrs={'class': 'form-control'}),
'node': forms.TextInput(attrs={'class': 'form-control'}),
}
|
[
"diego.masin@defensoria.ce.def.br"
] |
diego.masin@defensoria.ce.def.br
|
2b66dd52a998aea178cb94410fc4a807ae8c5154
|
fa16ff5977e219da184e387ee649799ac5f1a300
|
/imperativeVSoop/OOP/main.py
|
7cee5c666d6cb9e62230d6c2bedfcdb4adf63135
|
[] |
no_license
|
erika-r/phonebook_comparison
|
472282f54f24d803fb3e793b5eb8f8e5a7890761
|
53b6f0d5ad5b3bfd0e03a7e59e131e518c76d229
|
refs/heads/main
| 2023-06-17T03:56:23.750867
| 2021-07-13T18:08:59
| 2021-07-13T18:08:59
| 309,206,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
#!/usr/bin/python
#FINAL
from binaryTree import *
from phonebook import *
def main():
contacts = Phonebook()
#adding contacts
contacts.add("John","03495803","DCU")
contacts.add("Erika","08123456","13 Fernwood Way")
contacts.add("Tofu","08123457","15 Fernwood Way")
contacts.add("Daisy","08123458","17 Fernwood Way")
contacts.add("Ginger","03495813","DCU")
contacts.add("Garlic","08124456","12 Fernwood Way")
contacts.add("Onion","07123457","19 Fernwood Way")
contacts.add("Pepper","08124458","18 Fernwood Way")
# print("-- All starting contacts --")
# contacts.showAll()
#finding contacts WORKS
# print("--- Finding contacts ---")
# contacts.find("John")
# contacts.find("Pepper")
# contacts.find("08123456")
# contacts.find("07123457")
# #will not be found
# contacts.find("Rotten Tomatoes")
print(". . . Removing contacts . . .\n")
contacts.remove("08123457") #tofu
contacts.remove("08123458") #daisy
contacts.remove("08123456") #erika
contacts.remove("Onion") #07123457
#will not be removeable
contacts.remove("Rotten Tomatoes")
print("--- Showing all contacts ---")
contacts.showAll()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
erika-r.noreply@github.com
|
c97d16689906d212fe9082dcf81bc9105d3beed9
|
0e4c4f47b99b9cc09eee6142cd77638682964f3a
|
/events/migrations/0001_initial.py
|
1a5e00f3f0754db71fee4888a4ecc620de692a93
|
[] |
no_license
|
rytwalker/hb-catering-api
|
85820131204fee9f28691552f15eb5c7b6aa1ad4
|
c7ecf4498d044147d9f77c159305a92207cd98f5
|
refs/heads/master
| 2020-05-18T09:52:44.030836
| 2019-04-30T22:15:49
| 2019-04-30T22:15:49
| 184,338,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
# Generated by Django 2.2 on 2019-04-30 21:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('guests', models.PositiveIntegerField()),
('location', models.CharField(max_length=500)),
('address', models.CharField(max_length=255)),
('city', models.CharField(max_length=255)),
('state', models.CharField(max_length=255)),
('zipcode', models.CharField(max_length=255)),
('date', models.DateField()),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='UserEvent',
fields=[
('event_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='events.Event')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=('events.event',),
),
]
|
[
"rytwalker@gmail.com"
] |
rytwalker@gmail.com
|
e506e4c948c54bc319a2fe6bfce55d10abfeda16
|
cdca1b25a83959ace8791a80d06dfe268123ed5d
|
/venv/bin/python-config
|
7a0202f61aa5b6f97361379294fa653a329f06a7
|
[] |
no_license
|
dakshsriv/Chess-Analyzer-1000
|
151f8f6a9d8917144e803684d6aa24f41ccda296
|
b3200595ed68f02b8abd880b9c226ca2592c263b
|
refs/heads/master
| 2023-07-31T01:33:29.278651
| 2021-10-03T23:00:45
| 2021-10-03T23:00:45
| 387,560,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
#!/home/daksh/Projects/Chess-Analyzer-1000/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"daksh.srivastava.10@gmail.com"
] |
daksh.srivastava.10@gmail.com
|
|
4940253fa76b9870f50b2a11d79c434ea74cbeb5
|
f2a5e91ee00059e450d86ce891f34ac891277d26
|
/tests/checker.py
|
34f88fbc9323a7972f67c3d276cab86a52b7a273
|
[
"MIT"
] |
permissive
|
SwapnilKotkar/pytenable_refactored
|
e1a371513c1e2dccb047741f3305d783402c4b5b
|
d4030389c8a4c2dc937bc5c6aff9d98281a825ef
|
refs/heads/main
| 2023-08-14T00:53:41.894880
| 2021-09-10T08:05:19
| 2021-09-10T08:05:19
| 403,574,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
import six
from dateutil.parser import parse as dateparse
import datetime, sys, re
def check(i, name, val_type, allow_none=False):
assert name in i
if not allow_none:
assert i[name] != None
single(i[name], val_type)
def single(var, val_type):
reuuid = r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'
reuuids = r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12,32}$'
if var != None:
if val_type == 'datetime':
assert isinstance(dateparse(var), datetime.datetime)
elif val_type == 'uuid':
assert len(re.findall(reuuid, var)) > 0
elif val_type == 'scanner-uuid':
assert len(re.findall(reuuids, var)) > 0
elif sys.version_info.major == 2 and val_type == str:
if six.PY3:
assert isinstance(var, str) or isinstance(var, str)
else:
assert isinstance(var, 'unicode') or isinstance(var, str)
else:
assert isinstance(var, val_type)
|
[
"swapnilkotkar793@gmail.com"
] |
swapnilkotkar793@gmail.com
|
84280fd0666262bd335a91b289f02eb112bd1b2e
|
01dad4d1d2ffaf2fa070e99fe828d42f59a9f9d1
|
/src/pycrop2ml_ui/menus/creation/externalpackage.py
|
f807d1b04ea080d66b5fe6406cd6137f97e13e1d
|
[
"MIT"
] |
permissive
|
AgriculturalModelExchangeInitiative/Pycrop2ml_ui
|
5e210facf9689348bb57c16060967118b7c5f49a
|
3d5d2b87a74f0be306056b71808286922fef2945
|
refs/heads/master
| 2023-06-24T13:52:39.933728
| 2023-06-17T00:17:26
| 2023-06-17T00:17:26
| 193,912,881
| 0
| 4
|
MIT
| 2023-02-25T13:26:57
| 2019-06-26T13:44:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,958
|
py
|
import os
import ipywidgets as wg
from pycrop2ml_ui.browser.TkinterPath import getPath
from pycrop2ml_ui.menus.creation.createcomposition import createComposition
from pycrop2ml_ui.menus.creation import createmenu
from IPython.display import display
class externalPackageMenu():
"""
Class providing the external package addition of composition model creation menu for pycrop2ml's user interface.
Parameters : \n
- data : {
'Path': '',
'Model type': 'unit',
'Model name': '',
'Model ID': '',
'Authors': '',
'Institution': '',
'Reference': '',
'Abstract': ''
}
- listpkg : []
"""
def __init__(self, data, listpkg=[]):
self._datas = data
self._out = wg.Output()
self._out2 = wg.Output()
self._list = wg.Select(options=listpkg,disabled=False)
self._listpkg = listpkg
self._apply = wg.Button(value=False,description='Apply',disabled=False,button_style='success')
self._cancel = wg.Button(value=False,description='Cancel',disabled=False,button_style='warning')
self._add = wg.Button(value=False,description='Add',disabled=False,button_style='success')
self._remove = wg.Button(value=False,description='Remove',disabled=False,button_style='danger')
def _eventAdd(self, b):
"""
Handles add button on_click event
"""
self._out2.clear_output()
extpkg = getPath()
if 'crop2ml' not in os.listdir(extpkg):
with self._out2:
print('This repository is not a model package.')
elif any([extpkg in self._listpkg, extpkg+os.path.sep+'crop2ml' == self._datas['Path']]):
with self._out2:
print('This package is already in the list.')
else:
self._listpkg.append(extpkg)
self._list.options = self._listpkg
def _eventRemove(self, b):
"""
Handles remove button on_click event
"""
self._out2.clear_output()
if self._list.value:
self._listpkg.remove(self._list.value)
self._list.options = self._listpkg
def _eventApply(self, b):
"""
Handles apply button on_click event
"""
self._out.clear_output()
self._out2.clear_output()
with self._out:
try:
tmp = createComposition(self._datas, externalpackagelist=[i for i in self._listpkg if i])
tmp.displayMenu()
except:
raise Exception('Could not load model composition creation menu.')
def _eventCancel(self, b):
"""
Handles cancel button on_click event
"""
self._out.clear_output()
self._out2.clear_output()
with self._out:
try:
tmp = createmenu.createMenu()
tmp.displayMenu()
except:
raise Exception('Could not load mainmenu.')
def displayMenu(self):
"""
Displays the package selection menu of pycrop2ml's UI.
This method is the only one available for the user in this class. Any other attribute or
method call may break the code.
"""
display(self._out)
display(self._out2)
with self._out:
display(wg.VBox([wg.HTML(value='<font size="5"><b> Model creation : composition.{}.xml<br>-> External packages</b></font>'.format(self._datas['Model name'])), wg.HBox([self._list, wg.VBox([self._add, self._remove])]), wg.HBox([self._apply, self._cancel])]))
self._apply.on_click(self._eventApply)
self._cancel.on_click(self._eventCancel)
self._add.on_click(self._eventAdd)
self._remove.on_click(self._eventRemove)
|
[
"romaric.justes@orange.fr"
] |
romaric.justes@orange.fr
|
b460173067fee5643332d9bdb6cca562422f0628
|
d007f8d6c318c3d66e76d99715edf324c9fe0294
|
/recipe_modules/ninja/__init__.py
|
e0c77a7a641618bf0e7ebb4ca42b2cc775baf20b
|
[
"BSD-3-Clause"
] |
permissive
|
nirvus/infra-recipes
|
c0f9e5facca7ad1907d639eb8819a59dc8f3584e
|
a5dc52f47405dcce56fb43a3e8ac80a2fbd56717
|
refs/heads/master
| 2020-04-07T23:15:01.809232
| 2018-11-06T02:30:12
| 2018-11-06T17:37:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
DEPS = [
'recipe_engine/step',
]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
8f9139a190d429e54bae0fddd3b33a486abcfe40
|
80301f1cffc5afce13256e2ecab6323c5df00194
|
/cn.sc/py/T2210.py
|
a756c221d750cd1c54907be6878f0a7c1118b1b4
|
[] |
no_license
|
ZhenjianYang/SoraVoiceScripts
|
c1ddf7c1bbcb933243754f9669bd6b75777c87b9
|
94a948090aba0f63b10b2c69dc845dc99c822fc4
|
refs/heads/master
| 2023-04-18T04:54:44.306652
| 2023-04-06T11:15:17
| 2023-04-06T11:15:17
| 103,167,541
| 43
| 11
| null | 2021-03-06T08:52:54
| 2017-09-11T17:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 75,412
|
py
|
from ED6SCScenarioHelper import *
def main():
SetCodePage("gbk")
# 卢安
CreateScenaFile(
FileName = 'T2210 ._SN',
MapName = 'Ruan',
Location = 'T2210.x',
MapIndex = 1,
MapDefaultBGM = "ed60012",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'弗洛拉', # 9
'多米尼克', # 10
'比古', # 11
'王国军宪兵', # 12
'达里奥', # 13
'索雷诺', # 14
'诺曼市长', # 15
'秘书德尔斯', # 16
'贝尔夫', # 17
'杯子', # 18
'杯子', # 19
'水壶', # 20
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT06/CH20020 ._CH', # 00
'ED6_DT07/CH02540 ._CH', # 01
'ED6_DT07/CH01350 ._CH', # 02
'ED6_DT07/CH01280 ._CH', # 03
'ED6_DT07/CH01300 ._CH', # 04
'ED6_DT07/CH01560 ._CH', # 05
'ED6_DT07/CH01040 ._CH', # 06
'ED6_DT07/CH01200 ._CH', # 07
'ED6_DT07/CH01140 ._CH', # 08
)
AddCharChipPat(
'ED6_DT06/CH20020P._CP', # 00
'ED6_DT07/CH02540P._CP', # 01
'ED6_DT07/CH01350P._CP', # 02
'ED6_DT07/CH01280P._CP', # 03
'ED6_DT07/CH01300P._CP', # 04
'ED6_DT07/CH01560P._CP', # 05
'ED6_DT07/CH01040P._CP', # 06
'ED6_DT07/CH01200P._CP', # 07
'ED6_DT07/CH01140P._CP', # 08
)
DeclNpc(
X = 34540,
Z = 0,
Y = 27220,
Direction = 90,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 7,
)
DeclNpc(
X = -63810,
Z = 0,
Y = 34870,
Direction = 0,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 6,
)
DeclNpc(
X = 33500,
Z = 0,
Y = 24400,
Direction = 270,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 5,
)
DeclNpc(
X = 2620,
Z = 0,
Y = 3200,
Direction = 180,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 4,
)
DeclNpc(
X = 67820,
Z = -30,
Y = -5200,
Direction = 90,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 8,
)
DeclNpc(
X = 800,
Z = 0,
Y = 2100,
Direction = 0,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 9,
)
DeclNpc(
X = -64500,
Z = 0,
Y = 33170,
Direction = 270,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 10,
)
DeclNpc(
X = -7500,
Z = 0,
Y = 33230,
Direction = 90,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 36150,
Z = 0,
Y = 34260,
Direction = 193,
Unknown2 = 0,
Unknown3 = 8,
ChipIndex = 0x8,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 13,
)
DeclNpc(
X = 35510,
Z = 750,
Y = 27280,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1638400,
ChipIndex = 0x0,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 35450,
Z = 750,
Y = 26890,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1638400,
ChipIndex = 0x0,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 35490,
Z = 750,
Y = 26520,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1703936,
ChipIndex = 0x0,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclActor(
TriggerX = -475,
TriggerZ = 0,
TriggerY = 3173,
TriggerRange = 800,
ActorX = -475,
ActorZ = 800,
ActorY = 3173,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 14,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -63800,
TriggerZ = 0,
TriggerY = 50790,
TriggerRange = 900,
ActorX = -63800,
ActorZ = -300,
ActorY = 50790,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 15,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -62370,
TriggerZ = 0,
TriggerY = -43110,
TriggerRange = 500,
ActorX = -62370,
ActorZ = 2000,
ActorY = -43110,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 16,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -59500,
TriggerZ = 250,
TriggerY = -36760,
TriggerRange = 800,
ActorX = -59500,
ActorZ = 1250,
ActorY = -36760,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 17,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_302", # 00, 0
"Function_1_402", # 01, 1
"Function_2_43E", # 02, 2
"Function_3_5BB", # 03, 3
"Function_4_698", # 04, 4
"Function_5_99B", # 05, 5
"Function_6_DEA", # 06, 6
"Function_7_11E1", # 07, 7
"Function_8_163D", # 08, 8
"Function_9_185C", # 09, 9
"Function_10_1A4A", # 0A, 10
"Function_11_20D0", # 0B, 11
"Function_12_24EC", # 0C, 12
"Function_13_296F", # 0D, 13
"Function_14_2FC0", # 0E, 14
"Function_15_306D", # 0F, 15
"Function_16_3077", # 10, 16
"Function_17_3081", # 11, 17
)
def Function_0_302(): pass

# Map-init function (index 0): shows/hides NPCs and sets their positions
# depending on which story-chapter scenario flags are set. The `def ...: pass`
# line is only a disassembly marker; the statements below are the function body.
label("Function_0_302")

# Branch on scenario flag (0x400, bit 0) — one story-state layout vs. the rest.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_3B8")
SetChrFlags(0xB, 0x80)          # 0x80 appears to be the "hidden" chr flag — TODO confirm
ClearChrFlags(0xC, 0x80)
ClearChrFlags(0xE, 0x80)
ClearChrFlags(0xF, 0x80)
ClearChrFlags(0x10, 0x80)
SetChrPos(0xA, 33760, 0, 25890, 270)
SetChrPos(0x8, -4550, 0, -4059, 95)

# Sub-branch on flag (0x405, bit 7) being UNSET.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_365")
ClearChrFlags(0xD, 0x80)
SetChrPos(0x9, 67400, 0, 32619, 270)
Jump("loc_3B5")

label("loc_365")

ClearChrFlags(0xD, 0x80)
SetChrPos(0xD, 4070, 0, 35300, 270)
SetChrPos(0x9, -1900, 0, 4450, 90)
SetChrPos(0xF, -61820, 0, 30050, 355)
SetChrPos(0x8, -2750, 0, 42770, 342)
OP_43(0x8, 0x0, 0x0, 0x2)       # presumably starts Function_2 as chr 0x8's behavior thread — verify

label("loc_3B5")

Jump("loc_401")

label("loc_3B8")

# Alternate layouts for earlier chapter states.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_3D3")
SetChrPos(0x8, 35530, 0, 34250, 180)
Jump("loc_401")

label("loc_3D3")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_3DD")
Jump("loc_401")

label("loc_3DD")

# Army-occupation state: reveal the three soldier NPCs (0x11-0x13).
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_3FA")
ClearChrFlags(0x11, 0x80)
ClearChrFlags(0x12, 0x80)
ClearChrFlags(0x13, 0x80)
Jump("loc_401")

label("loc_3FA")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_401")

label("loc_401")

Return()

# Function_0_302 end
def Function_1_402(): pass

# Map-init function (index 1): configures map objects/animations.
# OP_71/OP_72 appear to set/clear object state bits and OP_6F to set an
# object's animation frame — opcode semantics unconfirmed.
label("Function_1_402")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_42C")
OP_71(0x0, 0x4)
OP_71(0x1, 0x4)
OP_71(0x2, 0x4)
OP_71(0x3, 0x4)
OP_71(0x4, 0x4)
OP_71(0x5, 0x4)
OP_71(0x6, 0x4)

label("loc_42C")

OP_72(0x10, 0x10)
OP_72(0x10, 0x8)
OP_6F(0x10, 360)
Return()

# Function_1_402 end
def Function_2_43E(): pass

# NPC idle-behavior function (index 2), attached via OP_43 in Function_0.
# Picks a random number 0-13 and dispatches to one of 14 OP_99 calls
# (OP_99 presumably plays a chip-animation frame range with a delay —
# verify against the engine docs). The trailing Jc(1)/Jump pair is an
# unconditional repeat loop emitted by the original compiler.
label("Function_2_43E")

# result1 = rand() % 14
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_463")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_5A5")

label("loc_463")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_47C")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_5A5")

label("loc_47C")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_495")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_5A5")

label("loc_495")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4AE")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_5A5")

label("loc_4AE")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4C7")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_5A5")

label("loc_4C7")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4E0")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_5A5")

label("loc_4E0")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4F9")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_5A5")

label("loc_4F9")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_512")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_5A5")

label("loc_512")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_52B")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_5A5")

label("loc_52B")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_544")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_5A5")

label("loc_544")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_55D")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_5A5")

label("loc_55D")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_576")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_5A5")

label("loc_576")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_58F")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_5A5")

label("loc_58F")

Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_5A5")
OP_99(0xFE, 0x6, 0x7, 0x54B)

label("loc_5A5")

# while (1): loop the default animation forever.
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_5BA")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_5A5")

label("loc_5BA")

Return()

# Function_2_43E end
def Function_3_5BB(): pass

# NPC behavior thread (index 3). Under flag (0x400,0) it runs an endless
# walk-and-emote patrol (OP_8E = move-to, OP_62 = show emote balloon,
# OP_8C = turn); otherwise it loops an idle animation, periodically
# showing an emote when the army-occupation flags allow it.
label("Function_3_5BB")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_635")

label("loc_5C2")

# Infinite patrol loop between two points.
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_632")
OP_8E(0xFE, 0xFFFFEE6C, 0x0, 0xFFFFEAA2, 0x3E8, 0x0)
OP_62(0xFE, 0x0, 2000, 0x8, 0x9, 0xFA, 0x2)
OP_8C(0xFE, 90, 400)
Sleep(3500)
OP_8E(0xFE, 0xFFFFEE6C, 0x0, 0xFFFFF204, 0x3E8, 0x0)
OP_8C(0xFE, 90, 400)
OP_62(0xFE, 0x0, 2000, 0x8, 0x9, 0xFA, 0x2)
Sleep(4500)
Jump("loc_5C2")

label("loc_632")

Jump("loc_697")

label("loc_635")

# result2 counts idle cycles; every 4th cycle optionally shows an emote.
RunExpression(0x2, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))

label("loc_63F")

Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_697")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
RunExpression(0x2, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x2), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_694")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_68A")
OP_62(0xFE, 0x0, 2000, 0x8, 0x9, 0xFA, 0x2)

label("loc_68A")

RunExpression(0x2, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))

label("loc_694")

Jump("loc_63F")

label("loc_697")

Return()

# Function_3_5BB end
def Function_4_698(): pass

# NPC talk handler (index 4). Standard pattern: TalkBegin/TalkEnd bracket
# the dialogue; the outer Jc chain selects a dialogue set per story state,
# and within each set flag bit (0x0, 3) distinguishes "first talk"
# (longer lines, then OP_A2 marks the bit) from repeat talks.
label("Function_4_698")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_763")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_6F6")

ChrTalk(    #0
    0xFE,
    (
        "伙食非常美味,\x01",
        "不知不觉吃太多了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #1
    0xFE,
    (
        "这样工作下去\x01",
        "会不断长胖的啊。\x02",
    )
)

CloseMessageWindow()
Jump("loc_760")

label("loc_6F6")

OP_A2(0x3)

ChrTalk(    #2
    0xFE,
    (
        "嗯~厨房\x01",
        "飘来好香的味道啊。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #3
    0xFE,
    (
        "这里的厨师做的饭菜\x01",
        "非常非常好吃。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #4
    0xFE,
    (
        "因此我的皮带\x01",
        "都紧起来了。\x02",
    )
)

CloseMessageWindow()

label("loc_760")

Jump("loc_997")

label("loc_763")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_81C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_7B8")

ChrTalk(    #5
    0xFE,
    (
        "竟然在自己家里\x01",
        "养魔兽……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #6
    0xFE,
    (
        "居然还有人\x01",
        "会去想这么可怕的事。\x02",
    )
)

CloseMessageWindow()
Jump("loc_819")

label("loc_7B8")

OP_A2(0x3)

ChrTalk(    #7
    0xFE,
    (
        "房间二楼的\x01",
        "秘密魔兽饲养房间……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #8
    0xFE,
    "……看过了吗?\x02",
)

CloseMessageWindow()

ChrTalk(    #9
    0xFE,
    (
        "居然还有人\x01",
        "会去想这么可怕的事。\x02",
    )
)

CloseMessageWindow()

label("loc_819")

Jump("loc_997")

label("loc_81C")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_902")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_88B")

ChrTalk(    #10
    0xFE,
    (
        "对这等美术品出手的\x01",
        "只可能是绝顶的笨蛋或者天才了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #11
    0xFE,
    (
        "普通的盗贼\x01",
        "还是有自知之明的。\x02",
    )
)

CloseMessageWindow()
Jump("loc_8FF")

label("loc_88B")

OP_A2(0x3)

ChrTalk(    #12
    0xFE,
    (
        "从这里的女佣\x01",
        "那里听说……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #13
    0xFE,
    (
        "前不久这个烛台\x01",
        "被偷走过。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #14
    0xFE,
    (
        "好厉害的家伙……不、不,\x01",
        "竟然有这么坏的家伙。\x02",
    )
)

CloseMessageWindow()

label("loc_8FF")

Jump("loc_997")

label("loc_902")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_997")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_948")

ChrTalk(    #15
    0xFE,
    (
        "所、所以不要\x01",
        "在这附近转悠。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #16
    0xFE,
    "我也很紧张呢。\x02",
)

CloseMessageWindow()
Jump("loc_997")

label("loc_948")

OP_A2(0x3)

ChrTalk(    #17
    0xFE,
    (
        "这个烛台现在\x01",
        "由王国军代为保管。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #18
    0xFE,
    (
        "别太靠近。\x01",
        "这可是相当贵重的东西。\x02",
    )
)

CloseMessageWindow()

label("loc_997")

TalkEnd(0xFE)
Return()

# Function_4_698 end
def Function_5_99B(): pass

# NPC talk handler (index 5) — the mansion cook. Same branching pattern
# as Function_4; first-talk marker here is flag bit (0x0, 2) via OP_A2(0x2).
label("Function_5_99B")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_A74")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_A13")

ChrTalk(    #19
    0xFE,
    (
        "这是使用柴火的暖炉。\x01",
        "最近的炊事都靠这个了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #20
    0xFE,
    (
        "本来是想要用火才做的,\x01",
        "没想到会这么有用。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x2)
Jump("loc_A71")

label("loc_A13")

ChrTalk(    #21
    0xFE,
    (
        "这是使用柴火的暖炉。\x01",
        "最近的炊事都靠这个了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #22
    0xFE,
    (
        "不过,适用范围\x01",
        "稍微窄了点,也没办法了。\x02",
    )
)

CloseMessageWindow()

label("loc_A71")

Jump("loc_DE6")

label("loc_A74")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_B55")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_B02")

ChrTalk(    #23
    0xFE,
    (
        "管家达里奥\x01",
        "回来了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #24
    0xFE,
    (
        "多年服侍戴尔蒙家\x01",
        "的同伴都在一起就心安多了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #25
    0xFE,
    (
        "这么快就欠了雇佣我们的\x01",
        "新市长的人情了啊。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x2)
Jump("loc_B52")

label("loc_B02")

ChrTalk(    #26
    0xFE,
    (
        "达里奥那家伙\x01",
        "回来了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #27
    0xFE,
    (
        "作为服侍戴尔蒙家的同伴,\x01",
        "他回来可让人心安多了。\x02",
    )
)

CloseMessageWindow()

label("loc_B52")

Jump("loc_DE6")

label("loc_B55")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_C0D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_BAC")

ChrTalk(    #28
    0xFE,
    (
        "好,差不多\x01",
        "该准备午饭了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #29
    0xFE,
    (
        "我为了士兵们\x01",
        "加倍卖力的自信作品。\x02",
    )
)

CloseMessageWindow()
Jump("loc_C0A")

label("loc_BAC")

OP_A2(0x2)

ChrTalk(    #30
    0xFE,
    (
        "今天的伙食\x01",
        "是加了橘子调味汁\x01",
        "的照烧仔鸡。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #31
    0xFE,
    (
        "是我将东方的烹调法\x01",
        "加以调整的自信作品。\x02",
    )
)

CloseMessageWindow()

label("loc_C0A")

Jump("loc_DE6")

label("loc_C0D")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_C6B")

ChrTalk(    #32
    0xFE,
    (
        "我也一直在担心\x01",
        "达里奥那家伙的事……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #33
    0xFE,
    (
        "市长被逮捕之后\x01",
        "他的情况确实很奇怪……\x02",
    )
)

CloseMessageWindow()
Jump("loc_DE6")

label("loc_C6B")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_D19")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_CBD")

ChrTalk(    #34
    0xFE,
    (
        "士兵们也不挑食,\x01",
        "吃得都很多。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #35
    0xFE,
    "嗯,也算做得值得了。\x02",
)

CloseMessageWindow()
Jump("loc_D16")

label("loc_CBD")

OP_A2(0x2)

ChrTalk(    #36
    0xFE,
    (
        "我现在负责佣人和士兵们\x01",
        "的伙食。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #37
    0xFE,
    (
        "在这房子也待了很久了。\x01",
        "就让我效劳到最后吧。\x02",
    )
)

CloseMessageWindow()

label("loc_D16")

Jump("loc_DE6")

label("loc_D19")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_DE6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_D74")

ChrTalk(    #38
    0xFE,
    (
        "不管怎么说,\x01",
        "我一直在服侍戴尔蒙家。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #39
    0xFE,
    (
        "家道没落了,\x01",
        "还真是可惜啊。\x02",
    )
)

CloseMessageWindow()
Jump("loc_DE6")

label("loc_D74")

OP_A2(0x2)

ChrTalk(    #40
    0xFE,
    (
        "戴尔蒙市长确实\x01",
        "做了坏事……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #41
    0xFE,
    (
        "但我和管家达里奥\x01",
        "都服侍戴尔蒙家多年了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #42
    0xFE,
    (
        "家道没落了,\x01",
        "实在是难过啊。\x02",
    )
)

CloseMessageWindow()

label("loc_DE6")

TalkEnd(0xFE)
Return()

# Function_5_99B end
def Function_6_DEA(): pass

# NPC talk handler (index 6). Same branching pattern; first-talk marker
# here is flag bit (0x0, 1) via OP_A2(0x1).
label("Function_6_DEA")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_E4B")

ChrTalk(    #43
    0xFE,
    (
        "没有了导力器的光,\x01",
        "这个烛台也真可怜。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #44
    0xFE,
    (
        "就跟被导力文明所装点的\x01",
        "我们一样……\x02",
    )
)

CloseMessageWindow()
Jump("loc_11DD")

label("loc_E4B")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_F2C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_EDD")

ChrTalk(    #45
    0xFE,
    (
        "达里奥也完全\x01",
        "恢复状态了呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #46
    0xFE,
    (
        "有段时间还形迹可疑,\x01",
        "让人感觉诡异呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #47
    0xFE,
    (
        "无论如何,有个熟悉这里\x01",
        "的人在真是帮大忙了。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x1)
Jump("loc_F29")

label("loc_EDD")

ChrTalk(    #48
    0xFE,
    (
        "达里奥也完全\x01",
        "恢复状态了呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #49
    0xFE,
    (
        "有段时间还形迹可疑,\x01",
        "让人感觉诡异呢。\x02",
    )
)

CloseMessageWindow()

label("loc_F29")

Jump("loc_11DD")

label("loc_F2C")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_FC6")

ChrTalk(    #50
    0xFE,
    (
        "根据市长选举的结果\x01",
        "找工作的方针也要发生变化。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #51
    0xFE,
    (
        "如果诺曼获胜就找旅游相关职业,\x01",
        "要是波尔多斯就去港口酒馆。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #52
    0xFE,
    "哼哼,完美的再就业计划。\x02",
)

CloseMessageWindow()
Jump("loc_11DD")

label("loc_FC6")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_FF7")

ChrTalk(    #53
    0xFE,
    (
        "外面好像\x01",
        "很吵闹……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #54
    0xFE,
    "怎么啦?\x02",
)

CloseMessageWindow()
Jump("loc_11DD")

label("loc_FF7")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_1131")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_1062")

ChrTalk(    #55
    0xFE,
    (
        "事件之后,管家达里奥\x01",
        "好像变得很奇怪呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #56
    0xFE,
    (
        "戴尔蒙市长被逮捕\x01",
        "似乎让他相当震惊。\x02",
    )
)

CloseMessageWindow()
Jump("loc_112E")

label("loc_1062")

OP_A2(0x1)

ChrTalk(    #57
    0xFE,
    (
        "最近,这里的旧管家\x01",
        "达里奥好像都不见人影呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #58
    0xFE,
    (
        "说到底,那个人\x01",
        "在市长被逮捕以后\x01",
        "就变得有点奇怪了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #59
    0xFE,
    (
        "『还有一个我!』什么的\x01",
        "都说出来了,真是不太妙。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #60
    0xFE,
    (
        "戴尔蒙市长被逮捕\x01",
        "看来让他相当震惊吧。\x02",
    )
)

CloseMessageWindow()

label("loc_112E")

Jump("loc_11DD")

label("loc_1131")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_11DD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_1194")

ChrTalk(    #61
    0xFE,
    (
        "军队管理期间还好,\x01",
        "此后会变成怎样呢?\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #62
    0xFE,
    (
        "趁现在找到下一份工作\x01",
        "会比较好吧。\x02",
    )
)

CloseMessageWindow()
Jump("loc_11DD")

label("loc_1194")

OP_A2(0x1)

ChrTalk(    #63
    0xFE,
    (
        "军队管理期间还好,\x01",
        "此后会变成怎样呢?\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #64
    0xFE,
    (
        "佣人还是\x01",
        "会被解雇吧。\x02",
    )
)

CloseMessageWindow()

label("loc_11DD")

TalkEnd(0xFE)
Return()

# Function_6_DEA end
def Function_7_11E1(): pass

# NPC talk handler (index 7) — a mansion maid. Same branching pattern;
# first-talk marker here is flag bit (0x0, 0) via OP_A2(0x0).
label("Function_7_11E1")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_1236")

ChrTalk(    #65
    0xFE,
    (
        "除尘器也不能使用\x01",
        "扫除可辛苦了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #66
    0xFE,
    (
        "呼,这栋房子\x01",
        "竟然这么宽广啊~\x02",
    )
)

CloseMessageWindow()
Jump("loc_1639")

label("loc_1236")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_1340")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_12E5")

ChrTalk(    #67
    0xFE,
    (
        "啊,欢迎~\x01",
        "欢迎光临市长官邸。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #68
    0xFE,
    (
        "我们大家全部\x01",
        "都被新市长雇佣了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #69
    0xFE,
    (
        "导力器不能使用,\x01",
        "做家务虽然辛苦点……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #70
    0xFE,
    (
        "但大家一起努力\x01",
        "一定能渡过难关的。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x0)
Jump("loc_133D")

label("loc_12E5")

ChrTalk(    #71
    0xFE,
    (
        "我们大家全部\x01",
        "都被新市长雇佣了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #72
    0xFE,
    (
        "达里奥先生也回来了,\x01",
        "这下一切都恢复原样了吧⊙\x02",
    )
)

CloseMessageWindow()

label("loc_133D")

Jump("loc_1639")

label("loc_1340")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_13D0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_1386")

ChrTalk(    #73
    0xFE,
    (
        "我虽然想在这房子里\x01",
        "工作……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #74
    0xFE,
    "但还是很难吧。\x02",
)

CloseMessageWindow()
Jump("loc_13CD")

label("loc_1386")

OP_A2(0x0)

ChrTalk(    #75
    0xFE,
    (
        "多米尼克已经\x01",
        "在找下一份工作了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #76
    0xFE,
    (
        "这样一来\x01",
        "我也着急起来了。\x02",
    )
)

CloseMessageWindow()

label("loc_13CD")

Jump("loc_1639")

label("loc_13D0")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_14AA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_141A")

ChrTalk(    #77
    0xFE,
    (
        "最近一直没见着\x01",
        "管家达里奥的身影。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #78
    0xFE,
    "怎么回事呢。\x02",
)

CloseMessageWindow()
Jump("loc_14A7")

label("loc_141A")

OP_A2(0x0)

ChrTalk(    #79
    0xFE,
    (
        "扫除的时候和多米尼克\x01",
        "聊天来着……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #80
    0xFE,
    (
        "最近一直没见着\x01",
        "管家达里奥的身影。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #81
    0xFE,
    (
        "事件之后\x01",
        "情况就很奇怪……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #82
    0xFE,
    (
        "达里奥到底\x01",
        "怎么回事呢。\x02",
    )
)

CloseMessageWindow()

label("loc_14A7")

Jump("loc_1639")

label("loc_14AA")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_153D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_14F9")

ChrTalk(    #83
    0xFE,
    (
        "啦啦~啦⊙\x01",
        "噜噜噜噜~⊙\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #84
    0xFE,
    (
        "士兵先生们\x01",
        "人都很好呢~\x02",
    )
)

CloseMessageWindow()
Jump("loc_153A")

label("loc_14F9")

OP_A2(0x0)

ChrTalk(    #85
    0xFE,
    (
        "啦啦~啦⊙\x01",
        "噜噜噜噜~⊙\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #86
    0xFE,
    (
        "我正在准备\x01",
        "给士兵们的茶呢。\x02",
    )
)

CloseMessageWindow()

label("loc_153A")

Jump("loc_1639")

label("loc_153D")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_1639")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_15A6")

ChrTalk(    #87
    0xFE,
    (
        "老爷被逮捕的时候\x01",
        "还在想会变成怎样……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #88
    0xFE,
    (
        "看来暂时还能和以前一样\x01",
        "在这里生活下去。\x02",
    )
)

CloseMessageWindow()
Jump("loc_1639")

label("loc_15A6")

OP_A2(0x0)

ChrTalk(    #89
    0xFE,
    (
        "现在,这栋房子\x01",
        "由王国军管理哦。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #90
    0xFE,
    (
        "为了维持宅邸的管理。\x01",
        "我们佣人们\x01",
        "也维持原样被雇佣了下来。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #91
    0xFE,
    (
        "嘿嘿,幸好军队的士兵们\x01",
        "都是和善的好人。\x02",
    )
)

CloseMessageWindow()

label("loc_1639")

TalkEnd(0xFE)
Return()

# Function_7_11E1 end
def Function_8_163D(): pass

# NPC talk handler (index 8) — the butler Dario. Two story states only;
# first-talk marker is flag bit (0x0, 4) via OP_A2(0x4).
label("Function_8_163D")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_174C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1705")

ChrTalk(    #92
    0xFE,
    (
        "如此非常时期\x01",
        "竟然又发生事件……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #93
    0xFE,
    (
        "最近这世道\x01",
        "是怎么回事呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #94
    0xFE,
    (
        "想来前市长的事件\x01",
        "也是难以理解……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #95
    0xFE,
    "……不,没什么好说的。\x02",
)

CloseMessageWindow()

ChrTalk(    #96
    0xFE,
    (
        "前市长的过失是事实。\x01",
        "有罪就要认罪。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x4)
Jump("loc_1749")

label("loc_1705")

ChrTalk(    #97
    0xFE,
    (
        "如此非常时期\x01",
        "竟然又发生事件……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #98
    0xFE,
    (
        "到底这世道\x01",
        "是怎么回事呢。\x02",
    )
)

CloseMessageWindow()

label("loc_1749")

Jump("loc_1858")

label("loc_174C")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_1858")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1804")

ChrTalk(    #99
    0xFE,
    (
        "我是在戴尔蒙家\x01",
        "服侍多年的人……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #100
    0xFE,
    (
        "受新市长的委托,\x01",
        "我作为这个市长官邸的管家\x01",
        "重新回到这里了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #101
    0xFE,
    (
        "我当作重获新生\x01",
        "诚心诚意来服侍。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #102
    0xFE,
    (
        "任何事都\x01",
        "敬请吩咐。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x4)
Jump("loc_1858")

label("loc_1804")

ChrTalk(    #103
    0xFE,
    (
        "作为市长官邸的管家\x01",
        "又回到宅邸了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #104
    0xFE,
    (
        "能和同伴们一起工作的幸福\x01",
        "我要牢牢抓住。\x02",
    )
)

CloseMessageWindow()

label("loc_1858")

TalkEnd(0xFE)
Return()

# Function_8_163D end
def Function_9_185C(): pass

# NPC talk handler (index 9) — the Manoria village deputy chief.
# Two story states; first-talk marker is flag bit (0x0, 5) via OP_A2(0x5).
label("Function_9_185C")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_194A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_18F3")

ChrTalk(    #105
    0xFE,
    (
        "我面见了市长,\x01",
        "请求对玛诺利亚紧急支持……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #106
    0xFE,
    (
        "但是卢安市\x01",
        "好像情况也相当严峻。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #107
    0xFE,
    (
        "诺曼市长的严肃表情\x01",
        "完全说明了这一点。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x5)
Jump("loc_1947")

label("loc_18F3")

ChrTalk(    #108
    0xFE,
    (
        "已经请求支持村子,\x01",
        "但是卢安的状况也很严峻啊。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #109
    0xFE,
    (
        "诺曼市长\x01",
        "看起来也相当疲劳。\x02",
    )
)

CloseMessageWindow()

label("loc_1947")

Jump("loc_1A46")

label("loc_194A")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_1A46")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_19E6")

ChrTalk(    #110
    0xFE,
    (
        "作为玛诺利亚村的村长代理,\x01",
        "我是来向卢安市长请愿的。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #111
    0xFE,
    (
        "需要尽早请求食品和燃料\x01",
        "的支援啊。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #112
    0xFE,
    (
        "好了,立刻去跟新市长\x01",
        "打个招呼吧。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x5)
Jump("loc_1A46")

label("loc_19E6")

ChrTalk(    #113
    0xFE,
    (
        "作为玛诺利亚村的村长代理,\x01",
        "我是来向卢安市长请愿的。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #114
    0xFE,
    (
        "需要尽早请求食品和燃料\x01",
        "的支援啊。\x02",
    )
)

CloseMessageWindow()

label("loc_1A46")

TalkEnd(0xFE)
Return()

# Function_9_185C end
def Function_10_1A4A(): pass

# NPC talk handler (index 10) — Mayor Norman. The first-meeting branches
# (gated on flag (0x417, 4) unset) run an introduction scene and then
# Call(0, 11) to continue in the shared subroutine Function_11_20D0.
# The Jc on OP_29(0x69, ...) appears to test quest/journal state — verify.
label("Function_10_1A4A")

TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_1DC2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1C7D")
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_1AB7")
OP_62(0xE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)

ChrTalk(    #115
    0xFE,
    "哦哦,是你们……\x02",
)

CloseMessageWindow()

ChrTalk(    #116
    0xFE,
    (
        "刚刚收到学院事件\x01",
        "的报告呢。\x02",
    )
)

CloseMessageWindow()
Jump("loc_1AFD")

label("loc_1AB7")

ChrTalk(    #117
    0xFE,
    (
        "哦哦……\x01",
        "你们就是那些游击士吗?\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #118
    0xFE,
    (
        "正好收到学院事件\x01",
        "的报告呢。\x02",
    )
)

CloseMessageWindow()

label("loc_1AFD")

ChrTalk(    #119
    0x101,
    "#1011F哦~消息真灵通啊。\x02",
)

CloseMessageWindow()

ChrTalk(    #120
    0x102,
    (
        "#1040F是嘉恩先生\x01",
        "告知的吗?\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #121
    0xFE,
    (
        "啊啊,从协会\x01",
        "来了使者……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #122
    0xFE,
    (
        "哦,这么说来\x01",
        "还没打招呼呢。\x02",
    )
)

CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_1C03")

ChrTalk(    #123
    0xFE,
    (
        "和以前见面时相比\x01",
        "我的立场也发生了变化呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #124
    0x101,
    "#1000F啊,是哦。\x02",
)

CloseMessageWindow()

ChrTalk(    #125
    0xFE,
    (
        "我是这次就任新市长\x01",
        "的诺曼。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #126
    0xFE,
    "今后请多关照。\x02",
)

CloseMessageWindow()
Jump("loc_1C3A")

label("loc_1C03")

ChrTalk(    #127
    0xFE,
    (
        "我是这次就任新市长\x01",
        "的诺曼。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #128
    0xFE,
    "以后请多关照了。\x02",
)

CloseMessageWindow()

label("loc_1C3A")

ChrTalk(    #129
    0x101,
    "#1000F哪里哪里,彼此彼此。\x02",
)

CloseMessageWindow()

ChrTalk(    #130
    0x102,
    "#1040F恭喜您当选市长。\x02",
)

CloseMessageWindow()
# Continue the conversation in the shared subroutine (Function_11_20D0).
Call(0, 11)
Jump("loc_1DBF")

label("loc_1C7D")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_1CBF")

ChrTalk(    #131
    0xFE,
    (
        "我们也\x01",
        "下定了决心。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #132
    0xFE,
    (
        "希望能尽快\x01",
        "解决这个情况。\x02",
    )
)

CloseMessageWindow()
Jump("loc_1DBF")

label("loc_1CBF")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1D44")

ChrTalk(    #133
    0xFE,
    (
        "关于学院的事件\x01",
        "刚刚才收到报告。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #134
    0xFE,
    (
        "勤务员实在可怜,\x01",
        "不过据说平安解决了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #135
    0xFE,
    (
        "代表市民,也让我\x01",
        "重新表示感谢吧。\x02",
    )
)

CloseMessageWindow()
OP_A2(0x6)
Jump("loc_1DBF")

label("loc_1D44")

ChrTalk(    #136
    0xFE,
    (
        "关于学院的事件\x01",
        "刚刚收到报告呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #137
    0xFE,
    (
        "这种非常时期的占据事件\x01",
        "实在是令人难以置信的暴行。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #138
    0xFE,
    (
        "犯人们应该\x01",
        "受到严惩才行。\x02",
    )
)

CloseMessageWindow()

label("loc_1DBF")

Jump("loc_20CC")

label("loc_1DC2")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_20CC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2030")
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_1F72")
OP_62(0xE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)

ChrTalk(    #139
    0xFE,
    "哦哦,是你们……\x02",
)

CloseMessageWindow()

ChrTalk(    #140
    0xFE,
    (
        "选举中的酒店事件时\x01",
        "承蒙关照。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #141
    0x101,
    (
        "#1016F啊~那个事件啊。\x02\x03",

        "嗯,记得很~清楚哦。\x01",
        "你的头还撞在门上。\x02",
    )
)

CloseMessageWindow()
OP_62(0x102, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)

ChrTalk(    #142
    0x102,
    "#1048F什么?那个事件……\x02",
)

CloseMessageWindow()
TurnDirection(0xFE, 0x101, 400)

ChrTalk(    #143
    0xFE,
    "哎呀,真是丢脸……\x02",
)

CloseMessageWindow()

ChrTalk(    #144
    0xFE,
    (
        "不管怎样,趁此机会\x01",
        "请让我重新自我介绍一下。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #145
    0xFE,
    (
        "和以前见面时相比\x01",
        "我的立场也发生了变化呢。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #146
    0x101,
    "#1011F啊,是哦。\x02",
)

CloseMessageWindow()

ChrTalk(    #147
    0xFE,
    "我是就任新市长的诺曼。\x02",
)

CloseMessageWindow()

ChrTalk(    #148
    0xFE,
    "今后请多关照。\x02",
)

CloseMessageWindow()
Jump("loc_1FED")

label("loc_1F72")

ChrTalk(    #149
    0xFE,
    (
        "唔……\x01",
        "你们是游击士吧。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #150
    0xFE,
    (
        "虽然不是初次见面,\x01",
        "请让我重新自我介绍一下。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #151
    0xFE,
    (
        "我是就任新市长的诺曼。\x01",
        "以后也请多关照。\x02",
    )
)

CloseMessageWindow()

label("loc_1FED")

ChrTalk(    #152
    0x101,
    "#1000F哪里哪里,彼此彼此。\x02",
)

CloseMessageWindow()

ChrTalk(    #153
    0x102,
    "#1040F恭喜您当选市长。\x02",
)

CloseMessageWindow()
# Continue the conversation in the shared subroutine (Function_11_20D0).
Call(0, 11)
Jump("loc_20CC")

label("loc_2030")

Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_207E")

ChrTalk(    #154
    0xFE,
    (
        "我们也\x01",
        "下定了决心。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #155
    0xFE,
    (
        "祈祷事件能尽快解决,\x01",
        "期待诸位的表现。\x02",
    )
)

CloseMessageWindow()
Jump("loc_20CC")

label("loc_207E")

ChrTalk(    #156
    0xFE,
    (
        "总之市民生活的稳定\x01",
        "可以说是当前的课题。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #157
    0xFE,
    (
        "为此现在正在\x01",
        "寻求各方援助。\x02",
    )
)

CloseMessageWindow()

label("loc_20CC")

TalkEnd(0xFE)
Return()

# Function_10_1A4A end
def Function_11_20D0(): pass

# Shared conversation subroutine (index 11), invoked via Call(0, 11) from
# Function_10. The Jc on OP_42(n) chooses which party member responds
# (0x5/0x2/0x7 appear to be party-slot checks — verify). Ends by setting
# flag bits via OP_A2(0x7) and OP_A2(0x20BC) so the scene plays only once.
label("Function_11_20D0")

TurnDirection(0xFE, 0x102, 400)

ChrTalk(    #158
    0xFE,
    "唔,谢谢。\x02",
)

CloseMessageWindow()

ChrTalk(    #159
    0xFE,
    (
        "不过,遗憾的是还不到\x01",
        "沉浸在胜利中的时候……\x02",
    )
)

CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_215E")

ChrTalk(    #160
    0x106,
    (
        "#552F啊啊,正是。\x02\x03",

        "您刚刚就任,也真是多灾多难。\x02",
    )
)

CloseMessageWindow()
Jump("loc_21F0")

label("loc_215E")

Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_21A4")

ChrTalk(    #161
    0x103,
    (
        "#025F嗯嗯,我们明白。\x02\x03",

        "您刚刚就任就碰到这些事。\x02",
    )
)

CloseMessageWindow()
Jump("loc_21F0")

label("loc_21A4")

Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x7)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_21F0")

ChrTalk(    #162
    0x108,
    (
        "#074F唔,实在让您伤脑筋了啊。\x02\x03",

        "刚刚就任\x01",
        "就碰到这些难题。\x02",
    )
)

CloseMessageWindow()

label("loc_21F0")

ChrTalk(    #163
    0xFE,
    (
        "说实话,\x01",
        "真是无从下手啊……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #164
    0xFE,
    (
        "当初的混乱虽然收拾了,\x01",
        "但是导力器还是没恢复原状。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #165
    0xFE,
    (
        "这种时候只能努力\x01",
        "支援市民的生活了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #166
    0x102,
    (
        "#1043F但是,就现在而言\x01",
        "这是最好的对策。\x02\x03",

        "遗憾的是事态的解决\x01",
        "可能还要花费一些时间。\x02",
    )
)

CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x7)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2334")

ChrTalk(    #167
    0x108,
    (
        "#072F确实不是一朝一夕\x01",
        "就能解决的事件啊。\x02\x03",

        "为了防止长期延续\x01",
        "需要更有效的对策。\x02",
    )
)

CloseMessageWindow()
TurnDirection(0xFE, 0x108, 400)
Jump("loc_2407")

label("loc_2334")

Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_23A1")

ChrTalk(    #168
    0x103,
    (
        "#022F是啊,这不是一朝一夕\x01",
        "就能解决的事件。\x02\x03",

        "为了防止长期延续\x01",
        "需要更有效的对策。\x02",
    )
)

CloseMessageWindow()
TurnDirection(0xFE, 0x103, 400)
Jump("loc_2407")

label("loc_23A1")

Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2407")

ChrTalk(    #169
    0x106,
    (
        "#050F确实不是一天两天\x01",
        "就能解决的事件。\x02\x03",

        "考虑到事态的延续\x01",
        "需要更有效的对策。\x02",
    )
)

CloseMessageWindow()
TurnDirection(0xFE, 0x106, 400)

label("loc_2407")

ChrTalk(    #170
    0xFE,
    "唔,果然是这样吗。\x02",
)

CloseMessageWindow()

ChrTalk(    #171
    0xFE,
    (
        "作为新市长的首次工作来说\x01",
        "略感负担沉重……\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #172
    0xFE,
    (
        "为了不负女神的期待,\x01",
        "只有想办法努力克服了。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #173
    0xFE,
    (
        "祈祷事件能尽快解决,\x01",
        "期待诸位的表现。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #174
    0x101,
    (
        "#1006F嗯……\x01",
        "市长也要加油。\x02",
    )
)

CloseMessageWindow()

ChrTalk(    #175
    0x102,
    "#1040F我们会尽力的!\x02",
)

CloseMessageWindow()
OP_A2(0x7)
OP_A2(0x20BC)
Return()

# Function_11_20D0 end
def Function_12_24EC(): pass
label("Function_12_24EC")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_296B")
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_26EB")
ChrTalk( #176
0xFE,
"啊,游击士……\x02",
)
CloseMessageWindow()
ChrTalk( #177
0xFE,
(
"那、那个……\x01",
"前几天承蒙关照了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #178
0x101,
(
"#1000F啊~还以为是谁呢,\x01",
"是宾馆事件的受害者吧。\x02\x03",
"……撞到的头已经不要紧了吗?\x02",
)
)
CloseMessageWindow()
ChrTalk( #179
0xFE,
"托你的福已经完全好了。\x02",
)
CloseMessageWindow()
ChrTalk( #180
0xFE,
(
"那,今天\x01",
"来市长官邸有什么事吗?\x02",
)
)
CloseMessageWindow()
ChrTalk( #181
0x101,
"#1000F嗯,其实也没什么……\x02",
)
CloseMessageWindow()
ChrTalk( #182
0x102,
(
"#1040F请不用在意。\x01",
"只是来看看情况的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #183
0xFE,
(
"算是所谓的市内巡查吧?\x01",
"一直执行任务真是辛苦了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #184
0xFE,
(
"那么,有什么事的话\x01",
"请尽管开口。\x02",
)
)
CloseMessageWindow()
ChrTalk( #185
0xFE,
(
"我至少也算是\x01",
"市长秘书嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk( #186
0x101,
(
"#1000F哦~这样啊。\x02\x03",
"那么,到时候就请多关照了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #187
0xFE,
"啊啊,请不必客气。\x02",
)
CloseMessageWindow()
OP_A2(0x9)
OP_A2(0x20BD)
Jump("loc_296B")
label("loc_26EB")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 1)), scpexpr(EXPR_END)), "loc_2739")
ChrTalk( #188
0xFE,
(
"别看我这样,\x01",
"至少也是市长秘书呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #189
0xFE,
(
"有什么事情\x01",
"请尽管吩咐。\x02",
)
)
CloseMessageWindow()
Jump("loc_296B")
label("loc_2739")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_284B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_27F4")
ChrTalk( #190
0xFE,
(
"关于学院的事件\x01",
"刚刚收到了报告。\x02",
)
)
CloseMessageWindow()
ChrTalk( #191
0xFE,
(
"听说平安解决了,\x01",
"我和市长总算都放心了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #192
0xFE,
(
"现在正忙着做\x01",
"发放宣传的准备呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #193
0xFE,
(
"不管怎样得发出消息\x01",
"让大家感到安心才行呢。\x02",
)
)
CloseMessageWindow()
OP_A2(0x8)
Jump("loc_2848")
label("loc_27F4")
ChrTalk( #194
0xFE,
(
"关于学院的事件\x01",
"刚刚收到了报告呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #195
0xFE,
(
"听说平安解决了,\x01",
"我和市长总算都放心了。\x02",
)
)
CloseMessageWindow()
label("loc_2848")
Jump("loc_296B")
label("loc_284B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2915")
ChrTalk( #196
0xFE,
(
"呼,应付市民的意见\x01",
"总算告一段落了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #197
0xFE,
(
"众多的市民一时间全涌过来,\x01",
"那时候这里也够辛苦的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #198
0xFE,
(
"但是,导力器的问题\x01",
"还没有解决的头绪。\x02",
)
)
CloseMessageWindow()
ChrTalk( #199
0xFE,
(
"总之现在光是支持市民生活\x01",
"就已经竭尽全力了。\x02",
)
)
CloseMessageWindow()
OP_A2(0x8)
Jump("loc_296B")
label("loc_2915")
ChrTalk( #200
0xFE,
(
"导力器的问题\x01",
"还没有解决的头绪。\x02",
)
)
CloseMessageWindow()
ChrTalk( #201
0xFE,
(
"总之现在光是支持市民生活\x01",
"就已经竭尽全力了。\x02",
)
)
CloseMessageWindow()
label("loc_296B")
TalkEnd(0xFE)
Return()
# Function_12_24EC end
def Function_13_296F(): pass
label("Function_13_296F")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_2FBC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2D55")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2A60")
TurnDirection(0xFE, 0x106, 0)
OP_62(0xFE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(400)
ChrTalk( #202
0xFE,
"咦,阿加特先生……\x02",
)
CloseMessageWindow()
ChrTalk( #203
0x106,
(
"#051F哦,好久不见了啊。\x02\x03",
"看来还是\x01",
"很有精神嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk( #204
0xFE,
"哈哈,托你的福……\x02",
)
CloseMessageWindow()
ChrTalk( #205
0xFE,
(
"老爸当了市长,\x01",
"我就来帮他的忙了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #206
0xFE,
(
"这种情况下,\x01",
"很多事都需要忙呢。\x02",
)
)
CloseMessageWindow()
Jump("loc_2B57")
label("loc_2A60")
ChrTalk( #207
0xFE,
"咦,你们是……\x02",
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2ABC")
ChrTalk( #208
0x101,
"#1000F啊,好久不见。\x02",
)
CloseMessageWindow()
ChrTalk( #209
0x103,
"#021F呵呵,很有精神嘛。\x02",
)
CloseMessageWindow()
Jump("loc_2AE6")
label("loc_2ABC")
ChrTalk( #210
0x101,
(
"#1000F啊,好久不见。\x02\x03",
"怎样?还好吧?\x02",
)
)
CloseMessageWindow()
label("loc_2AE6")
TurnDirection(0xFE, 0x101, 400)
ChrTalk( #211
0xFE,
"哈哈,托你的福还算好。\x02",
)
CloseMessageWindow()
ChrTalk( #212
0xFE,
(
"老爸当了市长,\x01",
"我现在正在帮他的忙。\x02",
)
)
CloseMessageWindow()
ChrTalk( #213
0xFE,
(
"这种情况下,\x01",
"很多事都需要忙呢。\x02",
)
)
CloseMessageWindow()
label("loc_2B57")
ChrTalk( #214
0x101,
(
"#1011F哦~这可是\x01",
"正经的工作呢。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x101, 400)
ChrTalk( #215
0xFE,
(
"嗯,现在就和\x01",
"打工差不多。\x02",
)
)
CloseMessageWindow()
ChrTalk( #216
0xFE,
(
"不过着急也不是办法,\x01",
"我打算脚踏实地地努力看看。\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2C51")
ChrTalk( #217
0x106,
(
"#051F有这觉悟就没问题了。\x02\x03",
"……好好干哦。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x106, 400)
ChrTalk( #218
0xFE,
(
"是,是。\x01",
"非常感谢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #219
0xFE,
"阿加特先生也多保重。\x02",
)
CloseMessageWindow()
Jump("loc_2D4C")
label("loc_2C51")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2CDC")
ChrTalk( #220
0x103,
(
"#020F嗯嗯,有这觉悟就\x01",
"一定没问题了。\x02\x03",
"那么,好好干哦。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x103, 400)
ChrTalk( #221
0xFE,
"嗯、嗯,我会努力的。\x02",
)
CloseMessageWindow()
ChrTalk( #222
0xFE,
(
"那么,\x01",
"你们也多加小心。\x02",
)
)
CloseMessageWindow()
Jump("loc_2D4C")
label("loc_2CDC")
ChrTalk( #223
0x101,
(
"#1006F有这觉悟就\x01",
"一定没问题了。\x02\x03",
"那么,加油工作哦。\x02",
)
)
CloseMessageWindow()
ChrTalk( #224
0xFE,
"嗯、嗯,我会努力的。\x02",
)
CloseMessageWindow()
ChrTalk( #225
0xFE,
(
"那么,\x01",
"你们也多加小心。\x02",
)
)
CloseMessageWindow()
label("loc_2D4C")
OP_A2(0xB)
OP_A2(0x20BE)
Jump("loc_2FBC")
label("loc_2D55")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 3)), scpexpr(EXPR_END)), "loc_2E0A")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2DC3")
ChrTalk( #226
0xFE,
(
"总之我打算\x01",
"脚踏实地的努力看看。\x02",
)
)
CloseMessageWindow()
ChrTalk( #227
0xFE,
(
"这种非常时期,\x01",
"阿加特先生你们也要多加小心。\x02",
)
)
CloseMessageWindow()
Jump("loc_2E07")
label("loc_2DC3")
ChrTalk( #228
0xFE,
(
"总之我打算\x01",
"脚踏实地的努力看看。\x02",
)
)
CloseMessageWindow()
ChrTalk( #229
0xFE,
(
"那么,\x01",
"你们也要多加小心。\x02",
)
)
CloseMessageWindow()
label("loc_2E07")
Jump("loc_2FBC")
label("loc_2E0A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_2EF9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2E9A")
ChrTalk( #230
0xFE,
(
"学院的事件……\x01",
"从准游击士那里听说啦。\x02",
)
)
CloseMessageWindow()
ChrTalk( #231
0xFE,
(
"这种时候还发生人质事件,\x01",
"真是受不了啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #232
0xFE,
(
"犯人真是的,\x01",
"到底在想什么呢。\x02",
)
)
CloseMessageWindow()
OP_A2(0xA)
Jump("loc_2EF6")
label("loc_2E9A")
ChrTalk( #233
0xFE,
(
"这种时候还发生人质事件,\x01",
"真是受不了啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #234
0xFE,
(
"大家都在齐心协力的时候,\x01",
"真是不能原谅啊。\x02",
)
)
CloseMessageWindow()
label("loc_2EF6")
Jump("loc_2FBC")
label("loc_2EF9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2F79")
ChrTalk( #235
0xFE,
"我老爸也因为各种事忙得不得了。\x02",
)
CloseMessageWindow()
ChrTalk( #236
0xFE,
(
"应付市民的意见、\x01",
"食品和医药品的确保……\x02",
)
)
CloseMessageWindow()
ChrTalk( #237
0xFE,
(
"真是,要做的事\x01",
"多得堆成山。\x02",
)
)
CloseMessageWindow()
OP_A2(0xA)
Jump("loc_2FBC")
label("loc_2F79")
ChrTalk( #238
0xFE,
"我老爸也因为各种事忙得不得了。\x02",
)
CloseMessageWindow()
ChrTalk( #239
0xFE,
(
"为什么这么喜欢\x01",
"当市长呢。\x02",
)
)
CloseMessageWindow()
label("loc_2FBC")
TalkEnd(0xFE)
Return()
# Function_13_296F end
def Function_14_2FC0(): pass
label("Function_14_2FC0")
FadeToDark(300, 0, 100)
SetChrName("")
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk( #240
(
"\x07\x05『苍耀之灯火』\x01",
" 被认为是初期导力艺术的\x01",
" 极致作品。\x01",
" 导力革命之后\x01",
" 由卢安市民\x01",
" 赠送给为城市发展\x01",
" 作出贡献的戴尔蒙家。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
TalkEnd(0xFF)
Return()
# Function_14_2FC0 end
def Function_15_306D(): pass
label("Function_15_306D")
NewScene("ED6_DT21/T2210 ._SN", 123, 1, 0)
IdleLoop()
Return()
# Function_15_306D end
def Function_16_3077(): pass
label("Function_16_3077")
NewScene("ED6_DT21/T2210 ._SN", 121, 1, 0)
IdleLoop()
Return()
# Function_16_3077 end
def Function_17_3081(): pass
label("Function_17_3081")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk( #241
"\x07\x05有吊桥的控制装置。\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
TalkEnd(0xFF)
Return()
# Function_17_3081 end
SaveToFile()
Try(main)
|
[
"zhenjian.c.yang@gmail.com"
] |
zhenjian.c.yang@gmail.com
|
c638b2c52850b9a3d8881b615ddec38e4799910a
|
d4015bdfa9ab411d04bb8366262f73678f9cb31a
|
/bin/django-admin
|
13b7fb980727f38607fb40b37019794c4b4ecfa0
|
[] |
no_license
|
Stainislav/isb
|
28fe40940c4715555bba1b37fb9f5b81f61d2d99
|
7749497f26ae44f45f8eb461f4ed0b928cdd4134
|
refs/heads/master
| 2020-06-13T05:56:03.379388
| 2019-06-30T20:58:59
| 2019-06-30T20:58:59
| 194,561,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
#!/home/stanislav/MegaFon/CloudDev/Books/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"swatogor@mail.ru"
] |
swatogor@mail.ru
|
|
05d0319ccd4828d064b1c1dec90c8c72497eabf3
|
e87bb24f697085e8aa18f7a2245effb45b40ff3a
|
/basicExeptionHandling/trySt.py~
|
93e3227ec4591e96d3e044e75be5c192038fbb44
|
[] |
no_license
|
Julian-Carlosama/PersonalExersises
|
e74cb45e913a2490855f07e54d6499dead109d7d
|
888a750ed467d187e25bb970db6ec21aa8c0964b
|
refs/heads/main
| 2023-07-16T02:52:45.942662
| 2021-09-06T14:54:03
| 2021-09-06T14:54:03
| 402,678,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
#!/usr/bin/env python3
#Handle exeptions
while True:
try:
x = int(raw_input("Por favor ingrese un número: "))
break
except ValueError:
print ("Oops! No era válido. Intente nuevamente...")
|
[
"cromocelcarlosama@gmail.com"
] |
cromocelcarlosama@gmail.com
|
|
38376f59bb4dc3c9026277d80f6a376078358f21
|
b75048b53dd10f20ad4bd7fca61c4844f2545c66
|
/test.py
|
3225df14315773aa3d57405c80e350ef1fa9e273
|
[] |
no_license
|
kali20gakki/YOLOV3
|
16615998587d13670efcc5e8c8d3f79796f41986
|
7c8815194fc614e1cba8b3c160422c2dbf33cdb8
|
refs/heads/master
| 2021-03-17T20:34:09.615901
| 2021-03-08T07:41:38
| 2021-03-08T07:41:38
| 247,015,776
| 2
| 1
| null | 2021-03-08T07:41:38
| 2020-03-13T07:50:45
|
Python
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from time import strftime, localtime
anchor_mask= [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
print(anchor_mask[0])
|
[
"zhangwei3.0@qq.com"
] |
zhangwei3.0@qq.com
|
eae9c6e885efc10991a0442e56f10ef883ce3903
|
36c2713bec30903e11b636ef2d9d68b61ea3ab0a
|
/Homework1/PartB/main.py
|
8f53430e79a4c50efb3661b64dd8b3abfade2fcd
|
[] |
no_license
|
nateehuang/AlgorTradingGithub
|
1186cd27840ca1dbed8ab59a0d40c71d481b32dc
|
4aabbb41b2e9ce18172e010527c59d53ffb95984
|
refs/heads/master
| 2023-01-08T06:25:30.125978
| 2020-11-06T21:13:14
| 2020-11-06T21:13:14
| 310,706,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
'''
Created on Feb 25, 2020
@author: Nate, Zibin, Martin
'''
from PartB.StockStatPlot import oneStockTradeStat, oneStockQuoteStat
if __name__ == '__main__':
plot = False # Change to True if want to plot
if plot == True:
oneStockTradeStat('AYE', False) # False: before clean
oneStockTradeStat('AYE', True) # True: after clean
oneStockTradeStat('RRC', False)
oneStockTradeStat('RRC', True)
oneStockQuoteStat('AYE', False)
oneStockQuoteStat('AYE', True)
oneStockQuoteStat('RRC', False)
oneStockQuoteStat('RRC', True)
|
[
"huangnatee@gmail.com"
] |
huangnatee@gmail.com
|
540332d79a4582991e14c4c8f295cd883341c233
|
78a720310e3dd64b15faa75442a9cc7ca46f3ddc
|
/sns_app/flaskr/forms.py
|
38ca5ff9139c05c69f4e64b08261f8e91edcbc00
|
[] |
no_license
|
HiroakiMorita/sns_flask_app
|
93c6d61c786421c9de0024a8ec924b32c66d5e8a
|
e4040c5a70a930ec6bf6df98357e27f7b5bba0e9
|
refs/heads/master
| 2022-12-12T07:09:50.053016
| 2020-09-03T07:01:15
| 2020-09-03T07:01:15
| 285,998,246
| 0
| 0
| null | 2020-09-03T07:01:16
| 2020-08-08T07:46:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
from flask_wtf import FlaskForm
from wtforms.fields import (
StringField, FileField, PasswordField,
SubmitField, HiddenField, TextAreaField
)
from wtforms.validators import DataRequired, Email, EqualTo
from wtforms import ValidationError
from flask_login import current_user
from flask import flash
from flaskr.models import User, UserConnect
#ログイン用フォーム
class LoginForm(FlaskForm):
email = StringField('メール: ', validators=[DataRequired(), Email()])
password = PasswordField(
'パスワード: ',
validators=[DataRequired(),
EqualTo('confirm_password', message='パスワードが一致しません')]
)
confirm_password = PasswordField('パスワード再入力: ',validators=[DataRequired()])
submit = SubmitField('ログイン')
#登録用フォーム
class RegisterForm(FlaskForm):
email = StringField(
'メール:', validators=[DataRequired(), Email('メールアドレスが誤っています')]
)
username = StringField('名前: ', validators=[DataRequired()])
submit = SubmitField('登録')
def validate_email(self, field):
if User.select_user_by_email(field.data):
raise ValidationError('メールアドレスは既に登録されています')
#パスワード設定用のフォーム
class ResetPasswordForm(FlaskForm):
password = PasswordField(
'パスワード',
validators=[DataRequired(), EqualTo('confirm_password', message='パスワードが一致しません')]
)
confirm_password = PasswordField(
'パスワード確認: ', validators=[DataRequired()]
)
submit = SubmitField('パスワードを更新する')
def validate_password(self, field):
if len(field.data) < 8:
raise ValidationError('パスワードは8文字以上です')
class ForgotPasswordForm(FlaskForm):
email = StringField('メール:', validators=[DataRequired(), Email()])
submit = SubmitField('パスワードを再設定する')
def validate_email(self, field):
if not User.select_user_by_email(field.data):
raise ValidationError('そのメールアドレスは存在しません')
class UserForm(FlaskForm):
email = StringField('メール: ', validators=[DataRequired(), Email('メールアドレスが誤っています')]
)
username = StringField('名前: ', validators=[DataRequired()])
picture_path = FileField('ファイルアップロード')
submit = SubmitField('登録情報更新')
def validate(self):
if not super(FlaskForm, self).validate():
return False
user = User.select_user_by_email(self.email.data)
if user:
if user.id != int(current_user.get_id()):
flash('そのメールアドレスは既に登録されています')
return False
return True
class ChangePasswordForm(FlaskForm):
password = PasswordField(
'パスワード',
validators=[DataRequired(), EqualTo('confirm_password', message='パスワードが一致しません')]
)
confirm_password = PasswordField(
'パスワード確認: ', validators=[DataRequired()]
)
submit = SubmitField('パスワードの更新')
def validate_password(self, field):
if len(field.data) < 8:
raise ValidationError('パスワードは8文字以上です')
class UserSearchForm(FlaskForm):
username = StringField(
'名前: ', validators=[DataRequired()]
)
submit = SubmitField('ユーザー検索')
class ConnectForm(FlaskForm):
connect_condition = HiddenField()
to_user_id = HiddenField()
submit = SubmitField()
class MessageForm(FlaskForm):
to_user_id = HiddenField()
message = TextAreaField()
submit = SubmitField('メッセージ送信')
def validate(self):
if not super(FlaskForm, self).validate():
return False
is_friend = UserConnect.is_friend(self.to_user_id.data)
if not is_friend:
return False
return True
|
[
"taiyou3291@outlook.jp"
] |
taiyou3291@outlook.jp
|
ad18b7efb9bf4fe2a6a973acc2c84fbcc41117e3
|
d7546f7f7730c17dd8ce3f9eb5e03e91c0b2d44c
|
/Python3/283. Move Zeroes.py
|
30bc0045f6cff0cd2ba091d8f1cbf817578135d6
|
[] |
no_license
|
zhangyudong0215/FightAgainstLeetCode
|
05edf84a378bc0c696aabe1bf4cfb0938c81f5e6
|
fe702bb2b3ebd35bb9ec233bf3acc1291127bf04
|
refs/heads/master
| 2021-04-30T11:24:46.813783
| 2018-05-28T15:15:09
| 2018-05-28T15:15:09
| 121,253,253
| 3
| 0
| null | 2018-02-18T08:38:00
| 2018-02-12T14:00:08
|
Go
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
class Solution:
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
# count = nums.count(0) # 56ms
# index = 0
# for num in nums:
# if num:
# nums[index] = num
# index += 1
# while index < len(nums):
# nums[index] = 0
# index += 1
position = 0 # 52ms 没有显著改善
for index, num in enumerate(nums):
if num:
nums[position], nums[index] = nums[index], nums[position]
position += 1
|
[
"petenlf1025@gmail.com"
] |
petenlf1025@gmail.com
|
b80f3341e01c927cd8220c8b5e567848a7c8a259
|
229f4ec6272c5a730da44923a94f211fba04d38f
|
/cltk/prosody/latin/HendecasyllableScanner.py
|
15a3a006967237e83df65eef182e34fe46ab2867
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Sedictious/cltk
|
d8fd364f66eb5fdbc85057b490ddd842b15e96a4
|
681170d58de61d50bec6cca9ca5753844506c3f6
|
refs/heads/master
| 2021-04-30T02:10:41.988706
| 2018-12-10T17:14:17
| 2018-12-10T17:14:17
| 121,495,814
| 1
| 0
|
MIT
| 2018-12-10T17:01:16
| 2018-02-14T10:05:17
|
Python
|
UTF-8
|
Python
| false
| false
| 9,228
|
py
|
"""Utility class for producing a scansion pattern for a Latin hendecasyllables.
Given a line of hendecasyllables, the scan method performs a series of transformation and checks
are performed and for each one performed successfully, a note is added to the scansion_notes
list so that end users may view the provenance of a scansion.
"""
import re
from Levenshtein import distance
from cltk.prosody.latin.Verse import Verse
from cltk.prosody.latin.MetricalValidator import MetricalValidator
from cltk.prosody.latin.ScansionConstants import ScansionConstants
from cltk.prosody.latin.ScansionFormatter import ScansionFormatter
from cltk.prosody.latin.Syllabifier import Syllabifier
import cltk.prosody.latin.StringUtils as StringUtils
from cltk.prosody.latin.VerseScanner import VerseScanner
__author__ = ['Todd Cook <todd.g.cook@gmail.com>']
__license__ = 'MIT License'
class HendecasyllableScanner(VerseScanner):
"""The scansion symbols used can be configured by passing a suitable constants class to
the constructor."""
def __init__(self, constants=ScansionConstants(), syllabifier=Syllabifier(),
optional_tranform=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.constants = constants
self.remove_punct_map = StringUtils.remove_punctuation_dict()
self.punctuation_substitutions = StringUtils.punctuation_for_spaces_dict()
self.metrical_validator = MetricalValidator(constants)
self.formatter = ScansionFormatter(constants)
self.syllabifier = syllabifier
self.inverted_amphibrach_re = re.compile(
r"{}\s*{}\s*{}".format(self.constants.STRESSED,
self.constants.UNSTRESSED,
self.constants.STRESSED))
self.syllable_matcher = re.compile(r"[{}]".format(self.constants.VOWELS +
self.constants.ACCENTED_VOWELS +
self.constants.LIQUIDS +
self.constants.MUTES))
self.optional_transform = optional_tranform
def scan(self, original_line: str, optional_transform: bool = False) -> Verse:
"""Scan a line of Latin hendecasyllables and produce a scansion pattern, and other data.
:return: a Verse object
>>> scanner = HendecasyllableScanner()
>>> print(scanner.scan("Cui dono lepidum novum libellum"))
Verse(original='Cui dono lepidum novum libellum', scansion=' - U - U U - U - U - U ', meter='hendecasyllable', valid=True, syllable_count=11, accented='Cui donō lepidūm novūm libēllum', scansion_notes=['Corrected invalid start.'], syllables = ['Cui', 'do', 'no', 'le', 'pi', 'dūm', 'no', 'vūm', 'li', 'bēl', 'lum'])
>>> print(scanner.scan(
... "ārida modo pumice expolitum?").scansion) # doctest: +NORMALIZE_WHITESPACE
- U - U U - U - U - U
"""
verse = Verse(original_line, meter='hendecasyllable')
# replace punctuation with spaces
line = original_line.translate(self.punctuation_substitutions)
# conservative i to j
line = self.transform_i_to_j(line)
working_line = self.elide_all(line)
working_line = self.accent_by_position(working_line)
syllables = self.syllabifier.syllabify(working_line)
if optional_transform:
working_line = self.transform_i_to_j_optional(line)
working_line = self.elide_all(working_line)
working_line = self.accent_by_position(working_line)
syllables = self.syllabifier.syllabify(working_line)
verse.scansion_notes += [self.constants.NOTE_MAP["optional i to j"]]
verse.working_line = working_line
verse.syllable_count = self.syllabifier.get_syllable_count(syllables)
verse.syllables = syllables
# identify some obvious and probably choices based on number of syllables
if verse.syllable_count > 11:
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["> 11"]]
return verse
if verse.syllable_count < 11:
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["< 11"]]
return verse
stresses = self.flag_dipthongs(syllables)
syllables_wspaces = StringUtils.to_syllables_with_trailing_spaces(working_line, syllables)
offset_map = self.calc_offset(syllables_wspaces)
for idx, syl in enumerate(syllables):
for accented in self.constants.ACCENTED_VOWELS:
if accented in syl:
stresses.append(idx)
# second to last syllable is always long
stresses.append(verse.syllable_count - 2)
verse.scansion = self.produce_scansion(stresses,
syllables_wspaces, offset_map)
if len(StringUtils.stress_positions(self.constants.STRESSED, verse.scansion)) != \
len(set(stresses)):
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["invalid syllables"]]
return verse
if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
verse.scansion_notes += [self.constants.NOTE_MAP["positionally"]]
return self.assign_candidate(verse, verse.scansion)
smoothed = self.correct_invalid_start(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["invalid start"]]
verse.scansion = smoothed
stresses += StringUtils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
smoothed = self.correct_antepenult_chain(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["antepenult chain"]]
verse.scansion = smoothed
stresses += StringUtils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
candidates = self.metrical_validator.closest_hendecasyllable_patterns(verse.scansion)
if candidates is not None:
if len(candidates) == 1 \
and len(verse.scansion.replace(" ", "")) == len(candidates[0]) \
and len(StringUtils.differences(verse.scansion, candidates[0])) == 1:
tmp_scansion = self.produce_scansion(
StringUtils.differences(verse.scansion, candidates[0]),
syllables_wspaces, offset_map)
if self.metrical_validator.is_valid_hendecasyllables(tmp_scansion):
verse.scansion_notes += [self.constants.NOTE_MAP["closest match"]]
return self.assign_candidate(verse, tmp_scansion)
# if the line doesn't scan "as is", if may scan if the optional i to j transformations
# are made, so here we set them and try again.
if self.optional_transform and not verse.valid:
return self.scan(original_line, optional_transform=True)
verse.accented = self.formatter.merge_line_scansion(
verse.original, verse.scansion)
return verse
def correct_invalid_start(self, scansion: str) -> str:
"""The third syllable of a hendecasyllabic line is long, so we will convert it
:param scansion:
:return: scansion string with corrected start
>>> print(HendecasyllableScanner().correct_invalid_start(
... "- U U U U - U - U - U").strip())
- U - U U - U - U - U
"""
mark_list = StringUtils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
corrected = vals[:2] + [self.constants.STRESSED] + vals[3:]
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line)
def correct_antepenult_chain(self, scansion: str) -> str:
"""For hendecasyllables the last three feet of the verse are predictable
and do not regularly allow substitutions.
:param scansion: scansion line thus far
:return: corrected line of scansion
>>> print(HendecasyllableScanner().correct_antepenult_chain(
... "-U -UU UU UU UX").strip())
-U -UU -U -U -X
"""
mark_list = StringUtils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
new_vals = vals[:len(vals) - 6] + [self.constants.TROCHEE +
self.constants.TROCHEE +
self.constants.STRESSED] + vals[-1:]
corrected = "".join(new_vals)
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line)
|
[
"kyle@kyle-p-johnson.com"
] |
kyle@kyle-p-johnson.com
|
acd92fa6257280f3779d78caf9b3b72b1ac3e54f
|
b94f9692393f2a1f24e36ea49f1b61995dc93214
|
/Study/test_func.py
|
5941bc023a16becdd8501b3eed1473f3400baed8
|
[] |
no_license
|
annomad/ZB_tools
|
d1d1932be853dc330a7ca38dd369d7570093f3c4
|
f64428f7d8348f3f625c8541b4061a5b30e7e5ad
|
refs/heads/master
| 2023-05-07T07:16:15.491417
| 2021-05-26T03:32:57
| 2021-05-26T03:32:57
| 349,976,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
class TestFunc():
def __init__(self, a=0, b=0):
self.a = a
self.b = b
def Add(self):
return (self.a + self.b)
|
[
"25004472@qq.com"
] |
25004472@qq.com
|
426c5e0d5a83f6df17a3d005e7214aa7f8ce9038
|
189d79c0e0fcdce192a6034306416fd492202501
|
/LeetCode/Python/306 Additive Number.py
|
c7806ee6c106e36c199ee794f0ded80b76622235
|
[] |
no_license
|
digant0705/Algorithm
|
294fbc84eaa4b6e0ea864924b71c4773c2e1c0c6
|
01f04bcc5e8f55014973d4eef069245f3f663eb9
|
refs/heads/master
| 2021-07-25T16:44:34.366974
| 2021-06-05T23:37:17
| 2021-06-05T23:37:17
| 251,144,249
| 0
| 0
| null | 2020-03-29T22:05:29
| 2020-03-29T22:05:28
| null |
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
# -*- coding: utf-8 -*-
'''
Additive Number
===============
Additive number is a string whose digits can form additive sequence.
A valid additive sequence should contain at least three numbers. Except for the
first two numbers, each subsequent number in the sequence must be the sum of
the preceding two.
For example:
"112358" is an additive number because the digits can form an additive
sequence: 1, 1, 2, 3, 5, 8.
1 + 1 = 2, 1 + 2 = 3, 2 + 3 = 5, 3 + 5 = 8
"199100199" is also an additive number, the additive sequence is:
1, 99, 100, 199.
1 + 99 = 100, 99 + 100 = 199
Note: Numbers in the additive sequence cannot have leading zeros, so sequence
1, 2, 03 or 1, 02, 3 is invalid.
Given a string containing only digits '0'-'9', write a function to determine
if it's an additive number.
Follow up:
How would you handle overflow for very large input integers?
'''
import collections
class Solution(object):
'''算法思路:
前两个数字固定,那么就可以判断整个序列,所以枚举前两个不同的数字即可
'''
def add(self, a, b):
i, j, carry, r = len(a) - 1, len(b) - 1, 0, collections.deque()
while i >= 0 or j >= 0:
carry, mod = divmod(
(int(a[i]) if i >= 0 else 0) +
(int(b[j]) if j >= 0 else 0) + carry, 10)
r.appendleft(mod)
i -= 1
j -= 1
if carry:
r.appendleft(carry)
return ''.join(map(str, r))
def check(self, a, b, num):
if not num:
return True
sum = self.add(a, b)
if num.startswith(sum):
return self.check(b, sum, num[len(sum):])
return False
def isAdditiveNumber(self, num):
return any(
self.check(num[:i + 1], num[i + 1:j + 1], num[j + 1:])
for i in xrange(len(num) - 2)
for j in xrange(i + 1, len(num) - 1)
)
s = Solution()
print s.isAdditiveNumber("11")
|
[
"shiyanhui66@gmail.com"
] |
shiyanhui66@gmail.com
|
be938368f2fbe8f503a6259a20e3e9714ac29b5c
|
5af4b89949a703bcc53bdc25a19a5ff079817cce
|
/papermerge/core/models/folder.py
|
00f6881892ed5ee47048c385c945b3f38b07f4ff
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
0xpointer42/papermerge
|
4b176a865ffa3044605844406fecd3ac5f3c5657
|
9bea16e96d460d00229e813f7063e45bfd07b4e2
|
refs/heads/master
| 2022-09-09T09:18:56.596921
| 2020-06-02T15:45:11
| 2020-06-02T15:45:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
from django.utils.translation import ugettext_lazy as _
from papermerge.core import mixins
from papermerge.core.models.kvstore import KVNode
from papermerge.core.models.node import BaseTreeNode
from papermerge.search import index
class Folder(mixins.ExtractIds, BaseTreeNode):
search_fields = [
index.SearchField('title'),
index.SearchField('text', partial_match=True, boost=2),
index.SearchField('notes')
]
@property
def kv(self):
return KVNode(instance=self)
class Meta:
verbose_name = _("Folder")
verbose_name_plural = _("Folders")
def __str__(self):
return self.title
|
[
"eugen@django-lessons.com"
] |
eugen@django-lessons.com
|
85a6d295adf2b06ebfa2ea5e0abcc2746a7fbf07
|
6544fa558a6c08e4c67de393ed0d0ab554533839
|
/DjangoProjects/DjangoProjects11RentAccount/rento/enquiry/views.py
|
55dd0ff91d83125c2c00f2759da4de7eb06e3656
|
[] |
no_license
|
tennnmani/bitproject7-DjangoPython-
|
498878276ca0c847d0cf2ca73c1091074720d6e5
|
fe13b4822c4cc5686a478dbfee915c108b6f9278
|
refs/heads/main
| 2023-02-21T16:56:10.842672
| 2021-02-25T04:13:52
| 2021-02-25T04:13:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
from django.shortcuts import render, redirect
from .models import Enquiry
from rooms.models import Room
from user.models import User
from enquiry.forms import EnquiryForm
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, HttpResponse
from user.decorators import unauthenticated_user, allowed_users
@login_required(login_url='user-login')
@allowed_users(allowed_roles=['rento_user'])
def enquirylist(request):
# rooms = Room.objects.filter(user=request.user)
enquirys = Enquiry.objects.filter(user=request.user)
context = {
'enquirys': enquirys,
# 'rooms': rooms
}
return render(request, 'user/enquirylist.html', context)
def enquirycreate(request, pk):
if request.method == 'POST':
enquiryform = EnquiryForm(request.POST)
if enquiryform.is_valid():
data = Enquiry()
user = Room.objects.get(id=pk)
data.room = Room.objects.get(id=pk)
data.user = User.objects.get(username=user.user)
data.name = enquiryform.cleaned_data['name']
data.phone = enquiryform.cleaned_data['phone']
data.email = enquiryform.cleaned_data['email']
data.address = enquiryform.cleaned_data['address']
data.occupation = enquiryform.cleaned_data['occupation']
data.question = enquiryform.cleaned_data['question']
data.save()
return redirect('roomdetail', pk)
return redirect('roomdetail', pk)
def roomenqiury(request, pk):
enquirys = Enquiry.objects.get(id=pk)
context = {
'enquirys': enquirys
}
return render(request, 'user/roomenqiury.html', context)
|
[
"diwakartop10now@gmail.com"
] |
diwakartop10now@gmail.com
|
1e0067c9c51a9acc16bb60a8b1ab9eaf983a11fb
|
da39b485bf020c88519d367219d3aaf119539409
|
/0x01-python-if_else_loops_functions/102-magic_calculation.py
|
ed6e72f487a69b3e7e125a4be234932900943fd2
|
[] |
no_license
|
JulianArbini97/holbertonschool-higher_level_programming
|
e67356dc04a5af22c4dadec6daeb043d3864c913
|
68865e995e69443b4ee43f5174b21052e755f97b
|
refs/heads/main
| 2023-04-19T22:36:25.226180
| 2021-05-04T20:13:06
| 2021-05-04T20:13:06
| 319,322,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
#!/usr/bin/python3
def magic_calculation(a, b, c):
    """Return c when a < b, a + b when c > b, otherwise a * b - c.

    Conditions are checked in that order, so the first match wins.
    """
    if a < b:
        return c
    if c > b:
        return a + b
    return a * b - c
|
[
"1890@holbertonschool.com"
] |
1890@holbertonschool.com
|
478f8c7ada5ddf9f251c892adde96e027b636b33
|
d5ed141e513dcb6fc8ab851835ec9a4630e3651b
|
/anaconda/anaconda/pkgs/anaconda-navigator-1.4.3-py27_0/lib/python2.7/site-packages/anaconda_navigator/widgets/dialogs/license.py
|
89276b1e51fc11996f430c0cb1cfb33639b87002
|
[] |
no_license
|
starrysky1211/starrysky
|
713998b366449a5ae4371e38723c56ea40532593
|
abb642548fb9b431551133657f1a67858041a7e6
|
refs/heads/master
| 2022-11-09T21:51:22.558151
| 2017-02-25T14:42:37
| 2017-02-25T14:42:37
| 67,608,074
| 0
| 1
| null | 2022-10-16T05:17:25
| 2016-09-07T13:16:45
|
Python
|
UTF-8
|
Python
| false
| false
| 13,033
|
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016 Continuum Analytics, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""License Manager Dialog."""
# yapf: disable
# Third party imports
from qtpy.compat import getopenfilename, to_qvariant
from qtpy.QtCore import (QAbstractTableModel, QModelIndex,
QSortFilterProxyModel, Qt, Signal)
from qtpy.QtGui import QColor
from qtpy.QtWidgets import (QAbstractItemView, QHBoxLayout, QStyle,
QStyledItemDelegate, QTableView, QVBoxLayout)
# Local imports
from anaconda_navigator.api.anaconda_api import AnacondaAPI
from anaconda_navigator.config import LICENSE_PATH, get_home_dir
from anaconda_navigator.utils.qthelpers import qapplication
from anaconda_navigator.widgets import (ButtonLink, ButtonNormal,
ButtonPrimary, LabelBase,
SpacerHorizontal, SpacerVertical)
from anaconda_navigator.widgets.dialogs import (DialogBase,
MessageBoxInformation,
MessageBoxRemove)
# yapf: enable
# Extra data added to the license dicts to track the file it comes from
# Defined as a constant as it is used in several places so this avoidd hard
# coding a string
# Maps table-column index -> key inside each license dict. Keys wrapped in
# double underscores ('__type__', '__status__') are synthesized by the app
# rather than read from the license file itself.
COL_MAP = {
    0: '__type__',
    1: 'product',
    2: 'end_date',
    3: '__status__',
    4: 'sig',
    5: LICENSE_PATH,
}
# Columns kept in the model but hidden from the table view.
HIDDEN_COLUMNS = [LICENSE_PATH, 'sig']
class LicenseModel(QAbstractTableModel):
    """Table model backing the license view.

    Each row is a dict describing one license; COL_MAP translates a column
    index into the dict key displayed in that column.
    """
    def __init__(self, parent=None, licenses=None):
        """Table model for the license view."""
        super(LicenseModel, self).__init__(parent=parent)
        self._parent = parent
        # Row storage: list of license dicts (empty when `licenses` is falsy).
        self._rows = licenses if licenses else []
    @staticmethod
    def flags(index):
        """Override Qt method."""
        # NOTE(review): implicitly returns None for an invalid index; Qt
        # conventionally expects Qt.NoItemFlags there -- confirm.
        if index.isValid():
            return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
    def data(self, index, role=Qt.DisplayRole):
        """Override Qt method: return display/alignment data for one cell."""
        if not index.isValid() or not 0 <= index.row() < len(self._rows):
            return None
        row = index.row()
        column = index.column()
        license_data = self._rows[row]
        if role == Qt.DisplayRole:
            # Translate the column number into the license-dict key.
            data_key = COL_MAP.get(column)
            if data_key:
                return license_data.get(data_key)
        elif role == Qt.TextAlignmentRole:
            return Qt.AlignCenter
        # Unhandled roles fall through to an invalid QVariant.
        return to_qvariant()
    @staticmethod
    def headerData(section, orientation, role=Qt.DisplayRole):
        """Override Qt method: build human-readable column titles.

        Strips the '__synthetic__' markers and prettifies, e.g.
        '__type__' -> 'Type', 'end_date' -> 'End date'.
        """
        title = COL_MAP.get(section)
        title = title.replace('__', '')
        title = title.replace('_', ' ').capitalize()
        if role == Qt.TextAlignmentRole:
            if orientation == Qt.Horizontal:
                return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
            return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
        elif role == Qt.ToolTipRole:
            return to_qvariant()
        elif role == Qt.DisplayRole and orientation == Qt.Horizontal:
            return to_qvariant(title)
    def rowCount(self, index=QModelIndex()):
        """Override Qt method."""
        return len(self._rows)
    @staticmethod
    def columnCount(index=QModelIndex()):
        """Override Qt method."""
        return len(COL_MAP)
    # --- Helpers
    # -------------------------------------------------------------------------
    def row(self, rownum):
        """Return the row data, or None when `rownum` is out of range."""
        return self._rows[rownum] if rownum < len(self._rows) else None
    def load_licenses(self, licenses=None):
        """(Re)Load license data."""
        self._rows = licenses if licenses else []
class BackgroundDelegate(QStyledItemDelegate):
    """
    Delegate for handling background color in table.
    QTableView CSS styling rules are too limited so in order to get an even
    styling that matches the overall look, this delegate is needed.
    """
    def __init__(self, parent=None):
        """Delegate for handling background color in table."""
        super(BackgroundDelegate, self).__init__(parent=parent)
        self._parent = parent
    def paint(self, painter, option, index):
        """Override Qt method: fill selected cells with a custom color."""
        # To draw a border on selected cells
        if option.state & QStyle.State_Selected:
            # Selection color depends on whether the table has keyboard focus.
            if self._parent.hasFocus():
                color = QColor('#43B02A')  # TODO: Get this from the scss
            else:
                color = QColor('#cecece')  # TODO: Get this from the scss
            painter.save()
            painter.fillRect(option.rect, color)
            painter.restore()
            # Disable the state for the super() painter method: the XOR
            # clears State_Selected so the default paint does not draw its
            # own selection background over ours.
            option.state ^= QStyle.State_Selected
        super(BackgroundDelegate, self).paint(painter, option, index)
class LicenseTableView(QTableView):
    """License table manager view.

    Accepts drag-and-drop of license files (local paths are emitted via
    ``sig_dropped``) and reports keyboard-focus changes through
    ``sig_entered``/``sig_left`` so the parent can restyle the selection.
    """
    sig_dropped = Signal(object)
    sig_entered = Signal()
    sig_left = Signal()
    def __init__(self, parent=None):
        """License table manager view."""
        super(LicenseTableView, self).__init__(parent=parent)
        self.setMinimumWidth(500)
        self.setMinimumHeight(200)
        self.setAcceptDrops(True)
        self.setShowGrid(False)
        self.setSortingEnabled(True)
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setAlternatingRowColors(True)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.verticalHeader().hide()
        self.horizontalHeader().setStretchLastSection(True)
    def focusInEvent(self, event):
        """Override Qt Method."""
        super(LicenseTableView, self).focusInEvent(event)
        self.sig_entered.emit()
    def focusOutEvent(self, event):
        """Override Qt Method."""
        # BUG FIX: this previously forwarded to super().focusInEvent(),
        # re-running focus-IN handling whenever the widget LOST focus.
        super(LicenseTableView, self).focusOutEvent(event)
        self.sig_left.emit()
    def dragEnterEvent(self, event):
        """Override Qt Method: accept drags that carry URLs."""
        # The 'dragin' property drives the drop-target CSS state.
        self.setProperty('dragin', True)
        if event.mimeData().hasUrls:
            event.accept()
        else:
            event.ignore()
    def dragLeaveEvent(self, event):
        """Override Qt Method."""
        self.setProperty('dragin', False)
    @staticmethod
    def dragMoveEvent(event):
        """Override Qt Method."""
        if event.mimeData().hasUrls:
            event.setDropAction(Qt.CopyAction)
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        """Override Qt Method: emit local file paths of the dropped URLs."""
        self.setProperty('dragin', False)
        if event.mimeData().hasUrls:
            event.setDropAction(Qt.CopyAction)
            event.accept()
            links = []
            for url in event.mimeData().urls():
                links.append(str(url.toLocalFile()))
            self.sig_dropped.emit(tuple(links))
        else:
            event.ignore()
    def setProperty(self, name, value):
        """Override Qt method: re-polish the style so property-based CSS
        selectors (e.g. [dragin="true"]) take effect immediately."""
        QTableView.setProperty(self, name, value)
        self.style().unpolish(self)
        self.style().polish(self)
        self.update()
class LicenseManagerDialog(DialogBase):
    """License Manager main dialog.

    Lists the licenses known to AnacondaAPI and lets the user add licenses
    (via file picker or drag-and-drop onto the table) and remove them.
    """
    CONTACT_LINK = 'https://support.continuum.io/'  # TODO: Centralize this?
    # Url, Sender
    sig_url_clicked = Signal(object, object)
    def __init__(self, parent=None):
        """License Manager main dialog."""
        super(LicenseManagerDialog, self).__init__(parent=parent)
        self.api = AnacondaAPI()
        # Widgets
        self.message_box = None  # For testing
        self.button_add = ButtonPrimary('Add license')
        self.button_close = ButtonNormal('Close')
        self.button_remove = ButtonNormal('Remove license')
        self.button_contact = ButtonLink('Please contact us.')
        self.label_info = LabelBase(
            'Manage your Continuum Analytics '
            'license keys.'
        )
        self.label_contact = LabelBase('Got a problem with your license? ')
        self.proxy_model = QSortFilterProxyModel(parent=self)
        self.model = LicenseModel(parent=self)
        self.table = LicenseTableView(parent=self)
        self.delegate = BackgroundDelegate(self.table)
        # Widget setup: table displays the model through a sortable proxy.
        self.proxy_model.setSourceModel(self.model)
        self.table.setItemDelegate(self.delegate)
        self.table.setModel(self.proxy_model)
        self.setWindowTitle('License Manager')
        # Layouts
        layout_buttons = QHBoxLayout()
        layout_buttons.addWidget(self.label_info)
        layout_buttons.addWidget(SpacerHorizontal())
        layout_buttons.addStretch()
        layout_buttons.addWidget(self.button_add)
        layout_buttons.addWidget(SpacerHorizontal())
        layout_buttons.addWidget(self.button_remove)
        layout_buttons_bottom = QHBoxLayout()
        layout_buttons_bottom.addWidget(self.label_contact)
        layout_buttons_bottom.addWidget(self.button_contact)
        layout_buttons_bottom.addStretch()
        layout_buttons_bottom.addWidget(self.button_close)
        layout = QVBoxLayout()
        layout.addLayout(layout_buttons)
        layout.addWidget(SpacerVertical())
        layout.addWidget(self.table)
        layout.addWidget(SpacerVertical())
        layout.addWidget(SpacerVertical())
        layout.addLayout(layout_buttons_bottom)
        self.setLayout(layout)
        # Signals
        self.button_add.clicked.connect(lambda: self.add_license())
        self.button_remove.clicked.connect(self.remove_license)
        self.button_close.clicked.connect(self.accept)
        self.button_contact.clicked.connect(
            lambda v=None: self.sig_url_clicked.
            emit(self.CONTACT_LINK, 'License Manager')
        )
        self.table.sig_dropped.connect(self.api.add_license)
        # Setup
        self.button_add.setFocus()
        self.load_licenses()
    def _hide_columns(self):
        """Hide the table columns listed in HIDDEN_COLUMNS."""
        for key, val in COL_MAP.items():
            if val in HIDDEN_COLUMNS:
                self.table.setColumnHidden(key, True)
    def add_license(self, v=None, path=None):
        """Add a license file, prompting with a file dialog if `path` is None."""
        if path is None:
            filename, selected_filter = getopenfilename(
                self,
                'Select license file',
                filters='License files (*.txt)',
                basedir=get_home_dir(),
            )
            if filename:
                paths = [filename]
            else:
                paths = []
        else:
            paths = [path]
        valid_licenses, invalid_licenses = self.api.add_license(paths)
        for invalid_license in invalid_licenses:
            # BUG FIX: the message previously interpolated `path`, which is
            # None when the file came from the file dialog; report the
            # actual invalid file instead.
            text = ('File: <b>"{0}"</b>'
                    '<br>is not a valid license file.').format(invalid_license)
            self.message_box = MessageBoxInformation(
                text=text, title="Invalid license file"
            )
            self.message_box.exec_()
        if valid_licenses:
            self.load_licenses()
    def remove_license(self, row=None):
        """Remove the license at `row` (or the current selection) after
        asking the user for confirmation."""
        if row is None:
            index = self.table.currentIndex()
        else:
            index = self.proxy_model.index(row, 0)
        # Map through the sort proxy back to the underlying model row.
        model_index = self.proxy_model.mapToSource(index)
        row_data = self.model.row(model_index.row())
        if row_data:
            text = (
                'Do you want to remove license for product:<br><br>'
                '<b>{product}</b> ({issued} - {end_date})'
            )
            text = text.format(
                product=row_data.get('product'),
                end_date=row_data.get('end_date'),
                issued=row_data.get('issued')
            )
            self.message_box = MessageBoxRemove(
                title='Remove license', text=text
            )
            if self.message_box.exec_():
                self.api.remove_license(row_data)
                self.load_licenses()
    def load_licenses(self):
        """(Re)load license files from the API into the table."""
        res = self.api.load_licenses()
        self.model.load_licenses(res)
        self.proxy_model.setSourceModel(self.model)
        self.table.resizeColumnsToContents()
        self._hide_columns()
        self.update_status()
    def count(self):
        """Return the number of items in the table."""
        return self.table.model().rowCount()
    def update_status(self):
        """Update visible and enabled status for widgets based on actions."""
        self.button_remove.setEnabled(bool(self.count()))
def test():  # pragma: no cover
    """Run local test: show the dialog inside a standalone Qt application."""
    app = qapplication()
    w = LicenseManagerDialog()
    w.update_style_sheet()
    w.show()
    app.exec_()
# Allow running this module directly for a quick manual check.
if __name__ == '__main__':
    test()
|
[
"starry_sky_@outlook.com"
] |
starry_sky_@outlook.com
|
6356fa85d64646c23a6d3e5a624cc94cc5e9fa29
|
0348515f8ed14df0e005073ad911360f78eff574
|
/backend/controllers/relations/learns.py
|
4604e9720eea23fbdb8dd6176ba9cdb03a8759f2
|
[] |
no_license
|
Graduation-Team-2021/LMS-Graduation-Project
|
e54e3fa3a6ae0b92ddc2d687fdf34c52dbdca844
|
02a6402bf8dfdd72a19107f007779672b62009dc
|
refs/heads/main
| 2023-06-22T14:00:44.361803
| 2021-07-26T06:06:12
| 2021-07-26T06:06:12
| 309,816,360
| 1
| 2
| null | 2021-07-24T17:14:36
| 2020-11-03T21:54:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,306
|
py
|
from controllers.course.deliverables import deliverable_controller
from models.relations.learns import Learns_Relation
from models.course.courses import Course
from models.user.students import Student
from methods.errors import *
from flask import jsonify
from models.user.users import User
# Shared controller used to pull per-student deliverable marks.
deliv_object = deliverable_controller()
class student_course_relation_controller():
    """CRUD helpers around the Learns_Relation (student <-> course) table."""
    def get_courses_by_student_id(self, student_id):
        """Return the courses `student_id` is enrolled in as a list of dicts.

        Raises ErrorHandler (500) when the query fails.
        """
        try:
            courses = Learns_Relation.query.join(Student).filter(Student.user_id == student_id)\
                .join(Course).filter(Course.course_code == Learns_Relation.course_code).\
                with_entities(Course.course_code, Course.course_name,
                              Course.course_description, Course.post_owner_id, Course.course_pic, Course.mid, Course.final)
        except SQLAlchemyError as e:
            error = str(e)
            raise ErrorHandler({
                'description': error,
                'status_code': 500
            })
        # with_entities yields plain tuples; repackage as JSON-friendly dicts.
        results_array = list()
        for i in courses:
            results_array.append(
                {
                    'course_code': i[0],
                    'course_name': i[1],
                    'course_description': i[2],
                    'post_owner_id': i[3],
                    'course_pic': i[4],
                    'mid': i[5],
                    'final': i[6]
                }
            )
        return results_array
        # data = [course for course in courses]
        # return data
    def get_students_in_course(self, course_code):
        """Return (user_id, name) dicts for every student enrolled in
        `course_code`. Raises ErrorHandler (500) on query failure."""
        try:
            courses = Learns_Relation.query.filter(Learns_Relation.course_code == course_code)\
                .join(Student).filter(Student.user_id == Learns_Relation.student_id)\
                .join(User).filter(User.user_id == Student.user_id).\
                with_entities(User.user_id, User.name)
        except SQLAlchemyError as e:
            error = str(e)
            raise ErrorHandler({
                'description': error,
                'status_code': 500
            })
        results_array = list()
        for i in courses:
            results_array.append(
                {
                    'user_id': i[0],
                    'name': i[1]
                }
            )
        return results_array
    def post_student_course_relation(self, student_course_relation):
        """Insert a new Learns_Relation row built from the given dict."""
        try:
            new_learns_relation = Learns_Relation(**student_course_relation)
            new_learns_relation = Learns_Relation.insert(new_learns_relation)
        except SQLAlchemyError as e:
            error = str(e)
            raise ErrorHandler({
                'description': error,
                'status_code': 500
            })
        return
    def update_student_course_relation(self, student_id, course_code, new_relation):
        """Replace the relation for (student_id, course_code) with `new_relation`.

        NOTE(review): unlike the other methods, ErrorHandler is raised here
        with a plain string instead of a {'description', 'status_code'} dict;
        also a brand-new Learns_Relation is constructed and `.update()` is
        called on it rather than mutating the fetched row -- confirm intent.
        """
        relation = Learns_Relation.query.filter_by(
            student_id=student_id, course_code=course_code).first()
        if not relation:
            raise ErrorHandler(
                'relation does not exist,please recheck your data')
        relation = Learns_Relation(**new_relation)
        try:
            relation.update()
        except SQLAlchemyError as e:
            error = str(e)
            raise ErrorHandler({
                'description': error,
                'status_code': 500
            })
        return relation.serialize()
    def delete_student_course_relation(self, student_id, course_code):
        """Delete the relation for (student_id, course_code); 500 if missing."""
        try:
            relation = Learns_Relation.query.filter_by(
                student_id=student_id, course_code=course_code).first()
            if relation is None:
                raise ErrorHandler({
                    'description': 'relation not found',
                    'status_code': 500
                })
            Learns_Relation.delete(relation)
        except SQLAlchemyError as e:
            error = str(e)
            raise ErrorHandler({
                'description': error,
                'status_code': 500
            })
        return
    def get_all_students_in_one_course(self, course_code):
        """Return per-student name/marks/deliverables for `course_code`.

        NOTE(review): issues one User query plus one deliverables lookup per
        enrolled student (N+1 pattern) -- acceptable for small rosters only.
        """
        students = Learns_Relation.query.filter_by(
            course_code=course_code).all()
        student_id_list = [s.serialize() for s in students]
        names = []
        for i in student_id_list:
            # names=[ n.serialize()["name"] for n in User.query.filter(User.user_id==i).all()]
            # User.query.filter(User.user_id==i).first()
            t2 = {}
            temp = User.query.filter(
                User.user_id == i["student_id"]).first().serialize()
            t2['name'] = temp['name']
            t2['id'] = temp['user_id']
            t2['mid'] = i['mid_term_mark']
            t2['final'] = i['final_exam_mark']
            t2['deliv'] = []
            deliv_list = deliv_object.get_all_course_deliverables(
                course_code, i["student_id"], 'student')
            for d in deliv_list:
                print(d)
                t2['deliv'].append({
                    'submit': d['submit'],
                    "id": d["deliverable_id"],
                    'name': d['deliverable_name'],
                    'total': d['mark'],
                    'value': d['smark']
                })
            names.append(t2)
        # s=[]
        # for i in range(len(names)):
        #     s.append( f"{names[i]}:{student_id_list[i]}")
        # return s
        return names
    def get_student_marks(self, student_id, course_code):
        """Return one student's serialized relation plus an 'assign' list of
        their deliverable marks for `course_code`."""
        try:
            t2 = Learns_Relation.query.filter(Learns_Relation.student_id == student_id, Learns_Relation.course_code == course_code).first().serialize()
            t2['assign'] = []
            deliv_list = deliv_object.get_all_course_deliverables(
                course_code, student_id, 'student')
            for d in deliv_list:
                print(d)
                t2['assign'].append({
                    "id": d["deliverable_id"],
                    'name': d['deliverable_name'],
                    'total': d['mark'],
                    'value': d['smark']
                })
        except SQLAlchemyError as e:
            error = str(e)
            raise ErrorHandler({
                'description': error,
                'status_code': 500
            })
        return t2
|
[
"davidmesak@hotmail.com"
] |
davidmesak@hotmail.com
|
29d64bfeff13d2d620664beeb544713fc033e990
|
614d5ec96dcd9c6bb7a4384ea5420a7757c43d34
|
/examples/checkable.py
|
3bb79a1ddb3669a679ec3b68eab1e3c9bd9625ce
|
[
"MIT"
] |
permissive
|
githeshuai/dayu_widgets_tag
|
52ae4816addd58505b6bbd0e4cd12f931df89e95
|
f843e8f100b698af74353ec7595c26213574bc15
|
refs/heads/master
| 2023-04-05T10:04:03.726767
| 2021-04-01T16:02:42
| 2021-04-01T16:02:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.8
# Email : muyanru345@163.com
###################################################################
from dayu_widgets_tag import MCheckableTag
from dayu_widgets.qt import QWidget, QHBoxLayout, QApplication, Slot
from dayu_widgets import dayu_theme, MLabel
@dayu_theme.deco
class Checkable(QWidget):
    """Demo widget: a 'Categories:' label followed by a row of checkable tags."""
    def __init__(self, parent=None):
        super(Checkable, self).__init__(parent)
        label = MLabel('Categories:')
        topic_lay = QHBoxLayout()
        topic_lay.addWidget(label)
        # One checkable tag per example category.
        for i in ['Movies', 'Books', 'Music', 'Sports']:
            topic_lay.addWidget(MCheckableTag(text=i))
        topic_lay.addStretch()
        main_lay = QHBoxLayout()
        main_lay.addLayout(topic_lay)
        self.setLayout(main_lay)
# Standalone demo entry point: spin up a Qt app and show the widget.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    test = Checkable()
    test.show()
    sys.exit(app.exec_())
|
[
"muyanru345@163.com"
] |
muyanru345@163.com
|
f34dcd6dad26fc4831dfaf360693efd91e8f2dd5
|
6b6021013c7bb08fbb293e414b19d102bae8a1a8
|
/main.py
|
6ef986d1b03c79ff4092f7c1cbc4431f2401e31e
|
[] |
no_license
|
Ray9090/Invisible-Cloak
|
0f7799571ccd989fa65c69690e72cd374871d237
|
e0e4c6bcdf3e275d40ec9718d9a51a1585024953
|
refs/heads/main
| 2023-06-16T16:36:01.854224
| 2021-07-18T17:02:28
| 2021-07-18T17:02:28
| 387,226,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
# Invisible-cloak effect: replace red pixels in the live feed with the
# previously captured background, making red cloth appear transparent.
#Importing libraries
import numpy as np
import cv2
import time
# Recording and caching the background for each frame.
cap = cv2.VideoCapture(0)  # Read from the web cam
time.sleep(3)  # for the system to sleep for 3 second before the webcam starts
# Warm up the camera for 30 frames; the last (mirrored) frame is kept as the
# static background.
# NOTE(review): `back` is undefined if the camera yields no frames, and the
# capture is re-opened immediately below even though `cap` is still open --
# confirm both are intentional.
for i in range(30):
    retval, back = cap.read()
    back = np.flip(back, axis=1)
cap = cv2.VideoCapture(0)
## detecting the red portion In each frame
while (cap.isOpened()):  ##Read every Frame from the webcam, until the camera is open
    ret, img = cap.read()
    if ret:
        img = np.flip(img, axis=1)  # mirror to match the cached background
        ##convert the color space from BGR to HSV
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        ##Generat masks to detect red color
        # Red wraps around the HSV hue axis, so two ranges are combined.
        lower_red = np.array([0, 120, 70])
        upper_red = np.array([10, 255, 255])
        mask1 = cv2.inRange(hsv, lower_red, upper_red)
        lower_red = np.array([170, 120, 70])
        upper_red = np.array([180, 255, 255])
        mask2 = cv2.inRange(hsv, lower_red, upper_red)
        mask1 += mask2
        ###Replacing the red portion with a mask image in each frame
        # Morphological opening removes small speckles from the mask.
        mask = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img[np.where(mask == 255)] = back[np.where(mask == 255)]
    # Final output
    cv2.imshow("Harry Potter's invisible secret revealed", img)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
Ray9090.noreply@github.com
|
fc47576fb7e929ac7a3d75b13445277f353da61c
|
e5ad4dee7ed547435355c235dc05180ab9507405
|
/Python/1512-number-of-good-pairs.py
|
103c6030eb331ba9798584f36f95dc2305d69d4c
|
[] |
no_license
|
ulises-c/LeetCodeSolutions
|
a70c2178c9ef626593e343b6b31a6951c7a73ec9
|
effab97aaa78b7c4d62071f0945eac777acf3f1d
|
refs/heads/main
| 2023-05-19T11:56:19.803749
| 2021-05-26T23:12:24
| 2021-05-26T23:12:24
| 358,153,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
# https://leetcode.com/problems/number-of-good-pairs/
class Solution:
    def numIdenticalPairs(self, nums: List[int]) -> int:
        """Return the number of good pairs (i, j) with nums[i] == nums[j] and i < j.

        Counts occurrences of each value once, then sums C(count, 2) per
        value -- O(n) instead of the original O(n^2) double scan over every
        (i, j) combination.
        """
        good_pairs = 0
        for count in Counter(nums).values():
            # Each value contributes count-choose-2 ordered pairs with i < j.
            good_pairs += count * (count - 1) // 2
        return good_pairs
|
[
"ez1ollie@yahoo.com"
] |
ez1ollie@yahoo.com
|
d946aabf1c1931c6d83f1723fac5e4b9fe703fcc
|
20b9a508844f79eb4670fe33f87db2897f5bae7f
|
/PythonClient/cv_mode.py
|
f71455c4fdf2d638a601f379ab38dd4ba96daa46
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jelaredulla/thesis
|
2f5714c013552c1775ad3b36274c0929fb0bc18c
|
dc348652cc0bd0a35e5d7506144d641510c2483b
|
refs/heads/master
| 2021-05-15T04:12:38.795907
| 2017-11-21T01:33:47
| 2017-11-21T01:33:47
| 105,950,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
# In settings.json first activate computer vision mode:
# https://github.com/Microsoft/AirSim/blob/master/docs/image_apis.md#computer-vision-mode
# Demo: move the simulated camera to a few poses and dump one image of each
# requested type per pose (depth, segmentation, scene, disparity, normals).
from AirSimClient import *
import pprint
pp = pprint.PrettyPrinter(indent=4)
client = CarClient()
client.confirmConnection()
for x in range(3): # do few times
    z = x * -20 - 5 # some random number
    # Teleport the vehicle/camera; True = ignore collision.
    client.simSetPose(Pose(Vector3r(z, z, z), AirSimClientBase.toQuaternion(x / 3.0, 0, x / 3.0)), True)
    responses = client.simGetImages([
        ImageRequest(0, AirSimImageType.DepthVis),
        ImageRequest(1, AirSimImageType.DepthPerspective, True),
        ImageRequest(0, AirSimImageType.Segmentation),
        ImageRequest(0, AirSimImageType.Scene),
        ImageRequest(0, AirSimImageType.DisparityNormalized),
        ImageRequest(0, AirSimImageType.SurfaceNormals)])
    for i, response in enumerate(responses):
        # Float images (depth) are written as PFM; byte images as PNG.
        if response.pixels_as_float:
            print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
            AirSimClientBase.write_pfm(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.pfm'), AirSimClientBase.getPfmArray(response))
        else:
            print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
            AirSimClientBase.write_file(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.png'), response.image_data_uint8)
    pose = client.simGetPose()
    pp.pprint(pose)
    time.sleep(3)
|
[
"s4284683@eait.uq.edu.au"
] |
s4284683@eait.uq.edu.au
|
9ec8dfb6896bd3defa4e777b809942f49b4b449d
|
3f597d5c1363f1f6f77764bcdb864167c3e51795
|
/qwapp/defaults.py
|
ac08eacd7647f3441469ca0c64e9eeeb3df07f45
|
[] |
no_license
|
mbr/qwapp
|
558c58b47398abcaca41b1814c7b5e8363b8eaf0
|
44fa2ecefcb61d2fb5c2280d30af2b1140f3f03b
|
refs/heads/master
| 2023-06-06T20:48:59.776375
| 2013-06-06T01:46:49
| 2013-06-06T01:46:49
| 1,467,990
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Default configuration for the qwapp wiki application.
WIKI_NAME = 'qwapp Wiki'
REPOSITORY_PATH = './wiki'
# Development defaults -- disable DEBUG and replace SECRET_KEY in production.
DEBUG = True
SECRET_KEY = 'development key'
# use password.py to generate
PASSWORD_HASH = '06ab2f79d3fb9d86c75f0bb981c95f5d68497b311bdb1ed32918717547b4a6c31017a7a04908c6d39a93c8f748e312d5bfd255cbfbf15530cf374c1861dc73a7' # "devpass"
CACHE_TYPE = 'simple' # set this to 'null' to disable or use memcached, or others
#CACHE_MEMCACHED_SERVERS = ['localhost:11211']
CACHE_THRESHOLD = 200
CACHE_DEFAULT_TIMEOUT = 50 # 50 seconds default cache timeout
# Prefix cache keys with part of the password hash to invalidate on change.
CACHE_KEY_PREFIX = PASSWORD_HASH[:10]
# no plugins loaded by default
# NOTE(review): the comment above is stale -- two plugins ARE listed below.
PLUGINS = ['headershift','wikilinks']
PLUGIN_HEADERSHIFT_LEVEL = 1
|
[
"git@marcbrinkmann.de"
] |
git@marcbrinkmann.de
|
8aa3a5eaa85358372544b8993ca3d799b9320850
|
57f6566f646bac9932ad290158dee8ddd5ae845a
|
/146A.py
|
f2d8e898ede60843755f44db403c51fa65154b3b
|
[] |
no_license
|
Praveen230102/Codeforces-Solution-in-Python-and-C-
|
cf6cf921d6fd0095b833c0b6cb7b5303df3bbf0d
|
6210f775ab3d7d03adb7cea631223b02f7566871
|
refs/heads/master
| 2022-07-29T19:24:30.348682
| 2020-05-25T03:04:03
| 2020-05-25T03:04:03
| 266,518,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# Codeforces 146A -- "Lucky Ticket".
# A ticket of n digits is lucky when every digit is 4 or 7 AND the digit sum
# of the first half equals the digit sum of the second half.
# Replaces the original 8-clause `"0" in s or "1" in s or ...` membership
# chain with a single any() over the non-lucky digits; verdicts are identical
# for the guaranteed all-digit input.
n = int(input())
s = list(input())
if any(ch not in ("4", "7") for ch in s):
    # A non-lucky digit appears anywhere -> not a lucky ticket.
    print("NO")
else:
    half = n // 2
    first_sum_ = sum(int(ch) for ch in s[:half])
    last_sum_ = sum(int(ch) for ch in s[half:])
    print("YES" if first_sum_ == last_sum_ else "NO")
|
[
"noreply@github.com"
] |
Praveen230102.noreply@github.com
|
7e35990b45388256e3e32d8beda84a60ab4fa84c
|
d563246783b542dbe00080a030c1d1a136e71b2c
|
/setup.py
|
31e4c03fdde1a814108d447dd3c21e09cb928868
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
CloneAssassin/cve_bot
|
03590397d4747e896525d3aaaf7c6abb9761b41e
|
92c53cbd37b14a44aecd6b8956bd0b8fe55f6c5b
|
refs/heads/main
| 2023-08-03T17:13:10.720314
| 2021-09-15T07:14:14
| 2021-09-15T07:14:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
import os
from importlib.machinery import SourceFileLoader
from pkg_resources import parse_requirements
from setuptools import find_packages, setup
# Load the package's own __init__.py to read its metadata attributes
# (__version__, __author__, ...) without requiring it to be installed.
module_name = "cve_bot"
module = SourceFileLoader(
    module_name, os.path.join(module_name, "__init__.py")
).load_module()
def load_requirements(fname: str) -> list:
    """Read *fname* and return requirement strings ("name[extras]specifier")."""
    with open(fname, "r") as fp:
        parsed = list(parse_requirements(fp.read()))
    return [
        "{0}{1}{2}".format(
            req.name,
            # Extras render as "[a,b]" or nothing at all.
            "[{0}]".format(",".join(req.extras)) if req.extras else "",
            req.specifier,
        )
        for req in parsed
    ]
# The PyPI long description comes straight from the README.
with open("README.md") as readme:
    long_description = readme.read()
# Package metadata is sourced from the loaded module above so it lives in
# exactly one place (cve_bot/__init__.py).
setup(
    name=module.__name__,
    version=module.__version__,
    author=module.__author__,
    author_email=module.__email__,
    license=module.__license__,
    description=module.__doc__,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/weastur/cve_bot",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Security",
        "Topic :: System",
        "Topic :: System :: Operating System",
        "Topic :: System :: Systems Administration",
    ],
    packages=find_packages(exclude=["tests"]),
    install_requires=load_requirements("requirements.txt"),
    extras_require={"dev": load_requirements("requirements.dev.txt")},
    entry_points={
        "console_scripts": [
            "{0} = {0}.main:main".format(module_name),
        ]
    },
    package_data={
        "cve_bot": ["alembic.ini", "migrations/versions/*.py"],
    },
)
|
[
"me@weastur.com"
] |
me@weastur.com
|
ffe677165dccb88d9f37a995aa2f5a489fc75489
|
239c49bfe13774340515cfd336e88b0f3cc82a5a
|
/tensorflow_synthetic_features.py
|
ba67ae278004df42865a4eb8451f7a9a0dc2058b
|
[] |
no_license
|
mhmdsamir92/MLCrashCourse
|
2b8c3186b7590f0c552dab6659c2892e24d60873
|
bf788a7f928a4c88bb8889574abca5a64457e1b2
|
refs/heads/master
| 2020-03-20T16:10:52.211731
| 2018-06-27T07:47:45
| 2018-06-27T07:47:45
| 137,532,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,050
|
py
|
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
# Quiet TF logging and trim pandas display output for notebook-style use.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
# Load the training data and shuffle the examples.
california_housing_dataframe = pd.read_csv(
    "california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
# Rescale the label to units of thousands of dollars.
california_housing_dataframe["median_house_value"] /= 1000.0
# Define the input feature: total_rooms.
my_feature = california_housing_dataframe[["total_rooms"]]
# Configure a numeric feature column for total_rooms.
feature_columns = [tf.feature_column.numeric_column("total_rooms")]
# Define the label.
targets = california_housing_dataframe["median_house_value"]
# Use gradient descent as the optimizer for training the model.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0000001)
# Clip gradient norm to 5.0 to guard against exploding updates.
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
# Configure the linear regression model with our feature columns and optimizer.
# Set a learning rate of 0.0000001 for Gradient Descent.
linear_regressor = tf.estimator.LinearRegressor(feature_columns=feature_columns, optimizer=my_optimizer)
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Build an input pipeline yielding (features, labels) batches.

    (The original summary claimed this trains a model; it only feeds one.)
    Args:
      features: pandas DataFrame of features
      targets: pandas DataFrame of targets
      batch_size: Size of batches to be passed to the model
      shuffle: True or False. Whether to shuffle the data.
      num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
    Returns:
      Tuple of (features, labels) for next data batch
    """
    # Convert pandas data into a dict of np arrays.
    features = {key: np.array(value) for key, value in dict(features).items()}
    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features, targets))  # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)
    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)
    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
"""Trains a linear regression model of one feature.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
input_feature: A `string` specifying a column from `california_housing_dataframe`
to use as input feature.
"""
periods = 10
steps_per_period = steps / periods
my_feature = input_feature
my_feature_data = california_housing_dataframe[[my_feature]]
my_label = "median_house_value"
targets = california_housing_dataframe[my_label]
# Create feature columns.
feature_columns = [tf.feature_column.numeric_column(my_feature)]
# Create input functions.
training_input_fn = lambda: my_input_fn(my_feature_data, targets, batch_size=batch_size)
prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)
# Create a linear regressor object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(feature_columns=feature_columns, optimizer=my_optimizer)
# Set up to plot the state of our model's line each period.
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
plt.title("Learned Line by Period")
plt.ylabel(my_label)
plt.xlabel(my_feature)
sample = california_housing_dataframe.sample(n=300)
plt.scatter(sample[my_feature], sample[my_label])
colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print "Training model..."
print "RMSE (on training data):"
root_mean_squared_errors = []
for period in range(0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)
# Take a break and compute predictions.
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
predictions = np.array([item['predictions'][0] for item in predictions])
# Compute loss.
root_mean_squared_error = math.sqrt(metrics.mean_squared_error(predictions, targets))
# Occasionally print the current loss.
print " period %02d : %0.2f" % (period, root_mean_squared_error)
# Add the loss metrics from this period to our list.
root_mean_squared_errors.append(root_mean_squared_error)
# Finally, track the weights and biases over time.
# Apply some math to ensure that the data and line are plotted neatly.
y_extents = np.array([0, sample[my_label].max()])
weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]
bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
x_extents = (y_extents - bias) / weight
x_extents = np.maximum(np.minimum(x_extents, sample[my_feature].max()), sample[my_feature].min())
y_extents = weight * x_extents + bias
plt.plot(x_extents, y_extents, color=colors[period])
print "Model training finished."
# Output a graph of loss metrics over periods.
plt.subplot(1, 2, 2)
plt.ylabel('RMSE')
plt.xlabel('Periods')
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(root_mean_squared_errors)
calibration_data = pd.DataFrame()
calibration_data["predictions"] = pd.Series(predictions)
calibration_data["targets"] = pd.Series(targets)
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
plt.scatter(calibration_data["predictions"], calibration_data["targets"])
plt.subplot(1, 2, 2)
_ = california_housing_dataframe["rooms_per_person"].hist()
plt.show()
# Output a table with calibration data.
display.display(calibration_data.describe())
print calibration_data.describe()
print "Final RMSE (on training data): %0.2f" % root_mean_squared_error
# Derive a synthetic feature: average rooms per person per city block.
california_housing_dataframe["rooms_per_person"] = (
california_housing_dataframe["total_rooms"] / california_housing_dataframe["population"])
# Train on the synthetic feature with a much larger learning rate than the
# raw total_rooms run above (rooms_per_person has a far smaller value range).
train_model(
learning_rate=0.05,
steps=1000,
batch_size=40,
input_feature="rooms_per_person"
)
|
[
"mhmdsamir92@gmail.com"
] |
mhmdsamir92@gmail.com
|
d17bf5c34fd35badf9407e18ada3ecc5a2daf043
|
0a82ddc06b00c95c87382dab920f025c93f285b7
|
/Scraps/Functions/__init__.py
|
280a2f24ad97cbca8aff359b8b9a4b8f023056df
|
[] |
no_license
|
bmauss/Science-Standards-NLP-Analysis
|
ffea37bff81f531bf5376db0df8f6ad685b91da5
|
84e2d4a29580525507b1f7f75f5b5871960f5585
|
refs/heads/main
| 2023-02-26T04:29:30.574512
| 2021-02-03T23:17:40
| 2021-02-03T23:17:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
from Functions.final_functions import *
|
[
"kristendavis27@gmail.com"
] |
kristendavis27@gmail.com
|
d22fcf08ad900c44a6935d22cbe6759656c6a23e
|
cf525299a2cca27df8750d64bf87e72cd367e63e
|
/telas/Saguao.py
|
46e7f9848a0659ad0909abae02c1bc92d4cf83fe
|
[] |
no_license
|
FBMLJ/es2-trabalho-war
|
09ab1fab39145309af94d2bf6d900979cab919ac
|
e5a1017a963297f5674376b90e8a9e2f959028e7
|
refs/heads/master
| 2023-08-17T05:45:15.982998
| 2021-09-18T01:11:14
| 2021-09-18T01:11:14
| 390,141,619
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,328
|
py
|
from componentes.campoTexto import *
from componentes.botao import *
from componentes.RetanguloTexto import *
from firebase_admin import firestore
from constant import estados
from servico.firestore import db
from datetime import datetime
from PPlay.gameimage import *
from PPlay.sprite import *
class Saguao:
    """Multiplayer game lobby screen: participant list, ready state and chat.

    Participants and chat messages are kept in sync through Firestore
    snapshot listeners on the host's (``id_anfitriao``) lobby document.
    """

    def __init__(self, janela, usuario, id_anfitriao):
        self.janela = janela
        self.fundo = GameImage("assets/imagem/saguao/saguao.png")
        self.fundo.set_position(janela.width/2 - self.fundo.width/2, self.janela.height/2 - self.fundo.height/2)
        self.fundo_real = GameImage("assets/imagem/tela_inicial/fundo.png")
        self.id_anfitriao = id_anfitriao
        self.usuario = usuario
        # Button codes used to dispatch click handling in loop().
        self.codigo_botoes = {
            "sair": 1,
            "pronto": 2,
            "enviar": 3,
            "iniciar": 4
        }
        # The host starts "ready" by definition.
        self.pronto = id_anfitriao == usuario.uid
        self.botoes = []
        self.barra_superior = GameImage("assets/imagem/saguao/barra_saguao.png")
        self.barra_superior.set_position(self.fundo.x + 12, self.fundo.y + 10)
        posicao_x = self.fundo.x + 95
        posicao_y = self.fundo.y + self.fundo.height - 165
        self.tanquezin_verde = GameImage("assets/imagem/saguao/tank.png")
        self.tanquezin_verde.set_position(posicao_x, posicao_y)
        self.tanquezin_vermelho = GameImage("assets/imagem/saguao/tank_ready.png")
        self.tanquezin_vermelho.set_position(posicao_x, posicao_y)
        # Close ("X") button — same sprite for normal and highlighted states.
        sprite_x = Sprite("assets/imagem/saguao/x_saguao.png")
        botao_x = Botao(sprite_x, sprite_x, self.codigo_botoes["sair"])
        botao_x.setposition(self.fundo.x + self.fundo.width - botao_x.width - 16, self.fundo.y + 15)
        self.botoes.append(botao_x)
        # Chat "send" button.
        sprite_enviar_normal = Sprite("assets/imagem/saguao/botao_enviar.png")
        sprite_enviar_destacado = Sprite("assets/imagem/saguao/botao_enviar_select.png")
        botao_enviar = Botao(sprite_enviar_normal, sprite_enviar_destacado, self.codigo_botoes["enviar"])
        botao_enviar.setposition(
            self.fundo.x + self.fundo.width - botao_enviar.width - 40,
            self.fundo.y + self.fundo.height - botao_enviar.height - 40
        )
        self.botoes.append(botao_enviar)
        # The host gets a "start game" button; everyone else gets "ready".
        if self.usuario.uid == id_anfitriao:
            caminho_pronto = "assets/imagem/saguao/botao_iniciar.png"
            caminho_pronto_destacado = "assets/imagem/saguao/botao_iniciar_select.png"
            codigo_botao = self.codigo_botoes["iniciar"]
        else:
            caminho_pronto = "assets/imagem/saguao/botao_pronto.png"
            caminho_pronto_destacado = "assets/imagem/saguao/botao_pronto_select.png"
            codigo_botao = self.codigo_botoes["pronto"]
        sprite_pronto_normal = Sprite(caminho_pronto)
        sprite_pronto_destacado = Sprite(caminho_pronto_destacado)
        botao_pronto = Botao(sprite_pronto_normal, sprite_pronto_destacado, codigo_botao)
        botao_pronto.setposition(
            self.fundo.x + 80,
            self.fundo.y + self.fundo.height - botao_pronto.height - 40
        )
        self.botoes.append(botao_pronto)
        # Chat text-input field.
        self.campo_chat = CampoTexto(
            janela,
            GameImage("assets/imagem/saguao/digite_mensagem.png"),
            self.fundo.x + self.fundo.width - 640,
            self.fundo.y + self.fundo.height - botao_pronto.height - 36,
            450,
            50,
            25
        )
        self.legenda_participantes = GameImage("assets/imagem/saguao/participantes.png")
        self.legenda_participantes.set_position(
            self.fundo.x + 50,
            self.barra_superior.y + self.barra_superior.height + 10
        )
        self.legenda_chat = GameImage("assets/imagem/saguao/chat.png")
        self.legenda_chat.set_position(
            self.fundo.x + self.fundo.width - self.legenda_chat.width - 300,
            self.barra_superior.y + self.barra_superior.height + 10
        )
        # Raw dicts plus their pre-built on-screen widgets, rebuilt on each
        # Firestore snapshot by the escuta* callbacks.
        self.participantes = []
        self.participantes_retangulos = []
        self.mensagens = []
        self.mensagens_retangulos = []

    def loop(self):
        """Main lobby loop; blocks until the user leaves, then returns a screen state."""
        self.carregaParticipantes()
        self.lerMensagens()
        self.janela.clear()
        self.janela.set_background_color([0, 0, 0])
        self.janela.input_pygame = True
        botao_clicado = -1
        clicou_botao = False
        clicou = False
        mouse = Mouse()
        while True:
            for botao in self.botoes:
                clicou = botao.update()
                if clicou:
                    clicou_botao = True
                    botao_clicado = botao.code
            # Act on the click only after the mouse button is released.
            if clicou_botao and not mouse.is_button_pressed(1):
                clicou_botao = False
                if botao_clicado == self.codigo_botoes["sair"]:
                    self.janela.input_pygame = False
                    return estados["buscar_sala"]
                if botao_clicado == self.codigo_botoes["pronto"]:
                    self.trocaEstado()
                if botao_clicado == self.codigo_botoes["enviar"]:
                    self.enviarMensagem(self.campo_chat.texto)
                    self.campo_chat.texto = ""
            self.trataEvento()
            self.render()
            self.janela.update()

    def trataEvento(self):
        """Forward pygame events to the chat field; exit on window close."""
        for evento in pygame.event.get():
            self.campo_chat.evento(evento)
            if evento.type == pygame.QUIT:
                exit()

    def render(self):
        """Draw the full lobby frame (background, widgets, lists, buttons)."""
        self.fundo_real.draw()
        self.fundo.draw()
        self.barra_superior.draw()
        self.campo_chat.draw()
        self.legenda_participantes.draw()
        self.legenda_chat.draw()
        # Tank icon reflects the local player's ready state.
        if self.pronto:
            self.tanquezin_verde.draw()
        else:
            self.tanquezin_vermelho.draw()
        for mensagem in self.mensagens_retangulos:
            mensagem.render()
        for participante in self.participantes_retangulos:
            participante.render()
        for botao in self.botoes:
            botao.render()

    def carregaParticipantes(self):
        """Subscribe to the lobby's participant list in Firestore."""
        db.collection('saguoes')\
            .document(self.id_anfitriao)\
            .collection('participantes').on_snapshot(self.escutaParticipantes)

    def escutaParticipantes(self, col_snapshot, changes, read_time):
        """Snapshot callback: rebuild the participant widgets from scratch."""
        self.participantes = []
        self.participantes_retangulos = []
        i = 0
        tamanho_acumulado = 0
        for documento in col_snapshot:
            doc = documento.to_dict()
            self.participantes.append(doc)
            self.participantes_retangulos.append(
                RetanguloTexto(
                    self.janela,
                    doc["nome"],
                    0,
                    200,
                    30
                )
            )
            # Green name = ready, red name = not ready.
            if doc["pronto"]:
                self.participantes_retangulos[i].cor_atual = (50, 205, 50)
            else:
                self.participantes_retangulos[i].cor_atual = (255, 0, 0)
            self.participantes_retangulos[i].set_position(
                self.fundo.x + 45,
                self.barra_superior.y + self.barra_superior.height + 50 + tamanho_acumulado + i*10
            )
            tamanho_acumulado += 30
            i += 1

    def trocaEstado(self):
        """Toggle the local ready flag and persist it to Firestore."""
        if self.pronto:
            self.pronto = False
        else:
            self.pronto = True
        novos_dados = {
            "nome": self.usuario.display_name,
            "id_usuario": self.usuario.uid,
            "pronto": self.pronto
        }
        db.collection("saguoes").document(self.id_anfitriao).collection("participantes").document(self.usuario.uid).set(novos_dados)

    # Send a message to the chat_saguao subcollection of this lobby.
    def enviarMensagem(self, msg):
        user_name = self.usuario.display_name
        hora_envio = datetime.now()
        db.collection("saguoes").document(self.id_anfitriao).collection("chat_saguao").add({"msg": msg, "remetente": user_name, "hora_envio": hora_envio})

    # Subscribe to the lobby chat messages in the database.
    def lerMensagens(self):
        db.collection('saguoes')\
            .document(self.id_anfitriao)\
            .collection('chat_saguao')\
            .order_by("hora_envio", direction=firestore.Query.DESCENDING)\
            .limit(8)\
            .on_snapshot(self.escutaMensagens)

    def escutaMensagens(self, col_snapshot, changes, read_time):
        """Snapshot callback: rebuild the chat message widgets (newest first)."""
        self.mensagens = []
        self.mensagens_retangulos = []
        i = 0
        tamanho_acumulado = 0
        for documento in col_snapshot:
            doc = documento.to_dict()
            self.mensagens.append(doc)
            self.mensagens_retangulos.append(
                RetanguloTexto(
                    self.janela,
                    doc["remetente"] + ": " + doc["msg"],
                    69,
                    580,
                    30,
                    moldura=False
                )
            )
            self.mensagens_retangulos[i].set_position(
                self.fundo.x + 295,
                self.barra_superior.y + self.barra_superior.height + 50 + tamanho_acumulado + i*10
            )
            tamanho_acumulado += 30
            i += 1
|
[
"danielbougleux@id.uff.br"
] |
danielbougleux@id.uff.br
|
cda6d1cac436f71e288b1965f757877d5981c273
|
7b6451a4ca3184f3f3f6e2fb7244c658338b533c
|
/AI/Self-Driving-Car-Python/dataprep.py
|
bbf49bd6dce582e9d6f768312bfb28501f3ab7b6
|
[] |
no_license
|
alexandrerussi/Autonomous-Car
|
9a2cc279bb181d586091a17748e3489b6c7cc0e1
|
3b6c1172829900b3afc0d392d7d756603493f377
|
refs/heads/master
| 2020-06-29T14:05:25.513080
| 2019-09-30T01:48:41
| 2019-09-30T01:48:41
| 200,557,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,222
|
py
|
import numpy as np
import cv2
import re
import os
from datetime import datetime
import collections
from random import shuffle
from util import sanitize_data_folders
def make_gamma_tables(gammas):
    """Build one 256-entry uint8 gamma-correction lookup table per gamma.

    Args:
        gammas: iterable of gamma values (floats > 0).

    Returns:
        OrderedDict mapping each gamma to its LUT (np.ndarray of shape (256,),
        dtype uint8), preserving input order.
    """
    gamma_map = collections.OrderedDict()
    for gamma in gammas:
        # Build a lookup table mapping the pixel values [0, 255] to
        # their adjusted gamma values: out = 255 * (in/255) ** (1/gamma).
        invGamma = 1.0 / gamma
        table = np.array([((i / 255.0) ** invGamma) * 255
                          for i in np.arange(0, 256)]).astype("uint8")
        gamma_map[gamma] = table
    return gamma_map
def adjust_gamma(image, table):
    """Apply gamma correction to *image* using a precomputed 256-entry LUT
    (see make_gamma_tables)."""
    return cv2.LUT(image, table)
def process_session(session_path, gamma_map, rgb=True):
    """Turn one recorded driving session into (predictors, targets) arrays.

    Reads the session video plus its frame timestamps and cleaned command
    log, aligns each frame with the command active at that moment, and emits
    one gamma-augmented frame per entry of *gamma_map*.

    Args:
        session_path: directory holding output.mov, video_timestamps.txt and
            clean_session.txt.
        gamma_map: OrderedDict of gamma -> LUT (from make_gamma_tables).
        rgb: if True keep BGR frames; otherwise convert to grayscale and
            reshape to (240, 320, 1).

    Returns:
        (predictors, targets): lists of frames and one-hot [left, up, right]
        labels of equal length.
    """
    # Overlay target images for visual troubleshooting of processed video.
    up_arrow, left_arrow, right_arrow = read_arrow_key_images()
    cap = cv2.VideoCapture(session_path + "/output.mov")

    video_timestamps = []
    with open(session_path + '/video_timestamps.txt') as video_timestamps_reader:
        for line in video_timestamps_reader:
            line = line.replace("\n", "")
            ts = datetime.strptime(line, '%Y-%m-%d %H:%M:%S.%f')
            video_timestamps.append(ts)

    commands = []
    with open(session_path + '/clean_session.txt') as clean_session_reader:
        for line in clean_session_reader:
            line = line.replace("\n", "")
            # Ignore / skip all invalid commands (chords and 'down').
            if "down" in line:
                continue
            if "left" in line and "right" in line:
                continue
            if "left" in line and "up" in line:
                continue
            if "right" in line and "up" in line:
                continue
            if "left" in line and "up" in line and "right" in line:
                continue
            match = re.match(r"^.*\['(.*)'\].*$", line)
            if match is not None:
                command = match.group(1)
            else:
                command = 'no command'
            raw_ts = line[line.index(" ") + 1:]
            ts = datetime.strptime(raw_ts, '%Y-%m-%d %H:%M:%S.%f')
            commands.append([command, ts])

    # Time after which no other data is relevant because the session ended.
    end_time = commands[len(commands) - 1][1]

    # Cleanup to track only command transitions.
    compact_commands = []
    prev_command = None
    for item in commands:
        command, ts = item[0], item[1]
        if command != prev_command and command != 'no command' and command != 'down':
            compact_commands.append([command, ts])
            prev_command = command
    commands = compact_commands

    # Time before which no other data is relevant because the session just started.
    start_time = commands[0][1]
    current_command = commands[0][0]
    command_counter = 1
    # Fixes bug that arises when only one command type is used the whole session.
    if command_counter < len(commands):
        future_command = commands[command_counter][0]
        future_command_ts = commands[command_counter][1]
    else:
        future_command = "END"
        future_command_ts = end_time

    predictors = []
    targets = []
    frame_counter = -1
    while cap.isOpened():
        frame_counter = frame_counter + 1
        ret, frame = cap.read()
        # Don't remove this waitKey or the video feed will die.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if frame_counter == len(video_timestamps):
            break  # Fixes bug where video timestamps not as long as video frames
        video_timestamp = video_timestamps[frame_counter]
        if video_timestamp > start_time:
            if video_timestamp < end_time:
                # Advance to the next command once its timestamp has passed.
                if video_timestamp > future_command_ts:
                    current_command = future_command
                    command_counter = command_counter + 1
                    if command_counter < len(commands):
                        future_command = commands[command_counter][0]
                        future_command_ts = commands[command_counter][1]
                    else:
                        future_command = "END"
                        future_command_ts = end_time
                target = [0, 0, 0]  # in order: left, up, right
                key_image = get_key_image(current_command)
                if current_command == 'left':
                    target[0] = 1
                elif current_command == 'up':
                    target[1] = 1
                elif current_command == 'right':
                    target[2] = 1
                if rgb == True:
                    for gamma, gamma_table in gamma_map.items():
                        gamma_image = adjust_gamma(frame, gamma_table)
                        targets.append(target)
                        predictors.append(gamma_image)
                else:
                    for gamma, gamma_table in gamma_map.items():
                        bw_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                        gamma_image = adjust_gamma(bw_frame, gamma_table)
                        gamma_image = np.reshape(gamma_image, [240, 320, 1])
                        targets.append(target)
                        predictors.append(gamma_image)
                show_image_with_command(frame, key_image)
    cap.release()
    cv2.destroyAllWindows()
    return predictors, targets
def data_prep(data_path, rgb=True):
    """Process every session folder under *data_path* into one .npz dataset.

    Splits session folders 80/20 into train/validation, processes each with
    process_session, and writes data_rgb_<N>.npz or data_bw_<N>.npz (N is the
    highest-numbered folder) containing the four stacked arrays.
    """
    gamma_map = make_gamma_tables([1])
    data_folders = os.listdir(data_path)
    data_folders = sanitize_data_folders(data_folders)
    # Shuffle so the train/validation split is not biased by folder order.
    shuffle(data_folders)
    train_folder_size = int(len(data_folders) * 0.8)

    train_predictors = []
    train_targets = []
    for folder in data_folders[:train_folder_size]:
        print("Started session: " + str(folder))
        predictors, targets = process_session(data_path + '/' + folder, gamma_map, rgb)
        train_predictors.extend(predictors)
        train_targets.extend(targets)
        print("Completed session: " + str(folder))
    train_predictors_np = np.array(train_predictors)
    train_targets_np = np.array(train_targets)

    validation_predictors = []
    validation_targets = []
    for folder in data_folders[train_folder_size:]:
        print("Started session: " + str(folder))
        predictors, targets = process_session(data_path + '/' + folder, gamma_map, rgb)
        validation_predictors.extend(predictors)
        validation_targets.extend(targets)
        print("Completed session: " + str(folder))
    validation_predictors_np = np.array(validation_predictors)
    validation_targets_np = np.array(validation_targets)

    max_folder = max([int(folder) for folder in data_folders])
    if rgb:
        new_file_name = '/data_rgb_' + str(max_folder)
    else:
        new_file_name = '/data_bw_' + str(max_folder)
    np.savez(data_path + new_file_name, train_predictors=train_predictors_np,
             train_targets=train_targets_np, validation_predictors=validation_predictors_np,
             validation_targets=validation_targets_np)
# Built so that a 3D convolution doesn't span multiple sessions (since there
# is no continuity between sessions): one .npz per session directory.
def video_to_rgb_npz(session_path, predictors, targets):
    """Save one session's arrays to <session_path>/predictors_and_targets.npz."""
    np.savez(session_path + '/predictors_and_targets', predictors=predictors, targets=targets)
def show_image_with_command(frame, key_image):
    """Display *frame* with the current command's arrow-key image overlaid
    (top-left corner), masking out the key image's white background."""
    arrow_key_scale = 0.125
    resized_image = cv2.resize(key_image, None, fx=arrow_key_scale, fy=arrow_key_scale, interpolation=cv2.INTER_CUBIC)
    # Thresholding requires grayscale only, so that threshold only needs to
    # happen in one dimension.
    img2gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
    # Create mask where anything brighter than 240 is made super white (255) / selected.
    ret, mask = cv2.threshold(img2gray, 240, 255, cv2.THRESH_BINARY)
    # Standard OpenCV overlay-with-mask recipe: black out the overlay region
    # in the frame, black out the background in the overlay, then add.
    mask_inv = cv2.bitwise_not(mask)
    rows, cols, channels = resized_image.shape
    region_of_interest = frame[0:rows, 0:cols]
    img1_bg = cv2.bitwise_and(region_of_interest, region_of_interest, mask=mask)
    img2_fg = cv2.bitwise_and(resized_image, resized_image, mask=mask_inv)
    dst = cv2.add(img1_bg, img2_fg)
    frame[0:rows, 0:cols] = dst
    # Finally, show image with an overlay of the identified target key image.
    cv2.imshow('frame', frame)
def read_arrow_key_images():
    """Load the up/left/right arrow overlay images shipped next to this module.

    Returns:
        (up_arrow, left_arrow, right_arrow) as BGR ndarrays (or None for any
        file cv2.imread cannot find).
    """
    image_path = str(os.path.dirname(os.path.realpath(__file__))) + "/arrow_key_images"
    up_arrow = cv2.imread(image_path + '/UpArrow.tif')
    left_arrow = cv2.imread(image_path + '/LeftArrow.tif')
    # NOTE(review): filename contains a space ('Right Arrow.tif') unlike the
    # other two — verify against the actual asset names.
    right_arrow = cv2.imread(image_path + '/Right Arrow.tif')
    return up_arrow, left_arrow, right_arrow
def get_key_image(label):
    """Return the overlay image for command *label* ('left'/'up'/'right'),
    or None for any other label."""
    up_arrow, left_arrow, right_arrow = read_arrow_key_images()
    key_image = None
    if label == 'left':
        key_image = left_arrow
    elif label == 'up':
        key_image = up_arrow
    elif label == 'right':
        key_image = right_arrow
    return key_image
def get_key_image_from_array(label):
    """Return the overlay image for a one-hot label row [[left, up, right]],
    or None if no position is set."""
    up_arrow, left_arrow, right_arrow = read_arrow_key_images()
    key_image = None
    if label[0][0] == 1:
        key_image = left_arrow
    elif label[0][1] == 1:
        key_image = up_arrow
    elif label[0][2] == 1:
        key_image = right_arrow
    return key_image
|
[
"alexandrerussijunior@gmail.com"
] |
alexandrerussijunior@gmail.com
|
07464cdad35ad7b4f680e3ab926989fbcf8d020a
|
f8aa467bbaa5dbdddf2085b6121f90cb19bc13c7
|
/my_happy_pandas/plotting/_matplotlib/tools.py
|
eaf9e090f77dc0e2f0813c2bad7a2a826fda6779
|
[
"Apache-2.0"
] |
permissive
|
ggservice007/my-happy-pandas
|
ec5520383aa887b12f07a2dc5e2944d0ca9b260e
|
63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16
|
refs/heads/default
| 2023-02-22T00:24:01.164363
| 2021-01-27T11:22:24
| 2021-01-27T11:22:24
| 332,691,761
| 0
| 0
|
Apache-2.0
| 2021-01-26T07:39:47
| 2021-01-25T09:18:59
|
Python
|
UTF-8
|
Python
| false
| false
| 12,266
|
py
|
# being a bit too dynamic
from math import ceil
import warnings
import matplotlib.table
import matplotlib.ticker as ticker
import numpy as np
from my_happy_pandas.core.dtypes.common import is_list_like
from my_happy_pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from my_happy_pandas.plotting._matplotlib import compat
def format_date_labels(ax, rot):
    """Right-align and rotate *ax*'s x tick labels by *rot* degrees
    (mini version of Figure.autofmt_xdate), widening the bottom margin."""
    for label in ax.get_xticklabels():
        label.set_ha("right")
        label.set_rotation(rot)
    fig = ax.get_figure()
    fig.subplots_adjust(bottom=0.2)
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
    """Draw a matplotlib table of *data* (Series or DataFrame) on *ax*.

    Row/column labels default to the data's index/columns. Raises ValueError
    for any other input type. Extra kwargs go to matplotlib.table.table.
    """
    if isinstance(data, ABCSeries):
        data = data.to_frame()
    elif isinstance(data, ABCDataFrame):
        pass
    else:
        raise ValueError("Input data must be DataFrame or Series")
    if rowLabels is None:
        rowLabels = data.index
    if colLabels is None:
        colLabels = data.columns
    cellText = data.values
    table = matplotlib.table.table(
        ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs
    )
    return table
def _get_layout(nplots, layout=None, layout_type="box"):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError("Layout must be a tuple of (rows, columns)")
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError(
f"Layout of {nrows}x{ncols} must be larger than required size {nplots}"
)
return layout
if layout_type == "single":
return (1, 1)
elif layout_type == "horizontal":
return (1, nplots)
elif layout_type == "vertical":
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(
    naxes=None,
    sharex=False,
    sharey=False,
    squeeze=True,
    subplot_kw=None,
    ax=None,
    layout=None,
    layout_type="box",
    **fig_kw,
):
    """
    Create a figure with a set of subplots already made.

    This utility wrapper makes it convenient to create common layouts of
    subplots, including the enclosing figure object, in a single call.

    Parameters
    ----------
    naxes : int
        Number of required axes. Exceeded axes are set invisible. Default is
        nrows * ncols.
    sharex : bool
        If True, the X axis will be shared amongst all subplots.
    sharey : bool
        If True, the Y axis will be shared amongst all subplots.
    squeeze : bool
        If True, extra dimensions are squeezed out from the returned axis
        object (a lone subplot comes back as a scalar Axes, Nx1/1xN as a 1-d
        array, NxM as a 2-d array). If False, always a 2-d array.
    subplot_kw : dict
        Dict with keywords passed to the add_subplot() call used to create
        each subplot.
    ax : Matplotlib axis object, optional
        Existing axis (or list-like of axes) to reuse instead of creating new
        ones.
    layout : tuple
        Number of rows and columns of the subplot grid. If not specified,
        calculated from naxes and layout_type.
    layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
        Specify how to layout the subplot grid.
    fig_kw : Other keyword arguments passed to the figure() call.

    Returns
    -------
    fig, ax : tuple
        - fig is the Matplotlib Figure object
        - ax is either a single axis object or an array of axis objects
          (shape controlled by *squeeze*).
    """
    import matplotlib.pyplot as plt

    if subplot_kw is None:
        subplot_kw = {}

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        if is_list_like(ax):
            # Caller supplied the axes explicitly; validate and return them.
            ax = _flatten(ax)
            if layout is not None:
                warnings.warn(
                    "When passing multiple axes, layout keyword is ignored", UserWarning
                )
            if sharex or sharey:
                warnings.warn(
                    "When passing multiple axes, sharex and sharey "
                    "are ignored. These settings must be specified when creating axes",
                    UserWarning,
                    stacklevel=4,
                )
            if len(ax) == naxes:
                fig = ax[0].get_figure()
                return fig, ax
            else:
                raise ValueError(
                    f"The number of passed axes must be {naxes}, the "
                    "same as the output plot"
                )
        fig = ax.get_figure()
        # if ax is passed and a number of subplots is 1, return ax as it is
        if naxes == 1:
            if squeeze:
                return fig, ax
            else:
                return fig, _flatten(ax)
        else:
            warnings.warn(
                "To output multiple subplots, the figure containing "
                "the passed axes is being cleared",
                UserWarning,
                stacklevel=4,
            )
            fig.clear()

    nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
    nplots = nrows * ncols

    # Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape at the end.
    axarr = np.empty(nplots, dtype=object)

    # Create first subplot separately, so we can share it if requested.
    ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
    if sharex:
        subplot_kw["sharex"] = ax0
    if sharey:
        subplot_kw["sharey"] = ax0
    axarr[0] = ax0

    # Note off-by-one counting because add_subplot uses the MATLAB 1-based
    # convention.
    for i in range(1, nplots):
        kwds = subplot_kw.copy()
        # Set sharex and sharey to None for blank/dummy axes, these can
        # interfere with proper axis limits on the visible axes if
        # they share axes e.g. issue #7528
        if i >= naxes:
            kwds["sharex"] = None
            kwds["sharey"] = None
        ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
        axarr[i] = ax

    if naxes != nplots:
        for ax in axarr[naxes:]:
            ax.set_visible(False)

    _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)

    if squeeze:
        # Reshape the array to have the final desired dimension (nrow,ncol),
        # though discarding unneeded dimensions that equal 1. If we only have
        # one subplot, just return it instead of a 1-element array.
        if nplots == 1:
            axes = axarr[0]
        else:
            axes = axarr.reshape(nrows, ncols).squeeze()
    else:
        # returned axis array will be always 2-d, even if nrows=ncols=1
        axes = axarr.reshape(nrows, ncols)

    return fig, axes
def _remove_labels_from_axis(axis):
    """Hide all tick labels and the axis label on a matplotlib Axis."""
    for t in axis.get_majorticklabels():
        t.set_visible(False)
    # set_visible will not be effective if
    # minor axis has NullLocator and NullFormatter (default)
    if isinstance(axis.get_minor_locator(), ticker.NullLocator):
        axis.set_minor_locator(ticker.AutoLocator())
    if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
        axis.set_minor_formatter(ticker.FormatStrFormatter(""))
    for t in axis.get_minorticklabels():
        t.set_visible(False)
    axis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
    """Strip redundant tick/axis labels from shared axes in a subplot grid:
    x labels stay only on the bottom row, y labels only on the first column."""
    if nplots > 1:
        # matplotlib >= 3.2 moved row/col info onto the SubplotSpec.
        if compat._mpl_ge_3_2_0():
            row_num = lambda x: x.get_subplotspec().rowspan.start
            col_num = lambda x: x.get_subplotspec().colspan.start
        else:
            row_num = lambda x: x.rowNum
            col_num = lambda x: x.colNum

        if nrows > 1:
            try:
                # first find out the ax layout,
                # so that we can correctly handle 'gaps'
                layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_)
                for ax in axarr:
                    layout[row_num(ax), col_num(ax)] = ax.get_visible()

                for ax in axarr:
                    # only the last row of subplots should get x labels -> all
                    # other off; layout handles the case that the subplot is
                    # the last in the column, because below is no subplot/gap.
                    if not layout[row_num(ax) + 1, col_num(ax)]:
                        continue
                    if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
                        _remove_labels_from_axis(ax.xaxis)
            except IndexError:
                # if gridspec is used, ax.rowNum and ax.colNum may different
                # from layout shape. in this case, use last_row logic
                for ax in axarr:
                    if ax.is_last_row():
                        continue
                    if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
                        _remove_labels_from_axis(ax.xaxis)

        if ncols > 1:
            for ax in axarr:
                # only the first column should get y labels -> set all other to
                # off; as we only have labels in the first column and we always
                # have a subplot there, we can skip the layout test
                if ax.is_first_col():
                    continue
                if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
                    _remove_labels_from_axis(ax.yaxis)
def _flatten(axes):
    """Return *axes* as a flat 1-d numpy object array (scalars are wrapped)."""
    if not is_list_like(axes):
        return np.array([axes])
    elif isinstance(axes, (np.ndarray, ABCIndexClass)):
        return axes.ravel()
    return np.array(axes)
def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
    """Apply tick-label font sizes/rotations to every axis in *axes*;
    parameters left as None are untouched. Returns *axes* unchanged."""
    import matplotlib.pyplot as plt

    for ax in _flatten(axes):
        if xlabelsize is not None:
            plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
        if xrot is not None:
            plt.setp(ax.get_xticklabels(), rotation=xrot)
        if ylabelsize is not None:
            plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
        if yrot is not None:
            plt.setp(ax.get_yticklabels(), rotation=yrot)
    return axes
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, "right_ax"):
lines += ax.right_ax.get_lines()
if hasattr(ax, "left_ax"):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(np.nanmin(x), left)
right = max(np.nanmax(x), right)
return left, right
|
[
"ggservice007@126.com"
] |
ggservice007@126.com
|
84d759bb04610b3c0237f3c151ca8917b2c27f4b
|
456433ac78b70cb8ae076ae166a85e349f181d7f
|
/systems/KURSSKLAD/KURSTERM/SELECTDC/templates/taskMWares.py
|
f515a3b448eecf874d1c05dd1dd6fe8fc60e5da9
|
[] |
no_license
|
shybkoi/WMS-Demo
|
854c1679b121c68323445b60f3992959f922be8d
|
2525559c4f56654acfbc21b41b3f5e40387b89e0
|
refs/heads/master
| 2021-01-23T01:51:20.074825
| 2017-03-23T11:51:18
| 2017-03-23T11:51:18
| 85,937,726
| 0
| 0
| null | null | null | null |
WINDOWS-1251
|
Python
| false
| false
| 19,848
|
py
|
#!/usr/bin/env python
# -*- coding: cp1251 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from systems.KURSSKLAD.KURSTERM.templates.main import main
from systems.KURSSKLAD.cheetahutils import viewQuantity
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc8'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 8)
__CHEETAH_genTime__ = 1482336170.5510001
__CHEETAH_genTimestamp__ = 'Wed Dec 21 18:02:50 2016'
__CHEETAH_src__ = 'systems\\KURSSKLAD\\KURSTERM\\SELECTDC\\templates\\taskMWares.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Dec 21 09:10:13 2016'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class taskMWares(main):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
main.__init__(self, *args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def mainData(self, **KWS):
## CHEETAH: generated from #def mainData at line 5, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(''' \xb9 <b>''')
_v = VFFSL(SL,"docnum",True) # '$docnum' on line 6, col 10
if _v is not None: write(_filter(_v, rawExpr='$docnum')) # from line 6, col 10.
write('''</b> \xee\xf2 <b>''')
_orig_filter_22397595 = _filter
filterName = 'DateFilter'
if self._CHEETAH__filters.has_key("DateFilter"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"docdate",True) # '$docdate' on line 6, col 46
if _v is not None: write(_filter(_v, rawExpr='$docdate')) # from line 6, col 46.
_filter = _orig_filter_22397595
write(''' (<u>''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 6, col 71
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 6, col 71.
write('''</u>)</b>
''')
if VFFSL(SL,"varExists",False)('$TONAME') and VFFSL(SL,"TONAME",True): # generated from line 7, col 5
write(''' <br>''')
_v = VFFSL(SL,"TONAME",True) # '$TONAME' on line 8, col 13
if _v is not None: write(_filter(_v, rawExpr='$TONAME')) # from line 8, col 13.
write('''<br>
''')
write(''' <b><u>''')
_v = VFFSL(SL,"INFONAME",True) # '$INFONAME' on line 10, col 11
if _v is not None: write(_filter(_v, rawExpr='$INFONAME')) # from line 10, col 11.
write('''</u></b>
<hr>
<form action="taskMWares">
<input type="hidden" name="taskid" value="''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 13, col 51
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 13, col 51.
write('''">
<input type="hidden" name="waresid" value="''')
_v = VFFSL(SL,"WID",True) # '$WID' on line 14, col 52
if _v is not None: write(_filter(_v, rawExpr='$WID')) # from line 14, col 52.
write('''">
''')
if False:
_('ШК')
_v = VFFSL(SL,"_",False)('ШК') # "$_('\xd8\xca')" on line 15, col 9
if _v is not None: write(_filter(_v, rawExpr="$_('\xd8\xca')")) # from line 15, col 9.
write(''': <input type="text" id=":scan:text" name="barcode" title="''')
if False:
_('Товар')
_v = VFFSL(SL,"_",False)('Товар') # "$_('\xd2\xee\xe2\xe0\xf0')" on line 15, col 76
if _v is not None: write(_filter(_v, rawExpr="$_('\xd2\xee\xe2\xe0\xf0')")) # from line 15, col 76.
write('''"><br>
</form>
<hr>
<b><u>(''')
_v = VFFSL(SL,"WCODE",True) # '$WCODE' on line 19, col 12
if _v is not None: write(_filter(_v, rawExpr='$WCODE')) # from line 19, col 12.
write(''')</u></b>''')
_v = VFFSL(SL,"WNAME",True) # '$WNAME' on line 19, col 27
if _v is not None: write(_filter(_v, rawExpr='$WNAME')) # from line 19, col 27.
write('''
''')
if VFFSL(SL,"varExists",False)('$ARTICUL') and VFFSL(SL,"ARTICUL",True): # generated from line 20, col 5
write(''' \t<b>(<u>''')
_v = VFFSL(SL,"ARTICUL",True) # '$ARTICUL' on line 21, col 13
if _v is not None: write(_filter(_v, rawExpr='$ARTICUL')) # from line 21, col 13.
write('''</u>)</b>
''')
write(''' <br>
''')
if VFFSL(SL,"VWUID",True): # generated from line 24, col 5
write(''' <b>''')
_v = VFFSL(SL,"VWUCODE",True) # '$VWUCODE' on line 25, col 12
if _v is not None: write(_filter(_v, rawExpr='$VWUCODE')) # from line 25, col 12.
write(''' = ''')
_orig_filter_68095643 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"VWUFACTOR",True) # '$VWUFACTOR' on line 25, col 39
if _v is not None: write(_filter(_v, rawExpr='$VWUFACTOR')) # from line 25, col 39.
_filter = _orig_filter_68095643
write(''' ''')
_v = VFFSL(SL,"MWUCODE",True) # '$MWUCODE' on line 25, col 62
if _v is not None: write(_filter(_v, rawExpr='$MWUCODE')) # from line 25, col 62.
write(''' </b><br>
''')
write(''' <br>
''')
if VFFSL(SL,"varExists",False)('$datalist') and VFFSL(SL,"datalist",True) and len(VFFSL(SL,"datalist",True))>0: # generated from line 29, col 5
wuamount = 0
amount = 0
write(''' <form action=taskMWaresSave method=post>
<input type=hidden name=waresid value=''')
_v = VFFSL(SL,"wid",True) # '$wid' on line 33, col 47
if _v is not None: write(_filter(_v, rawExpr='$wid')) # from line 33, col 47.
write('''>
<input type=hidden name=taskid value=''')
_v = VFFSL(SL,"tid",True) # '$tid' on line 34, col 46
if _v is not None: write(_filter(_v, rawExpr='$tid')) # from line 34, col 46.
write('''>
<input type=hidden name=dbeg value="''')
_v = VFFSL(SL,"dbeg",True) # '$dbeg' on line 35, col 45
if _v is not None: write(_filter(_v, rawExpr='$dbeg')) # from line 35, col 45.
write('''">
<table>
<thead>
<tr>
<th>''')
if False:
_('Дата')
_v = VFFSL(SL,"_",False)('Дата') # "$_('\xc4\xe0\xf2\xe0')" on line 39, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xc4\xe0\xf2\xe0')")) # from line 39, col 25.
write('''</th>
<th>
<select name=wuid id=":focus:">
<option value=''')
_v = VFFSL(SL,"MWUID",True) # '$MWUID' on line 42, col 43
if _v is not None: write(_filter(_v, rawExpr='$MWUID')) # from line 42, col 43.
write(''' selected>''')
_v = VFFSL(SL,"MWUCODE",True) # '$MWUCODE' on line 42, col 59
if _v is not None: write(_filter(_v, rawExpr='$MWUCODE')) # from line 42, col 59.
write('''</option>
''')
if VFFSL(SL,"VWUID",True): # generated from line 43, col 27
write(''' <option value=''')
_v = VFFSL(SL,"VWUID",True) # '$VWUID' on line 44, col 43
if _v is not None: write(_filter(_v, rawExpr='$VWUID')) # from line 44, col 43.
write('''>''')
_v = VFFSL(SL,"VWUCODE",True) # '$VWUCODE' on line 44, col 50
if _v is not None: write(_filter(_v, rawExpr='$VWUCODE')) # from line 44, col 50.
write('''</option>
''')
write(''' </select>
</th>
<th>''')
if False:
_('Кол-во')
_v = VFFSL(SL,"_",False)('Кол-во') # "$_('\xca\xee\xeb-\xe2\xee')" on line 48, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xca\xee\xeb-\xe2\xee')")) # from line 48, col 25.
write('''</th>
<tr>
</thead>
<tbody>
''')
for item in VFFSL(SL,"datalist",True): # generated from line 52, col 13
if VFFSL(SL,"item.canedit",True) == '0': # generated from line 53, col 17
trClass = 'class="inactive"'
else: # generated from line 55, col 17
trClass = ''
write(''' <tr ''')
_v = VFFSL(SL,"trClass",True) # '$trClass' on line 58, col 21
if _v is not None: write(_filter(_v, rawExpr='$trClass')) # from line 58, col 21.
write('''>
<td>''')
_orig_filter_88920082 = _filter
filterName = 'DateFilter2'
if self._CHEETAH__filters.has_key("DateFilter2"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.productdate",True) # '$item.productdate' on line 59, col 44
if _v is not None: write(_filter(_v, rawExpr='$item.productdate')) # from line 59, col 44.
_filter = _orig_filter_88920082
write('''</td>
''')
if VFFSL(SL,"item.canedit",True) == '1': # generated from line 60, col 19
write(''' <td><input type="text" name="WL_''')
_v = VFFSL(SL,"item.WLOTID",True) # '$item.WLOTID' on line 61, col 53
if _v is not None: write(_filter(_v, rawExpr='$item.WLOTID')) # from line 61, col 53.
write('''" id="::float" title="''')
_v = VFFSL(SL,"item.WLNUMBER",True) # '$item.WLNUMBER' on line 61, col 87
if _v is not None: write(_filter(_v, rawExpr='$item.WLNUMBER')) # from line 61, col 87.
write('''" value="''')
_orig_filter_30262987 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 61, col 126
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 61, col 126.
_filter = _orig_filter_30262987
write('''" size="4"></td>
''')
else: # generated from line 62, col 19
write(''' <td><a href=\'#\' title="''')
_v = VFFSL(SL,"item.WLNUMBER",True) # '$item.WLNUMBER' on line 63, col 44
if _v is not None: write(_filter(_v, rawExpr='$item.WLNUMBER')) # from line 63, col 44.
write('''">''')
_orig_filter_14753798 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 63, col 76
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 63, col 76.
_filter = _orig_filter_14753798
write('''</a></td>
''')
write(''' <td>''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.AMOUNT",True),VFFSL(SL,"VWUFACTOR",True),VFFSL(SL,"VWUCODE",True),VFFSL(SL,"MWUFACTOR",True),VFFSL(SL,"MWUCODE",True)) # '$viewQuantity($item.AMOUNT,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)' on line 65, col 25
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.AMOUNT,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)')) # from line 65, col 25.
write('''(<b><u>''')
_orig_filter_57748425 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 65, col 115
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 65, col 115.
_filter = _orig_filter_57748425
write('''</u></b>)</td>
</tr>
''')
amount += float(VFFSL(SL,"item.AMOUNT",True))
write(''' </tbody>
<tfoot>
<tr>
<th>''')
if False:
_('Итого')
_v = VFFSL(SL,"_",False)('Итого') # "$_('\xc8\xf2\xee\xe3\xee')" on line 72, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xc8\xf2\xee\xe3\xee')")) # from line 72, col 25.
write(''':</th>
<th colspan=2>''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"amount",True),VFFSL(SL,"VWUFACTOR",True),VFFSL(SL,"VWUCODE",True),VFFSL(SL,"MWUFACTOR",True),VFFSL(SL,"MWUCODE",True)) # '$viewQuantity($amount,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)' on line 73, col 35
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($amount,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)')) # from line 73, col 35.
write('''(<b><u>''')
_orig_filter_11841561 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"amount",True) # '$amount' on line 73, col 120
if _v is not None: write(_filter(_v, rawExpr='$amount')) # from line 73, col 120.
_filter = _orig_filter_11841561
write('''</u></b>)</th>
</tr>
</tfoot>
</table>
<input type="submit" value="''')
if False:
_('Сохранить')
_v = VFFSL(SL,"_",False)('Сохранить') # "$_('\xd1\xee\xf5\xf0\xe0\xed\xe8\xf2\xfc')" on line 77, col 37
if _v is not None: write(_filter(_v, rawExpr="$_('\xd1\xee\xf5\xf0\xe0\xed\xe8\xf2\xfc')")) # from line 77, col 37.
write('''">
</form>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: main method generated for this template
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_taskMWares= 'writeBody'
## END CLASS DEFINITION
if not hasattr(taskMWares, '_initCheetahAttributes'):
templateAPIClass = getattr(taskMWares, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(taskMWares)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=taskMWares()).run()
|
[
"s.shybkoi@gmail.com"
] |
s.shybkoi@gmail.com
|
3042812bdbd8a115621ce18b49ec5776b9227138
|
3b9d763180410bf0abf5b9c37391a64319efe839
|
/toontown/town/TTTownLoader.py
|
0780028cd0ae5e5b0b03c022cae3ac05115db2fc
|
[] |
no_license
|
qphoton/Reverse_Engineering_Project_ToonTown
|
442f15d484324be749f6f0e5e4e74fc6436e4e30
|
11468ab449060169191366bc14ff8113ee3beffb
|
refs/heads/master
| 2021-05-08T00:07:09.720166
| 2017-10-21T02:37:22
| 2017-10-21T02:37:22
| 107,617,661
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
# File: T (Python 2.4)
import TownLoader
import TTStreet
from toontown.suit import Suit
class TTTownLoader(TownLoader.TownLoader):
    """Town loader for Toontown Central streets."""

    def __init__(self, hood, parentFSM, doneEvent):
        """Configure the TTC street class, music tracks, and storage DNA."""
        TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
        self.streetClass = TTStreet.TTStreet
        self.musicFile = 'phase_3.5/audio/bgm/TC_SZ.mid'
        self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.mid'
        self.townStorageDNAFile = 'phase_5/dna/storage_TT_town.dna'

    def load(self, zoneId):
        """Load the street for *zoneId*, the suit models, and the hood DNA."""
        TownLoader.TownLoader.load(self, zoneId)
        Suit.loadSuits(1)
        dna_path = 'phase_5/dna/toontown_central_' + str(self.canonicalBranchZone) + '.dna'
        self.createHood(dna_path)

    def unload(self):
        """Release the suit models, then tear down via the base class."""
        Suit.unloadSuits(1)
        TownLoader.TownLoader.unload(self)
|
[
"Infinitywilee@rocketmail.com"
] |
Infinitywilee@rocketmail.com
|
09a9431a6f962fe8e4e27dafefa004f31c465bad
|
a4cc0abde394380db62a8d276d9b1a6bb237334f
|
/my_func.py
|
388855fe75c05937c6573996727995da06c6fa3d
|
[
"Apache-2.0"
] |
permissive
|
pythongirl/Python_Week2
|
50341d00fa7149bcf0c4ad136e83178bfa6f202a
|
c40d95d022c872b61ea46da2ef1943cb370bc844
|
refs/heads/master
| 2020-12-25T15:18:06.757764
| 2016-08-14T09:06:00
| 2016-08-14T09:06:00
| 65,646,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
'''
Write a simple Python module that contains one function that prints 'hello' (module name = my_func.py). Do a test where you impor my_func into a new Python script. Test this using the following contexts:
* my_func.py is located in the same directory as your script
* my_func.py is located in some random subdirectory (not the same directory as your script)
* my_func.py is located in ~/applied_python/lib/python2.7/site-packages/
'''
def print_hello():
    """Print a hello greeting to stdout."""
    # Parenthesized call form is valid under both Python 2 and Python 3;
    # the original bare `print "Hello!!"` statement is a SyntaxError on 3.x.
    print("Hello!!")
|
[
"vinita.shah@colorado.edu"
] |
vinita.shah@colorado.edu
|
8e0c2ea46e515db066552441ae389a1cae55bd63
|
62a14e6c14c8bed51ad92326b08df6e6d60b62ff
|
/PyTorchProj/src/data/dataset.py
|
ca13418bd4fe0b3ea53bf0b4cd37d3a4aa71e2c4
|
[] |
no_license
|
dwHou/PyTorchProj
|
7aee6229434cdd6469e984dbe65d128c41cebcb0
|
ffe5f4cdab542206058818156c5f47c8395913ec
|
refs/heads/master
| 2021-05-20T19:56:18.825972
| 2021-03-07T11:11:18
| 2021-03-07T11:11:18
| 252,398,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,759
|
py
|
import torch.utils.data as data
from os import listdir
from os.path import join
from PIL import Image
from util import common
import numpy as np
import pickle
import lmdb
from option import opt
# def is_image_file(filename):
# return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"])
# def load_img(filepath):
# # img = Image.open(filepath)
# # return img
# img = Image.open(filepath).convert('YCbCr')
# y, _, _ = img.split()
# return y
class Demo_lmdb(data.Dataset):
    """Paired input/target dataset backed by an LMDB database.

    Each LMDB record (keyed by its integer index as a string) is a pickled
    (input, target) pair of numpy arrays.  In 'train' mode a random patch of
    side ``opt.patchSize`` is cropped from each pair; any other flag returns
    the full-size arrays.  Both are converted to tensors via
    ``common.np2Tensor`` before being returned.
    """

    def __init__(self, db_path, flag):
        super(Demo_lmdb, self).__init__()
        env = lmdb.open(db_path)
        txn = env.begin()
        self.txn = txn
        self.flag = flag

    def __getitem__(self, index):
        self.index = index
        np_in, np_tar = self._load_lmdb(self.index)
        if self.flag == 'train':
            self.patch_size = opt.patchSize
            patch_in, patch_tar = common.get_patch(np_in, np_tar, self.patch_size)
        else:
            # Any non-train flag returns the full frames.  The original
            # `elif self.flag == 'test'` left patch_in/patch_tar unbound
            # (UnboundLocalError) for any other flag value; this also matches
            # the plain `else` used by the file-based Demo dataset.
            patch_in, patch_tar = np_in, np_tar
        patch_in, patch_tar = common.np2Tensor([patch_in, patch_tar], opt.rgb_range)
        return patch_in, patch_tar

    def __len__(self):
        # Number of key/value records stored in the database.
        return self.txn.stat()['entries']

    def _load_lmdb(self, index):
        """Fetch and unpickle the (input, target) pair stored under *index*."""
        pairs = self.txn.get('{}'.format(index).encode())
        np_pairs = pickle.loads(pairs)
        return np_pairs[0], np_pairs[1]
class Demo(data.Dataset):
    """Paired input/target dataset listed in a plain-text index file.

    Each line of *txt_path* holds two whitespace-separated relative image
    paths (input, target) under the youku dataset root.  In 'train' mode a
    random patch of side ``opt.patchSize`` is cropped from each pair;
    otherwise the full images are returned.
    """

    def __init__(self, txt_path, flag):
        super(Demo, self).__init__()
        with open(txt_path, 'r') as listing:
            entries = []
            for raw_line in listing:
                fields = raw_line.rstrip().split()
                entries.append((fields[0], fields[1]))
        self.pairs = entries
        self.flag = flag

    def __getitem__(self, index):
        self.index = index
        np_in, np_tar = self._load_file(index)
        if self.flag == 'train':
            self.patch_size = opt.patchSize
            patch_in, patch_tar = common.get_patch(np_in, np_tar, self.patch_size)
        else:
            patch_in, patch_tar = np_in, np_tar
        patch_in, patch_tar = common.np2Tensor([patch_in, patch_tar], opt.rgb_range)
        return patch_in, patch_tar

    def __len__(self):
        return len(self.pairs)

    def _load_file(self, index):
        """Load the (input, target) image pair at *index* as numpy arrays."""
        # NOTE(review): dataset root is hard-coded to a local macOS path —
        # presumably developer-specific; confirm before deploying elsewhere.
        input, target = self.pairs[index]
        img_in = Image.open('/Applications/Programming/Dataset/VSR/youkudataset/'+input).convert('RGB')
        img_tar = Image.open('/Applications/Programming/Dataset/VSR/youkudataset/'+target).convert('RGB')
        np_in = np.asarray(img_in)
        np_tar = np.asarray(img_tar)
        return np_in, np_tar
# 读文件的指定行数可以用
from itertools import islice
f=open("pyhpd.txt")
for a in islice(f,2,6):
print(a)
islice(iterable, [start, ] stop [, step]):
创建一个迭代器,生成项的方式类似于切片返回值: iterable[start : stop : step],将跳过前start个项,迭代在stop所指定的位置停止,step指定用于跳过项的步幅。与切片不同,负值不会用于任何start,stop和step,如果省略了start,迭代将从0开始,如果省略了step,步幅将采用
————————————————
版权声明:本文为CSDN博主「wzg2016」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/strive_for_future/article/details/95388081
|
[
"noreply@github.com"
] |
dwHou.noreply@github.com
|
c342c102e65a81dd1e162fc1885fdcdf358c04f7
|
5d4b2a5d0bde6dbe7d41f64b6fcc066b129b2544
|
/src/women_shoes/serializers.py
|
753c0683700a1f4f839fd970fa0692abb05e2dc5
|
[] |
no_license
|
vanemaster/project_persephone
|
c133b3138d1a18e2a411a210d7f6597e4d1ffb7b
|
4cdc8d0e675fab349aedd17ad757ae0083960455
|
refs/heads/main
| 2023-06-25T06:06:21.174579
| 2021-07-15T20:56:42
| 2021-07-15T20:56:42
| 368,344,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
from .models import WomenShoes
from rest_framework import serializers
import datetime
class WomenShoesSerializer(serializers.ModelSerializer):
    """Serializer exposing the editable fields of a WomenShoes record."""

    id = serializers.IntegerField(read_only=True)
    brand = serializers.CharField(required=False, allow_blank=True, max_length=50)
    colors = serializers.CharField(max_length=50)
    count = serializers.IntegerField(required=False)
    dateAdded = serializers.DateTimeField(initial=datetime.date.today)
    dateUpdated = serializers.DateTimeField(initial=datetime.date.today)
    manufacturer = serializers.CharField(required=False, allow_blank=True, max_length=50)
    name = serializers.CharField(style={'base_template': 'textarea.html'})
    price = serializers.DecimalField(max_digits=9, decimal_places=2)
    weight = serializers.DecimalField(max_digits=9, decimal_places=2)

    def create(self, validated_data):
        """Create and return a new `WomenShoes` instance, given the validated data."""
        return WomenShoes.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Update and return an existing `WomenShoes` instance, given the validated data."""
        # NOTE(review): 'categories' is written back here but is not a
        # declared serializer field — presumably a model attribute; confirm.
        for attr in ('brand', 'categories', 'colors', 'count', 'dateAdded',
                     'dateUpdated', 'manufacturer', 'name', 'price', 'weight'):
            setattr(instance, attr, validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance

    class Meta:
        model = WomenShoes
        fields = [
            'id',
            'brand',
            'colors',
            'count',
            'dateAdded',
            'dateUpdated',
            'manufacturer',
            'name',
            'price',
            'weight',
        ]
|
[
"vafranca@gmail.com"
] |
vafranca@gmail.com
|
d89cfb8f0978fc0bca985f2f530f9406acc32058
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2465/60631/246241.py
|
f35f636406a4f9cd9aa0b1ec54ba471016376403
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
# Read a comma-separated list of integers and report the largest k such
# that, scanning the list from the end, the k-th value (1-based) exceeds
# its 0-based rank — an h-index over an ascending-sorted list.
raw = input()
values = raw.split(',')
qualifying = [rank + 1
              for rank, value in enumerate(reversed(values))
              if rank < int(value)]
print(max(qualifying))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
aaad07c5c9cec442a2ff560d06925993644a66f9
|
d4b28b0c0b8c33a0189474342d14eb0cac3594ad
|
/homemaid/maids/models.py
|
1db8d036681149edb7b1ef7931b848ce6c0fcbc1
|
[] |
no_license
|
thana-arnam/homemaid
|
f05d2f90b5b7b2c19d471a78c7a5295e8d9c62a8
|
b5b25c20534968693890c3faff05204b09a2517c
|
refs/heads/master
| 2023-08-18T03:09:01.913549
| 2020-07-18T09:54:37
| 2020-07-18T09:54:37
| 280,569,682
| 0
| 0
| null | 2021-09-22T19:27:20
| 2020-07-18T02:58:07
|
Python
|
UTF-8
|
Python
| false
| false
| 338
|
py
|
from django.db import models
from django_extensions.db.models import TimeStampedModel
class Maid(TimeStampedModel):
    """A housemaid profile with hiring details.

    `created`/`modified` timestamps are inherited from TimeStampedModel.
    """
    # Display name of the maid.
    name = models.CharField(max_length=300)
    # Uploaded profile picture.
    profile_image = models.FileField()
    birthdate = models.DateField()
    # Free-text self description shown on the profile page.
    description=models.TextField()
    # Free-text list of certificates/qualifications.
    certificate=models.TextField()
    # NOTE(review): currency/period of the salary is not stated here — confirm.
    salary=models.IntegerField()
|
[
"thanamat@odds.team"
] |
thanamat@odds.team
|
dda5656ea860805745000de4d7aafc852e8a8153
|
bf88052e625b654ff603cd344eb7c747c16d095b
|
/src/FusionFilter.py
|
6da86d750748052cd290812f030c9510911708dd
|
[] |
no_license
|
joshuaburkhart/ReactomeFIFusion
|
361db6fdc227d922123a3e97e973cea1e25f822a
|
ac1745ce8a93fe071e5e430f540303f46d75bf96
|
refs/heads/master
| 2020-04-13T22:21:24.782181
| 2016-04-30T03:03:14
| 2016-04-30T03:03:14
| 50,455,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,030
|
py
|
import os
import re
FUSION_GENE_CAPTURE = "^\"[0-9]+?\"\s+\"" \
"(?P<gene1>.+?)" \
"\"\s+\".+?\"\s+\".+?\"\s+\".+?\"\s+\"" \
"(?P<gene2>.+?)\""
GENE_PDB_CAPTURE = "^(?P<gene1>[A-Z]+[0-9A-Z]+)," \
"(?P<pdbset1>\[?[',\s0-9A-Z]+\]?)," \
"(?P<gene2>[A-Z]+[0-9A-Z]+)," \
"(?P<pdbset2>\[?[',\s0-9A-Z]+\]?)"
FIINT_CAPTURE = "^(?P<gene1>[A-Z]+[0-9A-Z]+)," \
"(?P<swissprot1>[A-Z]+[0-9A-Z]+)," \
"(?P<gene2>[A-Z]+[0-9A-Z]+)," \
"(?P<swissprot2>[A-Z]+[0-9A-Z]+)," \
"(?P<interactome3d>.*)"
PROJ_DIR = "/Users/joshuaburkhart/Research/ReactomePPI"
OUT_DIR = PROJ_DIR + "/data/output"
FIINT_FN = OUT_DIR + "/FIInteract.txt"
ANNOT_PDB_FN = OUT_DIR + "/FIAnnotPdb.txt"
FUSION_RESULTS_FN = PROJ_DIR + "/gene-fusion-analysis/results/newDescription.txt"
FIINT_FUSION_INTERSECT_FN = OUT_DIR + "/FIInteractFusionEvents.txt"
fusion_gene_dict = dict()
gene_pdb_dict = dict()
fi_interactome_dict= dict()
def string2list(s):
    """Convert a stringified list like "['A1', 'B2']" into ['A1', 'B2'].

    Strips brackets, quotes, and commas anywhere in the string, then splits
    on whitespace; a bare token such as "A1" therefore yields ['A1'].
    """
    cleaned = s.translate(str.maketrans('', '', "[]',"))
    return cleaned.split()
# read FUSION_RESULTS_FN (if it exists)
if os.path.isfile(FUSION_RESULTS_FN):
print("read FUSION_RESULTS_FN...")
in_fptr = open(FUSION_RESULTS_FN)
while 1:
line = in_fptr.readline()
if not line:
break
match = re.match(FUSION_GENE_CAPTURE, line)
if match:
# store map each gene to fusion event
fusion_gene_dict[match.group('gene1')] = "{0}-{1}".format(match.group('gene1'),match.group('gene2'))
fusion_gene_dict[match.group('gene2')] = "{0}-{1}".format(match.group('gene1'),match.group('gene2'))
# close file
in_fptr.close()
# open ANNOT_PDB_FN file
in_fptr = open(ANNOT_PDB_FN)
print("read ANNOT_PDB_FN...")
while 1:
line = in_fptr.readline()
if not line:
break
match = re.match(GENE_PDB_CAPTURE, line)
if match:
# store gene to pdb list map
gene_pdb_dict[match.group('gene1')] = string2list(match.group('pdbset1'))
gene_pdb_dict[match.group('gene2')] = string2list(match.group('pdbset2'))
# close file
in_fptr.close()
# open FIINT_FN file
in_fptr = open(FIINT_FN)
print("read FIINT_FN...")
while 1:
line = in_fptr.readline()
if not line:
break
match = re.match(FIINT_CAPTURE, line)
if match:
fi_interactome_dict[match.group('gene1')] = (match.group('gene1'),
match.group('swissprot1'),
match.group('gene2'),
match.group('swissprot2'),
match.group('interactome3d'))
fi_interactome_dict[match.group('gene2')] = (match.group('gene1'),
match.group('swissprot1'),
match.group('gene2'),
match.group('swissprot2'),
match.group('interactome3d'))
# close file
in_fptr.close()
intersection = fusion_gene_dict.keys() & gene_pdb_dict.keys() & fi_interactome_dict.keys()
print("intersection yields {0} genes...".format(len(intersection)))
if len(intersection) > 0:
with open(FIINT_FUSION_INTERSECT_FN, 'w') as out_fptr:
out_fptr.write("FUSION EVENT,INTERACTION AFFECTED,"
"GENE 1 NAME, GENE 1 SWISSPROT, GENE 1 PDB SET,"
"GENE 2 NAME, GENE 2 SWISSPROT, GENE 2 PDB SET,"
"Interactome3D Result\n")
# the thought is to later report only maximum pdb-pdb zdock scores (so as to fit them in one row) and link them to their zdock.out
for gene in intersection:
print("producing row for {0} with reported interaction {1}...".format(gene,fi_interactome_dict[gene]))
with open(FIINT_FUSION_INTERSECT_FN, 'a') as out_fptr:
out_fptr.write("F {0},I {1},{2},{3},{4},{5},{6},{7},{8}\n".format(
fusion_gene_dict[gene].replace(',','~'), #FUSION EVENT
"{0}-{1}".format(gene,fi_interactome_dict[gene][2]).replace(',','~'), #INTERACTION AFFECTED
gene.replace(',','~'), #GENE 1 NAME
fi_interactome_dict[gene][1].replace(',','~'), #GENE 1 SWISSPROT
'~'.join(map(str,gene_pdb_dict[gene])), #GENE 1 PDB SET
fi_interactome_dict[gene][2].replace(',','~'), #GENE 2 NAME
fi_interactome_dict[gene][3].replace(',','~'), #GENE 2 SWISSPROT
'~'.join(map(str,gene_pdb_dict[fi_interactome_dict[gene][2]])), #GENE 2 PDB SET
fi_interactome_dict[gene][4].replace(',','~') #Interactome3D Result
))
|
[
"burkhart.joshua@gmail.com"
] |
burkhart.joshua@gmail.com
|
c7406e98ca05ca9ae9967f7ac9503ceb05764380
|
ffb5f8fd5e7bcaf8c68e172af2032c85941c1099
|
/growth_inflation/growth_inflation_v2.py
|
4269112d1324333b182f904a0ea71140820adf1e
|
[] |
no_license
|
dpsugasa/Macro_Data
|
558aec75165c25a5176a899cdbd2f58898096766
|
6051fd6b672b12f522329b07e3ad3345871f7d48
|
refs/heads/master
| 2020-03-28T17:22:11.988668
| 2019-02-08T14:23:00
| 2019-02-08T14:23:00
| 148,782,420
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,830
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 14:04:28 2019
@author: dpsugasa
"""
#import all modules
import pandas as pd
from pandas.tseries.offsets import *
import numpy as np
import matplotlib.pyplot as plt
import datetime #for dates
from datetime import datetime
import quandl #for data
from math import sqrt
from tia.bbg import LocalTerminal
#import plotly
import plotly.plotly as py #for plotting
import plotly.offline as offline
import plotly.graph_objs as go
import plotly.tools as tls
import plotly.figure_factory as ff
from fredapi import Fred
import credentials
fred = credentials.fred
#set the script start time
start_time = datetime.now()
date_now = "{:%m_%d_%Y}".format(datetime.now())
start_date = '01/01/1950'
end_date = "{:%m/%d/%Y}".format(datetime.now())
IDs = ['GDP CYOY Index', 'CPI YOY Index'] #'SPXT Index']
fields = ['LAST PRICE']
df = LocalTerminal.get_historical(IDs, fields, start_date, end_date).as_frame() #period = 'QUARTERLY',
#non_trading_day_fill_option = 'ALL_CALENDAR_DAYS',
#non_trading_day_fill_method = 'PREVIOUS_VALUE').as_frame()
df.columns = df.columns.droplevel(-1)
df = df.resample('Q').mean()
df = df.dropna()
df['gdp_ror'] = df['GDP CYOY Index'].pct_change()
df['cpi_ror'] = df['CPI YOY Index'].pct_change()
df['gdp_dir'] = df.apply(lambda x: 1 if x['gdp_ror'] > 0 else(-1 if \
x['gdp_ror'] < 0 else 0), axis = 1)
df['gdp_dir'] = df['gdp_dir'].replace(to_replace = 0, method = 'ffill')
df['cpi_dir'] = df.apply(lambda x: 1 if x['cpi_ror'] > 0 else(-1 if \
x['cpi_ror'] < 0 else 0), axis = 1)
df['cpi_dir'] = df['cpi_dir'].replace(to_replace = 0, method = 'ffill')
df['regime'] = df.apply(lambda x: 2 if x['gdp_dir'] == 1 and x['cpi_dir'] == 1 else \
(1 if x['gdp_dir'] == 1 and x['cpi_dir'] == -1 else \
(3 if x['gdp_dir'] == -1 and x['cpi_dir'] == 1 else 4)), axis = 1)
#df['eq_direction'] = df.apply(lambda x: 1 if x['SPXT Index'] > 0 else(-1 if \
# x['SPXT Index'] < 0 else 0), axis = 1)
#
#df['gdp_direction'] = df.apply(lambda x: 1 if x['gdp_ror'] > 0 else(-1 if \
# x['gdp_ror'] < 0 else 0), axis = 1)
#
#df['cpi_direction'] = df.apply(lambda x: 1 if x['cpi_ror'] > 0 else(-1 if \
# x['cpi_ror'] < 0 else 0), axis = 1)
#
##df['direction'] = df['gdp_direction'] + df['cpi_direction']
trace = go.Scatter(
x = df['cpi_dir'].values,
y = df['gdp_dir'].values,
name = 'GDP',
mode='markers',
marker=dict(
size=10,
color = list(range(1,len(df['gdp_dir'].values))), #set color equal to a variable
colorscale='Viridis',
showscale=True
)
)
#trace1 = go.Scatter(
# #x = df['gdp_direction'].index,
# y = df['cpi_direction'].values,
# name = 'CPI',
# mode='markers',
# marker=dict(
# size=16,
# color = np.random.randn(500), #set color equal to a variable
# colorscale='Viridis',
# showscale=True
# )
#)
#trace0 = go.Bar(
# x = df['cpi_direction'].index,
# y = df['cpi_direction'].values,
# name = 'inflation direction',
# yaxis = 'y',
# marker = dict(color = ('#a6a6a6')),
## line = dict(
## color = ('#ccccff'),
## width = 1.0,
## ),
## fill = 'tonexty',
# opacity = 1,
#
#
#
# )
#
#
#trace1 = go.Bar( x = df['eq_direction'].index,
# y = df['eq_direction'].values,
# name = 'eq directions',
# yaxis = 'y',
# marker = dict(color = ('#1aff1a')),
## line = dict(
## color = ('#1aff1a'),
## width = 1.0,
## ),
# #fill = 'tonexty',
# opacity = 0.50,
# )
layout = {'title' : f'4 Regime',
'xaxis' : {'title' : 'Direction', #'type': 'date',
'fixedrange': True},
'yaxis' : {'title' : 'Direction', 'fixedrange': True},
# 'shapes': [{'type': 'rect',
# 'x0': r[i]['scr_1y'].index[0],
# 'y0': -2,
# 'x1': r[i]['scr_1y'].index[-1],
# 'y1': 2,
# 'name': 'Z-range',
# 'line': {
# 'color': '#f48641',
# 'width': 2,},
# 'fillcolor': '#f4ad42',
# 'opacity': 0.25,
# },]
}
data = [trace, trace1]
figure = go.Figure(data=data, layout=layout)
py.iplot(figure, filename = f'Growth Direction')
start_date = '01/01/1980'
end_date = "{:%m/%d/%Y}".format(datetime.now())
IDs = ['RTY Index', 'INDU Index', 'SPX Index'] #'SPXT Index']
fields = ['LAST PRICE']
df_eq = LocalTerminal.get_historical(IDs, fields, start_date, end_date).as_frame()
df_eq.columns = df_eq.columns.droplevel(-1)
df_eq = df_eq.fillna(method = 'ffill') #.resample('M').last()
df_eq = df_eq.pct_change()
df_eq = df_eq.resample('Q').sum()
frames = [df_eq, df]
baf = pd.concat(frames, join='outer', axis =1)
#baf = baf.fillna(method = 'ffill') #removes last quarter; also discover if changes in prolonged periods or good markets precipitate higher vol
baf = baf.dropna()
q4 = baf['RTY Index'][(baf['regime'] == 4)].dropna() #i think this is using the returns of zero
print(q4.mean())
print(q4.std())
q1 = baf['RTY Index'][(baf['regime'] == 1)].dropna()
print(q1.mean())
print(q1.std())
q3 = baf['RTY Index'][(baf['regime'] == 3)].dropna()
print(q3.mean())
print(q3.std())
q2 = baf['RTY Index'][(baf['regime'] == 2)].dropna()
print(q2.mean())
print(q2.std())
q_4 = baf['INDU Index'][(baf['regime'] == 4)].dropna()
print(q_4.mean())
print(q_4.std())
q_1 = baf['INDU Index'][(baf['regime'] == 1)].dropna()
print(q_1.mean())
print(q_1.std())
q_3 = baf['INDU Index'][(baf['regime'] == 3)].dropna()
print(q_3.mean())
print(q_3.std())
q_2 = baf['INDU Index'][(baf['regime'] == 2)].dropna()
print(q_2.mean())
print(q_2.std())
q_4_spx = baf['SPX Index'][(baf['regime'] == 4)].dropna()
print(q_4_spx.mean())
print(q_4_spx.std())
q_1_spx = baf['SPX Index'][(baf['regime'] == 1)].dropna()
print(q_1_spx.mean())
print(q_1_spx.std())
q_3_spx = baf['SPX Index'][(baf['regime'] == 3)].dropna()
print(q_3_spx.mean())
print(q_3_spx.std())
q_2_spx = baf['SPX Index'][(baf['regime'] == 2)].dropna()
print(q_2_spx.mean())
print(q_2_spx.std())
#d[name].apply(lambda x: "Long" if x['LAST PRICE'] > x['MA_30'] else "Short", axis=1)
#df = df.dropna()
#d_fx.columns = d_fx.columns.droplevel(-1)
|
[
"dustin.sugasa@gmail.com"
] |
dustin.sugasa@gmail.com
|
3459de3607f81b8e3cd2943b8031dbd163d4b650
|
1268030197a27bf2ef5e3f5ab8df38993457fed5
|
/run_bot.py
|
552b71c22140a9c5e5e54878a65f05870a32fd77
|
[] |
no_license
|
parimalpate123/rasa_slack_chatbot
|
439abd9a541d6314b46c6fb303c0275803fc9357
|
206aacab62f12be9df9f009f65736caed3e8edac
|
refs/heads/master
| 2020-04-17T14:13:49.917604
| 2019-05-07T11:08:07
| 2019-05-07T11:08:07
| 166,649,129
| 0
| 1
| null | 2019-01-29T11:09:07
| 2019-01-20T10:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
#import json
from rasa_core.channels.slack import SlackInput
from rasa_core.agent import Agent
from rasa_core.interpreter import RegexInterpreter
from rasa_core.channels import HttpInputChannel
#from rasa_core.utils import EndpointConfig
# load your trained agent
#agent = Agent.load(models\current\dialogue, interpreter=RegexInterpreter())
agent = Agent.load('models/current/dialogue', interpreter='models/current/nlu')
#action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
input_channel = \
SlackInput(slack_token='xoxb-525465834114-525382855891-SYt6HyWl7IfVyhtX19z6jJec'
, slack_channel='@devops') # this is the `bot_user_o_auth_access_token`
# the name of your channel to which the bot posts (optional)
# set serve_forever=True if you want to keep the server running
#agent.handle_channel(HttpInputChannel(5004, "/chat", input_channel))
agent.handle_channel(HttpInputChannel(5004, "", input_channel))
#s = agent.handle_channels([input_channel], 5004, serve_forever=False)
#agent.handle_channels([input_channel], 5004, serve_forever=True)
|
[
"noreply@github.com"
] |
parimalpate123.noreply@github.com
|
b096276667e51556fbc5afb449f519a9223b8673
|
d87a64f76caa183c5c85580401827ae5ba64ef11
|
/env/bin/rst2odt.py
|
7a8ba53d8361e8a7016459ba5ee2d890b79fea60
|
[
"BSD-3-Clause"
] |
permissive
|
wahello/sminor
|
4c56ee4e5bc56cb81448bca9067b6c1aaa992d4f
|
24ffcafedfd0c0eff8db4a53e2f1f5f6ff3ebfc9
|
refs/heads/master
| 2022-10-04T03:33:55.066685
| 2018-06-25T14:25:49
| 2018-06-25T14:25:49
| 139,105,033
| 0
| 0
|
BSD-3-Clause
| 2022-09-16T03:59:19
| 2018-06-29T05:32:10
|
Python
|
UTF-8
|
Python
| false
| false
| 789
|
py
|
#!/home/gulli/Github/saleor/env/bin/python3
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
|
[
"gnorland93@gmail.com"
] |
gnorland93@gmail.com
|
a21dc69a9c06c325f19eb0f5e598d8983b278e69
|
d5108a669b9788d0ce8b40758eef118f26e11f99
|
/payment/urls.py
|
da1bd6bffc2139bb47b20cdf75b9e420c9f9f959
|
[] |
no_license
|
StephanGolubev/BlogAndStore
|
6b2c58e4065ca455b1e450b2d66cf1fc69c61369
|
ed0312c3ccf6e08d96744de90e573d17d64efa23
|
refs/heads/master
| 2020-07-04T05:04:19.863974
| 2019-08-13T14:44:15
| 2019-08-13T14:44:15
| 202,164,884
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
from django.urls import path
from . import views
app_name = 'ZooShop'
urlpatterns = [
path('process/', views.payment_process
, name='process'),
path('done/', views.payment_done
, name='done'),
path('canceled/', views.payment_canceled
, name='canceled'),
]
|
[
"golubevstephan@gmail.com"
] |
golubevstephan@gmail.com
|
5f80568a7b2d0594272a7ca8a3950f22dc63eb7e
|
83d42cd72c62df5d618aeb5bafed8d12ca5bdf9e
|
/betweenHiraKata.py
|
a08d7d68aa56dffce0c903148af74b96fa4fbd9a
|
[] |
no_license
|
akbaker54/jsl
|
3d4163e3c87cc33d4c344cedab6b3b6c3cb24a4a
|
7e28e1e9846a4f6e80c7a97ed8225f55b5357e8a
|
refs/heads/master
| 2021-01-19T06:14:50.114363
| 2016-06-28T15:50:59
| 2016-06-28T15:50:59
| 62,068,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,504
|
py
|
import sys
import os
import codecs
r = ["A", "a","I","i","U","u","E","e","O","o","ka","ga","ki","gi","ku","gu","ke","ge","ko","go","sa","za","si","zi","su","zu","se","ze","so","zo","ta","da","ti","di","TU","tu","du","te","de","to","do","na","ni","nu","ne","no","ha","ba","pa","hi","bi","pi","hu","bu","pu","he","be","pe","ho","bo","po","ma","mi","mu","me","mo","YA","ya","YU","yu","YO","yo","ra","ri","ru","re","ro","WA","wa","wi","we","wo","n̄","vu","KA","KE","kya","kyu","kyo","gya","gyu","gyo","sya","syu","syo","zya","zyu","zyo","tya","tyu","tyo","dya","dyu","dyo","nya","nyu","nyo","hya","hyu","hyo","bya","byu","byo","pya","pyu","pyo","mya","myu","myo","rya","ryu","ryo","fa","fi","fe","fo","va","vi","ve","vo"]
k = ["ァ","ア","ィ","イ","ゥ","ウ","ェ","エ","ォ","オ","カ","ガ","キ","ギ","ク","グ","ケ","ゲ","コ","ゴ","サ","ザ","シ","ジ","ス","ズ","セ","ゼ","ソ","ゾ","タ","ダ","チ","ヂ","ッ","ツ","ヅ","テ","デ","ト","ド","ナ","ニ","ヌ","ネ","ノ","ハ","バ","パ","ヒ","ビ","ピ","フ","ブ","プ","ヘ","ベ","ペ","ホ","ボ","ポ","マ","ミ","ム","メ","モ","ャ","ヤ","ュ","ユ","ョ","ヨ","ラ","リ","ル","レ","ロ","ヮ","ワ","ヰ","ヱ","ヲ","ン","ヴ","ヵ","ヶ","きゃ","きゅ","きょ","ぎゃ","ぎゅ","ぎょ","しゃ","しゅ","しょ","じゃ","じゅ","じょ","ちゃ","ちゅ","ちょ","ぢゃ","ぢゅ","ぢょ","にゃ","にゅ","にょ","ひゃ","ひゅ","ひょ","びゃ","びゅ","びょ","ぴゃ","ぴゅ","ぴょ","みゃ","みゅ","みょ","りゃ","りゅ","りょ","ファ","フィ","フェ","フォ","ヴァ","ヴィ","ヴェ","ヴォ"]
h = ["ぁ","あ","ぃ","い","ぅ","う","ぇ","え","ぉ","お","か","が","き","ぎ","く","ぐ","け","げ","こ","ご","さ","ざ","し","じ","す","ず","せ","ぜ","そ","ぞ","た","だ","ち","ぢ","っ","つ","づ","て","で","と","ど","な","に","ぬ","ね","の","は","ば","ぱ","ひ","び","ぴ","ふ","ぶ","ぷ","へ","べ","ぺ","ほ","ぼ","ぽ","ま","み","む","め","も","ゃ","や","ゅ","ゆ","ょ","よ","ら","り","る","れ","ろ","ゎ","わ","ゐ","ゑ","を","ん","ゔ","ゕ","ゖ","キャ","キュ","キョ","ギャ","ギュ","ギョ","シャ","シュ","ショ","ジャ","ジュ","ジョ","チャ","チュ","チョ","ヂャ","ヂュ","ヂョ","ニャ","ニュ","ニョ","ヒャ","ヒュ","ヒョ","ビャ","ビュ","ビョ","ピャ","ピュ","ピョ","ミャ","ミュ","ミョ","リャ","リュ","リョ","ふぁ","ふぃ","ふぇ","ふぉ","ゔぁ","ゔぃ","ゔぇ","ゔぉ"]
rkh = {}
khr = {}
hrk = {}
for i in range(0,len(r)-1):
rkh[r[i]] = (k[i], h[i])
khr[k[i]] = (h[i], r[i])
hrk[h[i]] = (r[i], k[i])
def scnd(x):
(first,second) = x
return second
def frst(x):
(first,second) = x
return first
def RtoH(exr):
if exr not in r:
print("Sorry, ",exr," isn't in the romaji list. Are you using JSL transliteration? (RtoH)")
return scnd(rkh[exr])
def RtoK(exr):
if exr not in r:
print("Sorry, ",exr," isn't in the romaji list. Are you using JSL transliteration? (RtoK)")
return frst(rkh[exr])
def KtoH(exk):
if exk not in k:
print("Sorry, ",exk," isn't in the katakana list. Maybe you put hiragana in instead? (KtoH)")
return frst(khr[exk])
def KtoR(exk):
if exk not in k:
print("Sorry, ",exk," isn't in the katakana list. Maybe you put hiragana in instead? (KtoR)")
return scnd(khr[exk])
def HtoR(exh):
if exh not in h:
print("Sorry, ",exh," isn't in the hiragana list. Maybe you put katakana in instead? (HtoR)")
return frst(hrk[exh])
def HtoK(exh):
if exh not in h:
print("Sorry, ",exh," isn't in the hiragana list. Maybe you put katakana in instead? (HtoK)")
return scnd(hrk[exh])
failCountRHKR = 0
failCountRKHR = 0
RtoHtoKtoR = False
RtoKtoHtoR = False
for i in range(0,len(r)-1):
print("checking on",i,"... the romaji is",r[i]+", the katakana is",k[i]+", and the hiragana is",h[i])
if RtoH(r[i]) != h[i]:
print("FAIL! RtoH(r[", i , "] should equal h[", i , "] =", h[i], "but instead it equals", RtoH(r[i]), "!!")
if RtoK(r[i]) != k[i]:
print("FAIL! RtoK(r[", i , "] should equal k[", i , "] =", k[i], "but instead it equals", RtoK(r[i]), "!!")
if KtoH(k[i]) != h[i]:
print("FAIL! KtoH(k[", i , "] should equal h[", i , "] =", h[i], "but instead it equals", KtoH(k[i]), "!!")
if KtoR(k[i]) != r[i]:
print("FAIL! KtoR(k[", i , "] should equal r[", i , "] =", r[i], "but instead it equals", KtoR(k[i]), "!!")
if HtoR(h[i]) != r[i]:
print("FAIL! HtoR(h[", i , "] should equal r[", i , "] =", r[i], "but instead it equals", HtoR(h[i]), "!!")
if HtoK(h[i]) != k[i]:
print("FAIL! HtoK(h[", i , "] should equal k[", i , "] =", k[i], "but instead it equals", HtoK(h[i]), "!!")
if KtoR(HtoK(RtoH(r[i])))!=r[i]:
RtoHtoKtoR = True
failCountRHKR += 1
if HtoR(KtoH(RtoK(r[i])))!=r[i]:
RtoKtoHtoR = True
failCountRKHR +=1
if RtoHtoKtoR and RtoKtoHtoR:
print("failure in both directions!")
elif RtoHtoKtoR:
print("failure in the RHKR direction only...")
elif RtoKtoHtoR:
print("failure in the RKHR direction only...")
# else:
# print("success...\#teamwork")
RtoHtoKtoR = False
RtoKtoHtoR = False
print(len(r))
print(failCountRKHR)
print(failCountRHKR)
|
[
"noreply@github.com"
] |
akbaker54.noreply@github.com
|
e531492075fab779d13d15fa4a18293e35a2f0b4
|
eaf00759ebec00f930c936b0beacc4b65cee6452
|
/7.0/avalara_salestax/account_tax.py
|
efb0b55d2ca02408eefb47630160d8591c23dd5c
|
[] |
no_license
|
alephobjects/ao-openerp
|
b73bbeedc2149b08a946660aeb6d78f4eafa3698
|
3a0d7ddb85d497b4f576678370a1fbbfd71379f4
|
refs/heads/master
| 2020-04-12T03:53:47.210221
| 2016-07-27T09:44:24
| 2016-07-27T09:44:24
| 15,320,631
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,020
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import string
from osv import osv, fields
from tools.translate import _
from avalara_api import AvaTaxService, BaseAddress, Line
class account_tax(osv.osv):
"""Inherit to implement the tax using avatax API"""
_inherit = "account.tax"
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.name
def _get_compute_tax(self, cr, uid, avatax_config, doc_date, doc_code, doc_type, partner, ship_from_address_id, shipping_address_id,
lines, user=None, exemption_number=None, exemption_code_name=None, commit=False, invoice_date=False, reference_code=False, location_code=False, context=None):
address_obj = self.pool.get('res.partner')
currency_code = self._get_currency(cr, uid, context)
if not partner.customer_code:
raise osv.except_osv(_('Avatax: Warning !'), _('Customer Code for customer %s not define'% (partner.name)))
if not shipping_address_id:
raise osv.except_osv(_('Avatax: No Shipping Address Defined !'), _('There is no shipping address defined for the partner.'))
#it's show destination address
shipping_address = address_obj.browse(cr, uid, shipping_address_id, context=context)
if not lines:
raise osv.except_osv(_('Avatax: Error !'), _('AvaTax needs atleast one sale order line defined for tax calculation.'))
if avatax_config.force_address_validation:
if not shipping_address.date_validation:
raise osv.except_osv(_('Avatax: Address Not Validated !'), _('Please validate the shipping address for the partner %s.'
% (partner.name)))
if not ship_from_address_id:
raise osv.except_osv(_('Avatax: No Ship from Address Defined !'), _('There is no company address defined.'))
#it's show source address
ship_from_address = address_obj.browse(cr, uid, ship_from_address_id, context=context)
if not ship_from_address.date_validation:
raise osv.except_osv(_('Avatax: Address Not Validated !'), _('Please validate the company address.'))
#For check credential
avalara_obj = AvaTaxService(avatax_config.account_number, avatax_config.license_key,
avatax_config.service_url, avatax_config.request_timeout, avatax_config.logging)
avalara_obj.create_tax_service()
addSvc = avalara_obj.create_address_service().addressSvc
origin = BaseAddress(addSvc, ship_from_address.street or None,
ship_from_address.street2 or None,
ship_from_address.city, ship_from_address.zip,
ship_from_address.state_id and ship_from_address.state_id.code or None,
ship_from_address.country_id and ship_from_address.country_id.code or None, 0).data
destination = BaseAddress(addSvc, shipping_address.street or None,
shipping_address.street2 or None,
shipping_address.city, shipping_address.zip,
shipping_address.state_id and shipping_address.state_id.code or None,
shipping_address.country_id and shipping_address.country_id.code or None, 1).data
#using get_tax method to calculate tax based on address
result = avalara_obj.get_tax(avatax_config.company_code, doc_date, doc_type,
partner.customer_code, doc_code, origin, destination,
lines, exemption_number,
exemption_code_name,
user and user.name or None, commit, invoice_date, reference_code, location_code, currency_code, partner.vat_id or None)
return result
def cancel_tax(self, cr, uid, avatax_config, doc_code, doc_type, cancel_code):
"""Sometimes we have not need to tax calculation, then method is used to cancel taxation"""
avalara_obj = AvaTaxService(avatax_config.account_number, avatax_config.license_key,
avatax_config.service_url, avatax_config.request_timeout,
avatax_config.logging)
avalara_obj.create_tax_service()
try:
result = avalara_obj.get_tax_history(avatax_config.company_code, doc_code, doc_type)
except:
return True
result = avalara_obj.cancel_tax(avatax_config.company_code, doc_code, doc_type, cancel_code)
return result
account_tax()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"bkannan@ursainfosystems.com"
] |
bkannan@ursainfosystems.com
|
07160874012b6c06686cd008254ddb273c4a7ef8
|
6b255557f6e5609fcadf13d11c0eb734a78777fb
|
/transmembrane.py
|
87968181b803538cdfb2c6284470d23bc14cd611
|
[
"MIT"
] |
permissive
|
jeansabety/learning_python
|
1d37750a65d6c8531430f177506ff0ca9f9d4613
|
02d37999e1a3195b9697b3f5e90e37bb4b0b5882
|
refs/heads/main
| 2023-05-08T03:07:42.551880
| 2021-06-03T17:03:16
| 2021-06-03T17:03:16
| 354,625,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
#!/usr/bin/env python3
import sys
# Write a program that predicts if a protein is trans-membrane
# Trans-membrane proteins have the following properties
# Signal peptide: https://en.wikipedia.org/wiki/Signal_peptide
# Hydrophobic regions(s): https://en.wikipedia.org/wiki/Transmembrane_protein
# No prolines in hydrophobic regions (alpha helix)
# Hydrophobicity is measued via Kyte-Dolittle
# https://en.wikipedia.org/wiki/Hydrophilicity_plot
# For our purposes:
# Signal peptide is 8 aa long, KD > 2.5, first 30 aa
# Hydrophobic region is 11 aa long, KD > 2.0, after 30 aa
#1. function for kd hydrophobicity
#for each AA, add it's value to a total and divide by the number of AA
def kd(seq): #kd = kite-doolittle = way of calc hydropho
kdsum = 0 #should this go here?
for i in range(len(seq)):
if seq[i] == 'I' : kdsum += 4.5
elif seq[i] == 'V' : kdsum += 4.2
elif seq[i] == 'L' : kdsum += 3.8
elif seq[i] == 'F' : kdsum += 2.8
elif seq[i] == 'C' : kdsum += 2.5
elif seq[i] == 'M' : kdsum += 1.9
elif seq[i] == 'A' : kdsum += 1.8
elif seq[i] == 'G' : kdsum -= 0.4
elif seq[i] == 'T' : kdsum -= 0.7
elif seq[i] == 'S' : kdsum -= 0.8
elif seq[i] == 'W' : kdsum -= 0.9
elif seq[i] == 'Y' : kdsum -= 1.3
elif seq[i] == 'P' : kdsum -= 1.6 #what do I do about the proline?
elif seq[i] == 'H' : kdsum -= 3.2
elif seq[i] == 'E' : kdsum -= 3.5
elif seq[i] == 'Q' : kdsum -= 3.5
elif seq[i] == 'D' : kdsum -= 3.5
elif seq[i] == 'N' : kdsum -= 3.5
elif seq[i] == 'K' : kdsum -= 3.9
elif seq[i] == 'R' : kdsum -= 4.5
return kdsum/len(seq)
#this doesn't work
#2. function that checks for a proline
def proline(seq):
for c in seq:
if c == 'P' : return True
else : pass
#print(kd('R'))
#2. get all the sequences:
proteins = [] #sequences
ids = [] #name of the proteins
with open(sys.argv[1]) as fp: #put file name in command line - python3 Work/learning_python/classwork.py
seq = [] #sequence of AA - here so we can look at one sequence at a time
for line in fp.readlines(): #read every line
line = line.rstrip()
if line.startswith('>'): #this is the standard beginning of a new section/definition (new protein + seq in this case )
words = line.split() #lets you collect multiple lines of sequence, as many lines needed until next >
ids.append(words[0][1:])
if len(seq) > 0 : proteins.append(''.join(seq))
seq = [] #now know what each sequence is
else : #if it doesn't start with >, it is a sequence ...
seq.append(line) #...so add that line to seq
proteins.append(''.join(seq)) #add your sequence (made from mashing the lines between >) to proteins
#3. look for hydrophobic regions in all sequences
w = 11
s = 8
for id, seq in zip(ids, proteins): #go through ids and proteins together
#calculate hydrophobicity
hd_region = False
sig_pep = False
for i in range(len(seq[29:]) - w + 1 ) :
pep = seq[i:i+w]
if kd(pep) > 2 and not proline(pep) : hd_region = True
#print(i, kd(seq[i:i+w]))
for i in range(len(seq[:29]) - s + 1) :
if kd(pep) > 2.5 : sig_pep = True
#print(sig_pep)
#want to say if they are both true
if hd_region == True and sig_pep == True : print(id) #could say hd_region and sig_pep - check itself, and is a boolean
#also tried .append
#can use 'in' to ask if something is in your sequence
"""
python3 Programs/transmembrane.py Data/at_prots.fa
AT1G75120.1
AT1G10950.1
AT1G75110.1
AT1G74790.1
AT1G12660.1
AT1G75130.1
"""
|
[
"jeansabety@MacBook-Air.local"
] |
jeansabety@MacBook-Air.local
|
b4d4ead5d2cf9d7f08614c609f00ee0eb0df9021
|
de21b77219fcdd07bdd18bbb3f027eb42f9580d6
|
/loops.py
|
5527b0ebef6f208895b9e4d18d2b5d9a3a8e27ec
|
[] |
no_license
|
Miriam-Gerharz/UCL-project
|
1c52bd8f645755c596a7f314a90c2dbc62b5de9e
|
5eff451be8251453778025547aeee64d5fff42ca
|
refs/heads/master
| 2022-04-20T12:32:36.866978
| 2020-04-07T17:01:37
| 2020-04-07T17:01:37
| 218,821,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 10:52:53 2020
@author: miriam
"""
import tools
import numpy as np
import pickle
import time
###########################
param = tools.parameters()
omega = np.arange(0, 700, 10e-2)*1e3*2*np.pi # freq for spectrum
##################################################
### PARAMETER ###
param.WX = 0.6e-6 #m, adjusted parameters
param.WY = 0.705e-6 #m, adjusted parameters
param.theta0 = 0.47 #pi, adjusted parameters
param.X0 = 0.03 * param.lambda_tw
# LOOP PARAMETER
theta = np.arange(0.00, 1.00001, 0.005) # [pi]
detuning = np.arange(-500, -100, 5)*1e3 #[Hz/2pi]
# run
run = '6'
filename = 'files/3D/loop_over_theta_and_detuning_with_Finesse_' + str(round(param.Finesse)) + '_run_' + run
#############################################################################
print('Finesse: ', str(param.Finesse), ', run: ', run)
# initialize arrays for n_i
n_x = [[0 for j in range(len(detuning))] for i in range(len(theta))]
n_y = [[0 for j in range(len(detuning))] for i in range(len(theta))]
n_z = [[0 for j in range(len(detuning))] for i in range(len(theta))]
# step with omega
Delta_omega = omega[1] - omega[0]
### CALCULATIONS ####
start = time.time()
for i in range(len(theta)):
for j in range(len(detuning)):
# Set parameters
param.theta0 = theta[i]
param.detuning = detuning[j]
# Calculate resulting parameters
param.prepare_calc()
param.Gamma = param.Gamma*2 # the calculated gamma is actually gamma/2
# actually the detuning is given as an angular freq
param.detuning = param.detuning *2*np.pi
# Calculate spectra
SXX_plus_3D = tools.spectrum_output(omega, 0, param, True)
SXX_minus_3D = tools.spectrum_output(-omega, 0, param, True)
SYY_plus_3D = tools.spectrum_output(omega, 1, param, True)
SYY_minus_3D = tools.spectrum_output(-omega, 1, param, True)
SZZ_plus_3D = tools.spectrum_output(omega, 2, param, True)
SZZ_minus_3D = tools.spectrum_output(-omega, 2, param, True)
# Calculate n_i
n_x[i][j] = tools.n_from_area(SXX_plus_3D, SXX_minus_3D, Delta_omega, _N=0, _name='', printing = False)[2]
n_y[i][j] = tools.n_from_area(SYY_plus_3D, SYY_minus_3D, Delta_omega, _N=0, _name='', printing = False)[2]
n_z[i][j] = tools.n_from_area(SZZ_plus_3D, SZZ_minus_3D, Delta_omega, _N=0, _name='', printing = False)[2]
# print progress
tools.loop_progress(len(detuning), len(theta), j, i, start)
### SAVE INTO FILE ###
to_save = [param, omega, theta, detuning, n_x, n_y, n_z]
f = open(filename + '.pckl', 'wb')
pickle.dump(to_save, f)
f.close()
# save settings of run
f = open('files/3D/overview_loop_over_theta_and_detuning', 'a')
f.write('\n********************************************************************')
f.write('\nFinesse: '+ str(param.Finesse) + ', run: ' + str(run))
f.write('\npressure: ' + str(param.Press))
f.write('\nX0: ' + str(param.X0/param.lambda_tw) + ' [lambda]')
f.write('\ntheta: ' + str(theta[0])+ ' ' + str(theta[-1])+ ' ' + str(theta[1]-theta[0]) + ' (start, end, step)')
f.write('\ndetuning: '+ str(detuning[0]*1e-3)+ ' ' + str(detuning[-1]*1e-3)+ ' ' + str(detuning[1]*1e-3-detuning[0]*1e-3) + ' (start, end, step)')
f.write('\n********************************************************************')
f.write('\n')
f.close()
|
[
"gerharz@stud.uni-heidelberg.de"
] |
gerharz@stud.uni-heidelberg.de
|
4dfdf0673670dd303109bbb0c959e9128805d683
|
e131c6ac8335ef97908b940a6fc21554871957c4
|
/uWSGI/ScrapyNews/ScrapyUdn/settings.py
|
8fe9b3c6b63be86a15109e4a510e4feab69e0f06
|
[] |
no_license
|
valosz66842/DockerUdn
|
2e65e6a838f4214d684a82ded1f16289a7708949
|
6204ed6d7ecf96470736674c101c3e61962b1b3e
|
refs/heads/master
| 2023-05-14T20:04:35.861286
| 2020-05-12T16:45:21
| 2020-05-12T16:45:21
| 257,435,731
| 0
| 0
| null | 2021-06-10T22:53:55
| 2020-04-21T00:08:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,317
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for ScrapyUdn project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import sys
import os
import django
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), ".."))
os.environ["DJANGO_SETTINGS_MODULE"]= "ScrapyNews.settings"
django.setup()
BOT_NAME = 'ScrapyUdn'
SPIDER_MODULES = ['ScrapyUdn.spiders']
NEWSPIDER_MODULE = 'ScrapyUdn.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ScrapyUdn (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ScrapyUdn.middlewares.ScrapyudnSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ScrapyUdn.middlewares.ScrapyudnDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'ScrapyUdn.pipelines.ScrapyudnPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"valosz66842@gmail.com"
] |
valosz66842@gmail.com
|
483c301f953c3c537727a973289e3a0073e0e2cc
|
c6797c1fc28a8af86415772e4d4e4aa2520dfd4f
|
/polls/views.py
|
7166272d67947522c2ad42da1ba5cc683b51fe8e
|
[] |
no_license
|
kaeruco/basic_timer_test
|
b7759b02ae7fa447c34bfd63fb8ea39925805e21
|
268411581aadcffacb1f5fbb25ec568e0c55259a
|
refs/heads/master
| 2023-02-19T23:24:57.624281
| 2021-01-09T08:06:55
| 2021-01-09T08:06:55
| 328,108,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
#def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# #template = loader.get_template('polls/index.html')
# context = {'latest_question_list': latest_question_list}
#
# #return HttpResponse(template.render(context, request))
# return render(request, 'polls/index.html', context)
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
#"""Return the last five published questions."""
#return Question.objects.order_by('-pub_date')[:5]
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
#def detail(request, question_id):
# #try:
# # question = Question.objects.get(pk=question_id)
# #except Question.DoesNotExist:
# # raise Http404("Question does not exist")
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
#def results(request, question_id):
# #response = "You're looking at the results of question %s."
# #return HttpResponse(response % question_id)
#
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question': question})
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def semitimer(request):
return render(request, 'polls/semitimer.html', {})
def semitimer0(request):
return render(request, 'polls/semitimer0.html', {})
def basic_timer(request):
return render(request, 'polls/basic_timer.html', {})
|
[
"tom@example.com"
] |
tom@example.com
|
4cf47cf2f86dade167984e4feeec446abe091806
|
7f2e38505200032dcc337cc18d22bd6bcaa00d2e
|
/call.py
|
231398bf0e0c8ef97b22eec4e96f9741958600d6
|
[] |
no_license
|
lestate44/535final
|
b210be5013ad283318e5fd28383e8cebb40a4083
|
6b050605a75e1e7e683f4317b1366531deef50c6
|
refs/heads/master
| 2021-01-20T16:11:07.705622
| 2017-05-10T05:56:14
| 2017-05-10T05:56:14
| 90,825,465
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
import numpy as np
import lda
import lda.datasets
import math
import matplotlib.pyplot as plt
import operator
docfile='src/kos1000.txt'
vocfile='src/kosvoc.txt'
def loadv():
f=open(vocfile,'r')
vocab={}
i=1
for line in f.readlines():
line=line.strip('\n')
vocab[i]=line
i=i+1
return vocab
dict=loadv()
f = open(docfile, 'r')
file=open(str('src/ori.txt'),'w')
for line in f.readlines():
line = line.split()
st = int(line[1])
if dict.has_key(st):
file.write(line[0]+' '+dict[st]+'\n')
f.close()
file.close()
|
[
"kaichen.nyc@gmail.com"
] |
kaichen.nyc@gmail.com
|
3984f48344d482c4b43d4c48cf7947780c1092f0
|
a83557042d61b793545380d87b312b16c0f62da4
|
/data_cleanning_script/host_clean.py
|
90e9c781a7b0fada5c51d6392277753ce762a479
|
[] |
no_license
|
ashleyxu99/safe_travel
|
748214d9b60fcae31f1585898109cd79459767c8
|
e1fc44e570cd86c30e9f095ac59dedd0787553df
|
refs/heads/main
| 2023-07-05T10:50:12.591126
| 2021-08-16T03:02:29
| 2021-08-16T03:02:29
| 396,545,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
import csv
with open('host.csv', 'r') as file:
csvreader = csv.reader(file)
host = []
host_ids = []
for lines in csvreader:
id = lines[0]
if id not in host_ids:
host_ids.append(id)
host.append(lines)
print(host[0])
fields = ['HostId', 'HostName', 'HostSince', 'HostResponseRate',
'HostIsSuperhost', 'HostNeighborhood', 'HostListingCount']
with open('host_clean.csv', 'w') as output:
csvwriter = csv.writer(output)
csvwriter.writerow(fields)
for i in range(1, len(host)):
csvwriter.writerow(host[i])
|
[
"noreply@github.com"
] |
ashleyxu99.noreply@github.com
|
14b577ec46ee9d7038f9abbef96019ef6af5fd26
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/RiskFinishLabel.py
|
70510d2ed4724524faa93b6970839d177175fd54
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RiskFinishLabel(object):
def __init__(self):
self._code = None
self._label = None
self._path = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.label:
if hasattr(self.label, 'to_alipay_dict'):
params['label'] = self.label.to_alipay_dict()
else:
params['label'] = self.label
if self.path:
if hasattr(self.path, 'to_alipay_dict'):
params['path'] = self.path.to_alipay_dict()
else:
params['path'] = self.path
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RiskFinishLabel()
if 'code' in d:
o.code = d['code']
if 'label' in d:
o.label = d['label']
if 'path' in d:
o.path = d['path']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
086d71d8403da4e51cc18131dfe0d147a579032b
|
cf99bd701a390d2911f87350a4ddcb15cdf82365
|
/common/migrations/0002_client.py
|
323790608e2f2c8f11a60f31b9fc2622a97f21b8
|
[] |
no_license
|
prescience-labs/auth-service
|
7a7d6bb9d5fca742c5588774c7ba2ba008d5057a
|
951e6ea291041f0a0dcd2349ca0232e2a8b3b467
|
refs/heads/master
| 2022-12-10T04:43:19.932832
| 2020-01-09T01:48:31
| 2020-01-09T01:48:31
| 202,735,903
| 0
| 0
| null | 2022-12-08T06:07:34
| 2019-08-16T13:50:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
# Generated by Django 2.2.4 on 2019-10-14 12:47
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=255, verbose_name='name')),
('token', models.CharField(default=uuid.UUID('c2b3f047-dda0-4435-8a47-0051b9273c77'), max_length=255, unique=True, verbose_name='client token')),
('secret', models.CharField(default=uuid.UUID('2fcbb051-330b-4391-a870-b803c62db4a9'), max_length=255, unique=True, verbose_name='client token')),
],
options={
'abstract': False,
},
),
]
|
[
"jdick@solutionreach.com"
] |
jdick@solutionreach.com
|
768de293d85f995da0ae44763caa347d16ad330d
|
027800302a2a5cb4638c608832deab5280131572
|
/send.py
|
0450d031709a65a622145030123988d1b87de685
|
[] |
no_license
|
luigitercero/consumirRabbitSopes
|
f5e81975890846e1d8f75e7982a935ef76a20ee8
|
61e3e5abf0aa78e6eb33c27e221473149c5c0a05
|
refs/heads/master
| 2020-03-22T03:56:23.650293
| 2018-07-04T13:45:25
| 2018-07-04T13:45:25
| 139,461,698
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
#!/usr/bin/env python
import pika
credentials = pika.PlainCredentials('admin', '123')
parameters = pika.ConnectionParameters('35.229.58.120',
5672,
'/',
credentials)
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World!')
print(" [x] Sent 'Hello World!'")
connection.close()
|
[
"luigitercero3@gmail.com"
] |
luigitercero3@gmail.com
|
617b9bc9422490bfc0bace688f400d0718d6e2b6
|
ccf218c6ec57e48316f81238c029a542a0ddd905
|
/django-sites/mooncouch/journal/views.py
|
ae4dad50011418bb63eb774c42a15308fb518a3d
|
[] |
no_license
|
mooncouch/hello
|
5d3d925cc9bf00e74e2b5c90b060d3b4f32b093f
|
8991ece7c480f6aeb8202293d334c1ff49fe8911
|
refs/heads/master
| 2020-03-27T20:14:42.300618
| 2018-09-23T03:50:04
| 2018-09-23T03:50:04
| 147,051,705
| 0
| 0
| null | 2018-09-10T03:36:05
| 2018-09-02T03:36:51
|
Python
|
UTF-8
|
Python
| false
| false
| 611
|
py
|
from django.http import HttpResponse
def index(request):
return HttpResponse("You're at the journal index.")
def detail(request, author_id):
return HttpResponse("you're looking at author %s." %author_id)
def entry_sum(request, author_id):
response = "You're looking at the entries of author %s."
return HttpResponse(response % author_id)
def summary(request, author_id):
return HttpResponse("You're the summary of author %s." % author_id)
def entry(request, author_id, entry_id):
response = "You're looking at the details of entry %s."
return HttpResponse(response % entry_id)
|
[
"noreply@github.com"
] |
mooncouch.noreply@github.com
|
28784c69c73d492028e87b1c012edc1da26e063b
|
49492055fb670efae0aa0e554570272e6fbffc52
|
/document.py
|
417f1f785afe953faecd31913d4a43dcfa3120d2
|
[] |
no_license
|
dima-kov/parser
|
fc713b1510f4ed318ef2850ce4743d10309b8bf3
|
2754101f4e44ef9fc01f5516b6f275f48e9b811c
|
refs/heads/master
| 2020-03-23T10:23:07.783054
| 2018-07-19T18:20:08
| 2018-07-19T18:20:08
| 141,440,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
import datetime
from mongoengine import QuerySet
from mongoengine import document
from mongoengine import fields
class PageQuerySet(QuerySet):
def create(self, **kwargs):
page = Page(**kwargs)
page.save()
return page
def exists(self, **kwargs):
return self.filter(**kwargs) .count() > 0
def get_non_parsed(self):
return self.filter(parsed=None)
def get_parsed(self):
return self.filter(parsed__ne=None)
def get_or_create(self, url):
pages = self.filter(url=url)
if pages.count() == 0:
page = Page(url=url)
page.save()
return page
return pages.first()
class Page(document.Document):
url = fields.StringField(required=True, max_length=6000, unique=True)
parsed = fields.DateTimeField(required=False)
created = fields.DateTimeField(default=datetime.datetime.utcnow)
meta = {'queryset_class': PageQuerySet}
def parsed_now(self):
self.parsed = datetime.datetime.utcnow()
|
[
"dima.kovalchuk.v@gmail.com"
] |
dima.kovalchuk.v@gmail.com
|
8402564387ae855252d09edce5c9e530bfb1827c
|
ce4c2d2d88234c6dfa5a74d1c8997101119c5d14
|
/arsenalqa/models.py
|
9547964871c25230b73553b04c71fbbfa1c6b034
|
[
"Apache-2.0"
] |
permissive
|
ovoron1/arsenalqa
|
dea7fd1099310a6a9f0019b8e92311331ab185a0
|
c8c0af0da1b4b94687cbd36c1d2bc3eac5596cdc
|
refs/heads/main
| 2023-01-14T10:35:35.013006
| 2020-11-19T18:43:14
| 2020-11-19T18:43:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,338
|
py
|
"""
* Copyright 2020 Wargaming Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from collections.abc import MutableMapping
from arsenalqa.base.iterables import ListObject
from arsenalqa.base.utils import Dummy
from arsenalqa.fields import Field
class DataDescriptor:
def __get__(self, instance, owner):
if instance is not None:
if getattr(instance, '_data', None) is None:
setattr(instance, '_data', {})
return getattr(instance, '_data')
return {}
def __set__(self, instance, value):
setattr(instance, '_data', value)
class Model(MutableMapping):
data = DataDescriptor()
_filter_fields = frozenset()
_fields = frozenset()
def __init__(self, **kwargs):
"""
Parameters:
kwargs: key-value for flexible instance attributes addition
"""
for key, value in kwargs.items():
self[key] = value
@classmethod
def transform_incoming_data(cls, data):
"""Method for transform incoming data from transport
Args:
data(Any): argument of any type, which should be represented in list or dict.
Returns:
Union[list, dict]
"""
return data
@classmethod
def _wrap(cls, data):
new_class = cls()
new_class.data = data
return new_class
@classmethod
def wrap(cls, data):
"""Method for wrap incoming data to new instance of model or list of model instances
Args:
data(Any): data for wrap. Uses with transform_incoming_data method
Returns:
Union[Model, List[Model]]
"""
data = cls.transform_incoming_data(data)
if isinstance(data, list):
return ListObject(wrapper=cls._wrap, data=data)
return cls._wrap(data)
def alter(self, keep_fields=None, clean_fields=None, **new_fields) -> 'Model':
"""Creates new model type(AlteredModel) and new instance of AlteredModel.
Add new or clear current fields for model.
Args:
keep_fields(list): if exists all fields except "keep_fields" will be removed
clean_fields(list): if exists all fields except "clean_fields" will be presented
**new_fields: add new fields with value to AlteredModel
Returns:
AlteredModel
>>> class MyModel(Model):
>>> id = Field()
>>> name = Field()
>>>
>>> my_model = MyModel()
>>> my_model.id = 1
>>> my_model.name = 'test'
>>> print(my_model.alter(keep_fields=['id']))
M:{'id': 1}
>>> print(my_model.alter(clean_fields=['id']))
M:{'name': 'test'}
>>> print(my_model.alter(keep_fields=['id'], new_field='new value'))
M:{'new_field': 'new value', 'id': 1}
"""
fields = self._fields
if clean_fields:
fields -= frozenset(clean_fields)
if keep_fields:
fields = keep_fields
AlteredModel = type(
'Altered{}'.format(self.__class__.__name__),
(self.__class__,),
{i: Field() for i in new_fields.keys()}
)
model = AlteredModel.wrap(new_fields)
[setattr(model, field, getattr(self, field)) for field in fields]
return model
@classmethod
def get_field_key(cls, field):
""" Returns key name from dict data
>>> class MyModel(Model):
>>>
>>> id = Field(name='name+id')
>>>
>>> my_model = MyModel()
>>> my_model.id = 1
>>> print(my_model)
M:{'name+id': 1}
>>> print(my_model.id)
1
>>> print(MyModel.get_field_key('id'))
name+id
"""
return cls.__dict__[field].name
def model_filter(self):
"""
>>> class MyModel(Model):
>>> id = Field(filter_field=True, name='not_id')
>>> name = Field()
>>>
>>> my_model = MyModel()
>>> my_model.id = 1
>>> my_model.name = 'test'
>>> print(my_model.model_filter())
{'id': 1}
"""
dct = {}
for i in self._filter_fields:
try:
dct[i] = getattr(self, i)
except AttributeError:
pass
return dct
def data_filter(self):
"""
>>> class MyModel(Model):
>>> id = Field(filter_field=True, name='not_id')
>>> name = Field()
>>>
>>> my_model = MyModel()
>>> my_model.id = 1
>>> my_model.name = 'test'
>>> print(my_model.data_filter())
{'not_id': 1}
"""
dct = {}
for i in self._filter_fields:
name = self.get_field_key(i)
try:
dct[name] = self.data[name]
except KeyError:
pass
return dct
def __str__(self):
return 'M:{}'.format(self.data)
def __repr__(self):
return self.__str__()
def __len__(self):
return len(self.data)
def __iter__(self):
for elem in self._fields:
if getattr(self, elem, Dummy) != Dummy:
yield elem
def __getitem__(self, item):
try:
return getattr(self, item)
except AttributeError:
raise KeyError(f'Model: {self.__class__.__name__} has no Field: {item}')
def __setitem__(self, key, value):
if key not in self._fields:
raise KeyError(f'Model: {self.__class__.__name__} has no Field: {key}')
setattr(self, key, value)
def __delitem__(self, key):
try:
delattr(self, key)
except AttributeError:
raise KeyError(f'Model: {self.__class__.__name__} has no Field: {key}')
|
[
"r_romanyuk@wargaming.net"
] |
r_romanyuk@wargaming.net
|
a852a43246a94a9b6e75f57bdf385b9ca4208862
|
512f07d1c9485fd1d533ee015d659edcaea7acf9
|
/basic-syntax/divmod.py
|
22a0b1fdaa7ac400e05a76ab41e35c68618de43d
|
[] |
no_license
|
SathvikPN/beginPython
|
7d173407bacab7e1a7aa685497ec08da4055693a
|
81f8aa3132475c9d369e70e211537744ff6aeb07
|
refs/heads/master
| 2023-01-19T14:16:45.922701
| 2020-11-24T14:10:46
| 2020-11-24T14:10:46
| 298,954,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
quotient, remainder = divmod(a, b)
|
[
"noreply@github.com"
] |
SathvikPN.noreply@github.com
|
f254f69848a95f326b53f8ce3d6c7f556a3e272f
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/jobs/batch_jobs/blog_post_search_indexing_jobs.py
|
9b9440e7125be3ee12d6e27e9720636aeb7227bd
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,766
|
py
|
# coding: utf-8
#
# Copyright 2022 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs that are run by CRON scheduler."""
from __future__ import annotations
from core.domain import blog_domain
from core.domain import blog_services
from core.domain import search_services
from core.jobs import base_jobs
from core.jobs.io import ndb_io
from core.jobs.transforms import job_result_transforms
from core.jobs.types import job_run_result
from core.platform import models
import apache_beam as beam
import result
from typing import Final, Iterable, List
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import blog_models
from mypy_imports import search_services as platform_search_services
(blog_models,) = models.Registry.import_models([models.Names.BLOG])
platform_search_services = models.Registry.import_search_services()
class IndexBlogPostsInSearchJob(base_jobs.JobBase):
"""Job that indexes the blog posts in Elastic Search."""
MAX_BATCH_SIZE: Final = 1000
def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
"""Returns a PCollection of 'SUCCESS' or 'FAILURE' results from
the Elastic Search.
Returns:
PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from
the Elastic Search.
"""
return (
self.pipeline
| 'Get all non-deleted models' >> (
ndb_io.GetModels(
blog_models.BlogPostSummaryModel.get_all(
include_deleted=False
)
))
| 'Convert BlogPostSummaryModels to domain objects' >> beam.Map(
blog_services.get_blog_post_summary_from_model)
| 'Split models into batches' >> beam.transforms.util.BatchElements(
max_batch_size=self.MAX_BATCH_SIZE)
| 'Index batches of models' >> beam.ParDo(
IndexBlogPostSummaries())
| 'Count the output' >> (
job_result_transforms.ResultsToJobRunResults())
)
# TODO(#15613): Here we use MyPy ignore because the incomplete typing of
# apache_beam library and absences of stubs in Typeshed, forces MyPy to
# assume that PTransform class is of type Any. Thus to avoid MyPy's error
# (Class cannot subclass 'PTransform' (has type 'Any')), we added an
# ignore here.
class IndexBlogPostSummaries(beam.DoFn): # type: ignore[misc]
"""DoFn to index blog post summaries."""
def process(
self, blog_post_summaries: List[blog_domain.BlogPostSummary]
) -> Iterable[result.Result[None, Exception]]:
"""Index blog post summaries and catch any errors.
Args:
blog_post_summaries: list(BlogPostSummaries). List of Blog Post
Summary domain objects to be indexed.
Yields:
JobRunResult. List containing one element, which is either SUCCESS,
or FAILURE.
"""
try:
search_services.index_blog_post_summaries(
blog_post_summaries)
for _ in blog_post_summaries:
yield result.Ok()
except platform_search_services.SearchException as e:
yield result.Err(e)
|
[
"noreply@github.com"
] |
oppia.noreply@github.com
|
02106294b4d4b980e76f0077bd730aa8cb529c27
|
9c14bb4d3029a9fff23cf0d3e9fdce9ca4e369ab
|
/prettyqt/widgets/composed/imageviewer.py
|
ac1daae24ae902a88755ea0c2d5992f940896d16
|
[
"MIT"
] |
permissive
|
fossabot/PrettyQt
|
0e1ae074ca0776fa02ee0b8e6c04f9d545408855
|
d435b8d8c68d16c704c39972457497c93741859f
|
refs/heads/master
| 2020-05-14T16:50:48.896440
| 2019-04-17T11:48:25
| 2019-04-17T11:48:25
| 181,880,405
| 0
| 0
| null | 2019-04-17T11:48:19
| 2019-04-17T11:48:19
| null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
# -*- coding: utf-8 -*-
"""
@author: Philipp Temminghoff
"""
import pathlib
import sys
from prettyqt import widgets
class ImageViewer(widgets.Widget):
def __init__(self, title="", parent=None):
super().__init__(parent)
self.title = title
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
path = pathlib.Path("decisiontree.png")
self.image = widgets.Label.image_from_path(path, parent=self)
self.show()
if __name__ == "__main__":
app = widgets.Application(sys.argv)
ex = ImageViewer()
sys.exit(app.exec_())
|
[
"phil65@kodi.tv"
] |
phil65@kodi.tv
|
a7fb7c75827d25da06f35074ee9c7c1030698b40
|
8236101e4b42f03dac2063a3ad0bd588e41de5f1
|
/parsers/LibriSpeech_parser.py
|
51ea53648d04a06ffe418cce5af450aa37d67185
|
[] |
no_license
|
tzuhsien/Voice-conversion-evaluation
|
e803fbe7dee52840f4965537037efb5b522e7be0
|
8e15b75273c5cd1073bafc562cc8d0d10696dc1e
|
refs/heads/master
| 2023-06-14T05:29:15.976029
| 2021-07-11T10:31:15
| 2021-07-11T10:31:15
| 355,152,491
| 36
| 5
| null | 2021-05-29T13:32:29
| 2021-04-06T10:45:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,889
|
py
|
"""LibriSpeech Corpus parser."""
import random
from collections import defaultdict
from pathlib import Path, PurePosixPath
import librosa
from librosa.util import find_files
class Parser:
"""Parser"""
def __init__(self, root):
seed = random.randint(1, 1000)
random.seed(seed)
wav_files = []
metadata = defaultdict(list)
speaker_dirs = [
speaker_dir for speaker_dir in Path(root).iterdir() if speaker_dir.is_dir()]
for speaker_dir in speaker_dirs:
for wav_file in find_files(speaker_dir):
wav_file = str(PurePosixPath(wav_file).relative_to(root))
wav_files.append(wav_file)
speaker_id = self.get_speaker(wav_file)
metadata[speaker_id].append(wav_file)
self.root = root
self.seed = seed
self.wav_files = wav_files
self.metadata = metadata
def set_random_seed(self, seed):
"""Set random seed"""
random.seed(seed)
self.seed = seed
def sample_source(self):
"""Sample as source"""
wav_file = random.choice(self.wav_files)
speaker_id = self.get_speaker(wav_file)
content = self.get_content(wav_file)
wav, _ = librosa.load(Path(self.root) / wav_file)
wav, sample_rate = librosa.load(Path(self.root) / wav_file)
second = len(wav) / sample_rate
return wav_file, speaker_id, content, second
def sample_targets(self, number, ignore_id):
"""Sample as target"""
negative_speakers = list(self.metadata.keys())
try:
negative_speakers.remove(ignore_id)
except ValueError:
pass
speaker_id = random.choice(negative_speakers)
wav_files = random.choices(self.metadata[speaker_id], k=number)
return wav_files, speaker_id
def get_content(self, file_path):
"""Get text for LibriSpeech Corpus."""
wav_name = Path(file_path).stem
speaker_id, chapter_id, _ = wav_name.split("-")
file_name = speaker_id + "-" + chapter_id + ".trans.txt"
file_path = Path(self.root) / speaker_id / chapter_id / file_name
with file_path.open() as file_text:
for line in file_text:
fileid_text, utterance = line.strip().split(" ", 1)
if wav_name == fileid_text:
break
else:
# Translation not found
raise FileNotFoundError(
"Translation not found for " + wav_name)
return utterance
def get_speaker_number(self):
"""Get the number of speaker."""
return len(self.metadata)
@classmethod
def get_speaker(cls, file_path):
"""Get speaker for LibriSpeech Corpus."""
speaker_id = Path(file_path).stem.split("-")[0]
return speaker_id
|
[
"tzuhsien1016@gmail.com"
] |
tzuhsien1016@gmail.com
|
e0a0bfe842755d832225f9678234d2d59ed708fb
|
90047daeb462598a924d76ddf4288e832e86417c
|
/build/android/pylib/utils/emulator.py
|
a5aa544b4c70ec402b7a00d23e4684e671fb52db
|
[
"BSD-3-Clause"
] |
permissive
|
massbrowser/android
|
99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080
|
a9c4371682c9443d6e1d66005d4db61a24a9617c
|
refs/heads/master
| 2022-11-04T21:15:50.656802
| 2017-06-08T12:31:39
| 2017-06-08T12:31:39
| 93,747,579
| 2
| 2
|
BSD-3-Clause
| 2022-10-31T10:34:25
| 2017-06-08T12:36:07
| null |
UTF-8
|
Python
| false
| false
| 17,458
|
py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to start and stop Android emulator.
Emulator: The class provides the methods to launch/shutdown the emulator with
the android virtual device named 'avd_armeabi' .
"""
import logging
import os
import signal
import subprocess
import time
from devil.android import device_errors
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.utils import cmd_helper
from pylib import constants
from pylib import pexpect
from pylib.utils import time_profile
# Default sdcard size in the format of [amount][unit]
DEFAULT_SDCARD_SIZE = '512M'
# Default internal storage (MB) of emulator image
DEFAULT_STORAGE_SIZE = '1024M'
# Each emulator has 60 secs of wait time for launching
_BOOT_WAIT_INTERVALS = 6
_BOOT_WAIT_INTERVAL_TIME = 10
# Path for avd files and avd dir
_BASE_AVD_DIR = os.path.expanduser(os.path.join('~', '.android', 'avd'))
_TOOLS_ANDROID_PATH = os.path.join(constants.ANDROID_SDK_ROOT,
'tools', 'android')
# Template used to generate config.ini files for the emulator
CONFIG_TEMPLATE = """avd.ini.encoding=ISO-8859-1
hw.dPad=no
hw.lcd.density=320
sdcard.size={sdcard.size}
hw.cpu.arch={hw.cpu.arch}
hw.device.hash=-708107041
hw.camera.back=none
disk.dataPartition.size=800M
hw.gpu.enabled={gpu}
skin.path=720x1280
skin.dynamic=yes
hw.keyboard=yes
hw.ramSize=1024
hw.device.manufacturer=Google
hw.sdCard=yes
hw.mainKeys=no
hw.accelerometer=yes
skin.name=720x1280
abi.type={abi.type}
hw.trackBall=no
hw.device.name=Galaxy Nexus
hw.battery=yes
hw.sensors.proximity=yes
image.sysdir.1=system-images/android-{api.level}/default/{abi.type}/
hw.sensors.orientation=yes
hw.audioInput=yes
hw.camera.front=none
hw.gps=yes
vm.heapSize=128
{extras}"""
CONFIG_REPLACEMENTS = {
'x86': {
'{hw.cpu.arch}': 'x86',
'{abi.type}': 'x86',
'{extras}': ''
},
'arm': {
'{hw.cpu.arch}': 'arm',
'{abi.type}': 'armeabi-v7a',
'{extras}': 'hw.cpu.model=cortex-a8\n'
},
'mips': {
'{hw.cpu.arch}': 'mips',
'{abi.type}': 'mips',
'{extras}': ''
}
}
class EmulatorLaunchException(Exception):
"""Emulator failed to launch."""
pass
def WaitForEmulatorLaunch(num):
"""Wait for emulators to finish booting
Emulators on bots are launch with a separate background process, to avoid
running tests before the emulators are fully booted, this function waits for
a number of emulators to finish booting
Arg:
num: the amount of emulators to wait.
"""
for _ in range(num*_BOOT_WAIT_INTERVALS):
emulators = [device_utils.DeviceUtils(a)
for a in adb_wrapper.AdbWrapper.Devices()
if a.is_emulator]
if len(emulators) >= num:
logging.info('All %d emulators launched', num)
return
logging.info(
'Waiting for %d emulators, %d of them already launched', num,
len(emulators))
time.sleep(_BOOT_WAIT_INTERVAL_TIME)
raise Exception("Expected %d emulators, %d launched within time limit" %
(num, len(emulators)))
def KillAllEmulators():
"""Kill all running emulators that look like ones we started.
There are odd 'sticky' cases where there can be no emulator process
running but a device slot is taken. A little bot trouble and we're out of
room forever.
"""
logging.info('Killing all existing emulators and existing the program')
emulators = [device_utils.DeviceUtils(a)
for a in adb_wrapper.AdbWrapper.Devices()
if a.is_emulator]
if not emulators:
return
for e in emulators:
e.adb.Emu(['kill'])
logging.info('Emulator killing is async; give a few seconds for all to die.')
for _ in range(10):
if not any(a.is_emulator for a in adb_wrapper.AdbWrapper.Devices()):
return
time.sleep(1)
def DeleteAllTempAVDs():
"""Delete all temporary AVDs which are created for tests.
If the test exits abnormally and some temporary AVDs created when testing may
be left in the system. Clean these AVDs.
"""
logging.info('Deleting all the avd files')
avds = device_utils.GetAVDs()
if not avds:
return
for avd_name in avds:
if 'run_tests_avd' in avd_name:
cmd = [_TOOLS_ANDROID_PATH, '-s', 'delete', 'avd', '--name', avd_name]
cmd_helper.RunCmd(cmd)
logging.info('Delete AVD %s', avd_name)
class PortPool(object):
"""Pool for emulator port starting position that changes over time."""
_port_min = 5554
_port_max = 5585
_port_current_index = 0
@classmethod
def port_range(cls):
"""Return a range of valid ports for emulator use.
The port must be an even number between 5554 and 5584. Sometimes
a killed emulator "hangs on" to a port long enough to prevent
relaunch. This is especially true on slow machines (like a bot).
Cycling through a port start position helps make us resilient."""
ports = range(cls._port_min, cls._port_max, 2)
n = cls._port_current_index
cls._port_current_index = (n + 1) % len(ports)
return ports[n:] + ports[:n]
def _GetAvailablePort():
"""Returns an available TCP port for the console."""
used_ports = []
emulators = [device_utils.DeviceUtils(a)
for a in adb_wrapper.AdbWrapper.Devices()
if a.is_emulator]
for emulator in emulators:
used_ports.append(emulator.adb.GetDeviceSerial().split('-')[1])
for port in PortPool.port_range():
if str(port) not in used_ports:
return port
def LaunchTempEmulators(emulator_count, abi, api_level, enable_kvm=False,
kill_and_launch=True, sdcard_size=DEFAULT_SDCARD_SIZE,
storage_size=DEFAULT_STORAGE_SIZE, wait_for_boot=True,
headless=False):
"""Create and launch temporary emulators and wait for them to boot.
Args:
emulator_count: number of emulators to launch.
abi: the emulator target platform
api_level: the api level (e.g., 19 for Android v4.4 - KitKat release)
wait_for_boot: whether or not to wait for emulators to boot up
headless: running emulator with no ui
Returns:
List of emulators.
"""
emulators = []
for n in xrange(emulator_count):
t = time_profile.TimeProfile('Emulator launch %d' % n)
# Creates a temporary AVD.
avd_name = 'run_tests_avd_%d' % n
logging.info('Emulator launch %d with avd_name=%s and api=%d',
n, avd_name, api_level)
emulator = Emulator(avd_name, abi, enable_kvm=enable_kvm,
sdcard_size=sdcard_size, storage_size=storage_size,
headless=headless)
emulator.CreateAVD(api_level)
emulator.Launch(kill_all_emulators=(n == 0 and kill_and_launch))
t.Stop()
emulators.append(emulator)
# Wait for all emulators to boot completed.
if wait_for_boot:
for emulator in emulators:
emulator.ConfirmLaunch(True)
logging.info('All emulators are fully booted')
return emulators
def LaunchEmulator(avd_name, abi, kill_and_launch=True, enable_kvm=False,
sdcard_size=DEFAULT_SDCARD_SIZE,
storage_size=DEFAULT_STORAGE_SIZE, headless=False):
"""Launch an existing emulator with name avd_name.
Args:
avd_name: name of existing emulator
abi: the emulator target platform
headless: running emulator with no ui
Returns:
emulator object.
"""
logging.info('Specified emulator named avd_name=%s launched', avd_name)
emulator = Emulator(avd_name, abi, enable_kvm=enable_kvm,
sdcard_size=sdcard_size, storage_size=storage_size,
headless=headless)
emulator.Launch(kill_all_emulators=kill_and_launch)
emulator.ConfirmLaunch(True)
return emulator
class Emulator(object):
"""Provides the methods to launch/shutdown the emulator.
The emulator has the android virtual device named 'avd_armeabi'.
The emulator could use any even TCP port between 5554 and 5584 for the
console communication, and this port will be part of the device name like
'emulator-5554'. Assume it is always True, as the device name is the id of
emulator managed in this class.
Attributes:
emulator: Path of Android's emulator tool.
popen: Popen object of the running emulator process.
device: Device name of this emulator.
"""
# Signals we listen for to kill the emulator on
_SIGNALS = (signal.SIGINT, signal.SIGHUP)
# Time to wait for an emulator launch, in seconds. This includes
# the time to launch the emulator and a wait-for-device command.
_LAUNCH_TIMEOUT = 120
# Timeout interval of wait-for-device command before bouncing to a a
# process life check.
_WAITFORDEVICE_TIMEOUT = 5
# Time to wait for a 'wait for boot complete' (property set on device).
_WAITFORBOOT_TIMEOUT = 300
def __init__(self, avd_name, abi, enable_kvm=False,
sdcard_size=DEFAULT_SDCARD_SIZE,
storage_size=DEFAULT_STORAGE_SIZE, headless=False):
"""Init an Emulator.
Args:
avd_name: name of the AVD to create
abi: target platform for emulator being created, defaults to x86
"""
android_sdk_root = constants.ANDROID_SDK_ROOT
self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
self.android = _TOOLS_ANDROID_PATH
self.popen = None
self.device_serial = None
self.abi = abi
self.avd_name = avd_name
self.sdcard_size = sdcard_size
self.storage_size = storage_size
self.enable_kvm = enable_kvm
self.headless = headless
@staticmethod
def _DeviceName():
"""Return our device name."""
port = _GetAvailablePort()
return ('emulator-%d' % port, port)
def CreateAVD(self, api_level):
"""Creates an AVD with the given name.
Args:
api_level: the api level of the image
Return avd_name.
"""
if self.abi == 'arm':
abi_option = 'armeabi-v7a'
elif self.abi == 'mips':
abi_option = 'mips'
else:
abi_option = 'x86'
api_target = 'android-%s' % api_level
avd_command = [
self.android,
'--silent',
'create', 'avd',
'--name', self.avd_name,
'--abi', abi_option,
'--target', api_target,
'--sdcard', self.sdcard_size,
'--force',
]
avd_cmd_str = ' '.join(avd_command)
logging.info('Create AVD command: %s', avd_cmd_str)
avd_process = pexpect.spawn(avd_cmd_str)
# Instead of creating a custom profile, we overwrite config files.
avd_process.expect('Do you wish to create a custom hardware profile')
avd_process.sendline('no\n')
avd_process.expect('Created AVD \'%s\'' % self.avd_name)
# Replace current configuration with default Galaxy Nexus config.
ini_file = os.path.join(_BASE_AVD_DIR, '%s.ini' % self.avd_name)
new_config_ini = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
'config.ini')
# Remove config files with defaults to replace with Google's GN settings.
os.unlink(ini_file)
os.unlink(new_config_ini)
# Create new configuration files with Galaxy Nexus by Google settings.
with open(ini_file, 'w') as new_ini:
new_ini.write('avd.ini.encoding=ISO-8859-1\n')
new_ini.write('target=%s\n' % api_target)
new_ini.write('path=%s/%s.avd\n' % (_BASE_AVD_DIR, self.avd_name))
new_ini.write('path.rel=avd/%s.avd\n' % self.avd_name)
custom_config = CONFIG_TEMPLATE
replacements = CONFIG_REPLACEMENTS[self.abi]
for key in replacements:
custom_config = custom_config.replace(key, replacements[key])
custom_config = custom_config.replace('{api.level}', str(api_level))
custom_config = custom_config.replace('{sdcard.size}', self.sdcard_size)
custom_config.replace('{gpu}', 'no' if self.headless else 'yes')
with open(new_config_ini, 'w') as new_config_ini:
new_config_ini.write(custom_config)
return self.avd_name
def _DeleteAVD(self):
"""Delete the AVD of this emulator."""
avd_command = [
self.android,
'--silent',
'delete',
'avd',
'--name', self.avd_name,
]
logging.info('Delete AVD command: %s', ' '.join(avd_command))
cmd_helper.RunCmd(avd_command)
def ResizeAndWipeAvd(self, storage_size):
"""Wipes old AVD and creates new AVD of size |storage_size|.
This serves as a work around for '-partition-size' and '-wipe-data'
"""
userdata_img = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
'userdata.img')
userdata_qemu_img = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
'userdata-qemu.img')
resize_cmd = ['resize2fs', userdata_img, '%s' % storage_size]
logging.info('Resizing userdata.img to ideal size')
cmd_helper.RunCmd(resize_cmd)
wipe_cmd = ['cp', userdata_img, userdata_qemu_img]
logging.info('Replacing userdata-qemu.img with the new userdata.img')
cmd_helper.RunCmd(wipe_cmd)
def Launch(self, kill_all_emulators):
"""Launches the emulator asynchronously. Call ConfirmLaunch() to ensure the
emulator is ready for use.
If fails, an exception will be raised.
"""
if kill_all_emulators:
KillAllEmulators() # just to be sure
self._AggressiveImageCleanup()
(self.device_serial, port) = self._DeviceName()
self.ResizeAndWipeAvd(storage_size=self.storage_size)
emulator_command = [
self.emulator,
# Speed up emulator launch by 40%. Really.
'-no-boot-anim',
]
if self.headless:
emulator_command.extend([
'-no-skin',
'-no-window'
])
else:
emulator_command.extend([
'-gpu', 'on'
])
emulator_command.extend([
# Use a familiar name and port.
'-avd', self.avd_name,
'-port', str(port),
# all the argument after qemu are sub arguments for qemu
'-qemu', '-m', '1024',
])
if self.abi == 'x86' and self.enable_kvm:
emulator_command.extend([
# For x86 emulator --enable-kvm will fail early, avoiding accidental
# runs in a slow mode (i.e. without hardware virtualization support).
'--enable-kvm',
])
logging.info('Emulator launch command: %s', ' '.join(emulator_command))
self.popen = subprocess.Popen(args=emulator_command,
stderr=subprocess.STDOUT)
self._InstallKillHandler()
@staticmethod
def _AggressiveImageCleanup():
"""Aggressive cleanup of emulator images.
Experimentally it looks like our current emulator use on the bot
leaves image files around in /tmp/android-$USER. If a "random"
name gets reused, we choke with a 'File exists' error.
TODO(jrg): is there a less hacky way to accomplish the same goal?
"""
logging.info('Aggressive Image Cleanup')
emulator_imagedir = '/tmp/android-%s' % os.environ['USER']
if not os.path.exists(emulator_imagedir):
return
for image in os.listdir(emulator_imagedir):
full_name = os.path.join(emulator_imagedir, image)
if 'emulator' in full_name:
logging.info('Deleting emulator image %s', full_name)
os.unlink(full_name)
def ConfirmLaunch(self, wait_for_boot=False):
"""Confirm the emulator launched properly.
Loop on a wait-for-device with a very small timeout. On each
timeout, check the emulator process is still alive.
After confirming a wait-for-device can be successful, make sure
it returns the right answer.
"""
seconds_waited = 0
number_of_waits = 2 # Make sure we can wfd twice
device = device_utils.DeviceUtils(self.device_serial)
while seconds_waited < self._LAUNCH_TIMEOUT:
try:
device.adb.WaitForDevice(
timeout=self._WAITFORDEVICE_TIMEOUT, retries=1)
number_of_waits -= 1
if not number_of_waits:
break
except device_errors.CommandTimeoutError:
seconds_waited += self._WAITFORDEVICE_TIMEOUT
device.adb.KillServer()
self.popen.poll()
if self.popen.returncode != None:
raise EmulatorLaunchException('EMULATOR DIED')
if seconds_waited >= self._LAUNCH_TIMEOUT:
raise EmulatorLaunchException('TIMEOUT with wait-for-device')
logging.info('Seconds waited on wait-for-device: %d', seconds_waited)
if wait_for_boot:
# Now that we checked for obvious problems, wait for a boot complete.
# Waiting for the package manager is sometimes problematic.
device.WaitUntilFullyBooted(timeout=self._WAITFORBOOT_TIMEOUT)
logging.info('%s is now fully booted', self.avd_name)
def Shutdown(self):
"""Shuts down the process started by launch."""
self._DeleteAVD()
if self.popen:
self.popen.poll()
if self.popen.returncode == None:
self.popen.kill()
self.popen = None
def _ShutdownOnSignal(self, _signum, _frame):
logging.critical('emulator _ShutdownOnSignal')
for sig in self._SIGNALS:
signal.signal(sig, signal.SIG_DFL)
self.Shutdown()
raise KeyboardInterrupt # print a stack
def _InstallKillHandler(self):
"""Install a handler to kill the emulator when we exit unexpectedly."""
for sig in self._SIGNALS:
signal.signal(sig, self._ShutdownOnSignal)
|
[
"xElvis89x@gmail.com"
] |
xElvis89x@gmail.com
|
81080dddd1dc963925b712b8617d42546f849962
|
d4811f4ba6b20d9cba2ae63736e58fe8d1de198a
|
/tests/serializing/wrappers/test_int64.py
|
d3ea0090079d2f956301c3591f3e0688bf7f6e5a
|
[
"MIT"
] |
permissive
|
mlga/schematics-proto3
|
f6de009cff5ac6ff096dbe784618001b3c3bd5fb
|
588fe5bc212e203688166638a1c52dfeda931403
|
refs/heads/master
| 2020-09-11T06:31:21.146154
| 2020-08-19T17:32:45
| 2020-08-19T17:32:45
| 221,972,314
| 0
| 0
|
MIT
| 2020-08-19T17:32:47
| 2019-11-15T17:28:45
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
# -*- coding:utf-8 -*-
from schematics_proto3 import types
from tests import schematics_proto3_tests_pb2 as pb2
from tests.serializing.wrappers import CommonWrappersTests
class TestInt64(CommonWrappersTests):
field_type_class = types.IntWrapperType
protobuf_msg_class = pb2.WrappedInt64
def get_value(self):
return 42
def get_zero_value(self):
return 0
|
[
"github@mlga.io"
] |
github@mlga.io
|
c7ce6a26eabd9e0321bd10daacd750f082343174
|
b8d2f095a4b7ea567ccc61ee318ba879318eec3d
|
/树 Tree/538. 把二叉搜索树转换为累加树.py
|
9a2100675571f2350424587e70a2d48bbd0aa325
|
[] |
no_license
|
f1amingo/leetcode-python
|
a3ef78727ae696fe2e94896258cfba1b7d58b1e3
|
b365ba85036e51f7a9e018767914ef22314a6780
|
refs/heads/master
| 2021-11-10T16:19:27.603342
| 2021-09-17T03:12:59
| 2021-09-17T03:12:59
| 205,813,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from util.ZTree import TreeNode
class Solution:
def convertBST(self, root: TreeNode) -> TreeNode:
def dfs(r: TreeNode):
if r:
dfs(r.right)
nonlocal total
total += r.val
r.val = total
dfs(r.left)
total = 0
dfs(root)
return root
|
[
"zsjperiod@foxmail.com"
] |
zsjperiod@foxmail.com
|
0c5e81e31f3423a12125f91838a1aa195b0987ba
|
ca47ebf432f787e0ae78a54afcd3c60d0af2d476
|
/GitProgs/152002016_PythonLabCode1_R_Parnika_Murty/Q2.py
|
1bd4495f446a8e5de2c579c00a17269c90c17d39
|
[] |
no_license
|
Parnika1102/My_Assignments
|
0659c70f8f8473107b49a611ee9d16823331c535
|
b0ecf3df0107c627944f5ef98f72996efdf42f37
|
refs/heads/master
| 2023-03-20T11:37:02.821148
| 2021-03-10T12:14:45
| 2021-03-10T12:14:45
| 344,998,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
#!/bin/python3
#Class Polygon with attributes numsides and area.
class Polygon:
#__init__() constructor.
def __init__(self,numSides,area):
#The class attributes "numSides" and "area".
self.numSides = numSides
self.area = area
#For the string representation of our object.
def __str__(self):
#To display error message if number of sides is less than 3.
if self.numSides<3 :
raise Exception("Number of sides should be atleast 3")
#To display error message if polygon has negative area.
elif self.area<0 :
raise Exception("Polygon should have postive area")
#To display details about the polygon.
else:
return "Polygon with % s sides and area % s" % (self.numSides, self.area)
try:
#Creating a polygon object with respective number of sides and area.
p1 = Polygon(1,23)
#Printing the object.
print(p1)
#Printing the exception type and respective message.
except Exception as e:
print(type(e))
print(e)
|
[
"email"
] |
email
|
43d762c0f1e0bca9d0979907653b5f49f247f87c
|
b1fc997484ea9c3b2be6b7151ddb6600821daea9
|
/UbiGraph-alpha-0.2.4-Linux64-Ubuntu-8.04/examples/Python/callback_webcrawler.py
|
895ece7611c78468b967690ec251c3425d4bfb5a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
JLiangWaterloo/sat
|
0e76f055fc9cd38e850f448ea49f9e730a423a46
|
df5f09cda06c8ab70ff9eacf098d8ce6bb9b91fc
|
refs/heads/master
| 2021-01-21T12:43:29.168066
| 2013-11-29T16:08:50
| 2013-11-29T16:08:50
| 13,397,989
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,402
|
py
|
# Omit from run_all.sh
import urllib, htmllib, formatter, xmlrpclib, random, os.path, time
from urlparse import urlparse
from urlparse import urljoin
from sets import Set
# Maximum vertices to render at once
max_vertices = 100
start_url = "http://planetmath.org/encyclopedia/GraphTheory.html"
# To avoid expanding too many urls, limit ourselves to those that
# look like "/enclopedia..."
def isUrlInteresting(url):
return (url.startswith("http://planetmath.org/encyclopedia/")
and url.find('#') == -1)
def brief_url(url):
url_without_server = url.replace('http://planetmath.org/encyclopedia/', '')
return url_without_server.replace('.html', '')
class LinksExtractor(htmllib.HTMLParser):
def __init__(self, formatter) : # class constructor
htmllib.HTMLParser.__init__(self, formatter) # base class constructor
self.links = [] # create an empty list for storing hyperlinks
def start_a(self, attrs) : # override handler of <A ...>...</A> tags
# process the attributes
if len(attrs) > 0 :
for attr in attrs :
if attr[0] == "href" : # ignore all non HREF attributes
self.links.append(attr[1]) # save the link info in the list
def get_links(self) : # return the list of extracted links
return self.links
# Create an object to represent our server.
server_url = 'http://127.0.0.1:20738/RPC2'
server = xmlrpclib.Server(server_url)
G = server.ubigraph
G.clear()
G.set_edge_style_attribute(0, "spline", "true")
G.set_edge_style_attribute(0, "arrow", "true")
# Set up some styles
unopenedVertexSize = 0.5
openedVertexSize = 1.5
unopenedVertexStyle = G.new_vertex_style(0)
G.set_vertex_style_attribute(unopenedVertexStyle, "shape", "dodecahedron")
G.set_vertex_style_attribute(unopenedVertexStyle, "size",
str(unopenedVertexSize))
G.set_vertex_style_attribute(unopenedVertexStyle, "fontcolor", "#a0a0a0")
openedVertexStyle = G.new_vertex_style(0)
G.set_vertex_style_attribute(openedVertexStyle, "shape", "sphere")
G.set_vertex_style_attribute(openedVertexStyle, "size", str(openedVertexSize))
G.set_vertex_style_attribute(openedVertexStyle, "fontcolor", "#ffffff")
G.set_vertex_style_attribute(openedVertexStyle, "fontsize", "18")
G.set_vertex_style_attribute(openedVertexStyle, "color", "#ffff00")
expandedVertices = Set()
urlToVertex = dict()
vertexToUrl = dict()
format = formatter.NullFormatter()
htmlparser = LinksExtractor(format)
def smoothly_change_vertex_size(v, size1, size2):
for i in range(0,6):
G.set_vertex_attribute(v, "size", str(size1 + (i/5.0)*(size2-size1)))
time.sleep(0.05)
def url_color(url):
urlbits = urlparse(url)
return str(1 + (hash(urlbits[1]) % 512))
def createurl(url):
if (urlToVertex.has_key(url)):
return urlToVertex[url]
v = G.new_vertex()
urlToVertex[url] = v
vertexToUrl[v] = url
G.change_vertex_style(v, unopenedVertexStyle)
G.set_vertex_attribute(v, "label", brief_url(url))
G.set_vertex_attribute(v, "color", url_color(url))
return v
def expand_vertex(v):
if (not vertexToUrl.has_key(v)):
return -1
if v in expandedVertices:
return 0
expandedVertices.add(v)
url = vertexToUrl[v]
print ""
print "Expanding " + url
print "===================================="
G.set_vertex_attribute(v, "color", "#ffff00")
smoothly_change_vertex_size(v, unopenedVertexSize, 3.0)
G.set_vertex_attribute(v, "label", "Working: " + brief_url(url))
G.set_vertex_attribute(v, "fontcolor", "#ff0000")
G.set_vertex_attribute(v, "fontsize", "24")
urlbits = urlparse(url)
data = urllib.urlopen(url)
htmlparser.feed(data.read())
htmlparser.close()
links = htmlparser.get_links()
G.set_vertex_attribute(v, "fontcolor", "#ffffff")
G.set_vertex_attribute(v, "shape", "sphere")
unique_links = Set()
for url2 in links:
url2 = urljoin(url, url2)
print "urljoin(" + url + ", " + url2 + ") = " + url2
unique_links.add(url2)
print ""
print "Unique links:"
for url2 in unique_links:
print url2
if (url2 != url) and isUrlInteresting(url2):
v2 = createurl(url2)
G.new_edge(v,v2)
smoothly_change_vertex_size(v, 3.0, openedVertexSize)
G.change_vertex_style(v, openedVertexStyle)
G.set_vertex_attribute(v, "label", brief_url(url))
return 0
root = createurl(start_url)
note = G.new_vertex()
G.set_vertex_attribute(note, "shape", "none")
G.set_vertex_attribute(note, "label", "Left-doubleclick to expand a node")
G.set_vertex_attribute(note, "fontsize", "18")
e = G.new_edge(note,root)
G.set_edge_attribute(e, "visible", "false")
G.set_edge_attribute(e, "oriented", "true")
expand_vertex(root)
time.sleep(1)
G.remove_vertex(note)
# Now make our callback engine. Pick a random port (so that
# uncompleted teardowns don't prevent us from running this
# code twice in a row).
myPort = random.randint(20739,20999)
# Set up a callback for left double-clicks on vertices.
G.set_vertex_style_attribute(0, "callback_left_doubleclick",
"http://127.0.0.1:" + str(myPort) + "/expand_vertex")
# Now make an XMLRPC server to handle tha callbacks.
from SimpleXMLRPCServer import SimpleXMLRPCServer
# Create server
server = SimpleXMLRPCServer(("localhost", myPort))
server.register_introspection_functions()
server.register_function(expand_vertex)
# Run the server's main loop
print "Listening for callbacks from ubigraph_server on port " + str(myPort)
server.serve_forever()
|
[
"william.fr.lindsay@gmail.com"
] |
william.fr.lindsay@gmail.com
|
f605d22c1a6336bd346a68aa4ef9452a9cb23cf0
|
f48db3ec691da9eef9ebf130599ef594d0895a3b
|
/samplesite/bboard/migrations/0003_auto_20190813_2119.py
|
ab675b7fa51088c4255942c8f44d0f291929aacd
|
[] |
no_license
|
OnePlusZero/django
|
692fc8ff6cd7126e8a5d0bc4cf937e7f853f9a29
|
78450f78c03c70c5554bb3991edfceb86825a1ef
|
refs/heads/master
| 2020-07-14T05:13:30.975704
| 2019-08-29T20:48:24
| 2019-08-29T20:48:24
| 205,246,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
# Generated by Django 2.2.4 on 2019-08-13 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bboard', '0002_auto_20190812_1528'),
]
operations = [
migrations.AddField(
model_name='bb',
name='named',
field=models.TextField(blank=True, null=True, verbose_name='Фамилия и имя'),
),
migrations.AddField(
model_name='bb',
name='phone',
field=models.FloatField(blank=True, null=True, verbose_name='Номер телефона'),
),
]
|
[
"dimasfanzerg@yahoo.com"
] |
dimasfanzerg@yahoo.com
|
773f8b9dce982b9249d251050ac2401104779d8f
|
77462125540636949d28284f848ed7c80c1b4626
|
/dstruct/linkedlist.py
|
ba227a3b6917a4148fbe11f2a825bc5caffe4630
|
[
"MIT"
] |
permissive
|
gmarciani/pymple
|
1a39b1009fa11b699af50000bc3b11a2aec0ae36
|
5da3befa95f2d0138e4a364857e6dd9c421ab6d8
|
refs/heads/master
| 2021-01-10T10:48:18.750183
| 2015-12-12T21:45:21
| 2015-12-12T21:45:21
| 47,878,365
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,316
|
py
|
#Interface Import
from model.base.baselinkedlist import baselinkedlist
class SimpleLinkedList(baselinkedlist):
class Record:
def __init__(self, element):
self.element = element
self._next = None
def __repr__(self):
return str(self.element)
def __str__(self):
return self.__repr__()
def __init__(self):
self._first = None
self._last = None
self._num_elements = 0
def is_empty(self):
"""
Returns True if linked-list is empty, otherwise False.
is_empty() -> True/False
@rtype: bool
@return: True if empty, False otherwise.
"""
return (self._first is None)
def add_as_first(self, element):
"""
Adds element as first into the linked-list.
add_as_first(element) -> None
@type element: object
@param element: element to be added as first into the linked-list.
"""
record = SimpleLinkedList.Record(element)
if self._first is None:
self._first = self._last = record
else:
record._next = self._first
self._first = record
self._num_elements += 1
def add_as_last(self, element):
"""
Adds element as last into the linked-list.
add_as_last(element) -> None
@type element: object
@param element: element to be added as last into the linked-list.
"""
record = SimpleLinkedList.Record(element)
if self._first is None:
self._first = self._last = record
else:
self._last._next = record
self._last = record
self._num_elements += 1
def get_first(self):
"""
Returns the first element into the linked-list.
get_first() -> first_element
@rtype: object
@return: first element into the linked-list.
"""
return None if self._first is None else self._first.element
def get_last(self):
"""
Returns the last element into the linked-list.
get_last() -> last_element
@rtype: object
@return: last element into the linked-list.
"""
return None if self._last is None else self._last.element
def get_first_record(self):
"""
Returns the first record into the linked-list.
get_first_record() -> first_record
@rtype: Record
@return: first record into the linked-list.
"""
return None if self._first is None else self._first
def get_last_record(self):
"""
Returns the last record into the linked-list.
get_last_record() -> last_record
@rtype: Record
@return: last record into the linked-list.
"""
return None if self._first is None else self._last
def pop_first(self):
"""
Deletes the first record from the linked-list, and return the correspondent element.
pop_first() -> first_element
@rtype: object
@return: first element into the linked-list.
"""
if self._first is None:
return None
else:
first_element = self._first.element
self._first = self._first._next
if self._first is None:
self._last = None
self._num_elements -= 1
return first_element
def pop_last(self):
"""
Deletes the last record from the linked-list, and return the correspondent element.
pop_last() -> last_element
@rtype: object
@return: last element into the linked-list.
"""
if self._first is None:
return None
else:
last_element = self._last.element
curr = self.get_first_record()
prev = None
while curr is not self._last:
prev = curr
curr = curr._next
if prev is None:
self._first = None
self._last = None
else:
self._last = prev
prev._next = None
self._num_elements -= 1
return last_element
def delete_record(self, record):
"""
Deletes the specified record from the linked-list.
delete_record(record) -> None
@type record: Record
@param record: record to be deleted from the linked-list.
"""
if self._first is None or record is None:
return
self._num_elements -= 1
curr = self.get_first_record()
prev = None
while curr is not None:
if curr is record:
if prev is None:
self._first = curr._next
elif curr._next is None:
self._last = prev
prev._next = None
else:
prev._next = curr._next
break
prev = curr
curr = curr._next
def __repr__(self):
s = "["
if self._first is not None:
curr = self._first
while curr is not None:
if len(s) > 1:
s += ", "
s += str(curr)
curr = curr._next
s += "]"
return s
def __str__(self):
return self.__repr__()
class DoubleLinkedList(SimpleLinkedList, baselinkedlist):
class Record(SimpleLinkedList.Record):
def __init__(self, element):
SimpleLinkedList.Record.__init__(self, element)
self._prev = None
def __repr__(self):
return str(self.element)
def __str__(self):
return self.__repr__()
def add_as_first(self, element):
"""
Adds element as first into the linked-list.
add_as_first(element) -> None
@type element: object
@param element: element to be added as first into the linked-list.
"""
record = DoubleLinkedList.Record(element)
if self._first is None:
self._first = self._last = record
else:
self._first._prev = record
record._next = self._first
self._first = record
self._num_elements += 1
def add_as_last(self, element):
"""
Adds element as last into the linked-list.
add_as_last(element) -> None
@type element: object
@param element: element to be added as last into the linked-list.
"""
record = DoubleLinkedList.Record(element)
if self._first is None:
self._first = self._last = record
else:
record._prev = self._last
self._last._next = record
self._last = record
self._num_elements += 1
def pop_first(self):
"""
Deletes the first record from the linked-list, and return the correspondent element.
pop_first() -> first_element
@rtype: object
@return: first element into the linked-list.
"""
if self._first is None:
return None
else:
res = self._first.element
self._first = self._first._next
if self._first != None:
self._first._prev = None
else:
self._last = None
self._num_elements -= 1
return res
def pop_last(self):
"""
Deletes the last record from the linked-list, and return the correspondent element.
pop_last() -> last_element
@rtype: object
@return: last element into the linked-list.
"""
if self._first is None:
return None
else:
res = self._last.element
self._last = self._last._prev
if self._last is not None:
self._last._next = None
else:
self._first = None
self._num_elements -= 1
return res
def delete_record(self, record):
"""
Deletes the specified record from the linked-list.
delete_record(record) -> None
@type record: Record
@param record: record to be deleted from the linked-list.
"""
if record is None:
return
self._num_elements -= 1
if record._prev is not None:
record._prev._next = record._next
else:
self._first = record._next
if record._next is not None:
record._next._prev = record._prev
else:
self._last = record._prev
def __test(linked_list):
"""
Linked-List Test.
__test(linked_list) -> None
@type linked_list: baselinkedlist
@param linked_list: linked_list instance.
"""
if not isinstance(linked_list, baselinkedlist):
raise TypeError("Expected type was LinkedList.")
print "### iPATH TEST DATA STRUCTURE"
print "### Data Type: Linked List ({})".format(str(linked_list.__class__.__bases__[0].__name__))
print "### Implementation: {}".format(str(linked_list.__class__.__name__))
print "\n*** INSERT AS FIRST ***\n"
for i in range(5):
print "add_as_first({})".format(str(i))
linked_list.add_as_first(i)
print "\n*** INSERT AS LAST ***\n"
for i in range(5, 10):
print "add_as_last({})".format(str(i))
linked_list.add_as_last(i)
print "\n{}\n".format(str(linked_list))
print "\n*** GET FIRST/LAST ELEMENT ***\n"
print "get_first(): {}\n".format(str(linked_list.get_first()))
print "get_last(): {}\n".format(str(linked_list.get_last()))
print "\n*** GET FIRST/LAST RECORD ***\n"
print("record1 = get_first_record(): {}").format(str(linked_list.get_first_record()))
record1 = linked_list.get_first_record()
print("record2 = get_last_record(): {}").format(str(linked_list.get_last_record()))
record2 = linked_list.get_last_record()
print "\n*** DELETE RECORD ***\n"
print("delete_record(record1)")
linked_list.delete_record(record1)
print("delete_record(record2)")
linked_list.delete_record(record2)
print "\n*** POP FIRST/LAST ***\n"
print "pop_first(): {}".format(str(linked_list.pop_first()))
print "pop_last(): {}".format(str(linked_list.pop_last()))
print "\n{}\n".format(linked_list)
print "\n*** EMPTYING ***\n"
while not linked_list.is_empty():
linked_list.pop_last()
print "{}".format(str(linked_list))
print "\n### END OF TEST ###\n"
if __name__ == "__main__":
linkedlist = SimpleLinkedList()
__test(linkedlist)
linkedlist = DoubleLinkedList()
__test(linkedlist)
|
[
"giacomo.marciani@gmail.com"
] |
giacomo.marciani@gmail.com
|
f5ef3bb85a2fca98469989b50eaeeec674b16b69
|
22cec042bc4e600f2a42ff974cb1a958daf6cb39
|
/helpers/exceptions.py
|
c1c3bb2421bde2374636ef9caad6e8638b31359e
|
[] |
no_license
|
dal3006/edat
|
86d9a39b9218a57eaf989eddf614c01fa2401585
|
29468da54de6c9167e869e5fe6901aa7a667150c
|
refs/heads/main
| 2023-04-02T16:17:09.792206
| 2021-03-01T00:26:01
| 2021-03-01T00:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
""" Custom Exception classes used throughout the application
"""
class FileAccessError(Exception):
""" Exceptions called in case there was any file access problem """
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class PlotRenderError(Exception):
""" Exceptions called in case there was any file access problem """
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
|
[
"ibolsch@gmail.com"
] |
ibolsch@gmail.com
|
5e6330450d613daf77b5516864df671d6b111029
|
65320420ac129fc49d8737dcb608cc9a97ed8f79
|
/src/s010cognatematch/s005splitFilesV09.py
|
e24c13dc2d49fa4f8bc7dd601cdbde74e59298d2
|
[
"Apache-2.0"
] |
permissive
|
bogdanbabych/morphosyntax
|
625fe6d037cdbf4c6aafd51d9e5716a5fea5b50c
|
c345be0f9b0274e2ef8a2961c0d176b7f0d43f6c
|
refs/heads/master
| 2021-01-19T02:09:17.572907
| 2019-08-22T10:18:39
| 2019-08-22T10:18:39
| 73,376,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
'''
Created on 9 Dec 2016
@author: bogdan
'''
import sys, os, re, codecs
class cSplitFile(object):
'''
classdocs
'''
def __init__(self, SFNameI, ILenSplit=50):
'''
Constructor
'''
SNameInm , SNameIext = os.path.splitext(SFNameI)
SFNameO = SNameInm + '-1000' + SNameIext
# FNameI = open(SFNameI, 'rU')
FNameO = open(SFNameO, 'w')
SFNameSh = SFNameO + '.sh'
FNameSh = open(SFNameSh, 'w')
FNameSh.write('python3 md070crosslevenshteinPhonV09.py %(SFNameO)s ../../../xdata/morpho/ru.num ua ru >%(SFNameO)s.res\n' % locals())
i = 0
k = 0
# with codecs.open(SFNameI, "r",encoding='utf-8', errors='ignore') as FNameI:
FNameI = codecs.open(SFNameI, "r",encoding='utf-8', errors='ignore')
for SLine in FNameI:
SLine = FNameI.readline()
i += 1
SLine = SLine.rstrip()
SLine = SLine.lstrip()
LLine = re.split('[\t ]+', SLine)
try:
SWord = LLine[1]
SPoS = LLine[2]
IFrq = int(LLine[0])
except:
SWord = 'NONE'
SPoS = 'NONE'
IFrq = 0
continue
try:
LRest = LLine[3:]
except:
LRest = []
SRest = '\t'.join(LRest)
SPoSM = self.mapPoS(SPoS)
if LRest == []:
FNameO.write(str(IFrq) + '\t' + SWord + '\t' + SPoSM + '\n')
elif len(LRest) > 0:
FNameO.write(str(IFrq) + '\t' + SWord + '\t' + SPoSM + '\t' + SRest + '\n')
if ILenSplit == 0: continue
if i % ILenSplit == 0:
k += 1
k0 = 1000+k
SFNameO = SNameInm + '-' + str(k0) + SNameIext
FNameO.close()
FNameO = open(SFNameO, 'w')
SFNameSh = SFNameO + '.sh'
FNameSh.close()
FNameSh = open(SFNameSh, 'w')
FNameSh.write('python3 md070crosslevenshteinPhonV09.py %(SFNameO)s ../../../xdata/morpho/ru.num ua ru >%(SFNameO)s.res\n' % locals())
def mapPoS(self, SPoS):
SPoSM = ''
DPoS = {'s':'ADP', 'q':'CONJ', 'n':'NOUN', 'v':'VERB', 'a':'ADJ', 'p':'PRON', 'r':'ADV', 'q':'PART', 'i':'INTJ', 'y':'NOUN', 'm':'NUM'}
try:
SPoSM = DPoS[SPoS]
except:
SPoSM = SPoS
return SPoSM
if __name__ == '__main__':
OSplitFile = cSplitFile(sys.argv[1], ILenSplit=int(sys.argv[2]))
|
[
"bogdan.babych@gmail.com"
] |
bogdan.babych@gmail.com
|
71f29bd8fe9c38bb4dfb37161179b569edf632fa
|
76a2a0846ba5cf4f89ee39442448894a8a2cf5a6
|
/mysite/apps/mpemail/signals/__init__.py
|
6c22c4b7f54511e02b320178ef7ad84173266284
|
[] |
no_license
|
littlehome-eugene/mpemail
|
a2d86410c3d5f17a50ad22352dec708f86ae083c
|
7bb93175d5a98add93570302cc14db863ba477fd
|
refs/heads/master
| 2020-03-26T21:39:43.828954
| 2018-09-19T00:26:13
| 2018-09-19T00:26:13
| 145,401,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
import mpemail.signals.email
|
[
"p.compassion@gmail.com"
] |
p.compassion@gmail.com
|
ee84fabbea404d237c006dfce600cd586c85153c
|
ade2f7fb02a8e45ee276f2c61b32af0b9795ccb6
|
/planner/migrations/0004_remove_event_date.py
|
a8c1651d3402c69c773ed95979f714a9e8100ba2
|
[] |
no_license
|
Jacqui-E-McGowan/MyWeek
|
11fb96c2eac6cfe0a7469f5ebdea7c29a3745754
|
c002a3d218de8760dfedb140920570d7fb027547
|
refs/heads/main
| 2023-03-12T12:36:13.381130
| 2021-02-28T05:30:23
| 2021-02-28T05:30:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# Generated by Django 3.1.6 on 2021-02-25 19:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('planner', '0003_event_address'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='date',
),
]
|
[
"nheyland@gmail.com"
] |
nheyland@gmail.com
|
13670695bd691fcc3a0ecaf59ac201dc5ef20834
|
d0aec3726aa4d16de7df0948a782f7373ae14189
|
/tests/day02_test.py
|
f246260bc4b04be9b4870bca8ae0e445c468c76f
|
[
"MIT"
] |
permissive
|
zoeimogen/AoC2019
|
1b70c46bc9ede73ab9aa3db540373a237a57b59d
|
44ffc08a38cb07273d7c4fd49200fb7912d4a1cb
|
refs/heads/master
| 2020-09-24T19:32:49.786707
| 2019-12-23T17:29:08
| 2019-12-23T17:29:08
| 225,826,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
#!/usr/bin/python3
'''Advent of Code 2019 Day 2 tests'''
import unittest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from aoc2019 import intcode # pylint: disable=wrong-import-position
class TestUM(unittest.TestCase):
'''Unit Tests'''
def test_day02(self) -> None:
'''Tests from day two, although all directly test intcode rather than day02.py'''
pgm = intcode.Program('standard', [1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50])
self.assertEqual(pgm.run(), [3500])
pgm = intcode.Program('standard', [1, 0, 0, 0, 99])
self.assertEqual(pgm.run(), [2])
pgm = intcode.Program('standard', [2, 3, 0, 3, 99])
pgm.run()
self.assertEqual(pgm.state['pgm'][3], 6)
pgm = intcode.Program('standard', [2, 4, 4, 5, 99, 0])
pgm.run()
self.assertEqual(pgm.state['pgm'][5], 9801)
pgm = intcode.Program('standard', [1, 1, 1, 4, 99, 5, 6, 0, 99])
self.assertEqual(pgm.run(), [30])
|
[
"zoe@complicity.co.uk"
] |
zoe@complicity.co.uk
|
f6ac16a6d52366142f9463e5bdd5f3b1ba0ccd99
|
169f45b84ac3f325626981f0f128cf188fc32f7e
|
/code/sentence_level_EE/data_utils.py
|
bab720d815ed04bf684d13bf26e1230101abf648
|
[] |
no_license
|
HangYang-NLP/DCFEE
|
89e8464159dbc05f2fb717f44e99d92c5c7bb559
|
8d3cfe9bcb5caa876bc6237b6dc4747d3d98ab89
|
refs/heads/main
| 2023-05-06T23:32:13.804313
| 2021-06-01T08:17:29
| 2021-06-01T08:17:29
| 372,746,553
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,773
|
py
|
# encoding = utf8
import re
import math
import codecs
import random
import numpy as np
import jieba
jieba.initialize()
def create_dico(item_list):
"""
Create a dictionary of items from a list of list of items.
"""
assert type(item_list) is list
dico = {}
for items in item_list:
for item in items:
if item not in dico:
dico[item] = 1
else:
dico[item] += 1
return dico
def create_mapping(dico):
"""
Create a mapping (item to ID / ID to item) from a dictionary.
Items are ordered by decreasing frequency.
"""
sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))
id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}
item_to_id = {v: k for k, v in id_to_item.items()}
return item_to_id, id_to_item
def zero_digits(s):
"""
Replace every digit in a string by a zero.
"""
return re.sub('\d', '0', s)
def iob2(tags):
"""
Check that tags have a valid IOB format.
Tags in IOB1 format are converted to IOB2.
"""
# print(tags)
for i, tag in enumerate(tags):
if tag == 'O':
continue
split = tag.split('-')
if len(split) != 2 or split[0] not in ['I', 'B']:
return False
if split[0] == 'B':
continue
elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
return True
def iob_iobes(tags):
"""
IOB -> IOBES
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif tag.split('-')[0] == 'B':
if i + 1 != len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('B-', 'S-'))
elif tag.split('-')[0] == 'I':
if i + 1 < len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('I-', 'E-'))
else:
raise Exception('Invalid IOB format!')
return new_tags
def iobes_iob(tags):
"""
IOBES -> IOB
"""
new_tags = []
for i, tag in enumerate(tags):
if tag.split('-')[0] == 'B':
new_tags.append(tag)
elif tag.split('-')[0] == 'I':
new_tags.append(tag)
elif tag.split('-')[0] == 'S':
new_tags.append(tag.replace('S-', 'B-'))
elif tag.split('-')[0] == 'E':
new_tags.append(tag.replace('E-', 'I-'))
elif tag.split('-')[0] == 'O':
new_tags.append(tag)
else:
raise Exception('Invalid format!')
return new_tags
def insert_singletons(words, singletons, p=0.5):
"""
Replace singletons by the unknown word with a probability p.
"""
new_words = []
for word in words:
if word in singletons and np.random.uniform() < p:
new_words.append(0)
else:
new_words.append(word)
return new_words
def get_seg_features(string):
"""
Segment text with jieba
features are represented in bies format
s donates single word
"""
seg_feature = []
for word in jieba.cut(string):
if len(word) == 1:
seg_feature.append(0)
else:
tmp = [2] * len(word)
tmp[0] = 1
tmp[-1] = 3
seg_feature.extend(tmp)
return seg_feature
def create_input(data):
"""
Take sentence data and return an input for
the training or the evaluation function.
"""
inputs = list()
inputs.append(data['chars'])
inputs.append(data["segs"])
inputs.append(data['tags'])
return inputs
def load_word2vec(emb_path, id_to_word, word_dim, old_weights):
"""
Load word embedding from pre-trained file
embedding size must match
"""
new_weights = old_weights
print('Loading pretrained embeddings from {}...'.format(emb_path))
pre_trained = {}
emb_invalid = 0
for i, line in enumerate(codecs.open(emb_path, 'r', 'utf-8')):
line = line.rstrip().split()
if len(line) == word_dim + 1:
pre_trained[line[0]] = np.array(
[float(x) for x in line[1:]]
).astype(np.float32)
else:
emb_invalid += 1
if emb_invalid > 0:
print('WARNING: %i invalid lines' % emb_invalid)
c_found = 0
c_lower = 0
c_zeros = 0
n_words = len(id_to_word)
# Lookup table initialization
for i in range(n_words):
word = id_to_word[i]
if word in pre_trained:
new_weights[i] = pre_trained[word]
c_found += 1
elif word.lower() in pre_trained:
new_weights[i] = pre_trained[word.lower()]
c_lower += 1
elif re.sub('\d', '0', word.lower()) in pre_trained:
new_weights[i] = pre_trained[
re.sub('\d', '0', word.lower())
]
c_zeros += 1
print('Loaded %i pretrained embeddings.' % len(pre_trained))
print('%i / %i (%.4f%%) words have been initialized with '
'pretrained embeddings.' % (
c_found + c_lower + c_zeros, n_words,
100. * (c_found + c_lower + c_zeros) / n_words)
)
print('%i found directly, %i after lowercasing, '
'%i after lowercasing + zero.' % (
c_found, c_lower, c_zeros
))
return new_weights
def full_to_half(s):
"""
Convert full-width character to half-width one
"""
n = []
for char in s:
num = ord(char)
if num == 0x3000:
num = 32
elif 0xFF01 <= num <= 0xFF5E:
num -= 0xfee0
char = chr(num)
n.append(char)
return ''.join(n)
def cut_to_sentence(text):
"""
Cut text to sentences
"""
sentence = []
sentences = []
len_p = len(text)
pre_cut = False
for idx, word in enumerate(text):
sentence.append(word)
cut = False
if pre_cut:
cut=True
pre_cut=False
if word in u"。;!?\n":
cut = True
if len_p > idx+1:
if text[idx+1] in ".。”\"\'“”‘’?!":
cut = False
pre_cut=True
if cut:
sentences.append(sentence)
sentence = []
if sentence:
sentences.append("".join(list(sentence)))
return sentences
def replace_html(s):
s = s.replace('"','"')
s = s.replace('&','&')
s = s.replace('<','<')
s = s.replace('>','>')
s = s.replace(' ',' ')
s = s.replace("“", "“")
s = s.replace("”", "”")
s = s.replace("—","")
s = s.replace("\xa0", " ")
return(s)
def input_from_line(line, char_to_id):
"""
Take sentence data and return an input for
the training or the evaluation function.
"""
line = full_to_half(line)
line = replace_html(line)
inputs = list()
inputs.append([line])
line.replace(" ", "$")
inputs.append([[char_to_id[char] if char in char_to_id else char_to_id["<UNK>"]
for char in line]])
inputs.append([get_seg_features(line)])
inputs.append([[]])
return inputs
class BatchManager(object):
def __init__(self, data, batch_size):
self.batch_data = self.sort_and_pad(data, batch_size)
self.len_data = len(self.batch_data)
def sort_and_pad(self, data, batch_size):
num_batch = int(math.ceil(len(data) /batch_size))
sorted_data = sorted(data, key=lambda x: len(x[0]))
batch_data = list()
for i in range(num_batch):
batch_data.append(self.pad_data(sorted_data[int(i*batch_size) : int((i+1)*batch_size)]))
return batch_data
@staticmethod
def pad_data(data):
strings = []
chars = []
segs = []
targets = []
max_length = max([len(sentence[0]) for sentence in data])
print(max_length)
for line in data:
string, char, seg, target = line
padding = [0] * (max_length - len(string))
strings.append(string + padding)
chars.append(char + padding)
segs.append(seg + padding)
targets.append(target + padding)
return [strings, chars, segs, targets]
def iter_batch(self, shuffle=False):
if shuffle:
random.shuffle(self.batch_data)
for idx in range(self.len_data):
yield self.batch_data[idx]
|
[
"noreply@github.com"
] |
HangYang-NLP.noreply@github.com
|
aef7ea599fa129eaf6944dc71a68f690d72162c4
|
2dc3cc7fad04cd3e314e28dd5f8ca4979cce038d
|
/tests/test_analysis.py
|
1006a9e6dca399ff3770cecbbab1ef2cd7a26f89
|
[
"MIT"
] |
permissive
|
TangleSpace/hotstepper
|
32ce07300ff38825df6297162d460885d64b93c6
|
4d8a278d94f19fee2bc4d3ba25628fa69ed3653d
|
refs/heads/master
| 2023-03-30T03:05:02.968659
| 2021-03-21T18:32:57
| 2021-03-21T18:32:57
| 338,184,036
| 14
| 2
|
MIT
| 2021-03-13T21:44:31
| 2021-02-11T23:55:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
import os
import sys
sys.path.insert(0, r"..//")
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import numpy as np
import hotstepper as hs
import hotstepper.samples as samples
vessel_stats = {
'integrate': 67562.4666666666,
'nintegrate': 3165.1666666666665,
'mean': 7.7218513022624045,
'var': 23.431962388696526,
'mode': 6.0,
'median': 7.0,
'min': 0.0,
'max': 23.0,
'percentile50': 7.0,
'percentile37': 6.0
}
def test_statistic_values():
vessel_steps = samples.vessel_queue_sample()
np.testing.assert_almost_equal(vessel_steps.integrate(), vessel_stats['integrate'])
np.testing.assert_almost_equal(vessel_steps.normalise().integrate(), vessel_stats['nintegrate'])
np.testing.assert_almost_equal(vessel_steps.mean(), vessel_stats['mean'])
np.testing.assert_almost_equal(vessel_steps.var(), vessel_stats['var'])
np.testing.assert_almost_equal(vessel_steps.mode(), vessel_stats['mode'])
np.testing.assert_almost_equal(vessel_steps.median(), vessel_stats['median'])
np.testing.assert_almost_equal(vessel_steps.min(), vessel_stats['min'])
np.testing.assert_almost_equal(vessel_steps.max(), vessel_stats['max'])
np.testing.assert_almost_equal(vessel_steps.percentile(50), vessel_stats['percentile50'])
np.testing.assert_almost_equal(vessel_steps.percentile(37), vessel_stats['percentile37'])
|
[
"lenardeuler@hotmail.com"
] |
lenardeuler@hotmail.com
|
0c7234b5f0ad13145c42a218a6e23787e0f2cd00
|
b7d09fd039405d7adbb5a8dd05f8fb1fbc17c0be
|
/viewer.py
|
296976fbc159849aaaf225c4f34afbea539079b7
|
[] |
no_license
|
Smart-SE/sse2019-group4
|
e0957db58af62f786c53873e3e3b47608f3e0d23
|
eb5aff13c0b233ba73242657725ae0476933151e
|
refs/heads/master
| 2020-06-05T23:06:04.670019
| 2019-07-13T07:39:39
| 2019-07-13T07:39:39
| 192,570,281
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
"""
Task : #6 画面に会話アシスタントの内容を表示する
"""
import asyncio
import time
import os
from pyppeteer import launch
from pyppeteer import connection
from watcher import async_watch
loop = asyncio.get_event_loop()
def patch_pyppeteer():
original_method = connection.websockets.client.connect
def new_method(*args, **kwargs):
kwargs['ping_interval'] = None
kwargs['ping_timeout'] = None
return original_method(*args, **kwargs)
connection.websockets.client.connect = new_method
patch_pyppeteer()
async def view(page, url):
print(url)
try:
await page.goto(url)
except Exception as e:
print(e)
async def main():
browser = None
if os.path.exists("/usr/bin/chromium-browser"):
browser = await launch(headless=False, executablePath="/usr/bin/chromium-browser", args=['--start-fullscreen'])
else:
browser = await launch(headless=False, args=['--start-fullscreen'])
page = await browser.newPage()
await page.setViewport({'width': 1920, 'height': 1080})
await async_watch("./data", "url", lambda text: view(page, text))
loop.run_until_complete(main())
|
[
"benishouga@gmail.com"
] |
benishouga@gmail.com
|
4566132d1837ae90005a6bcb84a812d52f51a117
|
311b6d51936d76dbb88212a06ee52e4b48fa5cd3
|
/models/train_classifier.py
|
bd3f1444d4b1ba5ef88cb117838f97a13d75ef1d
|
[] |
no_license
|
jiayijyc/Disaster-Response-Pipeline-Project
|
e50475eafeef24f15d89a345d1463255265d1220
|
3486ea5485177979e87298e13a7497aa56b1c728
|
refs/heads/master
| 2020-04-07T04:09:34.097704
| 2018-11-24T10:54:16
| 2018-11-24T10:54:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
import sys
import nltk
nltk.download(['punkt', 'wordnet'])
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.grid_search import GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
import pickle
def load_data(database_filepath):
save_path = 'sqlite:///' + database_filepath
engine = create_engine(save_path)
table_name = database_filepath.split('/')[-1].split('.')[0]
df = pd.read_sql("SELECT * FROM "+table_name, engine)
X = df.message.values
Y = df.loc[:,'related':].values
category_names=df.loc[:,'related':].columns
return X,Y,category_names
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('moc', MultiOutputClassifier(RandomForestClassifier()))
])
parameters = {'moc__estimator__n_estimators':[10,20],'moc__estimator__min_samples_split':[2,3]}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
best_model=model.best_estimator_
Y_pred = best_model.predict(X_test)
for ind in range(len(category_names)):
accuracy=classification_report(Y_test[:,ind],Y_pred[:,ind])
print ('Accuracy Result of Column '+category_names[ind])
print(accuracy)
pass
def save_model(model, model_filepath):
best_model=model.best_estimator_
pickle.dump(best_model, open(model_filepath, 'wb'))
pass
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
jiayijyc.noreply@github.com
|
483d27e67d1c7cbafd00dac1937a7658f04d1979
|
29b9f17a3081eb4cb64c60f66de562a903362884
|
/levenshtein.py
|
7a033dca6051191e8501dc3af5fc3954818bf128
|
[] |
no_license
|
swetabhch/Database-Cleaning-SeSTA
|
bf91b70b4e98c107f6d0cb9d3817579ca7ee2350
|
20536aaa04cc596eaafc1c41b5cdbb4b352135ac
|
refs/heads/master
| 2023-02-01T23:28:48.565728
| 2020-12-17T07:16:58
| 2020-12-17T07:16:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
import numpy as np
# compute levenshtein distance between strings a and b
def levenshtein(a, b):
m, n = len(a)+1, len(b)+1
matrix = np.zeros((m, n))
for x in range(m):
matrix[x, 0] = x
for y in range(n):
matrix[0, y] = y
for x in range(1, m):
for y in range(1, n):
if a[x-1] == b[y-1]:
substitutionCost = 0
else:
substitutionCost = 1
matrix[x, y] = min(matrix[x-1, y]+1, # deletion
matrix[x, y-1]+1, # insertion
matrix[x-1, y-1] + substitutionCost) # substitution
#print matrix
return np.int(matrix[m-1, n-1])
|
[
"noreply@github.com"
] |
swetabhch.noreply@github.com
|
5d1c3752c86fc8b5ab35d3855687ecd4dc0a64bc
|
5ed150a21c4754a27abe2dcf627aa630bfb00db3
|
/mySiteRest/mysite/urls.py
|
24cf814a1717d4dc3b179f94c4d1583638729036
|
[] |
no_license
|
samiul24/apiproject
|
a6819349e7097121fa3daa5ff15a3042f790d857
|
9425c06f68691ec767c639b3b5fe46da326d0fc9
|
refs/heads/master
| 2023-04-05T22:36:49.448805
| 2021-05-01T06:12:37
| 2021-05-01T06:12:37
| 363,335,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
from home import views
from order import views as orderviews
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('home.urls')),
path('home/', include('home.urls')),
path('product/', include('product.urls')),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"""
path('order/', include('order.urls')),
path('ckeditor/', include('ckeditor_uploader.urls')),
path('user/', include('user.urls')),
path('about/', views.aboutus, name='aboutus'),
path('contact/', views.contactus, name='contactus'),
path('category/<int:id>/<slug:slug>/', views.category_product, name='category_product'),
path('search/', views.search, name='search_product'),
path('product/<int:id>/<slug:slug>', views.product_details, name='product_details'),
path('order/', orderviews.shopcart, name='shopcart' ),
path('faq/', views.faq, name='faq')"""
|
[
"samiul110624@gmail.com"
] |
samiul110624@gmail.com
|
87f27491103c863122d5b540b57be42f6faccd47
|
5b28005b6ee600e6eeca2fc7c57c346e23da285f
|
/nomadic_recording_lib/comm/dmx/OSCtoOLA.py
|
c5c93f2ac60ce93d0dcc09a1ffe7fb3941cf2212
|
[] |
no_license
|
nocarryr/wowza_logparse
|
c31d2db7ad854c6b0d13495a0ede5f406c2fce3f
|
d6daa5bf58bae1db48ac30031a845bf975c7d5cc
|
refs/heads/master
| 2021-01-17T07:19:00.347206
| 2017-06-24T16:57:32
| 2017-06-24T16:57:32
| 25,835,704
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,980
|
py
|
import socket
import threading
import array
#import jsonpickle
from Bases import OSCBaseObject, Serialization
from ola_IO import olaIO
from ..osc.osc_io import oscIO
from ..BaseIO import detect_usable_address
class OSCtoOLAHost(OSCBaseObject):
osc_address = 'OSCtoOLA'
ui_name = 'OLA (Open Lighting Architecture)'
_Properties = {'connected':dict(fget='_connected_getter', fset='_connected_setter')}
def __init__(self, **kwargs):
self.osc_io = kwargs.get('osc_io')
self.root_address = 'OSCtoOLA-' + socket.gethostname()
self.direct_mode = False
# if not self.osc_io:
# self.direct_mode = True
# s = 'OSCtoOLA'
# io_kwargs = dict(confsection=s + '_io', app_address=s, root_address=s)
# for key in ['hostaddr', 'hostport', 'mcastaddr', 'mcastport']:
# if key in kwargs:
# io_kwargs.update({key:kwargs[key]})
# self.osc_io = oscIO(**io_kwargs)
# self.osc_io.add_client_name(socket.gethostname())
self.osc_parent_node = self.osc_io.root_node
super(OSCtoOLAHost, self).__init__(**kwargs)
self.register_signal('state_changed')
self.universes = {}
self.olaIO = olaIO()
#self.osc_io.add_client_name(self.root_address, update_conf=False)
addr = detect_usable_address()
port = self.osc_io.hostdata['recvport']
self.osc_io.add_client(name=self.root_address, address=addr, port=port,
update_conf=False, isLocalhost=False)
self.osc_io.connect('new_master', self.on_osc_new_master)
self.olaIO.connect('new_universe', self.on_new_ola_universe)
self.olaIO.connect('state_changed', self.on_ola_state_changed)
#self.add_osc_handler(callbacks={'request-universes':self.on_universes_requested})
#self.do_connect()
# @property
# def connected(self):
# return self.olaIO.connected
# @connected.setter
# def connected(self, value):
# self.olaIO.connected = value
def _connected_getter(self):
return self.olaIO.connected
def _connected_setter(self, value):
self.olaIO.connected = value
def do_connect(self):
if self.direct_mode:
self.osc_io.do_connect()
self.olaIO.do_connect()
def do_disconnect(self):
def _do_disconnect():
if self.direct_mode:
self.osc_io.do_disconnect()
self.olaIO.do_disconnect()
for univ in self.universes.itervalues():
univ.set_all_zero(True)
t = threading.Timer(.5, _do_disconnect)
t.daemon = True
t.start()
def on_ola_state_changed(self, **kwargs):
self.emit('state_changed', **kwargs)
def on_new_ola_universe(self, **kwargs):
univ = kwargs.get('ola_universe')
if univ.id not in self.universes:
u_kwargs = self.add_osc_child(address=str(univ.id))
u_kwargs.update({'ola_universe':univ, 'root_address':self.root_address})
obj = OSCUniverse(**u_kwargs)
self.universes.update({obj.id:obj})
def on_universes_requested(self, **kwargs):
d = {}
for key, val in self.universes.iteritems():
d.update({key:{}})
for attr in ['id', 'name']:
d[key].update({attr:getattr(val, attr)})
s = Serialization.to_json(d)
self.osc_node.send_message(root_address=self.root_address, address='universes-info', value=s)
def on_osc_new_master(self, **kwargs):
for univ in self.universes.itervalues():
univ.set_all_zero(not self.osc_node.oscMaster)
def on_app_exit(self, *args, **kwargs):
self.LOG.info('oscola app exit')
self.olaIO.on_app_exit()
class OSCUniverse(OSCBaseObject):
def __init__(self, **kwargs):
self._values = None
self.all_zero = False
super(OSCUniverse, self).__init__(**kwargs)
self.register_signal('value_update')
self.values = array.array('B', [0]*513)
#print 'osc path: ', self.osc_node.get_full_path()
self.root_address = kwargs.get('root_address')
self.ola_universe = kwargs.get('ola_universe')
self.ola_universe.Universe = self
#self.id = self.ola_universe.id
self.add_osc_handler(callbacks={'set-channel':self.on_universe_set_channel,
'dump-response':self.on_universe_dump_response})
self.osc_node.send_message(root_address=self.root_address, client=self.root_address, address='request-dump')
#print 'OSCtoOLA new_universe: uid=%s, name=%s, pyid=%s' % (self.id, self.name, id(self))
@property
def id(self):
return self.ola_universe.id
@property
def name(self):
return self.ola_universe.name
@property
def values(self):
if self.all_zero:
return array.array('B', [0]*513)
return self._values
@values.setter
def values(self, values):
self._values = values
def on_universe_set_channel(self, **kwargs):
values = kwargs.get('values')
chan = values[0]
value = values[1]
self.values[chan-1] = value
#print 'oscola univ update: ', chan, value
#print 'update from osc: chan=%s, value=%s' % (chan, value)
if not self.all_zero:
self.emit('value_update', universe=self, values=self.values)
def on_universe_dump_response(self, **kwargs):
values = kwargs.get('values')
for i, value in enumerate(values):
self.values[i] = value
self.emit('value_update', universe=self, values=self.values)
def set_all_zero(self, state):
self.all_zero = state
self.emit('value_update', universe=self, values=self.values)
|
[
"matt@nomadic-recording.com"
] |
matt@nomadic-recording.com
|
65922f0129961d9a17ab0e0c398410b698cc34a7
|
10fe9643c97b2dd976329753b58ba7c72e837783
|
/misc/perform_pose.py
|
57f45613f4f31fd69e4189e83e5bd9487fbcfacf
|
[
"GPL-3.0-only"
] |
permissive
|
sgoldenlab/simba
|
456bc6663222e6deec27f98e0549b64e13930846
|
ab5162ba23d1970b2b97f329d1607feec5d4ef7c
|
refs/heads/master
| 2023-08-31T20:11:27.839593
| 2023-08-31T00:41:15
| 2023-08-31T00:41:15
| 206,670,333
| 247
| 127
|
MIT
| 2023-05-21T15:54:20
| 2019-09-05T22:53:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,494
|
py
|
import cv2
from tools.misc import load_config_yaml
from pylab import *
from dlclive import DLCLive, Processor
from tools.misc import read_bp_config_csv
from tools.determine_fps import determine_fps
from multiprocessing import shared_memory
from tools.image_manipulations import change_color, change_size
from multiprocessing.shared_memory import ShareableList, SharedMemory
from tools.unit_tests.check_shared_memory_processes import create_shared_memory_process
class CamPoseInitalizer():
def __init__(self):
self.shared_status = shared_memory.ShareableList(name='pose_status')
self.shared_latency = shared_memory.ShareableList(name='latency_data')
self.config = load_config_yaml(self.shared_status[0])
if not self.config['CAMERA']['IP CAMERA']['status']:
if not self.config['CAMERA']['CAMERA SETTINGS']['VIDEO FILE']['use video file']:
self.id = int(self.config['CAMERA']['CAMERA SETTINGS']['input channel'])
else:
self.id = self.config['CAMERA']['CAMERA SETTINGS']['VIDEO FILE']['video path']
self.bp_df, self.animal_names, self.body_part_names, _ = read_bp_config_csv(
self.config['POSE']['MODEL']['bp config path'])
self.no_animals = len(set(self.animal_names))
self.no_bps = len(set(self.body_part_names))
self.first_image = None
self.session_name = self.config['GENERAL']['session name']
self.custom_img_size = self.config['CAMERA']['IMAGE']['CUSTOM IMG SIZE']['status']
self.modify_img = self.config['CAMERA']['IMAGE']['MODIFY_IMG']['status']
self.bp_array = np.empty((self.no_animals, self.no_bps * self.no_animals, 3))
self.shared_status[-1] += 1
def initialize_pose(self):
self.cap = cv2.VideoCapture(self.id)
self.img_size = dict()
self.dlc_live_object = DLCLive(self.config['POSE']['MODEL']['model path'], processor=Processor())
while self.first_image is None:
_, self.first_image = self.cap.read()
self.dlc_live_object.init_inference(self.first_image)
self.fps = determine_fps(self)
self.img_size['width'] = self.first_image.shape[0]
self.img_size['height'] = self.first_image.shape[1]
self.img_size['color'] = self.first_image.shape[2]
self.shared_status[-1] += 1
self.shared_latency[0] = self.fps
def perform_pose(self):
self.dlc_live_object.init_inference(self.first_image)
self.shared_status[-1] += 1
self.bp_array = np.empty((self.no_animals, self.no_bps * self.no_animals, 3))
self.shm_bp = create_shared_memory_process(shared_memory_name=self.session_name, shared_memory_size=self.bp_array.nbytes * 2000)
self.shm_img = create_shared_memory_process(shared_memory_name='shared_img',shared_memory_size=self.first_image.nbytes)
frame_counter, start_time, session_timer = 0, time.time(), 0
self.shared_status[-1] += 1
while True:
captured, np_frame = self.cap.read()
if captured:
frame_counter += 1
if self.custom_img_size:
np_frame = change_size(np_frame, self.config['CAMERA']['IMAGE']['CUSTOM IMG SIZE'])
if self.modify_img:
method = self.config['CAMERA']['IMAGE']['MODIFY_IMG']['color']
np_frame = change_color(np_frame, method)
frame_pose_results = self.dlc_live_object.get_pose(np_frame)
self.bp_array = np.concatenate((self.bp_array, [frame_pose_results]))
if self.bp_array.shape[0] >= 20:
self.bp_array = self.bp_array[-20:,:]
shared_array = np.ndarray(self.bp_array.shape, dtype=self.bp_array.dtype, buffer=self.shm_bp.buf)
shared_img = np.ndarray(np_frame.shape, dtype=np_frame.dtype, buffer=self.shm_img.buf)
shared_img[:] = np_frame[:]
shared_array[:] = self.bp_array[:]
current_fps = round(frame_counter / (time.time() - start_time), 2)
self.shared_latency[0] = current_fps
self.shared_latency[1] = frame_counter
else:
print('No camera feed detected.')
if __name__ == "__main__":
pose_session = Cam_Pose_Instance()
pose_session.initialize_pose()
pose_session.perform_pose()
|
[
"noreply@github.com"
] |
sgoldenlab.noreply@github.com
|
4a99c6319ece2159f2b4e58a9e147858fb422ca3
|
f74b34ead276aff6e3ce04894b609d6bf26095c0
|
/github api example.py
|
036ce936f710a0a2ff80b56468ee08342b668842
|
[
"MIT"
] |
permissive
|
LincT/PythonExamples
|
5f234ef3e386bb72be7076b4ac1b83f30b056eec
|
a0b61e8c60cd0754f2406b6b72fcd562667c9bb0
|
refs/heads/master
| 2023-03-01T08:10:06.648350
| 2021-02-11T18:46:36
| 2021-02-11T18:46:36
| 111,839,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
import requests
def wip():
print()
def apiToDictionary(url, *args):
request_string = url
response = (requests.get(request_string))
json = response.json()
response.close()
return dict(json)
def main():
# docDict = {"text":"592da8d73b39d3e1f54304fedf7456b1", "markdown":"6a4cccf1c66c780e72264a9fbcb9d5fe"}
# resultDict = apiToDictionary("https://api.github.com/gists/" + docDict.get("markdown"))
# print(dict(dict(resultDict.get('files')).get('MineCTC: Rules.md')).get('content'))
# resultDict = apiToDictionary("https://en.wikipedia.org/w/api.php?action=query&titles=Hebrew_alphabet&prop=revisions&rvprop=content&format=json&formatversion=2")
# print(dict(resultDict["query"]))
wip()
if __name__ == '__main__':
main()
|
[
"test@testserver.com"
] |
test@testserver.com
|
807b21863986eed838bfcb55599dff00e23de588
|
c0e0d9036f8bb478defa07781a2b1f0d9523c051
|
/Tasks_app/admin.py
|
df5de86546b915690391bee2e0de9a1e8bfde73e
|
[] |
no_license
|
NGarteman/Blog
|
3fdbec2e69c562fbe37782c5813ea54084a8e729
|
571fb5f35f5c474bfabc9d544f4e1713017c0d32
|
refs/heads/main
| 2023-09-05T13:33:51.435913
| 2021-11-20T07:10:41
| 2021-11-20T07:10:41
| 429,866,569
| 1
| 1
| null | 2021-11-19T17:37:42
| 2021-11-19T16:35:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
from django.contrib import admin
from .models import Task, Tag, Comment
admin.site.register(Task)
admin.site.register(Tag)
admin.site.register(Comment)
|
[
"maxlestor2@gmail.com"
] |
maxlestor2@gmail.com
|
0646c3635e117745dcdb20cb226c8c8661c8993d
|
c97b8be8a59da8ba73e22f4a1406b6c80f0884ed
|
/py-learn-code/code-blackChocolate/learn/day_5文字识别/learn_day_05.py
|
478374f7bab37c5d5669acd26549955a23951186
|
[
"MIT"
] |
permissive
|
F1ame12/for-py
|
8668d3247da91278543a336a82666dd3e15a5141
|
1353ee977a25f36caa754edf23c3e934724145c2
|
refs/heads/master
| 2022-09-04T06:02:00.351533
| 2018-07-21T05:02:41
| 2018-07-21T05:02:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
#pip install keyboard 监控键盘
import keyboard
from PIL import ImageGrab
import time
import sys
import os
import win32con
import win32clipboard as w
from 文字识别 import BaiDuAPI
from 剪切板 import GetTexts
#截图方法
def screenShot():
#截图的开始
if keyboard.wait('alt+ctrl+a') == None:
#截图的结束
if keyboard.wait('enter') == None:
time.sleep(0.01)
#获取图片
img = ImageGrab.grabclipboard()
#保存到项目中
img.save("for-py/py-learn-code/code-blackChocolate/learn/day_5文件识别/picture.png")
for n in range(sys.maxsize):
print("第%d次截图"%(n+1))
screenShot()
#将图片转换成文本
str = BaiDuAPI("for-py/py-learn-code/code-blackChocolate/learn/day_5文件识别/password.ini").picture2Texts("for-py/py-learn-code/code-blackChocolate/learn/day_5文件识别/picture.png")
print(str)
#将识别的文字添加到剪切板
GetTexts().setText(str)
flag = input()
flag = input("是否继续截图(Y/N)")
if flag == "N":
break
'''
print(sys.path)
'''
|
[
"1692959384@qq.com"
] |
1692959384@qq.com
|
4dd478a47e59c6f2986c31dcf7582023c8aff44c
|
738da30162f7d009826792121cab4123aafdd9c6
|
/isbn-verifier/isbn_verifier.py
|
b5fbdb737d84d748879183ac78d1396bc246bc24
|
[] |
no_license
|
TommieHG/Exercism-Python
|
ef3d70370a5ee6d4c2242255490d563812de127b
|
6e8b72b2f6ffc2bcc5096aba5c87b4632dbb91dc
|
refs/heads/master
| 2023-02-18T02:18:01.765803
| 2021-01-14T12:06:16
| 2021-01-14T12:06:16
| 322,819,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
import re
def is_valid(isbn):
list_of_numbers = re.findall(r"\w", isbn)
#remove too short or long numbers
if len(list_of_numbers) is not 10:
return False
#remove chars in the middle of sequence
for i in range(0,9):
if not list_of_numbers[i].isdigit():
return False
#change X to 10 or return false if it is anything else
if "X" in list_of_numbers[9]:
list_of_numbers[list_of_numbers.index("X")] = '10'
elif list_of_numbers[9].isalpha():
return False
#add and multiply according to algorithm
sum = 0
for i in range(len(list_of_numbers)):
sum += int(list_of_numbers[i]) * (10 - i)
return sum % 11 == 0
|
[
"tommieg@kth.se"
] |
tommieg@kth.se
|
a0e5f256d2cb86d124c17c2a9422fdeb7ca8e033
|
bc8a20a5f1d8115941b41d8ad8de2eb0f6213dae
|
/Strings/decode_string.py
|
9f42243ae23eecc064dbc2a3d723f048f35d0230
|
[] |
no_license
|
kapc/problems
|
773e17a654ee516816e80da03c6cde217d264a5f
|
8c2734a8084aa58d9278f3ec034cec200454a826
|
refs/heads/master
| 2020-03-31T22:00:09.142964
| 2018-10-11T14:10:45
| 2018-10-11T14:10:45
| 152,601,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
#! /usr/env/python
"""
A message containing letters from A-Z is being encoded to numbers using the following mapping:
'A' -> 1
'B' -> 2
...
'Z' -> 26
Given a non-empty string containing only digits, determine the total number of ways to decode it.
Example 1:
Input: "12"
Output: 2
Explanation: It could be decoded as "AB" (1 2) or "L" (12).
Example 2:
Input: "226"
Output: 3
Explanation: It could be decoded as "BZ" (2 26), "VF" (22 6), or "BBF" (2 2 6).
226
2 22
2 26 6
6
"""
def lookup(num):
"""
:param digit:
:return:
"""
num = int(num)
return chr(ord('A') + num - 1)
def decode_helper(in_str, max_len, result_till_now, result):
"""
:param in_str:
:param result_till_now:
:param result:
:return:
"""
if not in_str:
result.append(result_till_now)
return
decode_helper(in_str[1:], max_len, result_till_now + lookup(in_str[0]), result)
if in_str[1:] and int(in_str[0:2]) <= 26 and int(in_str[0:2]) >= 0:
decode_helper(in_str[2:], max_len, result_till_now + lookup(in_str[0:2]), result)
def decode(in_str):
"""
:param in_str:
:return:
"""
result = []
if not in_str:
raise Exception("Invalid String")
decode_helper(in_str, len(in_str), "", result)
return len(result)
decode("226")
decode("1")
decode("12332344342343244")
|
[
"ckap@Chandreshs-MacBook-Pro.local"
] |
ckap@Chandreshs-MacBook-Pro.local
|
b01d7a3cdcd836c5768a3d047858412d1628bd52
|
ec6812f0a2fbc8cad17bb13510f9071894f7d24e
|
/01_first_test_v1.py
|
292740e5f78cb7c727f1abbb1e3179f27c3fafd7
|
[] |
no_license
|
naoe1999/Training02_TensorFlow_v2
|
7044cf17c9d7e21a6a76aeab52ec59fdd516d0ea
|
a0f9828ee2b2f5893eb24717a25425cfbf83bf03
|
refs/heads/master
| 2023-07-16T16:04:57.474255
| 2021-09-02T11:41:54
| 2021-09-02T11:41:54
| 402,398,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
# 텐서플로우 2.x 설치된 환경에서 1.x 코드를 돌리기 위해 아래와 같이 import
import tensorflow.compat.v1 as tf
# 텐서플로우 2.x에 와서는 eager_execution이 기본적으로 enable 되어 있음
# tf.Session을 사용하기 위해 이를 disable 시켜야 함
tf.disable_eager_execution()
# 여기부터는 기존 코드와 동일
msg = tf.constant('Hello, TensorFlow')
sess = tf.Session()
print(sess.run(msg))
a = tf.constant(1)
b = tf.constant(2)
print(sess.run(a + b))
|
[
"naoe1999@naver.com"
] |
naoe1999@naver.com
|
3ce2bc9fc56982061e585feab4245d388dd09ad7
|
da489e1e388643174101981fbbdf12fd498a3ba0
|
/ihome13/ihome/api_1_0/houses.py
|
04060d610fb1249835258fd7910565bf95ce96a1
|
[] |
no_license
|
zb14755456464/home
|
f5344b90e91a538283524dbd21fecf51cdfdbe50
|
3ec478083c2f5792ddfbfdb92e8bd43f51d6242d
|
refs/heads/master
| 2023-01-04T16:37:28.869627
| 2018-03-11T08:25:38
| 2018-03-11T08:25:38
| 124,736,942
| 0
| 0
| null | 2022-12-27T14:57:48
| 2018-03-11T08:23:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 16,644
|
py
|
# coding=utf-8
import logging
import json
from . import api
from ihome import redis_store, constants, db
from ihome.models import Area
from flask import request, jsonify, g, session, current_app
from ihome.response_code import RET
from ihome.models import House, Facility, HouseImage, User, Order
from ihome.utils.commons import login_required
from ihome.utils.image_storage import storage
from datetime import datetime
@api.route('/areas/')
def get_area_info():
"""
1. 访问redis获取缓存
2. 没有缓存, 查询MySQL
3. 需要对数据转JSON
4. 保存redis中
5. 如果有缓存, 返回缓存数据
6. 返回给浏览器
"""
# 一. 处理业务逻辑
#1. 访问redis获取缓存
try:
# 直接获取JSON数据, 保存的也是JSON数据. 为了方便把数据返回给前端, 因此保存JSON返回JSON
areas_json = redis_store.get('area_info')
except Exception as e:
logging.error(e)
# 为了避免异常的事情发生, 如果执行失败, 就把数据设置为None
areas_json = None
# 2. 没有缓存, 查询MySQL
if areas_json is None:
# 查询MySQL所有的数据
areas_list = Area.query.all()
# 3. 需要对数据转JSON
areas = []
for area in areas_list:
# 调用模型的转字典方法, 不断拼接成一个areas
areas.append(area.to_dict())
# 将areas转换成JSON, 方便将来保存redis, 方便返回数据
areas_json = json.dumps(areas)
# 4. 保存redis中
try:
redis_store.setex('area_info', constants.AREA_INFO_REDIS_EXPIRES, areas_json)
db.session.commit()
except Exception as e:
logging.error(e)
db.session.rollback()
# 这里如果出错, 可以不用返回错误信息. 因此如果redis没有保存, 那么下一次会直接访问Mysql读取数据, 再次保存
# 5.如果有缓存, 返回缓存数据
else:
logging.info('当前数据从redis中读取的')
# 二. 返回数据
# return jsonify() --> contentType --> 'application/json'
# 如果调用了jsonify, 那么里面传递的数据, 是字符串. 而我们的城区数据已经转换成了JSON, 因此不能用jsonify
# 此时, 我们可以返回字典, 并告知是json格式的
# return jsonify(errno=RET.THIRDERR, errmsg='上传图像异常')
return '{"errno": 0, "errmsg": "查询城区信息成功", "data":{"areas": %s}}' % areas_json, 200, \
{"Content-Type": "application/json"}
@api.route("/houses/info", methods=["POST"])
@login_required
def save_house_info():
"""保存房屋的基本信息
前端发送过来的json数据
{
"title":"",
"price":"",
"area_id":"1",
"address":"",
"room_count":"",
"acreage":"",
"unit":"",
"capacity":"",
"beds":"",
"deposit":"",
"min_days":"",
"max_days":"",
"facility":["7","8"]
}
"""
# 一. 获取参数
house_data = request.get_json()
if house_data is None:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
title = house_data.get("title") # 房屋名称标题
price = house_data.get("price") # 房屋单价
area_id = house_data.get("area_id") # 房屋所属城区的编号
address = house_data.get("address") # 房屋地址
room_count = house_data.get("room_count") # 房屋包含的房间数目
acreage = house_data.get("acreage") # 房屋面积
unit = house_data.get("unit") # 房屋布局(几室几厅)
capacity = house_data.get("capacity") # 房屋容纳人数
beds = house_data.get("beds") # 房屋卧床数目
deposit = house_data.get("deposit") # 押金
min_days = house_data.get("min_days") # 最小入住天数
max_days = house_data.get("max_days") # 最大入住天数
# 二. 校验参数
if not all((title, price, area_id, address, room_count,acreage, unit, capacity, beds, deposit, min_days, max_days)):
return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
# 判断单价和押金格式是否正确
# 前端传送过来的金额参数是以元为单位,浮点数,数据库中保存的是以分为单位,整数
try:
price = int(float(price) * 100)
deposit = int(float(deposit) * 100)
except Exception as e:
return jsonify(errno=RET.DATAERR, errmsg="参数有误")
# 三. 保存信息
# 1. 创建房屋对象
user_id = g.user_id
house = House(
user_id=user_id,
area_id=area_id,
title=title,
price=price,
address=address,
room_count=room_count,
acreage=acreage,
unit=unit,
capacity=capacity,
beds=beds,
deposit=deposit,
min_days=min_days,
max_days=max_days
)
# 2. 处理房屋的设施信息
facility_id_list = house_data.get("facility")
if facility_id_list:
# 表示用户勾选了房屋设施
# 过滤用户传送的不合理的设施id
# select * from facility where id in (facility_id_list)
try:
facility_list = Facility.query.filter(Facility.id.in_(facility_id_list)).all()
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
# 为房屋添加设施信息
if facility_list:
house.facilities = facility_list
# 3. 保存数据库
try:
db.session.add(house)
db.session.commit()
except Exception as e:
logging.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
# 四. 返回
return jsonify(errno=RET.OK, errmsg="保存成功", data={"house_id": house.id})
@api.route("/houses/image", methods=["POST"])
@login_required
def save_house_image():
"""保存房屋的图片"""
# 获取参数 房屋的图片、房屋编号
house_id = request.form.get("house_id")
image_file = request.files.get("house_image")
# 校验参数
if not all([house_id, image_file]):
return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
# 1. 判断房屋是否存在
# 2. 上传房屋图片到七牛中
# 3. 保存图片信息到数据库中
# 4. 处理房屋基本信息中的主图片
# 5. 统一提交数据
# 1. 判断房屋是否存在
try:
house = House.query.get(house_id)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
if house is None:
return jsonify(errno=RET.NODATA, errmsg="房屋不存在")
# 2. 上传房屋图片到七牛中
image_data = image_file.read()
try:
file_name = storage(image_data)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="保存房屋图片失败")
# 3. 保存图片信息到数据库中
house_image = HouseImage(
house_id=house_id,
url=file_name
)
db.session.add(house_image)
# 4. 处理房屋基本信息中的主图片
if not house.index_image_url:
house.index_image_url = file_name
db.session.add(house)
# 5. 统一提交数据
try:
db.session.commit()
except Exception as e:
logging.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存图片信息失败")
image_url = constants.QINIU_URL_DOMAIN + file_name
return jsonify(errno=RET.OK, errmsg="保存图片成功", data={"image_url": image_url})
@api.route("/users/houses", methods=["GET"])
@login_required
def get_user_houses():
"""获取房东发布的房源信息条目"""
user_id = g.user_id
try:
user = User.query.get(user_id)
houses = user.houses
# houses = House.query.filter_by(user_id=user_id)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="获取数据失败")
# 将查询到的房屋信息转换为字典存放到列表中
houses_list = []
if houses:
for house in houses:
houses_list.append(house.to_basic_dict())
return jsonify(errno=RET.OK, errmsg="OK", data={"houses": houses_list})
@api.route("/houses/index", methods=["GET"])
def get_house_index():
"""获取主页幻灯片展示的房屋基本信息"""
# 从缓存中尝试获取数据
try:
ret = redis_store.get("home_page_data")
except Exception as e:
logging.error(e)
ret = None
if ret:
logging.info("hit house index info redis")
# 因为redis中保存的是json字符串,所以直接进行字符串拼接返回
return '{"errno":0, "errmsg":"OK", "data":%s}' % ret, 200, {"Content-Type": "application/json"}
else:
try:
# 查询数据库,返回房屋订单数目最多的5条数据
houses = House.query.order_by(House.order_count.desc()).limit(constants.HOME_PAGE_MAX_HOUSES)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not houses:
return jsonify(errno=RET.NODATA, errmsg="查询无数据")
houses_list = []
for house in houses:
# 如果房屋未设置主图片,则跳过
if not house.index_image_url:
continue
houses_list.append(house.to_basic_dict())
# 将数据转换为json,并保存到redis缓存
json_houses = json.dumps(houses_list)
try:
redis_store.setex("home_page_data", constants.HOME_PAGE_DATA_REDIS_EXPIRES, json_houses)
except Exception as e:
logging.error(e)
return '{"errno":0, "errmsg":"OK", "data":%s}' % json_houses, 200, {"Content-Type": "application/json"}
@api.route("/houses/<int:house_id>", methods=["GET"])
def get_house_detail(house_id):
"""获取房屋详情"""
# 前端在房屋详情页面展示时,如果浏览页面的用户不是该房屋的房东,则展示预定按钮,否则不展示,
# 所以需要后端返回登录用户的user_id
# 尝试获取用户登录的信息,若登录,则返回给前端登录用户的user_id,否则返回user_id=-1
user_id = session.get("user_id", "-1")
# 校验参数
if not house_id:
return jsonify(errno=RET.PARAMERR, errmsg="参数缺失")
# 先从redis缓存中获取信息
try:
ret = redis_store.get("house_info_%s" % house_id)
except Exception as e:
logging.error(e)
ret = None
if ret:
logging.info("hit house info redis")
return '{"errno":"0", "errmsg":"OK", "data":{"user_id":%s, "house":%s}}' % (user_id, ret), 200, {"Content-Type": "application/json"}
# 查询数据库
try:
house = House.query.get(house_id)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not house:
return jsonify(errno=RET.NODATA, errmsg="房屋不存在")
# 将房屋对象数据转换为字典
try:
house_data = house.to_full_dict()
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DATAERR, errmsg="数据出错")
# 存入到redis中
json_house = json.dumps(house_data)
try:
redis_store.setex("house_info_%s" % house_id, constants.HOUSE_DETAIL_REDIS_EXPIRE_SECOND, json_house)
except Exception as e:
current_app.logger.error(e)
resp = '{"errno":"0", "errmsg":"OK", "data":{"user_id":%s, "house":%s}}' % (user_id, json_house), 200, {"Content-Type": "application/json"}
return resp
# /api/v1_0/houses?sd=xxxx-xx-xx&ed=xxxx-xx-xx&aid=xx&sk=new&p=1
@api.route("/houses", methods=["GET"])
def get_house_list():
"""获取房屋列表信息"""
# 一. 获取参数
start_date_str = request.args.get("sd", "") # 想要查询的起始时间
end_date_str = request.args.get("ed", "") # 想要查询的终止时间
area_id = request.args.get("aid", "") # 区域id
sort_key = request.args.get("sk", "new") # 排序关键字
page = request.args.get("p", 1) # 页数
# 二. 校验参数
# 2.1判断日期
try:
start_date = None
if start_date_str:
start_date = datetime.strptime(start_date_str, "%Y-%m-%d")
end_date = None
if end_date_str:
end_date = datetime.strptime(end_date_str, "%Y-%m-%d")
if start_date and end_date:
assert start_date <= end_date
except Exception as e:
return jsonify(errno=RET.PARAMERR, errmsg="日期参数有误")
# 2.2判断页数
try:
page = int(page)
except Exception:
page = 1
# 三. 业务逻辑处理
# 3.1 先从redis缓存中获取数据
try:
redis_key = "houses_%s_%s_%s_%s" % (start_date_str, end_date_str, area_id, sort_key)
resp_json = redis_store.hget(redis_key, page)
except Exception as e:
current_app.logger.error(e)
resp_json = None
if resp_json:
# 表示从缓存中拿到了数据
return resp_json, 200, {"Content-Type": "application/json"}
# 3.2 定义查询数据的参数空列表
filter_params = []
# 3.3 处理区域信息
if area_id:
filter_params.append(House.area_id == area_id)
# 3.4 处理时间, 获取不冲突的房屋信息
try:
conflict_orders_li = []
if start_date and end_date:
# 从订单表中查询冲突的订单,进而获取冲突的房屋id
conflict_orders_li = Order.query.filter(Order.begin_date <= end_date, Order.end_date >= start_date).all()
elif start_date:
# 从订单表中查询冲突的订单,进而获取冲突的房屋id
conflict_orders_li = Order.query.filter(Order.end_date >= start_date).all()
elif end_date:
# 从订单表中查询冲突的订单,进而获取冲突的房屋id
conflict_orders_li = Order.query.filter(Order.begin_date <= end_date).all()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
if conflict_orders_li:
conflict_house_id_li = [order.house_id for order in conflict_orders_li]
# 添加条件,查询不冲突的房屋
filter_params.append(House.id.notin_(conflict_house_id_li))
# 3.5 排序
if sort_key == "booking":
house_query = House.query.filter(*filter_params).order_by(House.order_count.desc())
elif sort_key == "price-inc":
house_query = House.query.filter(*filter_params).order_by(House.price.asc())
elif sort_key == "price-des":
house_query = House.query.filter(*filter_params).order_by(House.price.desc())
else:
house_query = House.query.filter(*filter_params).order_by(House.create_time.desc())
# 3.6 分页 sqlalchemy的分页
try:
# 页数 每页数量 错误输出
house_page = house_query.paginate(page, constants.HOUSE_LIST_PAGE_CAPACITY, False)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
# 3.7 将数据转为JSON
house_li = house_page.items # 当前页中的数据结果
total_page = house_page.pages # 总页数
houses = []
for house in house_li:
houses.append(house.to_basic_dict())
# 将结果转换json字符串
resp = dict(errno=RET.OK, errmsg="查询成功", data={"houses": houses, "total_page": total_page, "current_page":page})
resp_json = json.dumps(resp)
# 3.8 将结果缓存到redis中
if page <= total_page:
# 用redis的哈希类型保存分页数据
redis_key = "houses_%s_%s_%s_%s" % (start_date_str, end_date_str, area_id, sort_key)
try:
# 使用redis中的事务
pipeline = redis_store.pipeline()
# 开启事务
pipeline.multi()
pipeline.hset(redis_key, page, resp_json)
pipeline.expire(redis_key, constants.HOUSE_LIST_PAGE_REDIS_EXPIRES)
# 执行事务
pipeline.execute()
except Exception as e:
current_app.logger.error(e)
# 四. 数据返回
return resp_json, 200, {"Content-Type": "application/json"}
|
[
"1273844671@qq.com"
] |
1273844671@qq.com
|
8a799e2cabd700ee77f0ad33017d302c27b1e94b
|
5a89fbc3f61e9eb1fd97545f223ebef4fd206cb9
|
/image_ces.py
|
c82242b58ab936dcdbefb7ca85b76324b0cbac66
|
[] |
no_license
|
MichealRGB/ImageRender
|
5abb46b0170b7a4a8976316f5eda9ac134f933f6
|
1c174c63cd17e9d450ae16e195a3fe93b86ca60f
|
refs/heads/master
| 2021-01-20T15:07:10.583758
| 2017-05-25T07:43:35
| 2017-05-25T07:43:35
| 90,722,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,395
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__= "Micheal"
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('images/ceshi2.jpg')
## 先进行灰度处理
gray_image = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
## 二值法进行处理
ret,thresh = cv2.threshold(gray_image,127,255,cv2.THRESH_BINARY)
## 腐蚀处理
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
eroded = cv2.erode(thresh, kernel)
#
## 提取轮廓
tmp_image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
## 绘制轮廓
cv2.drawContours(img, contours, -1, (0, 0, 0), 2)
for singel in contours:
if cv2.contourArea(singel) < 10.0 or cv2.contourArea(singel) > 200.0 :
print '-' *100
else:
print cv2.contourArea(singel)
x, y, w, h = cv2.boundingRect(singel)
img = cv2.rectangle(img, (x - 1, y - 1), (x + w + 1, y + h + 1), (0, 0, 255), 2)
# cv2.imshow('image',img)
# cv2.imshow('lunkuo',tmp_image)
# cv2.imshow("Eroded Image", eroded)
# cv2.imshow('threshold',thresh)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def draw_circle(event,x,y,flags,param):
if event==cv2.EVENT_MOUSEMOVE:
cv2.circle(img,(x,y),100,(255,0,0),-1)
img = np.zeros((512,512,3),np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)
while(1):
cv2.imshow('image',img)
if cv2.waitKey(20)&0xFF==27:
break
cv2.destroyAllWindows()
def show_threshold(image_path):
image = cv2.imread(image_path)
gray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5), (-1, -1))
dilate = cv2.dilate(gray_image, kernel)
ret,thresh1 = cv2.threshold(dilate,255,255,cv2.THRESH_BINARY|cv2.THRESH_OTSU)
ret,thresh2 = cv2.threshold(dilate,255,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
ret,thresh3 = cv2.threshold(dilate,0,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(dilate,0,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(dilate,0,255,cv2.THRESH_TOZERO_INV)
titles = ['Gray Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
images = [gray_image, thresh1, thresh2, thresh3, thresh4, thresh5]
for i in xrange(6):
plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show()
# show_threshold("images/zhenshiba.jpg")
|
[
"510943034@qq.com"
] |
510943034@qq.com
|
7880bcad5a3a3c0cfe1efef41f3c6bcba6189d35
|
49a0010d8c6c3dc4c92a5795ddee418de976ada4
|
/CH03/0311.py
|
e40cc572a518f4ea487a43c2a36bcac7623a0484
|
[] |
no_license
|
mytree/Test_PythonCV
|
4c20ee4f073558488d2bf947fca500f677f36d13
|
9ba1e0bc8e7d84f1f7df3ca051a3d7e70e1745bb
|
refs/heads/master
| 2020-09-13T06:20:04.743092
| 2019-11-19T11:37:40
| 2019-11-19T11:37:40
| 222,679,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
#0311.py
import cv2
import numpy as np
def onMouse(event, x, y, flags, param):
## global img
if event == cv2.EVENT_LBUTTONDOWN: # 마우스 왼쪽 버튼 클릭
if flags & cv2.EVENT_FLAG_SHIFTKEY: # shift 키와 함께
cv2.rectangle(param[0], (x-5,y-5),(x+5,y+5),(255,0,0))
else:
cv2.circle(param[0], (x,y), 5, (255,0,0), 3)
elif event == cv2.EVENT_RBUTTONDOWN: # 마우스 오른쪽 버튼 클릭
cv2.circle(param[0], (x,y), 5, (0,0,255), 3)
elif event == cv2.EVENT_LBUTTONDBLCLK: # 마우스 왼쪽 버튼 더블클릭
param[0] = np.zeros(param[0].shape, np.uint8) + 255
cv2.imshow("img", param[0])
img = np.zeros((512,512,3),np.uint8)+255
cv2.imshow('img',img)
cv2.setMouseCallback('img', onMouse, [img])
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
mytree.noreply@github.com
|
90d59540d8e2afccaf99b13f80cc0a735d81e0a3
|
85a7dde9c48945972a7f521f0fbb2eb56b323aa2
|
/obsolete_files/old/listening_eyes.py
|
69a61d1a1a20e04408df1df5513166b7f89f27b3
|
[] |
no_license
|
jwmcgettigan/renegade
|
1e8f61a14d6a5a7aff5c410f0c26bb166f95bd03
|
ef76bebc6867683e1fb3201be547f42aa6e65881
|
refs/heads/master
| 2021-04-06T13:53:12.945602
| 2018-07-17T22:09:13
| 2018-07-17T22:09:13
| 124,680,527
| 1
| 0
| null | 2018-07-17T22:09:14
| 2018-03-10T17:33:52
|
Makefile
|
UTF-8
|
Python
| false
| false
| 752
|
py
|
#!/usr/bin/env python
import rospy as rp
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
bridge = CvBridge()
def left_callback(data):
cv2.imshow("left_eye", bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
if cv2.waitKey(20) & 0xFF == ord('q'):
pass
def right_callback(data):
cv2.imshow("right_eye", bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
if cv2.waitKey(20) & 0xFF == ord('q'):
pass
def listener():
rp.init_node('listener', anonymous=True)
rp.Subscriber("left_eye", Image, left_callback)
rp.Subscriber("right_eye", Image, right_callback)
rp.spin()
if __name__ == '__main__':
listener()
cv2.destroyAllWindows()
|
[
"jwmcgettigan@gmail.com"
] |
jwmcgettigan@gmail.com
|
6b54a465ce0fb3d99b380b2741c436f2a04aba50
|
d5ab31874dd279656d7f24780e102b352f7f1e08
|
/reports/configs/only_logd_dmpnn8_2/only_logd_dmpnn8_2
|
2bdd9de08945ebddf565738ef8cab7e248ea5be7
|
[
"MIT"
] |
permissive
|
WangYitian123/graph_networks
|
77f76ab9ffa74bb4d52df52b1a17867c0c86be25
|
542f2a59b1b9708abdc718d77db7111f3ba2df96
|
refs/heads/main
| 2023-07-08T22:43:11.775430
| 2021-07-29T13:01:35
| 2021-07-29T13:01:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,815
|
from dataclasses import dataclass, field
from typing import List
import tensorflow as tf
from graph_networks.utilities import *
import logging
import os
ATOM_FEATURE_DIM = DGIN8_ATOM_FEATURE_DIM
EDGE_FEATURE_DIM = DGIN8_EDGE_FEATURE_DIM
@dataclass
class BasicModelConfig:
"""
Config for model1/2/3 run file.
General model parameters
"""
model_name: str = 'only_logd_dmpnn8_2' # without h_w in DGIN gin part - added h_v_0 instead
# whole train/eval split - no more double split within train data set
# random train/test split in get_data_sd - only change overall_seed
# CHANGES dgin3 10.02.2021:
# *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM
# *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'
# CHANGES dgin3 16.02.2021:
# *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM
# *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'
# CHANGES dgin4 16.02.2021:
# *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin
# encoding before logD prediction
# test_frags_dgin4 was added for species inclusion in model2 call()
batch_size: int =15
override_if_exists: bool = True
overall_seed: int = 2
# path to the project folder
project_path:str = "./"
retrain_model: bool = False
retrain_model_name: str = ''
retrain_model_epoch: str = ''
retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch
train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logd/'
test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logd/'
combined_dataset: bool = False
add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'
add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'
test_model: bool = False
test_model_epoch: str = '887'
# define the number or test runs for the CI.
# the mean and std of the RMSE and r^2 of the combined runs are taken as the output.
test_n_times: int = 1
# do you want to test the model with consensus mode?
# if yes, a defined ML model will be included in the consensus predictions during the testing.
consensus: bool = False
# include dropout during testing?
include_dropout: bool = False
test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch
# To save the prediction values for each property set to True
# When this flag is True - the whole test dataset is taken an test_n_times is set to zero!
save_predictions: bool = False
# define the folder where you want to save the predictions.
# For each property, a file is created under the property name ("./logd.txt","./logs.txt","./logp.txt","./others.txt")
test_prediction_output_folder: str = project_path+"reports/predictions/"+model_name+"/"
encode_hidden: bool = False
log_dir: str = project_path+'reports/logs/'+model_name+'.log'
verbosity_level = logging.INFO
model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'
plot_dir: str = project_path+'reports/figures/'+model_name+'/'
tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'
config_log_dir: str = project_path+'reports/configs/'+model_name+'/'
model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'
stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'
@dataclass
class DGINConfig:
"""
Config for direcpted-mpnn class.
"""
dropout_aggregate_dmpnn: bool = False
layernorm_aggregate_dmpnn: bool = True
dropout_passing_dmpnn: bool = False
layernorm_passing_dmpnn: bool = True
dropout_aggregate_gin: bool = False
layernorm_aggregate_gin: bool = True
dropout_passing_gin: bool = False
layernorm_passing_gin: bool = True
gin_aggregate_bias: bool = False
dmpnn_passing_bias: bool = False
init_bias: bool = False
massge_iteration_dmpnn: int = 4
message_iterations_gin: int = 4
dropout_rate: float = 0.15
input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)
passing_hidden_size: int = 56 # this can be changed
input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021
return_hv: bool = True # model3 parameter
@dataclass
class Model1Config:
"""
Config model1 class - no subclass configs are defined here.
"""
validation_split: float = 0.90
learning_rate: float = 0.004
clip_rate: float = 0.6
optimizer = tf.keras.optimizers.Adam(learning_rate)
lipo_loss_mse = tf.keras.losses.mse
lipo_loss_mae = tf.keras.losses.mae
logP_loss_mse = tf.keras.losses.mse
logS_loss_mse = tf.keras.losses.mse
other_loss_mse = tf.keras.losses.mse
mw_loss_mse = tf.keras.losses.mse
metric = tf.keras.losses.mae
epochs: int = 1600
# define the number of epochs for each test run.
save_after_epoch: int = 3
# dropout rate for the general model - mainly the MLP for the different log predictions
dropout_rate: float = 0.15 # the overall dropout rate of the readout functions
# the seed to shuffle the training/validation dataset; For the same dataset, even when
# combined_dataset is True, it is the same training/valiation instances
train_data_seed: int = 0
dropout_rate: float = 0.15 # the overall dropout rate of the readout functions
train_data_seed: int = 0
hidden_readout_1: int = 32
hidden_readout_2: int = 14
activation_func_readout = tf.nn.relu
include_logD: bool = True
include_logS: bool = False
include_logP: bool = False
include_other: bool = False
include_mw: bool = False
include_rot_bond: bool = False
include_HBA: bool = False
include_HBD: bool = False
# define the starting threshold for the RMSE of the model. When the comnbined RMSE
# is below this threshold, the model weights are being safed and a new threshold
# is set. It only serves as a starting threshold so that not too many models
# are being safed. Depends on how many log endpoints are being taken into
# consideration - as three endpoints have a higher combined RMSE as only one
# endpoint.
best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/
# define the individual thresholds. If one model is better, the corresponding
# model weights are being saved.
best_evaluation_threshold_logd: float = 1.85
best_evaluation_threshold_logp: float = 1.65
best_evaluation_threshold_logs: float = 2.15
best_evaluation_threshold_other: float = 2.15
# 2.45 for all_logs
# 0.70 logP
# 0.75 logD
# 1.00 logS
# 1.75 logSD
# 1.70 logSP
# 1.45 logDP
include_fragment_conv: bool = False # was introduced on the 4.12.2020
use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss
shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)
add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction
@dataclass
class FrACConfig:
"""
Config fragment aggregation class - no subclass configs are defined here.
"""
input_size_gin: int = 28
layernorm_aggregate: bool = True
reduce_mean: bool = True # when false -> reduce_sum
@dataclass
class MLConfig:
"""
Configs for the ML algorithm
"""
# which algorithm do you want to use for the consensus?
# possibilities are: "SVM", "RF", "KNN" or "LR" - all are regression models!
# SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;
algorithm: str = "SVM"
# which fingerprint to use - possibilities are: "ECFP" or "MACCS"
fp_types: str = "ECFP"
# If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048!
n_bits: int = 2048
# If "ECFP" fingerprint is used, define the radius
radius: int = 4
# define if descriptors should be included into the non-GNN molecular representation
include_descriptors: bool = True
# define if the descriptors should be standardizedby scaling and centering (Sklearn)
standardize: bool = True
@dataclass
class Config():
"""
Overall config class for model2 and run file.
Includes all submodels config
"""
basic_model_config: BasicModelConfig
model1_config: Model1Config
d_gin_config: DGINConfig
frag_acc_config: FrACConfig
ml_config: MLConfig
model: str = 'model11'
|
[
"wieder.oliver@gmail.com"
] |
wieder.oliver@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.