from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.details
def validate_user(username, password):
    # True when a document matches both the username and the password.
    user_query = list(db.users.find({"$and": [{'username': username}, {'password': password}]}))
    return len(user_query) > 0
def app_settings():
    # Read ./server/config.py as simple KEY=VALUE lines into a dict.
    settings_dict = {}
    with open("./server/config.py", "r") as settings:
        for element in settings.readlines():
            key, value = element.split('=', 1)
            settings_dict[key] = value.rstrip('\n')
    return settings_dict
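# Hedged usage sketch (not part of the original module): exercises the two
# helpers above against a running local MongoDB. The "admin"/"secret"
# credentials and the KEY=VALUE lines in ./server/config.py are illustrative.
if __name__ == "__main__":
    print("settings:", app_settings())
    print("login ok:", validate_user("admin", "secret"))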
|
from .action import Action
from .contract import SystemAction
|
"""
https://github.github.com/gfm/#disallowed-raw-html-extension-
"""
import pytest
from .utils import act_and_assert
@pytest.mark.skip
def test_disallowed_raw_html_extension_653():
"""
Test case 653: All other HTML tags are left untouched.
"""
# Arrange
source_markdown = """<strong> <title> <style> <em>
<blockquote>
<xmp> is disallowed. <XMP> is also disallowed.
</blockquote>"""
expected_tokens = [
"[para:]",
"[raw-html:a]",
"[raw-html:bab]",
"[raw-html:c2c]",
"[end-para]",
]
expected_gfm = """
"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
|
#!/usr/bin/python
"""
Author: Nick Russo <njrusmc@gmail.com>
File contains custom filters for use in Ansible playbooks.
https://www.ansible.com/
"""
import re
import ipaddress
class FilterModule(object):
"""
Defines a filter module object.
"""
@staticmethod
def filters():
"""
        Return a dictionary where each key is the filter name exposed
        to playbooks and each value is the corresponding function.
"""
return {
"ios_ospf_neighbor": FilterModule.ios_ospf_neighbor,
"ios_ospf_basic": FilterModule.ios_ospf_basic,
"ios_ospf_dbsum": FilterModule.ios_ospf_dbsum,
"ios_ospf_traffic": FilterModule.ios_ospf_traffic,
"ios_ospf_frr": FilterModule.ios_ospf_frr,
"ios_bfd_neighbor": FilterModule.ios_bfd_neighbor,
"check_bfd_up": FilterModule.check_bfd_up,
"iosxr_ospf_traffic": FilterModule.iosxr_ospf_traffic,
"iosxr_ospf_basic": FilterModule.iosxr_ospf_basic,
"iosxr_ospf_neighbor": FilterModule.iosxr_ospf_neighbor,
"nxos_ospf_basic": FilterModule.nxos_ospf_basic,
"nxos_ospf_neighbor": FilterModule.nxos_ospf_neighbor,
"nxos_ospf_dbsum": FilterModule.nxos_ospf_dbsum,
"nxos_ospf_traffic": FilterModule.nxos_ospf_traffic,
}
@staticmethod
def _read_match(match, key_filler_list=None):
"""
Helper function which consumes a match object and an optional
list of keys to populate with None values if match is invalid.
Many operations follow this basic workflow, which iterates over
the items captured in the match, attempts to make them integers
whenever possible, and returns the resulting dict.
"""
return_dict = None
if match:
return_dict = match.groupdict()
for key in return_dict.keys():
return_dict[key] = FilterModule._try_int(return_dict[key])
elif key_filler_list:
return_dict = {}
for key in key_filler_list:
return_dict.update({key: None})
return return_dict
@staticmethod
def _get_match_items(pattern, text, extra_flags=0):
"""
Helper function that can perform iterative block matching
given a pattern and input text. Additional regex flags (re.DOTALL, etc)
can be optionally specified. Any fields that can be parsed as
integers are converted and the list of dictionaries containing the
matches of each block is returned.
"""
regex = re.compile(pattern, re.VERBOSE + extra_flags)
items = [match.groupdict() for match in regex.finditer(text)]
for item in items:
for key in item.keys():
item[key] = FilterModule._try_int(item[key])
return items
@staticmethod
def nxos_ospf_traffic(text):
"""
Parses information from the Cisco NXOS "show ip ospf traffic" command
        family. This is useful for verifying OSPF process/area statistics
        for troubleshooting.
"""
process_pattern = r"""
OSPF\s+Process\s+ID\s+(?P<pid>\d+)\s+
.*?
Ignored\s+LSAs:\s+(?P<ignore_lsa>\d+),\s+
LSAs\s+dropped\s+during\s+SPF:\s+(?P<lsa_drop_spf>\d+)\s+
LSAs\s+dropped\s+during\s+graceful\s+restart:\s+(?P<lsa_drop_gr>\d+)
\s+Errors:\s+
drops\s+in\s+(?P<drops_in>\d+),\s+
drops\s+out\s+(?P<drops_out>\d+),\s+
errors\s+in\s+(?P<errors_in>\d+),\s+
errors\s+out\s+(?P<errors_out>\d+),\s+
hellos\s+in\s+(?P<hellos_in>\d+),\s+
dbds\s+in\s+(?P<dbds_in>\d+),\s+
lsreq\s+in\s+(?P<lsreq_in>\d+),\s+
lsu\s+in\s+(?P<lsu_in>\d+),\s+
lsacks\s+in\s+(?P<lsacks_in>\d+),\s+
unknown\s+in\s+(?P<unk_in>\d+),\s+
unknown\s+out\s+(?P<unk_out>\d+),\s+
no\s+ospf\s+(?P<no_ospf>\d+),\s+
bad\s+version\s+(?P<bad_ver>\d+),\s+
bad\s+crc\s+(?P<bad_crc>\d+),\s+
dup\s+rid\s+(?P<dup_rid>\d+),\s+
dup\s+src\s+(?P<dup_src>\d+),\s+
invalid\s+src\s+(?P<inv_src>\d+),\s+
invalid\s+dst\s+(?P<inv_dst>\d+),\s+
no\s+nbr\s+(?P<no_nbr>\d+),\s+
passive\s+(?P<passive>\d+),\s+
wrong\s+area\s+(?P<wrong_area>\d+),\s+
pkt\s+length\s+(?P<pkt_len>\d+),\s+
nbr\s+changed\s+rid/ip\s+addr\s+(?P<nbr_change>\d+)\s+
bad\s+auth\s+(?P<bad_auth>\d+),\s+
no\s+vrf\s+(?P<no_vrf>\d+)
"""
return FilterModule._get_match_items(process_pattern, text, re.DOTALL)
@staticmethod
def nxos_ospf_dbsum(text):
"""
Parses information from the Cisco NXOS
"show ip ospf database database-summary" command family.
This is useful for verifying various characteristics of
an OSPF database to count LSAs for simple verification.
"""
return_dict = {}
process_pattern = r"""
Process\s+(?P<process_id>\d+)\s+database\s+summary\s+
LSA\s+Type\s+Count\s+
Opaque\s+Link\s+\d+\s+
Router\s+(?P<total_lsa1>\d+)\s+
Network\s+(?P<total_lsa2>\d+)\s+
Summary\s+Network\s+(?P<total_lsa3>\d+)\s+
Summary\s+ASBR\s+(?P<total_lsa4>\d+)\s+
Type-7\s+AS\s+External\s+(?P<total_lsa7>\d+)\s+
Opaque\s+Area\s+\d+\s+
Type-5\s+AS\s+External\s+(?P<total_lsa5>\d+)
"""
regex = re.compile(process_pattern, re.VERBOSE)
match = regex.search(text)
key_filler_list = [
"process_id",
"total_lsa1",
"total_lsa2",
"total_lsa3",
"total_lsa4",
"total_lsa5",
"total_lsa7",
]
process = FilterModule._read_match(match, key_filler_list)
return_dict.update({"process": process})
area_pattern = r"""
Area\s+(?P<id>\d+\.\d+\.\d+\.\d+)\s+database\s+summary\s+
LSA\s+Type\s+Count\s+
Opaque\s+Link\s+\d+\s+
Router\s+(?P<num_lsa1>\d+)\s+
Network\s+(?P<num_lsa2>\d+)\s+
Summary\s+Network\s+(?P<num_lsa3>\d+)\s+
Summary\s+ASBR\s+(?P<num_lsa4>\d+)\s+
Type-7\s+AS\s+External\s+(?P<num_lsa7>\d+)\s+
"""
areas = FilterModule._get_match_items(area_pattern, text)
return_dict.update({"areas": areas})
return return_dict
@staticmethod
def nxos_ospf_neighbor(text):
"""
Parses information from the Cisco NXOS "show ip ospf neighbor" command
family. This is useful for verifying various characteristics of
an OSPF neighbor's state.
"""
pattern = r"""
(?P<rid>\d+\.\d+\.\d+\.\d+)\s+
(?P<priority>\d+)\s+
(?P<state>\w+)/\s*
(?P<role>[A-Z-]+)\s+
(?P<uptime>[0-9:hdwy]+|-)\s+
(?P<peer>\d+\.\d+\.\d+\.\d+)\s+
(?P<intf>[0-9A-Za-z./_-]+)
"""
return FilterModule._ospf_neighbor(pattern, text, ["uptime"])
@staticmethod
def nxos_ospf_basic(text):
"""
Parses information from the Cisco NXOS "show ospf" command
family. This is useful for verifying various characteristics of
an OSPF process and its basic configuration.
"""
return_dict = {}
process_pattern = r"""
Routing\s+Process\s+(?P<id>\d+)\s+with\s+ID\s+(?P<rid>\d+\.\d+\.\d+\.\d+)
.*
\s*Reference\s+Bandwidth\s+is\s+(?P<ref_bw>\d+)\s+Mbps
.*
\s*SPF\s+throttling\s+delay\s+time\s+of\s+(?P<init_spf>\d+)(?:\.\d+)\s+msecs,
\s*SPF\s+throttling\s+hold\s+time\s+of\s+(?P<min_spf>\d+)(?:\.\d+)\s+msecs,
\s*SPF\s+throttling\s+maximum\s+wait\s+time\s+of\s+(?P<max_spf>\d+)(?:\.\d+)\s+msecs
"""
regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL)
match = regex.search(text)
process = FilterModule._read_match(match, ["process"])
if process:
is_abr = text.find("area border") != -1
is_asbr = text.find("autonomous system boundary") != -1
is_stub_rtr = text.find("Originating router LSA with max") != -1
process.update(
{"is_abr": is_abr, "is_asbr": is_asbr, "is_stub_rtr": is_stub_rtr,}
)
return_dict.update({"process": process})
area_pattern = r"""
Area\s+(?:BACKBONE)?\((?P<id_dd>\d+\.\d+\.\d+\.\d+)\)\s+
\s+(?:Area\s+has\s+existed.*)\n
\s+Interfaces\s+in\s+this\s+area:\s+(?P<num_intfs>\d+).*\n
\s+(?:Passive.*)\n
\s+(?:This\s+area\s+is\s+a\s+(?P<type>\w+)\s+area)?
"""
regex = re.compile(area_pattern, re.VERBOSE)
areas = [match.groupdict() for match in regex.finditer(text)]
for area in areas:
area["num_intfs"] = FilterModule._try_int(area["num_intfs"])
converted_dd = ipaddress.IPv4Address(area["id_dd"])
area["id"] = FilterModule._try_int(converted_dd)
if not area["type"]:
area["type"] = "standard"
else:
area["type"] = area["type"].lower()
return_dict.update({"areas": areas})
return return_dict
@staticmethod
def _try_int(text):
"""
Attempts to parse an integer from the input text. If it fails, just
return the text as it was passed in. This is useful for iterating
across structures with many integers which should be stored as
integers, not strings.
"""
try:
return int(text)
except ValueError:
return text
@staticmethod
def ios_ospf_neighbor(text):
"""
Parses information from the Cisco IOS "show ip ospf neighbor" command
family. This is useful for verifying various characteristics of
an OSPF neighbor's state.
"""
pattern = r"""
(?P<rid>\d+\.\d+\.\d+\.\d+)\s+
(?P<priority>\d+)\s+
(?P<state>\w+)/\s*
(?P<role>[A-Z-]+)\s+
(?P<deadtime>[0-9:]+|-)\s+
(?P<peer>\d+\.\d+\.\d+\.\d+)\s+
(?P<intf>[0-9A-Za-z./_-]+)
"""
return FilterModule._ospf_neighbor(pattern, text, ["deadtime"])
@staticmethod
def ios_ospf_basic(text):
"""
Parses information from the Cisco IOS "show ospf" command
family. This is useful for verifying various characteristics of
an OSPF process and its basic configuration.
"""
return_dict = {}
process_pattern = r"""
Routing\s+Process\s+"ospf\s+(?P<id>\d+)"\s+with\s+ID\s+(?P<rid>\d+\.\d+\.\d+\.\d+)
.*
\s*Initial\s+SPF\s+schedule\s+delay\s+(?P<init_spf>\d+)\s+msecs
\s*Minimum\s+hold\s+time\s+between\s+two\s+consecutive\s+SPFs\s+(?P<min_spf>\d+)\s+msecs
\s*Maximum\s+wait\s+time\s+between\s+two\s+consecutive\s+SPFs\s+(?P<max_spf>\d+)\s+msecs
.*
\s*Reference\s+bandwidth\s+unit\s+is\s+(?P<ref_bw>\d+)\s+mbps
"""
regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL)
match = regex.search(text)
process = FilterModule._read_match(match, ["process"])
if process:
process.update(
{
"is_abr": text.find("area border") != -1,
"is_asbr": text.find("autonomous system boundary") != -1,
"is_stub_rtr": text.find("Originating router-LSAs with") != -1,
"has_ispf": text.find("Incremental-SPF enabled") != -1,
"has_bfd": text.find("BFD is enabled") != -1,
"has_ttlsec": text.find("Strict TTL checking enabled") != -1,
}
)
return_dict.update({"process": process})
area_pattern = r"""
Area\s+(?:BACKBONE\()?(?P<id>\d+)(?:\))?\s+
Number\s+of\s+interfaces\s+in\s+this\s+area\s+is\s+(?P<num_intfs>\d+).*\n
\s+(?:It\s+is\s+a\s+(?P<type>\w+)\s+area)?
"""
regex = re.compile(area_pattern, re.VERBOSE)
areas = [match.groupdict() for match in regex.finditer(text)]
for area in areas:
area["num_intfs"] = FilterModule._try_int(area["num_intfs"])
area["id"] = FilterModule._try_int(area["id"])
if not area["type"]:
area["type"] = "standard"
else:
area["type"] = area["type"].lower()
return_dict.update({"areas": areas})
return return_dict
@staticmethod
def ios_ospf_dbsum(text):
"""
Parses information from the Cisco IOS
"show ip ospf database database-summary" command family.
This is useful for verifying various characteristics of
an OSPF database to count LSAs for simple verification.
Note that this parser is generic enough to cover Cisco IOS-XR also.
"""
return_dict = {}
process_pattern = r"""
Process\s+(?P<process_id>\d+)\s+database\s+summary\s+
(?:LSA\s+Type\s+Count\s+Delete\s+Maxage\s+)?
Router\s+(?P<total_lsa1>\d+).*\n\s+
Network\s+(?P<total_lsa2>\d+).*\n\s+
Summary\s+Net\s+(?P<total_lsa3>\d+).*\n\s+
Summary\s+ASBR\s+(?P<total_lsa4>\d+).*\n\s+
Type-7\s+Ext\s+(?P<total_lsa7>\d+).*
\s+Type-5\s+Ext\s+(?P<total_lsa5>\d+)
"""
regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL)
match = regex.search(text)
key_filler_list = [
"process_id",
"total_lsa1",
"total_lsa2",
"total_lsa3",
"total_lsa4",
"total_lsa5",
"total_lsa7",
]
process = FilterModule._read_match(match, key_filler_list)
return_dict.update({"process": process})
area_pattern = r"""
Area\s+(?P<id>\d+)\s+database\s+summary\s+
(?:LSA\s+Type\s+Count\s+Delete\s+Maxage\s+)?
Router\s+(?P<num_lsa1>\d+).*\n\s+
Network\s+(?P<num_lsa2>\d+).*\n\s+
Summary\s+Net\s+(?P<num_lsa3>\d+).*\n\s+
Summary\s+ASBR\s+(?P<num_lsa4>\d+).*\n\s+
Type-7\s+Ext\s+(?P<num_lsa7>\d+)
"""
areas = FilterModule._get_match_items(area_pattern, text)
return_dict.update({"areas": areas})
return return_dict
@staticmethod
def ios_ospf_traffic(text):
"""
Parses information from the Cisco IOS "show ip ospf traffic" command
        family. This is useful for verifying OSPF process/area statistics
        for troubleshooting.
"""
interface_pattern = r"""
Interface\s+(?P<intf>[^s]\S+)\s+
.*?
OSPF\s+header\s+errors
\s+Length\s+(?P<length>\d+),
\s+Instance\s+ID\s+(?P<instance_id>\d+),
\s+Checksum\s+(?P<checksum>\d+),
\s+Auth\s+Type\s+(?P<auth_type>\d+),
\s+Version\s+(?P<version>\d+),
\s+Bad\s+Source\s+(?P<bad_src>\d+),
\s+No\s+Virtual\s+Link\s+(?P<no_vl>\d+),
\s+Area\s+Mismatch\s+(?P<area_mismatch>\d+),
\s+No\s+Sham\s+Link\s+(?P<no_sl>\d+),
\s+Self\s+Originated\s+(?P<self_orig>\d+),
\s+Duplicate\s+ID\s+(?P<dup_rid>\d+),
\s+Hello\s+(?P<hello_pkt>\d+),
\s+MTU\s+Mismatch\s+(?P<mtu_mismatch>\d+),
\s+Nbr\s+Ignored\s+(?P<nbr_ignored>\d+),
\s+LLS\s+(?P<lls>\d+),
\s+Unknown\s+Neighbor\s+(?P<unk_nbr>\d+),
\s+Authentication\s+(?P<auth>\d+),
\s+TTL\s+Check\s+Fail\s+(?P<ttlsec_fail>\d+),
\s+Adjacency\s+Throttle\s+(?P<adj_throttle>\d+),
\s+BFD\s+(?P<bfd>\d+),
\s+Test\s+discard\s+(?P<test_discard>\d+)
\s*OSPF\s+LSA\s+errors
\s+Type\s+(?P<lsa_type>\d+),
\s+Length\s+(?P<lsa_length>\d+),
\s+Data\s+(?P<lsa_data>\d+),
\s+Checksum\s+(?P<lsa_checksum>\d+)
"""
return FilterModule._get_match_items(interface_pattern, text, re.DOTALL)
@staticmethod
def ios_ospf_frr(text):
"""
Parses information from the Cisco IOS "show ip ospf fast-reroute"
command family. This is useful for verifying various characteristics of
OSPF FRR/LFA configuration to ensure it is configured correctly.
"""
pattern = r"""
(?P<id>\d+)\s+
(?P<topology>\w+)\s+
(?P<pref_pri>(High|Low))\s+
(?P<rlfa>(Yes|No))\s+
(?P<tilfa>(Yes|No))
"""
regex = re.compile(pattern, re.VERBOSE)
frr_area_dict = {}
for line in text.split("\n"):
match = regex.search(line)
if match:
gdict = match.groupdict()
area = "area" + gdict["id"]
gdict["id"] = FilterModule._try_int(gdict["id"])
gdict["rlfa"] = gdict["rlfa"].lower() == "yes"
gdict["tilfa"] = gdict["tilfa"].lower() == "yes"
gdict["pref_pri"] = gdict["pref_pri"].lower()
gdict["topology"] = gdict["topology"].lower()
frr_area_dict.update({area: gdict})
return frr_area_dict
@staticmethod
def ios_bfd_neighbor(text):
"""
Parses information from the Cisco IOS "show bfd neighbor" command
family. This is useful for verifying various characteristics of
        a BFD neighbor's state.
"""
pattern = r"""
(?P<peer>\d+\.\d+\.\d+\.\d+)\s+
(?P<ld>\d+)/
(?P<rd>\d+)\s+
(?P<rhrs>\w+)\s+
(?P<state>\w+)\s+
(?P<intf>[0-9A-Za-z./-]+)
"""
regex = re.compile(pattern, re.VERBOSE)
bfd_neighbors = []
for line in text.split("\n"):
match = regex.search(line)
if match:
gdict = match.groupdict()
gdict["ld"] = FilterModule._try_int(gdict["ld"])
gdict["rd"] = FilterModule._try_int(gdict["rd"])
gdict["rhrs"] = gdict["rhrs"].lower()
gdict["state"] = gdict["state"].lower()
gdict["intf"] = gdict["intf"].lower()
bfd_neighbors.append(gdict)
return bfd_neighbors
@staticmethod
def check_bfd_up(bfd_nbr_list, ospf_nbr):
"""
Used to check if a specific OSPF neighbor (dictionary returned from
ios_ospf_neighbor function) is present in the BFD neighbor list. This
compares the OSPF neighbor interface IP, not router ID, against the
BFD peer IP. It uses a simple linear search as the number of OSPF/BFD
neighbors on a device tends to be small (few hundred).
"""
for bfd_nbr in bfd_nbr_list:
if ospf_nbr["peer"] == bfd_nbr["peer"]:
is_up = bfd_nbr["state"] == "up" and bfd_nbr["rhrs"] == "up"
return is_up
raise ValueError("{0} not in bfd_nbr_list".format(ospf_nbr["peer"]))
@staticmethod
def iosxr_ospf_neighbor(text):
"""
Parses information from the Cisco IOS-XR "show ospf neighbor" command
family. This is useful for verifying various characteristics of
an OSPF neighbor's state.
"""
pattern = r"""
(?P<rid>\d+\.\d+\.\d+\.\d+)\s+
(?P<priority>\d+)\s+
(?P<state>\w+)/\s*
(?P<role>[A-Z-]+)\s+
(?P<deadtime>[0-9:]+|-)\s+
(?P<peer>\d+\.\d+\.\d+\.\d+)\s+
(?P<uptime>[0-9:hdwy]+)\s+
(?P<intf>[0-9A-Za-z./_-]+)
"""
return FilterModule._ospf_neighbor(pattern, text, ["deadtime", "uptime"])
@staticmethod
def _ospf_neighbor(pattern, text, time_keys=None):
"""
Helper function specific to OSPF neighbor parsing. Each device type
is slightly different in terms of the information provided, but
most fields are the same. The time_keys parameter is a list of keys
which are expected to have values in the format "hh:mm:ss". These
are commonly uptime, deadtime, etc ... and are most useful when
converted into seconds as an integer for comparative purposes.
"""
regex = re.compile(pattern, re.VERBOSE)
ospf_neighbors = []
for line in text.split("\n"):
match = regex.search(line)
if match:
gdict = match.groupdict()
gdict["priority"] = FilterModule._try_int(gdict["priority"])
gdict["state"] = gdict["state"].lower()
gdict["role"] = gdict["role"].lower()
gdict["intf"] = gdict["intf"].lower()
# If time keys is specified, iterate over the keys and perform
# the math to convert hh:mm:ss to an integer of summed seconds.
if time_keys:
for k in time_keys:
if gdict[k].count(":") == 2:
times = gdict[k].split(":")
parts = [FilterModule._try_int(t) for t in times]
secsum = parts[0] * 3600 + parts[1] * 60 + parts[2]
gdict.update({k + "_sec": secsum})
else:
# Issue #1, short term fix, static value of 0.
# This information isn't used anywhere yet.
gdict.update({k + "_sec": 0})
ospf_neighbors.append(gdict)
return ospf_neighbors
@staticmethod
def iosxr_ospf_basic(text):
"""
Parses information from the Cisco IOS-XR "show ospf" command
family. This is useful for verifying various characteristics of
an OSPF process and its basic configuration.
"""
return_dict = {}
process_pattern = r"""
Routing\s+Process\s+"ospf\s+(?P<id>\d+)"\s+with\s+ID\s+
(?P<rid>\d+\.\d+\.\d+\.\d+)
.*
\s*Initial\s+SPF\s+schedule\s+delay\s+(?P<init_spf>\d+)\s+msecs
\s*Minimum\s+hold\s+time\s+between\s+two\s+consecutive
\s+SPFs\s+(?P<min_spf>\d+)\s+msecs
\s*Maximum\s+wait\s+time\s+between\s+two\s+consecutive
\s+SPFs\s+(?P<max_spf>\d+)\s+msecs
"""
regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL)
match = regex.search(text)
process = FilterModule._read_match(match, ["process"])
if process:
is_abr = text.find("area border") != -1
is_asbr = text.find("autonomous system boundary") != -1
is_stub_rtr = text.find("Originating router-LSAs with max") != -1
process.update(
{"is_abr": is_abr, "is_asbr": is_asbr, "is_stub_rtr": is_stub_rtr,}
)
return_dict.update({"process": process})
area_pattern = r"""
Area\s+(?:BACKBONE\()?(?P<id>\d+)(?:\))?\s+
Number\s+of\s+interfaces\s+in\s+this\s+area\s+is\s+(?P<num_intfs>\d+).*?\n
\s+(?:It\s+is\s+a\s+(?P<type>\w+)\s+area)?
.*?
Number\s+of\s+LFA\s+enabled\s+interfaces\s+(?P<frr_intfs>\d+)
"""
regex = re.compile(area_pattern, re.VERBOSE + re.DOTALL)
areas = [match.groupdict() for match in regex.finditer(text)]
for area in areas:
area["num_intfs"] = FilterModule._try_int(area["num_intfs"])
area["id"] = FilterModule._try_int(area["id"])
area["frr_intfs"] = FilterModule._try_int(area["frr_intfs"])
if not area["type"]:
area["type"] = "standard"
else:
area["type"] = area["type"].lower()
return_dict.update({"areas": areas})
return return_dict
@staticmethod
def iosxr_ospf_traffic(text):
"""
Parses information from the Cisco IOS-XR "show ip ospf traffic" command
        family. This is useful for verifying OSPF process/area statistics
        for troubleshooting.
"""
interface_pattern = r"""
Interface\s+(?P<intf>\S+)\s+
Process\s+ID\s+(?P<pid>\d+)\s+
Area\s+(?P<area_id>\d+)\s+
.*?
OSPF\s+Header\s+Errors
\s+Version\s+(?P<version>\d+)
\s+LLS\s+(?P<lls>\d+)
\s+Type\s+(?P<type>\d+)
\s+Auth\s+RX\s+(?P<auth_rx>\d+)
\s+Length\s+(?P<length>\d+)
\s+Auth\s+TX\s+(?P<auth_tx>\d+)
\s+Checksum\s+(?P<checksum>\d+)
\s*OSPF\s+LSA\s+Errors
\s+Type\s+(?P<lsa_type>\d+)
\s+Checksum\s+(?P<lsa_checksum>\d+)
\s+Length\s+(?P<lsa_length>\d+)
\s+Data\s+(?P<lsa_data>\d+)
\s*OSPF\s+Errors
\s+Bad\s+Source\s+(?P<bad_src>\d+)
\s+Area\s+Mismatch\s+(?P<area_mismatch>\d+)
\s+No\s+Virtual\s+Link\s+(?P<no_vl>\d+)
\s+Self\s+Originated\s+(?P<self_orig>\d+)
\s+No\s+Sham\s+Link\s+(?P<no_sl>\d+)
\s+Duplicate\s+ID\s+(?P<dup_rid>\d+)
\s+Nbr\s+ignored\s+(?P<nbr_ignored>\d+)
\s+Graceful\s+Shutdown\s+(?P<gshut>\d+)
\s+Unknown\s+nbr\s+(?P<unk_nbr>\d+)
\s+Passive\s+intf\s+(?P<passive_intf>\d+)
\s+No\s+DR/BDR\s+(?P<no_dr_bdr>\d+)
\s+Disabled\s+intf\s+(?P<disable_intf>\d+)
\s+Enqueue\s+hello\s+(?P<enq_hello>\d+)
\s+Enqueue\s+router\s+(?P<enq_rtr>\d+)
\s+Unspecified\s+RX\s+(?P<unspec_rx>\d+)
\s+Unspecified\s+TX\s+(?P<unspec_tx>\d+)
\s+Socket\s+(?P<socket>\d+)
"""
return FilterModule._get_match_items(interface_pattern, text, re.DOTALL)
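# Hedged self-test sketch (not part of the original plugin): exercises the BFD
# parser and check_bfd_up() on a fabricated one-line "show bfd neighbors"
# output. The sample text, addresses, and interface name are illustrative only.
if __name__ == "__main__":
    SAMPLE_BFD = "10.0.0.2    1/2    Up    Up    GigabitEthernet0/1"
    nbrs = FilterModule.ios_bfd_neighbor(SAMPLE_BFD)
    print(nbrs)  # e.g. [{'peer': '10.0.0.2', 'ld': 1, 'rd': 2, 'rhrs': 'up', ...}]
    print(FilterModule.check_bfd_up(nbrs, {"peer": "10.0.0.2"}))  # True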
|
from __future__ import annotations
import logging
from typing import Dict, TYPE_CHECKING
import pygame
from scripts.core.base_classes.ui import UI
from scripts.core.constants import DEFAULT_IMAGE_SIZE, FontEffects, FontType, GAP_SIZE, SceneType
from scripts.ui_elements.frame import Frame
from scripts.ui_elements.panel import Panel
if TYPE_CHECKING:
from scripts.core.game import Game
__all__ = ["RunSetupUI"]
class RunSetupUI(UI):
"""
Represent the UI of the RunSetupScene.
"""
def __init__(self, game: Game):
super().__init__(game)
self.set_instruction_text("Choose who will lead the rebellion.")
def update(self, delta_time: float):
super().update(delta_time)
# data editor
if self.game.input.states["toggle_dev_console"]:
self.game.input.states["toggle_dev_console"] = False
self.game.change_scene(SceneType.DEV_DATA_EDITOR)
# panel specific input
if self.current_panel == self.panels["commanders"]:
self.handle_select_commander_input()
# exit panel
elif self.current_panel == self.panels["exit"]:
self.handle_exit_input()
def render(self, surface: pygame.surface):
self.draw_instruction(surface)
self.draw_elements(surface)
def rebuild_ui(self):
super().rebuild_ui()
commanders = self.game.data.commanders
selected_commander = self.game.run_setup.selected_commander
window_width = self.game.window.width
window_height = self.game.window.height
create_font = self.game.assets.create_font
create_fancy_font = self.game.assets.create_fancy_font
# positions
start_x = 20
start_y = 20
default_font = self.game.assets.create_font(FontType.DEFAULT, "")
font_height = default_font.line_height
# draw commanders
current_x = start_x
current_y = start_y
panel_elements = []
for selection_counter, commander in enumerate(commanders.values()):
icon = self.game.assets.commander_animations[commander["type"]]["icon"][0]
icon_width = icon.get_width()
frame = Frame((current_x, current_y), icon, is_selectable=True)
self.elements[commander["type"]] = frame
# highlight selected commander
if commander["type"] == selected_commander or selected_commander is None:
frame.is_selected = True
panel_elements.append(frame)
# increment draw pos and counter
current_x += icon_width + GAP_SIZE
panel = Panel(panel_elements, True)
self.add_panel(panel, "commanders")
# draw info
commander = commanders[selected_commander]
current_y = start_y + DEFAULT_IMAGE_SIZE + GAP_SIZE
info_x = start_x + 200
header_x = start_x
# name
frame = Frame((header_x, current_y), font=create_font(FontType.DISABLED, "Name"), is_selectable=False)
self.elements["name_header"] = frame
frame = Frame((info_x, current_y), font=create_font(FontType.DEFAULT, commander["name"]), is_selectable=False)
self.elements["name"] = frame
current_y += frame.height + GAP_SIZE
# backstory - N.B. no header and needs wider frame
line_width = window_width - (current_x * 2)
max_height = 110
frame = Frame(
(header_x, current_y),
font=create_fancy_font(commander["backstory"], font_effects=[FontEffects.FADE_IN]),
is_selectable=False,
max_width=line_width,
max_height=max_height,
)
self.elements["backstory"] = frame
current_y += frame.height + GAP_SIZE
# resources
frame = Frame((header_x, current_y), font=create_font(FontType.DISABLED, "Charisma"), is_selectable=False)
self.elements["charisma_header"] = frame
frame = Frame(
(info_x, current_y), font=create_font(FontType.DEFAULT, commander["charisma"]), is_selectable=False
)
self.elements["charisma"] = frame
current_y += frame.height + GAP_SIZE
frame = Frame((header_x, current_y), font=create_font(FontType.DISABLED, "Leadership"), is_selectable=False)
self.elements["leadership_header"] = frame
frame = Frame(
(info_x, current_y), font=create_font(FontType.DEFAULT, commander["leadership"]), is_selectable=False
)
self.elements["leadership"] = frame
current_y += frame.height + GAP_SIZE
# allies
frame = Frame((header_x, current_y), font=create_font(FontType.DISABLED, "Allies"), is_selectable=False)
self.elements["allies_header"] = frame
allies = ""
for ally in commander["allies"]:
# add comma
if allies == "":
allies += ally
else:
allies += ", " + ally
frame = Frame((info_x, current_y), font=create_font(FontType.DEFAULT, allies), is_selectable=False)
self.elements["allies"] = frame
current_y += frame.height + GAP_SIZE
# gold
frame = Frame((header_x, current_y), font=create_font(FontType.DISABLED, "Gold"), is_selectable=False)
self.elements["gold_header"] = frame
frame = Frame((info_x, current_y), font=create_font(FontType.DEFAULT, commander["gold"]), is_selectable=False)
self.elements["gold"] = frame
self.add_exit_button()
def refresh_info(self):
elements = self.elements
commander = self.game.data.commanders[self.game.run_setup.selected_commander]
elements["gold"].set_text(commander["gold"])
elements["leadership"].set_text(commander["leadership"])
elements["charisma"].set_text(commander["charisma"])
elements["backstory"].set_text(commander["backstory"])
elements["name"].set_text(commander["name"])
allies = ""
for ally in commander["allies"]:
# add comma
if allies == "":
allies += ally
else:
allies += ", " + ally
elements["allies"].set_text(allies)
def handle_select_commander_input(self):
# selections within panel
if self.game.input.states["left"]:
self.game.input.states["left"] = False
self.current_panel.select_previous_element()
# update selected commander and shown info
selected_commander = list(self.game.data.commanders)[self.current_panel.selected_index]
self.game.run_setup.selected_commander = selected_commander
self.refresh_info()
if self.game.input.states["right"]:
self.game.input.states["right"] = False
self.current_panel.select_next_element()
# update selected commander and shown info
selected_commander = list(self.game.data.commanders)[self.current_panel.selected_index]
self.game.run_setup.selected_commander = selected_commander
self.refresh_info()
# select option and move to exit
if self.game.input.states["select"]:
self.game.input.states["select"] = False
self.select_panel("exit")
def handle_exit_input(self):
if self.game.input.states["select"]:
self.game.run_setup.start_run()
if self.game.input.states["cancel"]:
# unselect current option
self.current_panel.unselect_all_elements()
# change to commanders
self.current_panel = self.panels["commanders"]
|
import asyncio
from tests.integration.it_utils import (
ASYNC_JSON_RPC_TESTNET_CLIENT,
fund_wallet,
sign_and_reliable_submission_async,
)
from xrpl.asyncio.wallet import generate_faucet_wallet
from xrpl.models.amounts import IssuedCurrencyAmount
from xrpl.models.transactions import OfferCreate, PaymentChannelCreate
from xrpl.wallet import Wallet
# TODO: use `asyncio.gather` for these, to parallelize
# TODO: set up wallet for each test instead of using one for all tests (now that it's
# faster)
async def _set_up_reusable_values():
WALLET = Wallet.create()
await fund_wallet(WALLET)
DESTINATION = Wallet.create()
await fund_wallet(DESTINATION)
TESTNET_WALLET = await generate_faucet_wallet(ASYNC_JSON_RPC_TESTNET_CLIENT)
TESTNET_DESTINATION = await generate_faucet_wallet(ASYNC_JSON_RPC_TESTNET_CLIENT)
OFFER = await sign_and_reliable_submission_async(
OfferCreate(
account=WALLET.classic_address,
sequence=WALLET.sequence,
taker_gets="13100000",
taker_pays=IssuedCurrencyAmount(
currency="USD",
issuer=WALLET.classic_address,
value="10",
),
),
WALLET,
)
WALLET.sequence += 1
PAYMENT_CHANNEL = await sign_and_reliable_submission_async(
PaymentChannelCreate(
account=WALLET.classic_address,
sequence=WALLET.sequence,
amount="1",
destination=DESTINATION.classic_address,
settle_delay=86400,
public_key=WALLET.public_key,
),
WALLET,
)
WALLET.sequence += 1
return (
WALLET,
DESTINATION,
TESTNET_WALLET,
TESTNET_DESTINATION,
OFFER,
PAYMENT_CHANNEL,
)
(
WALLET,
DESTINATION,
TESTNET_WALLET,
TESTNET_DESTINATION,
OFFER,
PAYMENT_CHANNEL,
) = asyncio.run(_set_up_reusable_values())
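# Hedged sketch (not part of the original module): the TODO above suggests
# parallelizing the faucet calls with `asyncio.gather`. A helper along these
# lines could replace the sequential awaits in _set_up_reusable_values.
async def _fund_wallets_concurrently(*wallets):
    # fund every wallet at the same time instead of awaiting each in turn
    await asyncio.gather(*(fund_wallet(wallet) for wallet in wallets))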
|
#!/usr/bin/env python3
import io
import csv
from model.network import Network
from model.drug import Drug
from model.disease import Disease
from model.edge import Edge
network = Network()
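# Assumed column layout of drug_disease.csv (header row skipped below):
#   0: drug name, 1: DrugBank ID, 2: relation type,
#   3: disease name, 4: disease ID, 5: PubMed ID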
with io.open('../data/PubMed/drug_disease.csv', 'r', encoding='utf-8', newline='') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
next(reader, None)
for row in reader:
drug = Drug(['DrugBank:%s' % row[1]], [row[0]])
disease = Disease([row[4]], [row[3]])
network.add_node(drug)
network.add_node(disease)
network.add_edge(Edge(drug, disease, row[2], {'source': 'PubMed', 'pmid': row[5]}))
network.save('../data/PubMed/graph.json')
|
from fastapi import FastAPI, File, UploadFile
from matplotlib import pyplot as plt
from PIL import Image
# import tensorflow as tf
import paddlehub as hub
import cv2
import json
ocr = hub.Module(name="chinese_ocr_db_crnn_server")
#ocr = hub.Module(name="chinese_ocr_db_crnn_mobile")
# import tensorflow_hub as tf_hub
# detector = tf_hub.load("https://tfhub.dev/tensorflow/faster_rcnn/inception_resnet_v2_640x640/1")
# detector_output = detector(image_tensor)
# class_ids = detector_output["detection_classes"]
app = FastAPI()
import multipart #import decoders
#from multipart.decoders import Base64Decoder
#multipart.decoder
@app.post("/files/")
async def create_file(file: bytes = File(...)):
return {"file_size": len(file)}
@app.post("/uploadfile/")
async def create_upload_file(file: UploadFile):
return {"filename": file.filename}
@app.post("/pic/")
async def create_upload_img(file: UploadFile):
print(file.filename)
_dir_name="/storage/lol/wechaty/getting-started/media/"+str(file.filename)
# #file.write("/storage/lol/wechaty/getting-started/src/"+str(file.filename))
res = await file.read()
with open(_dir_name,"wb") as f:
f.write(res)
result = ocr.recognize_text(images=[cv2.imread(_dir_name)])
#send_data = json.loads(result)
gg=[]
for _message in result[0]['data']:
gg.append(_message['text'])
print(gg)
return gg
# decoder = Base64Decoder(file.file)
# decoder.encode(file.)
#original_image = Image.open(file.file)
#original_image.show()
#print(file.file)
#return {"filename": file.filename}
|
import os
HOST_IP = os.environ.get("HOST_IP", "localhost")
UDP_IP = os.environ.get("UDP_IP", HOST_IP)
UDP_PORT = int(os.environ.get("UDP_PORT", 46739))  # cast so env-provided strings match the int default
UDP_MAX_BITS_RECV_ONE = int(os.environ.get("UDP_MAX_BITS_RECV_ONE", 10240))
CACHE_MAX_LOG_COUNT = 1024
LOG_FROMAT_STRING = "%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
|
# -*- coding: utf-8 -*-
import requests, time, logging, random
''' CHECK_IN_OUT '''
card_numbers = ['employee-id-1', 'employee-id-2']  # placeholder badge numbers
url = 'http://tpehrweb.tutorabc.com/TIMG_inout/form/SystemHttp.json'
log_file = 'log.txt'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=log_file,
filemode='a')
def check_inout(inout):
time.sleep(random.randint(60, 600))
for card_number in card_numbers:
payload = {'card_number': card_number, 'inout': inout, 'handlerName': 'Index.Index', 'method': 'Check_InOUT'}
try:
r = requests.post(url, data=payload, timeout=3)
except requests.ConnectionError:
logging.info("ConnectionError..." + "\n\n")
except requests.Timeout:
logging.info("TimeOut..." + "\n\n")
else:
logging.info(r.text)
if inout == 1 and r.status_code == requests.codes.ok:
logging.info("Check In Succeed." + "\n\n")
elif inout == 0 and r.status_code == requests.codes.ok:
logging.info("Check Out Succeed." + "\n\n")
def is_weekend():
day = time.strftime('%w')
if day in ['0', '6']:
return True
return False
def check_in_out():
if not (is_weekend()):
hour = time.strftime('%H')
if int(hour) <= 12:
check_inout(1)
        # mail.send_mail('Check-in succeeded', 'Check-in succeeded')
else:
check_inout(0)
        # mail.send_mail('Check-out succeeded', 'Check-out succeeded')
else:
logging.warning("It's weekend.")
check_in_out()
|
"""
Pure E/B for flat-sky from Thibaut Louis's
flipperPol, adapted to enlib
"""
import numpy as np
from enlib.fft import fft
class Purify(object):
    def __init__(self, shape, wcs, window):
        # maps.resolution and get_ft_attributes_enmap come from the surrounding
        # package; they are not defined or imported in this module.
        px = maps.resolution(shape, wcs)
        self.windict = init_deriv_window(window, px)
        lxMap, lyMap, self.modlmap, self.angLMap, lx, ly = get_ft_attributes_enmap(shape, wcs)
    def lteb_from_iqu(self, imap, method='pure'):
        fT, fE, fB = iqu_to_pure_lteb(imap[0], imap[1], imap[2], self.modlmap, self.angLMap, windowDict=self.windict, method=method)
        return fT, fE, fB
def init_deriv_window(window,px):
"""
px is in radians
"""
def matrixShift(l,row_shift,column_shift):
m1=np.hstack((l[:,row_shift:],l[:,:row_shift]))
m2=np.vstack((m1[column_shift:],m1[:column_shift]))
return m2
delta=px
Win=window[:]
dWin_dx=(-matrixShift(Win,-2,0)+8*matrixShift(Win,-1,0)-8*matrixShift(Win,1,0)+matrixShift(Win,2,0))/(12*delta)
dWin_dy=(-matrixShift(Win,0,-2)+8*matrixShift(Win,0,-1)-8*matrixShift(Win,0,1)+matrixShift(Win,0,2))/(12*delta)
d2Win_dx2=(-matrixShift(dWin_dx,-2,0)+8*matrixShift(dWin_dx,-1,0)-8*matrixShift(dWin_dx,1,0)+matrixShift(dWin_dx,2,0))/(12*delta)
d2Win_dy2=(-matrixShift(dWin_dy,0,-2)+8*matrixShift(dWin_dy,0,-1)-8*matrixShift(dWin_dy,0,1)+matrixShift(dWin_dy,0,2))/(12*delta)
d2Win_dxdy=(-matrixShift(dWin_dy,-2,0)+8*matrixShift(dWin_dy,-1,0)-8*matrixShift(dWin_dy,1,0)+matrixShift(dWin_dy,2,0))/(12*delta)
#In return we change the sign of the simple gradient in order to agree with np convention
return {'Win':Win, 'dWin_dx':-dWin_dx,'dWin_dy':-dWin_dy, 'd2Win_dx2':d2Win_dx2, 'd2Win_dy2':d2Win_dy2,'d2Win_dxdy':d2Win_dxdy}
def iqu_to_pure_lteb(T_map,Q_map,U_map,modLMap,angLMap,windowDict,method='pure'):
window = windowDict
win =window['Win']
dWin_dx=window['dWin_dx']
dWin_dy=window['dWin_dy']
d2Win_dx2=window['d2Win_dx2']
d2Win_dy2=window['d2Win_dy2']
d2Win_dxdy=window['d2Win_dxdy']
T_temp=T_map.copy()*win
fT=fft(T_temp,axes=[-2,-1])
Q_temp=Q_map.copy()*win
fQ=fft(Q_temp,axes=[-2,-1])
U_temp=U_map.copy()*win
fU=fft(U_temp,axes=[-2,-1])
fE=fT.copy()
fB=fT.copy()
fE=fQ[:]*np.cos(2.*angLMap)+fU[:]*np.sin(2.*angLMap)
fB=-fQ[:]*np.sin(2.*angLMap)+fU[:]*np.cos(2.*angLMap)
if method=='standard':
return fT, fE, fB
Q_temp=Q_map.copy()*dWin_dx
QWx=fft(Q_temp,axes=[-2,-1])
Q_temp=Q_map.copy()*dWin_dy
QWy=fft(Q_temp,axes=[-2,-1])
U_temp=U_map.copy()*dWin_dx
UWx=fft(U_temp,axes=[-2,-1])
U_temp=U_map.copy()*dWin_dy
UWy=fft(U_temp,axes=[-2,-1])
U_temp=2.*Q_map*d2Win_dxdy-U_map*(d2Win_dx2-d2Win_dy2)
QU_B=fft(U_temp,axes=[-2,-1])
U_temp=-Q_map*(d2Win_dx2-d2Win_dy2)-2.*U_map*d2Win_dxdy
QU_E=fft(U_temp,axes=[-2,-1])
modLMap=modLMap+2
fB[:] += QU_B[:]*(1./modLMap)**2
fB[:]-= (2.*1j)/modLMap*(np.sin(angLMap)*(QWx[:]+UWy[:])+np.cos(angLMap)*(QWy[:]-UWx[:]))
if method=='hybrid':
return fT, fE, fB
fE[:]+= QU_E[:]*(1./modLMap)**2
fE[:]-= (2.*1j)/modLMap*(np.sin(angLMap)*(QWy[:]-UWx[:])-np.cos(angLMap)*(QWx[:]+UWy[:]))
if method=='pure':
return fT, fE, fB
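# Hedged usage sketch (not part of the original module): with an enlib/enmap
# pipeline providing `shape`, `wcs`, a taper `window`, and an IQU map `imap`,
# the class above would be driven roughly like this:
#   purifier = Purify(shape, wcs, window)
#   fT, fE, fB = purifier.lteb_from_iqu(imap, method='pure')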
|
import requests
payload = {}
custom_header = {'user-agent': 'Mozilla/5.0 (Linux; Android 9; AOSP on IA Emulator Build/PSR1.180720.117; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/69.0.3497.100 Mobile Safari/537.36', 'Content-Type': 'application/x-www-form-urlencoded', 'X-Requested-With': 'com.solaxcloud.starter'}
r = requests.post('http://5.8.8.8/?optType=ReadRealTimeData', headers=custom_header)
response = r.json()
# print(r.json())
# print(response['Data'])
# print(r.text)
data = response['Data']
for entry in data:
    print(entry)
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''An example of running independent concurrent routines on multiple Cozmos.
Each robot requires its own device to control it.
'''
import asyncio
import sys
import cozmo
from cozmo.util import degrees
async def turn_left(sdk_conn):
robot = await sdk_conn.wait_for_robot()
cozmo.logger.info("Turning robot 1")
await robot.turn_in_place(degrees(90)).wait_for_completed()
async def turn_right(sdk_conn):
robot = await sdk_conn.wait_for_robot()
cozmo.logger.info("Turning robot 2")
await robot.turn_in_place(degrees(-90)).wait_for_completed()
if __name__ == '__main__':
cozmo.setup_basic_logging()
loop = asyncio.get_event_loop()
# Connect to both robots
try:
conn1 = cozmo.connect_on_loop(loop)
conn2 = cozmo.connect_on_loop(loop)
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
# Run two independent coroutines concurrently, one on each connection
task1 = asyncio.ensure_future(turn_left(conn1), loop=loop)
task2 = asyncio.ensure_future(turn_right(conn2), loop=loop)
# wait for both coroutines to complete before exiting the program
loop.run_until_complete(asyncio.gather(task1, task2))
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: image_utils.py
@time: 5/17/19 13:03
@description: image utility helpers
"""
from PIL import Image
import os
from utils.file_utils import *
from utils.video_utils import *
from imgpy import Img
import json
import re
# pip3 install imgpy
def analyseImage(path):
"""
    Analyse the image (full- vs. partial-frame GIF mode)
:param path:
:return:
"""
im = Image.open(path)
results = {
'size': im.size,
'mode': 'full',
}
try:
while True:
if im.tile:
tile = im.tile[0]
update_region = tile[1]
update_region_dimensions = update_region[2:]
if update_region_dimensions != im.size:
results['mode'] = 'partial'
break
im.seek(im.tell() + 1)
except EOFError:
pass
return results
def get_gif_frames(gif_path, temp_path):
"""
    Extract all static frames from a GIF, e.g.
get_gif_frames('./../gifs/3.gif', './../gif_temp/')
:param gif_path:
:return:
"""
    # analyse the gif first to learn its frame mode
mode = analyseImage(gif_path)['mode']
im = Image.open(gif_path)
i = 1
p = im.getpalette()
last_frame = im.convert('RGBA')
try:
while True:
# print("saving %s (%s) frame %d, %s %s" % (gif_path, mode, i, im.size, im.tile))
'''
If the GIF uses local colour tables, each frame will have its own palette.
If not, we need to apply the global palette to the new frame.
'''
if not im.getpalette():
im.putpalette(p)
new_frame = Image.new('RGBA', im.size)
'''
Is this file a "partial"-mode GIF where frames update a region of a different size to the entire image?
If so, we need to construct the new frame by pasting it on top of the preceding frames.
'''
if mode == 'partial':
new_frame.paste(last_frame)
new_frame.paste(im, (0, 0), im.convert('RGBA'))
new_frame.save(temp_path + '/%s-%d.png' % (''.join(os.path.basename(gif_path).split('.')[:-1]), i), 'PNG')
i += 1
last_frame = new_frame
im.seek(im.tell() + 1)
except EOFError:
        # print('EOFError: reached the end of the GIF')
pass
def get_gif_info(gif_path):
"""
    Get detailed information about a GIF file.
    Frame rates differ between GIFs: some are below 10 fps, others above.
:param gif_path:
:return:
"""
with Img(fp=gif_path) as im:
        # 1. how many frames
        frame_count = im.frame_count
        # 2. frame list of PIL.Image.Image objects
        # print(im.frames)
        # 3. unknown
        # print(im.exif)
        # 4. GIF
        # print(im.format)
        # 5. image info
        # {'version': b'GIF89a', 'background': 31, 'duration': 70, 'extension': (b'NETSCAPE2.0', 795), 'loop': 0}
        duration_pre = im.info.get('duration')
        # empirically, dividing by 7 gives the actual playback duration
        duration = duration_pre / 7
        # 6. color palette
        # print(im.mode_desc)
        # print((frame_count, duration))
        # return the frame rate and the duration
return (frame_count / duration), duration
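# Hedged usage sketch (not part of the original module): report a GIF's frame
# rate and dump its frames, reusing the illustrative paths from the docstrings.
if __name__ == "__main__":
    fps, duration = get_gif_info('./../gifs/3.gif')
    print('fps: %.2f, duration: %.2f' % (fps, duration))
    get_gif_frames('./../gifs/3.gif', './../gif_temp/')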
|
# -- coding:utf-8--
import os, sys  # sys is needed for the sys.exit calls below
def remove_dir(dir):
    # helper that deletes a file, or a directory tree, at the given path
    dir = dir.replace('\\', '/')
    if(os.path.isdir(dir)):
        for p in os.listdir(dir):
            remove_dir(os.path.join(dir, p))
        if(os.path.exists(dir)):
            os.rmdir(dir)
    else:
        if(os.path.exists(dir)):
            os.remove(dir)
def old_rm():
    # check for the first-generation launcher
    one_old_az_lj = r"C:\pythonX"
    if os.path.exists(one_old_az_lj):
        print("Detected an old version of v2ray\n")
        print("First-generation v2ray launcher!")
        print("Uninstalling v2ray\n")
        try:
            remove_dir(one_old_az_lj)
        except:
            print("Deletion failed!")
            # report the deletion error
            print("Error! X002\n")
            input("Press any key to exit!")
            sys.exit(0)
        print("Deletion finished!\n")
    # check for the second-generation launcher
    one_old_az_lj = r"C:\pythonz"
    if os.path.exists(one_old_az_lj):
        print("Detected an old version of v2ray\n")
        print("Second-generation v2ray launcher!")
        print("Uninstalling v2ray\n")
        try:
            remove_dir(one_old_az_lj)
        except:
            print("Deletion failed!")
            # report the deletion error
            print("Error! X002\n")
            input("Press any key to exit!")
            sys.exit(0)
        print("Deletion finished!\n")
    # check for the fourth-generation launcher
    one_old_az_lj = r"C:\pythonz4"
    if os.path.exists(one_old_az_lj):
        print("Detected an old version of v2ray\n")
        print("Fourth-generation v2ray launcher!")
        print("Uninstalling v2ray\n")
        try:
            remove_dir(one_old_az_lj)
        except:
            print("Deletion failed!")
            # report the deletion error
            print("Error! X002\n")
            input("Press any key to exit!")
            sys.exit(0)
        print("Deletion finished!\n")
|
import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from PIL import Image
from parameters import tag_image, tag_label, tag_name, label_folder_name
import random
import os
from typing import Union
class AugManager(object):
def __init__(self, iaalist=None):
if iaalist is None:
iaalist = iaa.Sequential([
iaa.Sometimes(0.5, iaa.ChannelShuffle(0.3)),
iaa.Sometimes(0.5, iaa.MultiplyHue((0.5, 1.5))),
iaa.Sometimes(0.5, iaa.AddToHueAndSaturation((-50, 50), per_channel=True)),
iaa.Sometimes(0.5, iaa.Fliplr(0.5)),
iaa.Sometimes(0.5, iaa.Flipud(0.5)),
iaa.Sometimes(0.5, iaa.Rotate((-50, 50)))
], random_order=True)
self.transformSet = iaalist
self.outscale = random.choice([0.8, 0.85, 0.9, 0.95])
def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}) -> dict:
image, label = input_dict[tag_image], input_dict[tag_label]
image = np.array(image)
label = np.array(label)
# size measure
y_max = image.shape[0]
x_max = image.shape[1]
# np.ndarray -> imgaug.augmentables.segmaps.SegmentationMapsOnImage
label = SegmentationMapsOnImage(label, shape=image.shape)
# augmentation
zoomset = iaa.OneOf([
iaa.Identity(), # do nothing
iaa.Affine(scale=self.outscale), # zoom out
RandomCrop(y_max, x_max).cut() # zoom in
])
image, label = zoomset(image=image, segmentation_maps=label)
image, label = self.transformSet(image=image, segmentation_maps=label)
# imgaug.augmentables.segmaps.SegmentationMapsOnImage -> np.ndarray
label = label.get_arr()
return {tag_image : image,
tag_label : label}
def augstore(self, src:dict, dst_base:str,
dataname_extension='.tiff', labelname_extension='.tif',
identifier=None):
os.makedirs(dst_base, exist_ok=True)
os.makedirs(os.path.join(dst_base, label_folder_name), exist_ok=True)
# get image
image = src[tag_image] # PIL.Image.Image
label = src[tag_label] # PIL.Image.Image
name = src[tag_name] # str
# PIL -> numpy
image = np.array(image)
label = np.array(label)
# size measure
y_max = image.shape[0]
x_max = image.shape[1]
# np.ndarray -> imgaug.augmentables.segmaps.SegmentationMapsOnImage
label = SegmentationMapsOnImage(label, shape=label.shape)
# augmentation
zoomset = iaa.OneOf([
iaa.Identity(), # do nothing
iaa.Affine(scale=self.outscale), # zoom out
RandomCrop(y_max, x_max).cut() # zoom in
])
image, label = zoomset(image=image, segmentation_maps=label)
image, label = self.transformSet(image=image, segmentation_maps=label)
# imgaug.augmentables.segmaps.SegmentationMapsOnImage -> np.ndarray
label = label.get_arr()
        if identifier is not None:
name = name + '_' + str(identifier)
# numpy -> PIL.Image.Image
image = Image.fromarray(image)
label = Image.fromarray(label)
image.save(os.path.join(dst_base, name + dataname_extension))
label.save(os.path.join(dst_base, label_folder_name, name + labelname_extension))
return {tag_image : image,
tag_label : label,
tag_name : name}
class RandomCrop(object):
def __init__(self, max_height, max_width):
assert isinstance(max_height, int) and max_height >= 1, 'max_height must be positive integer type.'
assert isinstance(max_width, int) and max_width >= 1, 'max_width must be positive integer type.'
self.percent_limit = 0.15
self.top, self.right, self.bottom, self.left = self.operate_location(max_height, max_width)
def operate_location(self, max_height, max_width):
import random
max_height = max_height + 1
max_width = max_width + 1
min_height = int(self.percent_limit * max_height)
min_width = int(self.percent_limit * max_width)
fix_height = random.randint(min_height, max_height)
fix_width = random.randint(min_width, max_width)
left = random.randint(0, max_width - fix_width)
up = random.randint(0, max_height - fix_height)
right = max_width - fix_width - left
down = max_height - fix_height - up
return up, right, down, left
def cut(self):
return iaa.Crop(px=(self.top, self.right, self.bottom, self.left))
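# Hedged usage sketch (not part of the original module): run one random
# augmentation pass over a PIL image/label pair. The file paths are
# assumptions; tag_image/tag_label come from the project's parameters module.
if __name__ == "__main__":
    sample_image = Image.open("sample_image.tiff")
    sample_label = Image.open("sample_label.tif")
    augmenter = AugManager()
    out = augmenter({tag_image: sample_image, tag_label: sample_label})
    print(out[tag_image].shape, out[tag_label].shape)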
|
from .fuzzy_set import FuzzySet
|
from glue.config import importer
from glue.dialogs.data_wizard.qt import data_wizard
@importer("Import from directory")
def directory_importer():
return data_wizard(mode='directories')
|
# Copyright 2016 Sanghoon Yoon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import tensorflow as tf
from shycdbn.dataset.photos import FoursqaurePhotos
from shycdbn.core.runner import Runner
# from shycdbn.crbm import CRBM
from shycdbn.cdbn import CDBN
FLAGS = tf.app.flags.FLAGS
def main(argv=None):
# runner = Runner(FoursqaurePhotos(),
# CRBM('layer1', 300, 3, 10, 32, 2, FLAGS.batch_size, FLAGS.learning_rate, True))
runner = Runner(FoursqaurePhotos(),
CDBN(FLAGS.batch_size, FLAGS.learning_rate))
runner.run()
if __name__ == '__main__':
tf.app.run()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class Survey(models.Model):
_inherit = 'survey.survey'
slide_ids = fields.One2many(
'slide.slide', 'survey_id', string="Certification Slides",
help="The slides this survey is linked to through the e-learning application")
slide_channel_ids = fields.One2many(
'slide.channel', string="Certification Courses", compute='_compute_slide_channel_data',
help="The courses this survey is linked to through the e-learning application",
groups='website_slides.group_website_slides_officer')
slide_channel_count = fields.Integer("Courses Count", compute='_compute_slide_channel_data', groups='website_slides.group_website_slides_officer')
@api.depends('slide_ids.channel_id')
def _compute_slide_channel_data(self):
for survey in self:
survey.slide_channel_ids = survey.slide_ids.mapped('channel_id')
survey.slide_channel_count = len(survey.slide_channel_ids)
# ---------------------------------------------------------
# Actions
# ---------------------------------------------------------
def action_survey_view_slide_channels(self):
action = self.env["ir.actions.actions"]._for_xml_id("website_slides.slide_channel_action_overview")
action['display_name'] = _("Courses")
if self.slide_channel_count == 1:
action.update({'views': [(False, 'form')],
'res_id': self.slide_channel_ids[0].id})
else:
action.update({'views': [[False, 'tree'], [False, 'form']],
'domain': [('id', 'in', self.slide_channel_ids.ids)]})
return action
# ---------------------------------------------------------
# Business
# ---------------------------------------------------------
def _check_answer_creation(self, user, partner, email, test_entry=False, check_attempts=True, invite_token=False):
""" Overridden to allow website_slides_officer to test certifications. """
self.ensure_one()
if test_entry and user.has_group('website_slides.group_website_slides_officer'):
return True
return super(Survey, self)._check_answer_creation(user, partner, email, test_entry=test_entry, check_attempts=check_attempts, invite_token=invite_token)
def _prepare_challenge_category(self):
slide_survey = self.env['slide.slide'].search([('survey_id', '=', self.id)])
return 'slides' if slide_survey else 'certification'
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test neurom.io.utils'''
import os
import sys
from io import StringIO
import numpy as np
from nose import tools as nt
from neurom import get
from neurom.core import Neuron, SomaError
from neurom.exceptions import NeuroMError, RawDataError, SomaError
from neurom.fst import _neuritefunc as _nf
from neurom.io import utils
_path = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_path, '../../../test_data')
SWC_PATH = os.path.join(DATA_PATH, 'swc')
VALID_DATA_PATH = os.path.join(_path, DATA_PATH, 'valid_set')
NRN_NAMES = ('Neuron', 'Neuron_h5v1', 'Neuron_h5v2')
FILES = [os.path.join(SWC_PATH, f)
for f in ['Neuron.swc',
'Single_apical_no_soma.swc',
'Single_apical.swc',
'Single_basal.swc',
'Single_axon.swc',
'sequential_trunk_off_0_16pt.swc',
'sequential_trunk_off_1_16pt.swc',
'sequential_trunk_off_42_16pt.swc',
'Neuron_no_missing_ids_no_zero_segs.swc']]
FILENAMES = [os.path.join(VALID_DATA_PATH, f)
for f in ['Neuron.swc', 'Neuron_h5v1.h5', 'Neuron_h5v2.h5']]
NO_SOMA_FILE = os.path.join(SWC_PATH, 'Single_apical_no_soma.swc')
DISCONNECTED_POINTS_FILE = os.path.join(SWC_PATH, 'Neuron_disconnected_components.swc')
MISSING_PARENTS_FILE = os.path.join(SWC_PATH, 'Neuron_missing_parents.swc')
INVALID_ID_SEQUENCE_FILE = os.path.join(SWC_PATH,
'non_increasing_trunk_off_1_16pt.swc')
def _get_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
def _mock_load_neuron(filename):
class MockNeuron(object):
def __init__(self, name):
self.soma = 42
self.neurites = list()
self.name = name
return MockNeuron(_get_name(filename))
def _check_neurites_have_no_parent(nrn):
for n in nrn.neurites:
nt.assert_true(n.root_node.parent is None)
def test_load_neurons():
nrns = utils.load_neurons(FILES, neuron_loader=_mock_load_neuron)
for i, nrn in enumerate(nrns):
nt.assert_equal(nrn.name, _get_name(FILES[i]))
nt.assert_raises(SomaError, utils.load_neurons, NO_SOMA_FILE)
def test_get_morph_files():
ref = set(['Neuron_h5v2.h5', 'Neuron_2_branch_h5v2.h5', 'Neuron_slice.h5',
'Neuron.swc', 'Neuron_h5v1.h5', 'Neuron_2_branch_h5v1.h5'])
FILE_PATH = os.path.abspath(os.path.join(DATA_PATH, 'valid_set'))
files = set(os.path.basename(f) for f in utils.get_morph_files(FILE_PATH))
nt.assert_equal(ref, files)
def test_load_neuron():
nrn = utils.load_neuron(FILENAMES[0])
    nt.assert_true(isinstance(nrn, Neuron))
    nt.assert_equal(nrn.name, 'Neuron')
_check_neurites_have_no_parent(nrn)
# python2 only test, for unicode strings
if sys.version_info < (3, 0):
nrn = utils.load_neuron(unicode(FILENAMES[0]))
        nt.assert_true(isinstance(nrn, Neuron))
        nt.assert_equal(nrn.name, 'Neuron')
_check_neurites_have_no_parent(nrn)
neuron_str = u""" 1 1 0 0 0 1. -1
2 3 0 0 0 1. 1
3 3 0 5 0 1. 2
4 3 -5 5 0 0. 3
5 3 6 5 0 0. 3
6 2 0 0 0 1. 1
7 2 0 -4 0 1. 6
8 2 6 -4 0 0. 7
9 2 -5 -4 0 0. 7
"""
utils.load_neuron(StringIO(neuron_str), reader='swc')
def test_neuron_name():
for fn, nn in zip(FILENAMES, NRN_NAMES):
nrn = utils.load_neuron(fn)
nt.eq_(nrn.name, nn)
@nt.raises(SomaError)
def test_load_bifurcating_soma_points_raises_SomaError():
utils.load_neuron(os.path.join(SWC_PATH, 'soma', 'bifurcating_soma.swc'))
def test_load_neuromorpho_3pt_soma():
nrn = utils.load_neuron(os.path.join(SWC_PATH, 'soma', 'three_pt_soma.swc'))
nt.eq_(len(nrn.neurites), 4)
nt.eq_(len(nrn.soma.points), 3)
nt.eq_(nrn.soma.radius, 2)
_check_neurites_have_no_parent(nrn)
NRN = utils.load_neuron(FILENAMES[0])
def test_neuron_section_ids():
# check section IDs
for i, sec in enumerate(NRN.sections):
nt.eq_(i, sec.id)
def test_neurites_have_no_parent():
_check_neurites_have_no_parent(NRN)
def test_neuron_sections():
all_nodes = set(NRN.sections)
neurite_nodes = set(_nf.iter_sections(NRN.neurites))
# check no duplicates
nt.assert_true(len(all_nodes) == len(NRN.sections))
# check all neurite tree nodes are
# in sections attribute
nt.assert_true(len(set(NRN.sections) - neurite_nodes) > 0)
def test_neuron_sections_are_connected():
    # check traversal by counting the number of sections in trees
for nrt in NRN.neurites:
root_node = nrt.root_node
nt.assert_equal(sum(1 for _ in root_node.ipreorder()),
sum(1 for _ in NRN.sections[root_node.id].ipreorder()))
def test_load_neuron_soma_only():
nrn = utils.load_neuron(os.path.join(DATA_PATH, 'swc', 'Soma_origin.swc'))
nt.eq_(len(nrn.neurites), 0)
nt.assert_equal(nrn.name, 'Soma_origin')
@nt.raises(SomaError)
def test_load_neuron_no_soma_raises_SomaError():
utils.load_neuron(NO_SOMA_FILE)
# TODO: decide if we want to check for this in fst.
@nt.nottest
@nt.raises(RawDataError)
def test_load_neuron_disconnected_points_raises():
utils.load_neuron(DISCONNECTED_POINTS_FILE)
@nt.raises(RawDataError)
def test_load_neuron_missing_parents_raises():
utils.load_neuron(MISSING_PARENTS_FILE)
# TODO: decide if we want to check for this in fst.
@nt.nottest
@nt.raises(RawDataError)
def test_load_neuron_invalid_id_sequence_raises():
utils.load_neuron(INVALID_ID_SEQUENCE_FILE)
def test_load_neurons_directory():
pop = utils.load_neurons(VALID_DATA_PATH)
nt.assert_equal(len(pop.neurons), 6)
nt.assert_equal(len(pop), 6)
nt.assert_equal(pop.name, 'valid_set')
for nrn in pop:
nt.assert_true(isinstance(nrn, Neuron))
def test_load_neurons_directory_name():
pop = utils.load_neurons(VALID_DATA_PATH, name='test123')
nt.assert_equal(len(pop.neurons), 6)
nt.assert_equal(len(pop), 6)
nt.assert_equal(pop.name, 'test123')
for nrn in pop:
nt.assert_true(isinstance(nrn, Neuron))
def test_load_neurons_filenames():
pop = utils.load_neurons(FILENAMES, name='test123')
nt.assert_equal(len(pop.neurons), 3)
nt.assert_equal(pop.name, 'test123')
for nrn, name in zip(pop.neurons, NRN_NAMES):
nt.assert_true(isinstance(nrn, Neuron))
nt.assert_equal(nrn.name, name)
SWC_ORD_PATH = os.path.join(DATA_PATH, 'swc', 'ordering')
SWC_ORD_REF = utils.load_neuron(os.path.join(SWC_ORD_PATH, 'sample.swc'))
def assert_items_equal(a, b):
nt.eq_(sorted(a), sorted(b))
def test_load_neuron_mixed_tree_swc():
nrn_mix = utils.load_neuron(os.path.join(SWC_ORD_PATH, 'sample_mixed_tree_sections.swc'))
assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), [5, 3])
assert_items_equal(get('number_of_sections_per_neurite', nrn_mix),
get('number_of_sections_per_neurite', SWC_ORD_REF))
assert_items_equal(get('number_of_segments', nrn_mix),
get('number_of_segments', SWC_ORD_REF))
assert_items_equal(get('total_length', nrn_mix),
get('total_length', SWC_ORD_REF))
def test_load_neuron_section_order_break_swc():
nrn_mix = utils.load_neuron(os.path.join(SWC_ORD_PATH, 'sample_disordered.swc'))
assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), [5, 3])
assert_items_equal(get('number_of_sections_per_neurite', nrn_mix),
get('number_of_sections_per_neurite', SWC_ORD_REF))
assert_items_equal(get('number_of_segments', nrn_mix),
get('number_of_segments', SWC_ORD_REF))
assert_items_equal(get('total_length', nrn_mix),
get('total_length', SWC_ORD_REF))
H5_PATH = os.path.join(DATA_PATH, 'h5', 'v1', 'ordering')
H5_ORD_REF = utils.load_neuron(os.path.join(H5_PATH, 'sample.h5'))
def test_load_neuron_mixed_tree_h5():
nrn_mix = utils.load_neuron(os.path.join(H5_PATH, 'sample_mixed_tree_sections.h5'))
assert_items_equal(get('number_of_sections_per_neurite', nrn_mix), [5, 3])
assert_items_equal(get('number_of_sections_per_neurite', nrn_mix),
get('number_of_sections_per_neurite', H5_ORD_REF))
def test_load_h5_trunk_points_regression():
# regression test for issue encountered while
# implementing PR #479, related to H5 unpacking
# of files with non-standard soma structure.
# See #480.
nrn = utils.load_neuron(os.path.join(DATA_PATH, 'h5', 'v1', 'Neuron.h5'))
nt.ok_(np.allclose(nrn.neurites[0].root_node.points[1],
[0., 0., 0.1, 0.31646374, 4., 4., 3.]))
nt.ok_(np.allclose(nrn.neurites[1].root_node.points[1],
[0., 0., 0.1, 1.84130445e-01, 3.0, 235., 234.]))
nt.ok_(np.allclose(nrn.neurites[2].root_node.points[1],
[0., 0., 0.1, 5.62225521e-01, 3., 466, 465]))
nt.ok_(np.allclose(nrn.neurites[3].root_node.points[1],
[0., 0., 0.1, 7.28555262e-01, 2., 697, 696]))
def test_load_unknown_type():
nt.assert_raises(NeuroMError, utils.load_data, 'fake.file')
def test_NeuronLoader():
dirpath = os.path.join(DATA_PATH, 'h5', 'v2')
loader = utils.NeuronLoader(dirpath, file_ext='.h5', cache_size=5)
nrn = loader.get('Neuron')
nt.ok_(isinstance(nrn, Neuron))
# check caching
nt.ok_(nrn == loader.get('Neuron'))
nt.ok_(nrn != loader.get('Neuron_2_branch'))
def test_NeuronLoader_mixed_file_extensions():
dirpath = os.path.join(DATA_PATH, 'valid_set')
loader = utils.NeuronLoader(dirpath)
loader.get('Neuron')
loader.get('Neuron_h5v1')
nt.assert_raises(NeuroMError, loader.get, 'NoSuchNeuron')
def test_ignore_exceptions():
pop = utils.load_neurons((NO_SOMA_FILE, ), ignored_exceptions=(SomaError, ))
nt.eq_(len(pop), 0)
pop = utils.load_neurons((NO_SOMA_FILE, ),
ignored_exceptions=(SomaError, RawDataError, ))
nt.eq_(len(pop), 0)
def test_get_files_by_path():
single_neurom = utils.get_files_by_path(NO_SOMA_FILE)
nt.eq_(len(single_neurom), 1)
neuron_dir = utils.get_files_by_path(VALID_DATA_PATH)
nt.eq_(len(neuron_dir), 6)
nt.assert_raises(IOError, utils.get_files_by_path, 'this/is/a/fake/path')
|
#
# Copyright (c) 2016 Matwey V. Kornilov <matwey.kornilov@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pybeam import beam_file
from pybeam.erlang_types import String
import unittest
import io
class BEAMFileTest(unittest.TestCase):
def setUp(self):
self.raw = b'FOR1\x00\x00\x02\x2cBEAMAtom\x00\x00\x00U\x00\x00\x00\x08\x08ssh_math\x04ipow\x06crypto\x07mod_pow\x10bytes_to_integer\x0bmodule_info\x06erlang\x0fget_module_info\x00\x00\x00Code\x00\x00\x00\\\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x99\x00\x00\x00\x07\x00\x00\x00\x03\x01\x10\x99\x10\x02\x12"0\x01 \'\x15\x01#(\x15\x13\x01\x0c\x000\x99 \x070\x00\x99 \x08\x10\x10\x00\x010\x99\x00\x02\x12b\x00\x01@@\x12\x03\x99\x00N\x10 \x01P\x99\x00\x02\x12b\x10\x01`@\x03\x13@\x12\x03\x99\x00N 0\x03StrT\x00\x00\x00\x00ImpT\x00\x00\x004\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x08\x00\x00\x00\x02ExpT\x00\x00\x00(\x00\x00\x00\x03\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02Attr\x00\x00\x00(\x83l\x00\x00\x00\x01h\x02d\x00\x03vsnl\x00\x00\x00\x01n\x10\x00\x8f\xde\xf9V}\xf3wr\x8a\x93\xc1p\xedDK\x9ajjCInf\x00\x00\x01@\x83l\x00\x00\x00\x04h\x02d\x00\x07optionsl\x00\x00\x00\x04h\x02d\x00\x06outdirk\x00</home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/../ebinh\x02d\x00\x01ik\x007/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/kernel/srcd\x00\x10warn_unused_varsd\x00\ndebug_infojh\x02d\x00\x07versionk\x00\x055.0.1h\x02d\x00\x04timeh\x06b\x00\x00\x07\xe0a\x02a\x0fa\x0ba\x08a\x12h\x02d\x00\x06sourcek\x00A/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/ssh_math.erlj'
self.io = io.BytesIO(self.raw)
self.beam = beam_file.BeamFile(self.io)
def test_attr(self):
self.assertDictEqual({'vsn': [205091931631091061218511176690734587535]}, self.beam.attributes)
def test_atoms(self):
self.assertListEqual(['ssh_math','ipow','crypto','mod_pow','bytes_to_integer','module_info','erlang','get_module_info'], self.beam.atoms)
def test_compileinfo(self):
self.assertDictEqual({
'source': String(b'/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/ssh_math.erl'),
'time': (2016, 2, 15, 11, 8, 18),
'version': String(b'5.0.1'),
'options': [
('outdir', String(b'/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/ssh/src/../ebin')),
('i', String(b'/home/abuild/rpmbuild/BUILD/otp_src_17.1/lib/kernel/src')),
'warn_unused_vars',
'debug_info'
]}, self.beam.compileinfo)
def test_exports(self):
self.assertListEqual([('module_info', 1, 6), ('module_info', 0, 4), ('ipow', 3, 2)], self.beam.exports)
def test_imports(self):
self.assertListEqual([('crypto', 'mod_pow', 3), ('crypto', 'bytes_to_integer', 1), ('erlang', 'get_module_info', 1), ('erlang', 'get_module_info', 2)], self.beam.imports)
def test_modulename(self):
self.assertEqual('ssh_math', self.beam.modulename)
|
import copy
import logging
import math
import time
from collections import deque
import numpy as np
from panda3d.bullet import BulletVehicle, BulletBoxShape, BulletRigidBodyNode, ZUp, BulletGhostNode
from panda3d.core import Vec3, TransformState, NodePath, LQuaternionf, BitMask32, PythonCallbackObject, TextNode
from pgdrive.pg_config import PGConfig
from pgdrive.pg_config.body_name import BodyName
from pgdrive.pg_config.cam_mask import CamMask
from pgdrive.pg_config.collision_group import CollisionGroup
from pgdrive.pg_config.parameter_space import Parameter, VehicleParameterSpace
from pgdrive.pg_config.pg_space import PGSpace
from pgdrive.scene_creator.ego_vehicle.vehicle_module.lidar import Lidar
from pgdrive.scene_creator.ego_vehicle.vehicle_module.routing_localization import RoutingLocalizationModule
from pgdrive.scene_creator.ego_vehicle.vehicle_module.vehicle_panel import VehiclePanel
from pgdrive.scene_creator.lanes.circular_lane import CircularLane
from pgdrive.scene_creator.lanes.lane import AbstractLane
from pgdrive.scene_creator.lanes.straight_lane import StraightLane
from pgdrive.scene_creator.map import Map
from pgdrive.utils.asset_loader import AssetLoader
from pgdrive.utils.element import DynamicElement
from pgdrive.utils.math_utils import get_vertical_vector, norm, clip
from pgdrive.utils.coordinates_shift import panda_position, pgdrive_position, panda_heading, pgdrive_heading
from pgdrive.utils.scene_utils import ray_localization
from pgdrive.world import RENDER_MODE_ONSCREEN
from pgdrive.world.constants import COLOR, COLLISION_INFO_COLOR
from pgdrive.world.image_buffer import ImageBuffer
from pgdrive.world.pg_physics_world import PGPhysicsWorld
from pgdrive.world.pg_world import PGWorld
class BaseVehicle(DynamicElement):
Ego_state_obs_dim = 9
"""
Vehicle chassis and its wheels index
0 1
II-----II
|
| <---chassis
|
II-----II
2 3
"""
PARAMETER_SPACE = PGSpace(VehicleParameterSpace.BASE_VEHICLE) # it will not sample config from parameter space
COLLISION_MASK = CollisionGroup.EgoVehicle
STEERING_INCREMENT = 0.05
default_vehicle_config = PGConfig(
dict(
lidar=(240, 50, 4), # laser num, distance, other vehicle info num
mini_map=(84, 84, 250), # buffer length, width
rgb_cam=(160, 120), # buffer length, width
depth_cam=(84, 84, True), # buffer length, width, view_ground
show_navi_mark=False,
increment_steering=False,
wheel_friction=0.6,
)
)
born_place = (5, 0)
LENGTH = None
WIDTH = None
def __init__(self, pg_world: PGWorld, vehicle_config: dict = None, random_seed: int = 0, config: dict = None):
"""
This vehicle config is different from self.get_config(); it defines which modules to use and
their parameters.
"""
super(BaseVehicle, self).__init__(random_seed)
self.pg_world = pg_world
self.node_path = NodePath("vehicle")
# config info
self.set_config(self.PARAMETER_SPACE.sample())
if config is not None:
self.set_config(config)
self.vehicle_config = self.get_vehicle_config(
vehicle_config
) if vehicle_config is not None else self.default_vehicle_config
self.increment_steering = self.vehicle_config["increment_steering"]
self.max_speed = self.get_config()[Parameter.speed_max]
self.max_steering = self.get_config()[Parameter.steering_max]
# create
self._add_chassis(pg_world.physics_world)
self.wheels = self._create_wheel()
# modules
self.image_sensors = {}
self.lidar = None
self.routing_localization = None
self.lane = None
self.lane_index = None
self.vehicle_panel = VehiclePanel(self.pg_world) if (self.pg_world.mode == RENDER_MODE_ONSCREEN) else None
# other info
self.throttle_brake = 0.0
self.steering = 0
self.last_current_action = deque([(0.0, 0.0), (0.0, 0.0)], maxlen=2)
self.last_position = self.born_place
self.last_heading_dir = self.heading
# collision info render
self.collision_info_np = self._init_collision_info_render(pg_world)
self.collision_banners = {} # to save time
self.current_banner = None
# done info
self.crash = False
self.out_of_road = False
self.attach_to_pg_world(self.pg_world.pbr_render, self.pg_world.physics_world)
@classmethod
def get_vehicle_config(cls, new_config):
default = copy.deepcopy(cls.default_vehicle_config)
default.update(new_config)
return default
def prepare_step(self, action):
"""
Save info and make decision before action
"""
self.last_position = self.position
self.last_heading_dir = self.heading
self.last_current_action.append(action) # the real step of physics world is implemented in taskMgr.step()
if self.increment_steering:
self.set_incremental_action(action)
else:
self.set_act(action)
if self.vehicle_panel is not None:
self.vehicle_panel.renew_2d_car_para_visualization(self)
def update_state(self, pg_world=None):
if self.lidar is not None:
self.lidar.perceive(self.position, self.heading_theta, self.pg_world.physics_world)
if self.routing_localization is not None:
self.lane, self.lane_index = self.routing_localization.update_navigation_localization(self)
return self._state_check()
def reset(self, map: Map, pos: np.ndarray, heading: float):
"""
pos is a 2-d array, and heading is a float (unit degree)
"""
heading = -np.deg2rad(heading) - np.pi / 2
self.chassis_np.setPos(Vec3(*pos, 1))
self.chassis_np.setQuat(LQuaternionf(np.cos(heading / 2), 0, 0, np.sin(heading / 2)))
self.update_map_info(map)
self.chassis_np.node().clearForces()
self.chassis_np.node().setLinearVelocity(Vec3(0, 0, 0))
self.chassis_np.node().setAngularVelocity(Vec3(0, 0, 0))
self.system.resetSuspension()
# done info
self.crash = False
self.out_of_road = False
# other info
self.throttle_brake = 0.0
self.steering = 0
self.last_current_action = deque([(0.0, 0.0), (0.0, 0.0)], maxlen=2)
self.last_position = self.born_place
self.last_heading_dir = self.heading
if "depth_cam" in self.image_sensors and self.image_sensors["depth_cam"].view_ground:
for block in map.blocks:
block.node_path.hide(CamMask.DepthCam)
"""------------------------------------------- act -------------------------------------------------"""
def set_act(self, action):
para = self.get_config()
steering = action[0]
self.throttle_brake = action[1]
self.steering = steering
self.system.setSteeringValue(self.steering * para[Parameter.steering_max], 0)
self.system.setSteeringValue(self.steering * para[Parameter.steering_max], 1)
self._apply_throttle_brake(action[1])
def set_incremental_action(self, action: np.ndarray):
self.throttle_brake = action[1]
self.steering += action[0] * self.STEERING_INCREMENT
self.steering = clip(self.steering, -1, 1)
steering = self.steering * self.max_steering
self.system.setSteeringValue(steering, 0)
self.system.setSteeringValue(steering, 1)
self._apply_throttle_brake(action[1])
def _apply_throttle_brake(self, throttle_brake):
para = self.get_config()
max_engine_force = para[Parameter.engine_force_max]
max_brake_force = para[Parameter.brake_force_max]
for wheel_index in range(4):
if throttle_brake >= 0:
self.system.setBrake(2.0, wheel_index)
if self.speed > self.max_speed:
self.system.applyEngineForce(0.0, wheel_index)
else:
self.system.applyEngineForce(max_engine_force * throttle_brake, wheel_index)
else:
self.system.applyEngineForce(0.0, wheel_index)
self.system.setBrake(abs(throttle_brake) * max_brake_force, wheel_index)
"""---------------------------------------- vehicle info ----------------------------------------------"""
@property
def position(self):
return pgdrive_position(self.chassis_np.getPos())
@property
def speed(self):
"""
km/h
"""
speed = self.chassis_np.node().get_linear_velocity().length() * 3.6
return clip(speed, 0.0, 100000.0)
@property
def heading(self):
real_heading = self.heading_theta
heading = np.array([np.cos(real_heading), np.sin(real_heading)])
return heading
@property
def heading_theta(self):
"""
Get the heading theta of vehicle, unit [rad]
:return: heading in rad
"""
return (pgdrive_heading(self.chassis_np.getH()) - 90) / 180 * math.pi
@property
def velocity(self) -> np.ndarray:
return self.speed * self.velocity_direction
@property
def velocity_direction(self):
direction = self.system.get_forward_vector()
return np.asarray([direction[0], -direction[1]])
@property
def forward_direction(self):
raise ValueError("This function id deprecated.")
# print("This function id deprecated.")
# direction = self.vehicle.get_forward_vector()
# return np.array([direction[0], -direction[1]])
@property
def current_road(self):
return self.lane_index[0:-1]
"""---------------------------------------- some math tool ----------------------------------------------"""
def heading_diff(self, target_lane):
lateral = None
if isinstance(target_lane, StraightLane):
lateral = np.asarray(get_vertical_vector(target_lane.end - target_lane.start)[1])
elif isinstance(target_lane, CircularLane):
if target_lane.direction == -1:
lateral = self.position - target_lane.center
else:
lateral = target_lane.center - self.position
else:
raise ValueError("Unknown target lane type: {}".format(type(target_lane)))
lateral_norm = norm(lateral[0], lateral[1])
forward_direction = self.heading
# print(f"Old forward direction: {self.forward_direction}, new heading {self.heading}")
forward_direction_norm = norm(forward_direction[0], forward_direction[1])
if not lateral_norm * forward_direction_norm:
return 0
# cos = self.forward_direction.dot(lateral) / (np.linalg.norm(lateral) * np.linalg.norm(self.forward_direction))
cos = (
(forward_direction[0] * lateral[0] + forward_direction[1] * lateral[1]) /
(lateral_norm * forward_direction_norm)
)
# return cos
# Normalize to 0, 1
return clip(cos, -1.0, 1.0) / 2 + 0.5
def projection(self, vector):
# Projected to the heading of vehicle
# forward_vector = self.vehicle.get_forward_vector()
# forward_old = (forward_vector[0], -forward_vector[1])
forward = self.heading
# print(f"[projection] Old forward {forward_old}, new heading {forward}")
norm_velocity = norm(forward[0], forward[1]) + 1e-6
project_on_heading = (vector[0] * forward[0] + vector[1] * forward[1]) / norm_velocity
side_direction = get_vertical_vector(forward)[1]
side_norm = norm(side_direction[0], side_direction[1]) + 1e-6
project_on_side = (vector[0] * side_direction[0] + vector[1] * side_direction[1]) / side_norm
return project_on_heading, project_on_side
def lane_distance_to(self, vehicle, lane: AbstractLane = None) -> float:
assert self.routing_localization is not None, "a routing and localization module should be added " \
"to interact with other vehicles"
if not vehicle:
return np.nan
if not lane:
lane = self.lane
return lane.local_coordinates(vehicle.position)[0] - lane.local_coordinates(self.position)[0]
"""-------------------------------------- for vehicle making ------------------------------------------"""
def _add_chassis(self, pg_physics_world: PGPhysicsWorld):
para = self.get_config()
chassis = BulletRigidBodyNode(BodyName.Ego_vehicle_top)
chassis.setIntoCollideMask(BitMask32.bit(self.COLLISION_MASK))
chassis_shape = BulletBoxShape(
Vec3(
para[Parameter.vehicle_width] / 2, para[Parameter.vehicle_length] / 2,
para[Parameter.vehicle_height] / 2
)
)
ts = TransformState.makePos(Vec3(0, 0, para[Parameter.chassis_height] * 2))
chassis.addShape(chassis_shape, ts)
heading = np.deg2rad(-para[Parameter.heading] - 90)
chassis.setMass(para[Parameter.mass])
self.chassis_np = self.node_path.attachNewNode(chassis)
# spawn position is not randomized for now
self.chassis_np.setPos(Vec3(*self.born_place, 1))
self.chassis_np.setQuat(LQuaternionf(np.cos(heading / 2), 0, 0, np.sin(heading / 2)))
chassis.setDeactivationEnabled(False)
chassis.notifyCollisions(True)  # advanced collision check: enable contact notifications for this node
self.pg_world.physics_world.dynamic_world.setContactAddedCallback(PythonCallbackObject(self._collision_check))
self.dynamic_nodes.append(chassis)
chassis_beneath = BulletGhostNode(BodyName.Ego_vehicle)
chassis_beneath.setIntoCollideMask(BitMask32.bit(self.COLLISION_MASK))
chassis_beneath.addShape(chassis_shape)
self.chassis_beneath_np = self.chassis_np.attachNewNode(chassis_beneath)
self.dynamic_nodes.append(chassis_beneath)
self.system = BulletVehicle(pg_physics_world.dynamic_world, chassis)
self.system.setCoordinateSystem(ZUp)
self.dynamic_nodes.append(self.system)  # detaching the chassis also detaches the system, so a warning will be generated
self.LENGTH = para[Parameter.vehicle_length]
self.WIDTH = para[Parameter.vehicle_width]
if self.render:
model_path = 'models/ferra/scene.gltf'
self.chassis_vis = self.loader.loadModel(AssetLoader.file_path(model_path))
self.chassis_vis.setZ(para[Parameter.vehicle_vis_z])
self.chassis_vis.setY(para[Parameter.vehicle_vis_y])
self.chassis_vis.setH(para[Parameter.vehicle_vis_h])
self.chassis_vis.set_scale(para[Parameter.vehicle_vis_scale])
self.chassis_vis.reparentTo(self.chassis_np)
def _create_wheel(self):
para = self.get_config()
f_l = para[Parameter.front_tire_longitude]
r_l = -para[Parameter.rear_tire_longitude]
lateral = para[Parameter.tire_lateral]
axis_height = para[Parameter.tire_radius] + 0.05
radius = para[Parameter.tire_radius]
wheels = []
for k, pos in enumerate([Vec3(lateral, f_l, axis_height), Vec3(-lateral, f_l, axis_height),
Vec3(lateral, r_l, axis_height), Vec3(-lateral, r_l, axis_height)]):
wheel = self._add_wheel(pos, radius, True if k < 2 else False, True if k == 0 or k == 2 else False)
wheels.append(wheel)
return wheels
def _add_wheel(self, pos: Vec3, radius: float, front: bool, left):
wheel_np = self.node_path.attachNewNode("wheel")
if self.render:
model_path = 'models/yugo/yugotireR.egg' if left else 'models/yugo/yugotireL.egg'
wheel_model = self.loader.loadModel(AssetLoader.file_path(model_path))
wheel_model.reparentTo(wheel_np)
wheel_model.set_scale(1.4, radius / 0.25, radius / 0.25)
wheel = self.system.create_wheel()
wheel.setNode(wheel_np.node())
wheel.setChassisConnectionPointCs(pos)
wheel.setFrontWheel(front)
wheel.setWheelDirectionCs(Vec3(0, 0, -1))
wheel.setWheelAxleCs(Vec3(1, 0, 0))
# TODO add them to PGConfig in the future
wheel.setWheelRadius(radius)
wheel.setMaxSuspensionTravelCm(40)
wheel.setSuspensionStiffness(30)
wheel.setWheelsDampingRelaxation(4.8)
wheel.setWheelsDampingCompression(1.2)
wheel.setFrictionSlip(self.vehicle_config["wheel_friction"])
wheel.setRollInfluence(1.5)
return wheel
def add_image_sensor(self, name: str, sensor: ImageBuffer):
self.image_sensors[name] = sensor
def add_lidar(self, laser_num=240, distance=50):
self.lidar = Lidar(self.pg_world.render, laser_num, distance)
def add_routing_localization(self, show_navi_point: bool):
self.routing_localization = RoutingLocalizationModule(self.pg_world, show_navi_point)
def update_map_info(self, map):
"""
Update map info after reset()
:param map: new map
:return: None
"""
self.routing_localization.update(map)
lane, new_l_index = ray_localization((self.born_place), self.pg_world)
assert lane is not None, "Born place is not on road!"
self.lane_index = new_l_index
self.lane = lane
def _state_check(self):
"""
Check States and filter to update info
"""
result = self.pg_world.physics_world.dynamic_world.contactTest(self.chassis_beneath_np.node(), True)
contacts = set()
for contact in result.getContacts():
node0 = contact.getNode0()
node1 = contact.getNode1()
name = [node0.getName(), node1.getName()]
name.remove(BodyName.Ego_vehicle)
if name[0] == "Ground" or name[0] == BodyName.Lane:
continue
elif name[0] == BodyName.Side_walk:
self.out_of_road = True
contacts.add(name[0])
if self.render:
self.render_collision_info(contacts)
return contacts
def _collision_check(self, contact):
"""
It may lower the performance if overdone
"""
node0 = contact.getNode0().getName()
node1 = contact.getNode1().getName()
name = [node0, node1]
name.remove(BodyName.Ego_vehicle_top)
if name[0] == BodyName.Traffic_vehicle:
self.crash = True
logging.debug("Crash with {}".format(name[0]))
def _init_collision_info_render(self, pg_world):
if pg_world.mode == "onscreen":
info_np = NodePath("Collision info nodepath")
info_np.reparentTo(pg_world.aspect2d)
else:
info_np = None
return info_np
def render_collision_info(self, contacts):
contacts = sorted(list(contacts), key=lambda c: COLLISION_INFO_COLOR[COLOR[c]][0])
text = contacts[0] if len(contacts) != 0 else None
if text is None:
text = "Normal" if time.time() - self.pg_world._episode_start_time > 10 else "Press H to see help message"
self.render_banner(text, COLLISION_INFO_COLOR["green"][1])
else:
self.render_banner(text, COLLISION_INFO_COLOR[COLOR[text]][1])
def render_banner(self, text, color=COLLISION_INFO_COLOR["green"][1]):
"""
Render the banner in the left bottom corner.
"""
if self.collision_info_np is None:
return
if self.current_banner is not None:
self.current_banner.detachNode()
if text in self.collision_banners:
self.collision_banners[text].reparentTo(self.collision_info_np)
self.current_banner = self.collision_banners[text]
else:
new_banner = NodePath(TextNode("collision_info:{}".format(text)))
self.collision_banners[text] = new_banner
text_node = new_banner.node()
text_node.setCardColor(color)
text_node.setText(text)
text_node.setCardActual(-5 * self.pg_world.w_scale, 5.1 * self.pg_world.w_scale, -0.3, 1)
text_node.setCardDecal(True)
text_node.setTextColor(1, 1, 1, 1)
text_node.setAlign(TextNode.A_center)
new_banner.setScale(0.05)
new_banner.setPos(-0.75 * self.pg_world.w_scale, 0, -0.8 * self.pg_world.h_scale)
new_banner.reparentTo(self.collision_info_np)
self.current_banner = new_banner
def destroy(self, _=None):
self.dynamic_nodes.remove(self.chassis_np.node())
super(BaseVehicle, self).destroy(self.pg_world)
self.pg_world.physics_world.dynamic_world.clearContactAddedCallback()
self.routing_localization.destroy()
self.routing_localization = None
if self.lidar is not None:
self.lidar.destroy()
self.lidar = None
if len(self.image_sensors) != 0:
for sensor in self.image_sensors.values():
sensor.destroy(self.pg_world)
self.image_sensors = None
if self.vehicle_panel is not None:
self.vehicle_panel.destroy(self.pg_world)
self.pg_world = None
def set_position(self, position):
"""
Should only be called when restore traffic from episode data
:param position: 2d array or list
:return: None
"""
self.chassis_np.setPos(panda_position(position, 0.4))
def set_heading(self, heading_theta) -> None:
"""
Should only be called when restore traffic from episode data
:param heading_theta: float in rad
:return: None
"""
self.chassis_np.setH((panda_heading(heading_theta) * 180 / np.pi) - 90)
def get_state(self):
return {
"heading": self.heading_theta,
"position": self.position.tolist(),
"done": self.crash or self.out_of_road
}
def set_state(self, state: dict):
self.set_heading(state["heading"])
self.set_position(state["position"])
def __del__(self):
super(BaseVehicle, self).__del__()
self.pg_world = None
self.lidar = None
self.mini_map = None
self.rgb_cam = None
self.routing_localization = None
self.wheels = None
|
from typing import Dict, Generic, TypeVar, TYPE_CHECKING
import sys
CacheKey = TypeVar("CacheKey")
CacheValue = TypeVar("CacheValue")
if sys.version_info < (3, 9):
from pip._vendor.typing_extensions import OrderedDict
else:
from collections import OrderedDict
class LRUCache(OrderedDict[CacheKey, CacheValue]):
"""
A dictionary-like container that stores a given maximum items.
If an additional item is added when the LRUCache is full, the least
recently used key is discarded to make room for the new item.
"""
def __init__(self, cache_size: int) -> None:
self.cache_size = cache_size
super().__init__()
def __setitem__(self, key: CacheKey, value: CacheValue) -> None:
"""Store a new views, potentially discarding an old value."""
if key not in self:
if len(self) >= self.cache_size:
self.popitem(last=False)
super().__setitem__(key, value)
def __getitem__(self, key: CacheKey) -> CacheValue:
"""Gets the item, but also makes it most recent."""
value: CacheValue = super().__getitem__(key)
super().__delitem__(key)
super().__setitem__(key, value)
return value
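# Minimal usage sketch (illustration only, not part of the original module): with
# cache_size=2, inserting a third key evicts the least recently used one.
if __name__ == "__main__":
    cache = LRUCache(cache_size=2)
    cache["a"] = 1
    cache["b"] = 2
    _ = cache["a"]   # touching "a" makes it the most recently used key
    cache["c"] = 3   # "b" is now the least recently used key and is evicted
    assert "b" not in cache and "a" in cache and "c" in cache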
|
import logging
import math
from typing import Callable, List
from datasketches import frequent_strings_sketch
from whylogs.core.statistics.thetasketch import ThetaSketch
from whylogs.core.summaryconverters import from_string_sketch
from whylogs.proto import CharPosMessage, CharPosSummary, StringsMessage, StringsSummary
from whylogs.util import dsketch
from .numbertracker import NumberTracker
MAX_ITEMS_SIZE = 128
MAX_SUMMARY_ITEMS = 100
logger = logging.getLogger(__name__)
class CharPosTracker:
"""
Track statistics for character positions within a string
Parameters
----------
character_list : str
string containing all characters to be tracked
this list can include specific unicode characters to track.
"""
def __init__(self, character_list: str = None):
if character_list is None:
character_list = "abcdefghijklmnopqrstuvwzyz0123456789-+_@!,./?#$%^&*()[]{}"
self.character_list = set(character_list)
self.char_pos_map = {}
def update(self, value: str, character_list: str = None) -> None:
"""update
Parameters
----------
value : str
utf-16 string
character_list : str, optional
use a specific character_list for
the tracked string. Note that modifying
it from a previously saved choice will
reset the character position map, since
NITL no longer has the same context.
"""
if character_list:
char_set = set(character_list)
if char_set != self.character_list:
# check if any characters were previously tracked
if self.char_pos_map:
logger.warning("Changing character list, a non-empty character position tracker is being reset to remove ambiguities")
self.character_list = char_set
self.char_pos_map = {}
for indx, char in enumerate(value.lower()):
try:
if char in self.character_list:
self.char_pos_map.setdefault(char, NumberTracker())
# print("track")
self.char_pos_map[char].track(indx)
else:
self.char_pos_map.setdefault("NITL", NumberTracker())
self.char_pos_map["NITL"].track(indx)
except UnicodeEncodeError:
# print("exception")
self.char_pos_map.setdefault("NITL", NumberTracker())
self.char_pos_map["NITL"].track(indx)
def merge(self, other: "CharPosTracker") -> "CharPosTracker":
"""
Merges two Char Pos Frequency Maps
Args:
other (CharPosTracker): to be merged
"""
if (self.character_list != other.character_list) and (self.char_pos_map and other.char_pos_map):
logger.error("Merging two non-empty character position trackers with different character lists")
new_character_list = self.character_list.union(other.character_list)
# initialize merged
new_char_pos_tracker = CharPosTracker(character_list=str("".join(list(new_character_list))))
# merge
new_char_pos_map = {}
for character in new_character_list:
pos_tracker = self.char_pos_map.get(character, None)
other_tracker = other.char_pos_map.get(character, None)
if pos_tracker and other_tracker:
new_char_pos_map[character] = pos_tracker.merge(other_tracker)
elif pos_tracker:
new_char_pos_map[character] = pos_tracker
elif other_tracker:
new_char_pos_map[character] = other_tracker
# merge not in the list
nitl_tracker = self.char_pos_map.get("NITL", None)
nitl_other_tracker = other.char_pos_map.get("NITL", None)
if nitl_tracker and nitl_other_tracker:
new_char_pos_map["NITL"] = nitl_tracker.merge(nitl_other_tracker)
elif nitl_tracker:
new_char_pos_map["NITL"] = nitl_tracker
elif nitl_other_tracker:
new_char_pos_map["NITL"] = nitl_other_tracker
new_char_pos_tracker.char_pos_map = new_char_pos_map
return new_char_pos_tracker
def to_protobuf(self):
"""
Return the object serialized as a protobuf message
"""
character_list = list(self.character_list)
character_list.sort()
opts = dict(char_list="".join(character_list), char_pos_map={key: nt.to_protobuf() for key, nt in self.char_pos_map.items()})
msg = CharPosMessage(**opts)
return msg
@staticmethod
def from_protobuf(message: CharPosMessage):
"""
Load from a CharPosMessage protobuf message
Returns
-------
CharPosTracker
"""
opts = dict(character_list=message.char_list)
char_pos_tracker = CharPosTracker(**opts)
for each_key, each_value in message.char_pos_map.items():
char_pos_tracker.char_pos_map[each_key] = NumberTracker.from_protobuf(each_value)
return char_pos_tracker
def to_summary(self):
character_list = list(self.character_list)
character_list.sort()
opts = dict(character_list="".join(character_list), char_pos_map={key: nt.to_summary() for key, nt in self.char_pos_map.items()})
return CharPosSummary(**opts)
class StringTracker:
"""
Track statistics for strings
Parameters
----------
count : int
Total number of processed values
items : frequent_strings_sketch
Sketch for tracking string counts
theta_sketch : ThetaSketch
Sketch for approximate cardinality tracking
length : NumberTracker
tracks the distribution of length of strings
token_length : NumberTracker
counts tokens per sentence
token_method : function
method used to turn a string into tokens
char_pos_tracker: CharPosTracker
"""
def __init__(
self,
count: int = None,
items: frequent_strings_sketch = None,
theta_sketch: ThetaSketch = None,
length: NumberTracker = None,
token_length: NumberTracker = None,
char_pos_tracker: CharPosTracker = None,
token_method: Callable[[], List[str]] = None,
):
if count is None:
count = 0
if items is None:
items = frequent_strings_sketch(round(math.log(MAX_ITEMS_SIZE)))
if theta_sketch is None:
theta_sketch = ThetaSketch()
self.count = count
self.items = items
self.theta_sketch = theta_sketch
self.char_pos_tracker = char_pos_tracker if char_pos_tracker else CharPosTracker()
self.length = length if length else NumberTracker()
self.token_length = token_length if token_length else NumberTracker()
self.token_method = token_method if token_method else lambda x: x.split(" ")
def update(self, value: str, character_list=None, token_method=None):
"""
Add a string to the tracking statistics.
If `value` is `None`, nothing will be done
"""
if value is None:
return
self.count += 1
self.theta_sketch.update(value)
self.items.update(value)
self.char_pos_tracker.update(value, character_list)
if token_method:
self.token_method = token_method
self.length.track(len(value))
self.token_length.track(len(self.token_method(value)))
def merge(self, other):
"""
Merge the values of this string tracker with another
Parameters
----------
other : StringTracker
The other StringTracker
Returns
-------
new : StringTracker
Merged values
"""
items_copy = frequent_strings_sketch.deserialize(self.items.serialize())
items_copy.merge(other.items)
new_theta = self.theta_sketch.merge(other.theta_sketch)
count = self.count + other.count
new_length = self.length.merge(other.length)
new_token_length = self.token_length.merge(other.token_length)
new_char_pos_tracker = self.char_pos_tracker.merge(other.char_pos_tracker)
return StringTracker(count, items_copy, new_theta, new_length, new_token_length, new_char_pos_tracker)
def to_protobuf(self):
"""
Return the object serialized as a protobuf message
Returns
-------
message : StringsMessage
"""
return StringsMessage(
count=self.count,
items=self.items.serialize(),
compact_theta=self.theta_sketch.serialize(),
length=self.length.to_protobuf() if self.length else None,
token_length=self.token_length.to_protobuf() if self.token_length else None,
char_pos_tracker=self.char_pos_tracker.to_protobuf() if self.char_pos_tracker else None,
)
@staticmethod
def from_protobuf(message: StringsMessage):
"""
Load from a protobuf message
Returns
-------
string_tracker : StringTracker
"""
theta = None
if message.compact_theta is not None and len(message.compact_theta) > 0:
theta = ThetaSketch.deserialize(message.compact_theta)
elif message.theta is not None and len(message.theta) > 0:
logger.warning("Possible missing data. Non-compact theta sketches are no longer supported")
return StringTracker(
count=message.count,
items=dsketch.deserialize_frequent_strings_sketch(message.items),
theta_sketch=theta,
length=NumberTracker.from_protobuf(message.length),
token_length=NumberTracker.from_protobuf(message.token_length),
char_pos_tracker=CharPosTracker.from_protobuf(message.char_pos_tracker),
)
def to_summary(self):
"""
Generate a summary of the statistics
Returns
-------
summary : StringsSummary
Protobuf summary message.
"""
if self.count == 0:
return None
unique_count = self.theta_sketch.to_summary()
opts = dict(
unique_count=unique_count,
length=self.length.to_summary(),
token_length=self.token_length.to_summary(),
char_pos_tracker=self.char_pos_tracker.to_summary(),
)
if unique_count.estimate < MAX_SUMMARY_ITEMS:
frequent_strings = from_string_sketch(self.items)
if frequent_strings is not None:
opts["frequent"] = frequent_strings
return StringsSummary(**opts)
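# Minimal usage sketch (illustration only, not part of the original module): track a
# couple of strings and inspect the counters maintained by StringTracker/CharPosTracker.
if __name__ == "__main__":
    tracker = StringTracker()
    tracker.update("hello world")
    tracker.update("hello whylogs")
    print("strings tracked:", tracker.count)  # expected: 2
    print("characters seen:", sorted(tracker.char_pos_tracker.char_pos_map))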
|
import os
import shutil
from argparse import ArgumentParser
import pandas as pd
def rename_csv(path: str, old_name: str, new_name: str):
# read the csv
dataframe = pd.read_csv(path + old_name + ".csv")
# map the numeric expression labels to their string names
for index, row in enumerate(dataframe.iterrows()):
number = dataframe.loc[index, 'expression']
if number == 0:
dataframe.loc[index, 'expression'] = 'Neutral'
elif number == 1:
dataframe.loc[index, 'expression'] = 'Happy'
elif number == 2:
dataframe.loc[index, 'expression'] = 'Sad'
elif number == 3:
dataframe.loc[index, 'expression'] = 'Surprise'
elif number == 4:
dataframe.loc[index, 'expression'] = 'Fear'
elif number == 5:
dataframe.loc[index, 'expression'] = 'Disgust'
elif number == 6:
dataframe.loc[index, 'expression'] = 'Anger'
elif number == 7:
dataframe.loc[index, 'expression'] = 'Contempt'
elif number == 8:
dataframe.loc[index, 'expression'] = 'None'
elif number == 9:
dataframe.loc[index, 'expression'] = 'Uncertain'
elif number == 10:
dataframe.loc[index, 'expression'] = 'Non-Face'
if index % 1000 == 0:
print('processed rows: {}'.format(index))
dataframe.to_csv(path + new_name + ".csv", index=False)
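# Illustrative alternative (a sketch only; the function above is unchanged): the same
# relabelling can be written as a lookup table plus pandas' Series.map, avoiding the
# long elif chain. EXPRESSION_NAMES is a hypothetical name introduced here for illustration.
EXPRESSION_NAMES = {
    0: 'Neutral', 1: 'Happy', 2: 'Sad', 3: 'Surprise', 4: 'Fear', 5: 'Disgust',
    6: 'Anger', 7: 'Contempt', 8: 'None', 9: 'Uncertain', 10: 'Non-Face',
}
# e.g. dataframe['expression'] = dataframe['expression'].map(EXPRESSION_NAMES)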
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-p", "--path",
help="path to the affectnet directory")
parser.add_argument("-o", "--old",
help="old name")
parser.add_argument("-n", "--new",
help="new name")
args = parser.parse_args()
affectnet_path = args.path
old = args.old
new = args.new
rename_csv(affectnet_path, old, new)
|
class ProxymonObject(object):
def __init__(self, name):
self.name = name
def dump(self):
return self.name.upper().replace(" ", "_")
|
from os.path import basename, dirname
from spine.animation.animationstate import AnimationState
from spine.animation.animationstatedata import AnimationStateData
from spine.atlas.atlas import Atlas
from spine.attachment.attachment import AttachmentType
from spine.skeleton.skeleton import Skeleton
from spine.skeleton.skeletonjson import SkeletonJson
from spinekivy.atlasattachmentloader import AtlasAttachmentLoader
from spinekivy.sprite import MODE_TRIANGLE_FAN, MODE_TRIANGLES
from spinekivy.textureloader import TextureLoader
_REGION_INDICES = range(4)
class SkeletonRenderer(object):
def __init__(self):
self.skeleton = None
self.state = None
self.scale = 1.0
self.sprites = []
def load(self, path):
skeleton_data = self._load_skeleton_data(path)
self.skeleton = Skeleton(skeleton_data)
self.state = AnimationState(AnimationStateData(skeleton_data))
def _load_skeleton_data(self, path):
with open(path + '.json') as fp:
json_text = fp.read()
atlas = self._load_atlas(path)
attachment_loader = AtlasAttachmentLoader(atlas)
skeleton_json = SkeletonJson(attachment_loader)
skeleton_json.scale = self.scale
return skeleton_json.read_data(json_text, basename(path))
def _load_atlas(self, path):
with open(path + '.atlas') as fp:
atlas_text = fp.read()
texture_loader = TextureLoader(dirname(path))
return Atlas(atlas_text, texture_loader)
def update(self, dt):
state = self.state
skeleton = self.skeleton
sprites = self.sprites
state.update(dt)
state.apply(skeleton)
skeleton.update_world_transform()
i = -1
for slot in skeleton.draw_order:
i += 1
attachment = slot.attachment
sprite = sprites[i]
if not attachment:
sprite.color.a = 0.0
elif attachment.type == AttachmentType.region:
mesh = sprite.mesh
attachment.compute_world_vertices_uvs(slot, mesh.vertices)
mesh.indices[:] = _REGION_INDICES
mesh.mode = MODE_TRIANGLE_FAN
mesh.texture = attachment.renderer_object
sprite.color.rgba[:] = (slot.r, slot.g, slot.b, slot.a)
elif attachment.type == AttachmentType.mesh:
mesh = sprite.mesh
attachment.compute_world_vertices_uvs(slot, mesh.vertices)
mesh.mode = MODE_TRIANGLES
mesh.indices[:] = attachment.triangles
mesh.texture = attachment.renderer_object
sprite.color.rgba[:] = (slot.r, slot.g, slot.b, slot.a)
elif attachment.type == AttachmentType.skinnedmesh:
mesh = sprite.mesh
attachment.compute_world_vertices_uvs(slot, mesh.vertices)
mesh.mode = MODE_TRIANGLES
mesh.indices[:] = attachment.triangles
mesh.texture = attachment.renderer_object
sprite.color.rgba[:] = (slot.r, slot.g, slot.b, slot.a)
elif attachment.type == AttachmentType.boundingbox:
sprite.color.a = 0.0
else:
raise TypeError(
'Unknown attachment: {}'.format(type(attachment))
)
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLA Profiler will help you debug and optimize training workload performance for your models using Cloud TPU
performance tools.
Manual capture via TensorBoard
The following instructions are for capturing trace from a running program
0. This [guide](https://cloud.google.com/tpu/docs/pytorch-xla-performance-profiling-tpu-vm#tpu-vm) will
help you with the Cloud TPU setup with the required installations
1. Start a TensorBoard Server
>> tensorboard --logdir ./tensorboard --port 9001
You could view the TensorBoard output at http://localhost:9001 on your local machine, and then open the
``PROFILE`` plugin from the top right dropdown or open http://localhost:9001/#profile
2. Once the code you'd like to profile is running, click on ``CAPTURE PROFILE`` button. You could enter
``localhost:9012`` (default port for XLA Profiler) as the Profile Service URL. Then, you could enter
the number of milliseconds for the profiling duration, and click ``CAPTURE``
3. Make sure the code is running, while you are trying to capture the traces. Also, it would lead to better
performance insights if the profiling duration is longer than the step time
4. Once the capture is finished, the page will refresh and you could browse through the insights using the
``Tools`` dropdown at the top left
"""
import logging
from typing import Dict
from pytorch_lightning.profiler.base import BaseProfiler
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, _TPU_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
if _TPU_AVAILABLE and _TORCH_GREATER_EQUAL_1_8:
import torch_xla.debug.profiler as xp
log = logging.getLogger(__name__)
class XLAProfiler(BaseProfiler):
STEP_FUNCTIONS = {"validation_step", "test_step", "predict_step"}
RECORD_FUNCTIONS = {
"training_step",
"backward",
"validation_step",
"test_step",
"predict_step",
}
def __init__(self, port: int = 9012) -> None:
"""This Profiler will help you debug and optimize training workload performance for your models using Cloud
TPU performance tools."""
if not _TPU_AVAILABLE:
raise MisconfigurationException("`XLAProfiler` is only supported on TPUs")
if not _TORCH_GREATER_EQUAL_1_8:
raise MisconfigurationException("`XLAProfiler` is only supported with `torch-xla >= 1.8`")
super().__init__(dirpath=None, filename=None)
self.port = port
self._recording_map: Dict = {}
self._step_recoding_map: Dict = {}
self._start_trace: bool = False
def start(self, action_name: str) -> None:
if action_name in self.RECORD_FUNCTIONS:
if not self._start_trace:
self.server = xp.start_server(self.port)
self._start_trace = True
if action_name in self.STEP_FUNCTIONS:
step = self._get_step_num(action_name)
recording = xp.StepTrace(action_name, step_num=step)
else:
recording = xp.Trace(action_name)
recording.__enter__()
self._recording_map[action_name] = recording
def stop(self, action_name: str) -> None:
if action_name in self._recording_map:
self._recording_map[action_name].__exit__(None, None, None)
del self._recording_map[action_name]
def _get_step_num(self, action_name: str) -> int:
if action_name not in self._step_recoding_map:
self._step_recoding_map[action_name] = 1
else:
self._step_recoding_map[action_name] += 1
return self._step_recoding_map[action_name]
def summary(self) -> str:
return ""
|
while True:
num = int(
input('Which number do you want the multiplication table for (enter a negative number to quit): '))
if num < 0:
break
for i in range(0, 11):
print(f'{num} x {i} = {num * i}')
print('='*30)
|
from .richdatetime import RichDateTime
|
"""Test string
"""
import ARgorithmToolkit
algo = ARgorithmToolkit.StateSet()
st = ARgorithmToolkit.String('st', algo, "Hello world! 1234")
def test_body():
"""Test string contents
"""
assert st.body == "Hello world! 1234"
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_declare'
assert last_state.content["state_def"]["body"] == "Hello world! 1234"
def test_append():
"""Test string append
"""
global st
st.append(" Hahaha")
assert st.body == "Hello world! 1234 Hahaha"
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_append'
assert last_state.content["state_def"]["element"] == " Hahaha"
st+='xyz'
assert st.body == "Hello world! 1234 Hahahaxyz"
last_state = algo.states[-1]
second_last_state = algo.states[-2]
assert last_state.content["state_type"] == 'string_append'
assert last_state.content["state_def"]["element"] == "xyz"
assert second_last_state.content["state_type"] == 'string_declare'
assert second_last_state.content["state_def"]["body"] == "Hello world! 1234 Hahaha"
assert second_last_state.content["state_def"]["variable_name"] == "st_super"
def test_indexing():
"""Test string indexing
"""
assert st[1] == st.body[1]
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_iter'
assert last_state.content["state_def"]["index"] == 1
subst = st[1:3]
assert isinstance(subst,ARgorithmToolkit.String)
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_declare'
assert last_state.content["state_def"]["variable_name"] == 'st_super_sub'
assert last_state.content["state_def"]["body"] == st.body[1:3]
def test_iteration():
"""Test string iteration
"""
for i,(a,b) in enumerate(zip(st,st.body)):
assert a==b
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_iter'
assert last_state.content["state_def"]["index"] == i
|
from __future__ import absolute_import
import numpy as np
from sklearn.linear_model import logistic, SGDClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from at_toolkit.interface.adl_classifier import AdlOfflineClassifier, AdlOnlineClassifier
from at_toolkit import info, error, as_timer
def ohe2cat(label):
return np.argmax(label, axis=1)
class SLLRLiblinear(AdlOfflineClassifier):
"""
'liblinear' is limited to one-versus-rest schemes.
use ovr in multi-class.
for single-label, including binary+multi class.
"""
def init(self, class_num: int, init_params: dict = None):
self.clf_name = "sl_lr_liblinear"
self.class_num = class_num
# for single labels.
self.model = logistic.LogisticRegression(solver="liblinear")
self.ml_mode = 2
# for multi-labels
# mode-1: class_num * onevsrestclassifier+lr
self.ml_models = [OneVsRestClassifier(logistic.LogisticRegression(solver="liblinear")) for i in range(class_num)]
# mode-2: onevsrestclassifier+lr
self.ml_model = OneVsRestClassifier(logistic.LogisticRegression(solver="liblinear"))
#mode-3: Pipeline + onevsrestclassifier+lr
self.logReg_pipeline = Pipeline([('clf', OneVsRestClassifier(logistic.LogisticRegression(solver='liblinear'), n_jobs=-1)),])
# for multi-labels.
# mode-1: + onevsrestclassifier
# mode-2: + decision tree.
# self.model = DecisionTreeClassifier()
info(
"Backbone classifier=SLLRLiblinear is init, class_num={}, init_params={}".format(
self.class_num, init_params
)
)
def offline_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params: dict = None):
# for single-label
if fit_params.get("if_multilabel") is False:
train_examples_y = ohe2cat(train_examples_y)
self.model.fit(train_examples_x, train_examples_y)
self.label_map = self.model.classes_
# for multi-labels.
else:
if self.ml_mode == 1:
for cls in range(self.class_num):
cls_y = train_examples_y[:, cls]
# self.logReg_pipeline.fit(train_examples_x, cls_y)
self.ml_models[cls].fit(train_examples_x, cls_y)
elif self.ml_mode == 2:
self.ml_model.fit(train_examples_x, train_examples_y)
elif self.ml_mode == 3:
for cls in range(self.class_num):
cls_y = train_examples_y[:, cls]
self.logReg_pipeline.fit(train_examples_x, cls_y)
else:
error("Error: wrong ml_mode={}".format(self.ml_mode))
as_timer("lr_liblinear_fit_{}".format(len(train_examples_x)))
def predict_proba(self, test_examples: np.ndarray, predict_prob_params: dict = None) -> np.ndarray:
# multi-label or single-label.
if predict_prob_params.get("if_multilabel") is True:
return self.predict_proba_multilabel(test_examples)
else:
raw_pred_probas = self.model.predict_proba(test_examples)
print(raw_pred_probas)
if len(self.label_map) < self.class_num:
rebuilt_pred_proba = self.rebuild_prob_res(self.label_map, raw_pred_probas)
as_timer("lr_liblinaer_pred_proba_{}".format(len(test_examples)))
return rebuilt_pred_proba
else:
return raw_pred_probas
def predict_proba_multilabel(self, test_examples: np.ndarray):
if self.ml_mode == 1:
all_preds = []
for cls in range(self.class_num):
preds = self.ml_models[cls].predict_proba(test_examples)
# preds = self.logReg_pipeline.predict_proba(test_examples)
info("cls={}, preds shape={}, data={}".format(cls, preds.shape, preds))
all_preds.append(preds[:, 1])
preds = np.stack(all_preds, axis=1)
elif self.ml_mode == 2:
preds = self.ml_model.predict_proba(test_examples)
elif self.ml_mode == 3:
preds = self.ml_model.predict_proba(test_examples)
all_preds = []
for cls in range(self.class_num):
preds = self.ml_models[cls].predict_proba(test_examples)
preds = self.logReg_pipeline.predict_proba(test_examples)
# info("cls={}, preds shape={}, data={}".format(cls, preds.shape, preds))
all_preds.append(preds[:, 1])
preds = np.stack(all_preds, axis=1)
else:
error("Error: wrong ml_mode={}".format(self.ml_mode))
preds = self.ml_model.predict_proba(test_examples)
info("multilabel, preds shape={} , data={}".format(preds.shape, preds))
return preds
class SLLRSag(AdlOfflineClassifier):
"""
'liblinear' is limited to one-versus-rest schemes.
use ovr in multi-class.
for single-label, including binary+multi class.
"""
def init(self, class_num, init_params: dict):
self.clf_name = "sl_lr_sag"
self.class_num = class_num
self.max_iter = init_params.get("max_iter")
# self.model = logistic.LogisticRegression(solver="sag", max_iter=self.max_iter)
self.model = logistic.LogisticRegression(C=1.0, max_iter=self.max_iter, solver="sag", multi_class="auto")
# self.model = OneVsRestClassifier(logistic.LogisticRegression(C=1.0, max_iter=self.max_iter, solver="sag", multi_class="auto"))
self.ml_model = OneVsRestClassifier(logistic.LogisticRegression(solver="liblinear"))
info(
"Backbone classifier=SLLRLiblinear is init, class_num={}, init_params={}".format(
self.class_num, init_params
)
)
def offline_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params: dict = None):
if fit_params.get("if_multilabel") is False:
train_examples_y = ohe2cat(train_examples_y)
self.model.fit(train_examples_x, train_examples_y)
self.label_map = self.model.classes_
else:
self.ml_model.fit(train_examples_x, train_examples_y)
as_timer("lr_sag_fit_{}".format(len(train_examples_x)))
def predict_proba(self, test_examples: np.ndarray, predict_prob_params: dict = None) -> np.ndarray:
if predict_prob_params.get("if_multilabel") is True:
return self.predict_proba_multilabel(test_examples)
else:
raw_pred_probas = self.model.predict_proba(test_examples)
if len(self.label_map) < self.class_num:
rebuilt_pred_proba = self.rebuild_prob_res(self.label_map, raw_pred_probas)
as_timer("lr_liblinaer_pred_proba_{}".format(len(test_examples)))
return rebuilt_pred_proba
else:
return raw_pred_probas
def predict_proba_multilabel(self, test_examples: np.ndarray, predict_prob_params: dict = None) -> np.ndarray:
preds = self.ml_model.predict_proba(test_examples)
return preds
class MLLRLiblinear(AdlOfflineClassifier):
def init(self, class_num: int, init_params: dict):
self.clf_name = "ml_sl_lr_liblinear"
self.class_num = class_num
self.model = logistic.LogisticRegression(solver="liblinear")
info(
"Backbone classifier=SLLRLiblinear is init, class_num={}, init_params={}".format(
self.class_num, init_params
)
)
pass
def offline_fit(self, train_examples_x: np.ndarray, train_examples_y: np.ndarray, fit_params:dict):
pass
def predict_proba(self, test_examples: np.ndarray, predict_prob_params: dict) -> np.ndarray:
pass
def main():
class_num = 100
lr_libl_cls_init_params = {}
lr_sag_cls_init_params = {"max_iter": 30} # 50/100
lr_libl_cls = SLLRLiblinear()
lr_libl_cls.init(class_num, lr_libl_cls_init_params)
lr_sag_cls = SLLRSag()
lr_sag_cls.init(class_num, lr_sag_cls_init_params)
if __name__ == "__main__":
main()
|
from pathlib import Path
import ezdxf
import math
DIR = Path('~/Desktop/Outbox').expanduser()
doc = ezdxf.new()
msp = doc.modelspace()
hatch = msp.add_hatch(color=1)
ep = hatch.paths.add_edge_path()
ep.add_line((0, 0), (1, 0))
ep.add_arc(
center=(0, 0),
radius=1,
start_angle=0,
end_angle=90,
ccw=True,
)
ep.add_line((0, 1), (0, 0))
hatch.translate(0.25, 0.25, 0)
for color in range(2, 5):
hatch = hatch.copy()
hatch.dxf.color = color
hatch.rotate_z(math.pi / 2.0)
doc.entitydb.add(hatch)
msp.add_entity(hatch)
doc.set_modelspace_vport(height=5)
doc.saveas(DIR / 'ccw_arc_hatch.dxf')
|
from pycortecs.utility.utils import validate_endpoint
from pycortecs.services import BASE_URL
from pycortecs.services.base_services import BaseServices
SENTIMENT = 'sentiment'
VOLUME = 'socialVolume'
BALANCE = 'socialBalance'
DOMINANCE = 'socialDominance'
ENDPOINTS = 'endpointInfo'
SIGNALS = 'signalInfo'
ASSETS = 'assetInfo'
PROVIDED_SINCE = 'providedSince'
API_URL = "{}/api/v1".format(BASE_URL)
class MediaServices(BaseServices):
def __init__(self, username: str = None, password: str = None):
super().__init__(username, password)
def get_provided_assets(self):
url = "{url}/{feature}".format(url=API_URL,
feature=ASSETS)
res = self._get(url, headers=self._token_header)
return res
def get_provided_signals(self):
url = "{url}/{feature}".format(url=API_URL,
feature=SIGNALS)
res = self._get(url, headers=self._token_header)
return res
def get_provided_endpoints(self):
url = "{url}/{feature}".format(url=API_URL,
feature=ENDPOINTS)
res = self._get(url, headers=self._token_header)
return res
def get_asset_provided_since(self,
asset: str = 'btc',
endpoint: str = 'twitter'):
validate_endpoint(endpoint)
url = "{url}/{provided_since}/{endpoint}".format(url=API_URL,
provided_since=PROVIDED_SINCE,
endpoint=endpoint)
query_parameters = "?asset={asset}".format(asset=asset)
res = self._get(url + query_parameters, headers=self._token_header)
return res
def get_sentiment(self,
endpoint: str = 'twitter',
since: str = None,
until: str = None,
asset: str = None,
interval: str = '1d') -> dict:
validate_endpoint(endpoint)
url = "{url}/{signal}/{endpoint}".format(url=API_URL,
signal=SENTIMENT,
endpoint=endpoint)
query_parameters = "?since={since}&until={until}&asset={asset}&interval={interval}".format(since=since,
until=until,
asset=asset,
interval=interval)
res = self._get(url + query_parameters, headers=self._token_header)
return res
def get_volume(self,
endpoint: str = 'twitter',
since: str = None,
until: str = None,
asset: str = None,
interval: str = '1d') -> dict:
validate_endpoint(endpoint)
url = "{url}/{signal}/{endpoint}".format(url=API_URL,
signal=VOLUME,
endpoint=endpoint)
query_parameters = "?since={since}&until={until}&asset={asset}&interval={interval}".format(since=since,
until=until,
asset=asset,
interval=interval)
res = self._get(url + query_parameters, headers=self._token_header)
return res
def get_dominance(self,
endpoint: str = 'twitter',
since: str = None,
until: str = None,
asset: str = None,
interval: str = '1d') -> dict:
validate_endpoint(endpoint)
url = "{url}/{signal}/{endpoint}".format(url=API_URL,
signal=DOMINANCE,
endpoint=endpoint)
query_parameters = "?since={since}&until={until}&asset={asset}&interval={interval}".format(since=since,
until=until,
asset=asset,
interval=interval)
res = self._get(url + query_parameters, headers=self._token_header)
return res
def get_balance(self,
endpoint: str = 'twitter',
since: str = None,
until: str = None,
asset: str = None,
interval: str = '1d',
threshold: float = 0.7) -> dict:
validate_endpoint(endpoint)
url = "{url}/{signal}/{endpoint}".format(url=API_URL,
signal=BALANCE,
endpoint=endpoint)
query_parameters = "?since={since}&until={until}&asset={asset}&interval={interval}&threshold={threshold}".format(
since=since,
until=until,
asset=asset,
interval=interval,
threshold=threshold)
res = self._get(url + query_parameters, headers=self._token_header)
return res
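# Minimal usage sketch (illustration only; the credentials, asset, and dates below are
# placeholders/assumptions and must be replaced with valid values for a real request):
if __name__ == "__main__":
    client = MediaServices(username="<username>", password="<password>")
    # daily Twitter sentiment for BTC over an assumed date range
    print(client.get_sentiment(endpoint="twitter", since="2021-01-01",
                               until="2021-01-31", asset="btc", interval="1d"))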
|
import setuptools
# Read the contents of the README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name="mkdocs-render-swagger-plugin",
version="0.0.2",
author="Bar Harel",
python_requires='>=3.6',
author_email="bzvi7919@gmail.com",
description="MKDocs plugin for rendering swagger & openapi files.",
url="https://github.com/bharel/mkdocs-render-swagger-plugin",
py_modules=["render_swagger"],
install_requires=["mkdocs"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
'mkdocs.plugins': [
'render_swagger = render_swagger:SwaggerPlugin',
]
},
long_description=long_description,
long_description_content_type='text/markdown'
)
|
#!/usr/bin/env python3
"""
Geoscience Australia - Python Geodesy Package
GNSS Module
In Development
"""
import sys
from numpy import zeros
from geodepy.angles import DMSAngle
def read_sinex_estimate(file):
"""This function reads in the SOLUTION/ESTIMATE block of a SINEX file. It
returns estimate, a list of tuples:
estimate = [(code, soln, refEpoch, staX, staY, staZ, staX_sd, staY_sd,
staZ_sd[, velX, velY, velZ, velX_sd, velY_sd, velZ_sd])...]
where:
    * code is the station's 4-character ID
    * soln is the segment of the station's time series
    * refEpoch is the epoch of the solution in the form YY:DOY:SSSSS (YY
        is the two digit year, DOY is day of year, and SSSSS is the time of
        day in seconds)
* sta[XYZ] is the station coordinates in the Cartesian reference frame
* sta[XYZ]_sd is the standard deviation of the station coordinates in
the Cartesian reference frame
* vel[XYZ] is the station velocity in the Cartesian reference frame
* vel[XYZ]_sd is the standard deviation of the station velocity in the
Cartesian reference frame
Velocities are not included in all SINEX files and so are only returned if
present.
:param file: the input SINEX file
:return: estimate
"""
# Create data structures and set variables
lines = []
estimate = []
velocities = False
go = False
code = ''
soln = ''
epoch = ''
stax = ''
stay = ''
staz = ''
stax_sd = ''
stay_sd = ''
staz_sd = ''
velx = ''
vely = ''
velz = ''
velx_sd = ''
vely_sd = ''
velz_sd = ''
# Read the SOLUTION/ESTIMATE block into a list and determine if there is
# any velocity information
with open(file) as f:
for line in f:
if line[:18] == '-SOLUTION/ESTIMATE':
break
if go and line[:11] == '*INDEX TYPE':
pass
elif go:
if line[7:10] == 'VEL':
velocities = True
lines.append(line)
if line[:18] == '+SOLUTION/ESTIMATE':
go = True
for line in lines:
typ = line[7:11]
if typ == 'STAX':
code = line[14:18]
soln = line[23:26].lstrip()
epoch = line[27:39]
stax = float(line[47:68])
stax_sd = float(line[69:80])
elif typ == 'STAY':
stay = float(line[47:68])
stay_sd = float(line[69:80])
elif typ == 'STAZ':
staz = float(line[47:68])
staz_sd = float(line[69:80])
if not velocities:
info = (code, soln, epoch, stax, stay, staz, stax_sd, stay_sd,
staz_sd)
estimate.append(info)
elif typ == 'VELX':
velx = float(line[47:68])
velx_sd = float(line[69:80])
elif typ == 'VELY':
vely = float(line[47:68])
vely_sd = float(line[69:80])
elif typ == 'VELZ':
velz = float(line[47:68])
velz_sd = float(line[69:80])
info = (code, soln, epoch, stax, stay, staz, stax_sd, stay_sd,
staz_sd, velx, vely, velz, velx_sd, vely_sd, velz_sd)
estimate.append(info)
return estimate
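# A hedged usage sketch, not part of the original module ('example.snx' is a
# placeholder path, not a file shipped with this package):
#
#     estimate = read_sinex_estimate('example.snx')
#     for solution in estimate:
#         code, soln, epoch, sta_x, sta_y, sta_z = solution[:6]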
def read_sinex_matrix(file):
"""This function reads in the SOLUTION/MATRIX_ESTIMATE block of a SINEX
file. It returns matrix, a list of tuples:
matrix = [(code, soln, var_x, covar_xy, covar_xz, var_y, covar_yz,
var_z[, var_v_x, covar_v_xy, covar_v_xz, var_v_y, covar_v_yz,
var_v_z])...]
where:
    * code is the station's 4-character ID
    * soln is the segment of the station's time series
* var_x is the variance in the X coordinate
* covar_xy is the covariance between the X and the Y coordinates
* covar_xz is the covariance between the X and the Z coordinates
* var_y is the variance in the Y coordinate
* covar_yz is the covariance between the Y and the Z coordinates
* var_z is the variance in the Z coordinate
* var_v_x is the variance in the X velocity
* covar_v_xy is the covariance between the X and the Y velocities
* covar_v_xz is the covariance between the X and the Z velocities
* var_v_y is the variance in the Y velocity
* covar_v_yz is the covariance between the Y and the Z velocities
* var_v_z is the variance in the Z velocity
Velocities are not included in all SINEX files and so their VCV information
is only returned if they are present.
:param file: the input SINEX file
:return: matrix
"""
# Read in the codes (station names) and solutions, and check for velocities
data = read_sinex_estimate(file)
code = []
soln = []
velocities = False
for station in data:
code.append(station[0])
soln.append(station[1])
if len(data[0]) == 15:
velocities = True
# Read the SOLUTION/MATRIX_ESTIMATE block into a list and determine if the
# matrix is upper or lower triangular
lines = []
lower_triangular = False
go = False
with open(file) as f:
for line in f:
if line[:25] == '-SOLUTION/MATRIX_ESTIMATE':
break
if go and line[:12] == '*PARA1 PARA2':
pass
elif go:
lines.append(line)
if line[:25] == '+SOLUTION/MATRIX_ESTIMATE':
if line[26] == 'L':
lower_triangular = True
go = True
# Create an array containing the matrix elements
if velocities:
n = 6 * int(len(code))
else:
n = 3 * int(len(code))
element = zeros((n, n))
matrix = []
for line in lines:
col = line.rstrip().split()
for i in range(2, len(col)):
element[int(col[0]) - 1][int(col[1]) + i - 3] = float(col[i])
if velocities:
if lower_triangular:
for i in range(len(code)):
info = (code[i], soln[i], element[6 * i][6 * i],
element[6 * i + 1][6 * i],
element[6 * i + 1][6 * i + 1],
element[6 * i + 2][6 * i],
element[6 * i + 2][6 * i + 1],
element[6 * i + 2][6 * i + 2],
element[6 * i + 3][6 * i + 3],
element[6 * i + 4][6 * i + 3],
element[6 * i + 4][6 * i + 4],
element[6 * i + 5][6 * i + 3],
element[6 * i + 5][6 * i + 4],
element[6 * i + 5][6 * i + 5])
matrix.append(info)
else:
for i in range(len(code)):
info = (code[i], soln[i], element[6 * i][6 * i],
element[6 * i][6 * i + 1], element[6 * i][6 * i + 2],
element[6 * i + 1][6 * i + 1],
element[6 * i + 1][6 * i + 2],
element[6 * i + 2][6 * i + 2],
element[6 * i + 3][6 * i + 3],
element[6 * i + 3][6 * i + 4],
element[6 * i + 3][6 * i + 5],
element[6 * i + 4][6 * i + 4],
element[6 * i + 4][6 * i + 5],
element[6 * i + 5][6 * i + 5])
matrix.append(info)
else:
if lower_triangular:
for i in range(len(code)):
info = (code[i], soln[i], element[3 * i][3 * i],
element[3 * i + 1][3 * i],
element[3 * i + 1][3 * i + 1],
element[3 * i + 2][3 * i],
element[3 * i + 2][3 * i + 1],
element[3 * i + 2][3 * i + 2])
matrix.append(info)
else:
for i in range(len(code)):
info = (code[i], soln[i], element[3 * i][3 * i],
element[3 * i][3 * i + 1], element[3 * i][3 * i + 2],
element[3 * i + 1][3 * i + 1],
element[3 * i + 1][3 * i + 2],
element[3 * i + 2][3 * i + 2])
matrix.append(info)
return matrix
def read_sinex_sites(file):
"""This function reads in the SITE/ID block of a SINEX file. It returns
sites, a list of tuples:
sites = [(site, point, domes, obs, station_description, lon, lat, h)]
where:
* site is the site code
* point is the site's point code
* domes is the site's dome number
* obs is the observation technique
    * station_description is a free format description of the site
* lon is the approximate longitude of the site as a DMSAngle object
* lat is the approximate latitude of the site as a DMSAngle object
* h is the approximate height of the site
:param file: the input SINEX file
:return: sites
"""
# Read the SITE/ID block into a list
lines = []
go = False
with open(file) as f:
for line in f:
if line[:8] == '-SITE/ID':
break
if go and line[:8] == '*CODE PT':
pass
elif go:
lines.append(line)
if line[:8] == '+SITE/ID':
go = True
sites = []
for line in lines:
site = line[1:5]
point = line[6:8].lstrip()
domes = line[9:18]
obs = line[19:20]
station_description = line[21:43].lstrip()
lon = DMSAngle(line[44:55].lstrip())
lat = DMSAngle(line[56:67].lstrip())
h = float(line[67:73])
info = (site, point, domes, obs, station_description, lon, lat, h)
sites.append(info)
return sites
def read_disconts(file):
"""This function reads in the SOLUTION/DISCONTINUITY block of a
    SINEX file. It returns disconts, a list of tuples:
    disconts = [(site, code1, point, code2, start, end, type)]
where:
* site is the site code
* code1 is unknown
* point is the site's point code
* code2 is unknown
* start is the start time for the point code in YY:DOY:SECOD
* end is the end time for the point code in YY:DOY:SECOD
* type is the type of discontinuity; P for position or V for
velocity
I could not find the format description for this block.
:param file: the input discontinuities file
:return: disconts
"""
# Read the SOLUTION/DISCONTINUITY block into a list
lines = []
go = False
with open(file) as f:
for line in f:
if line[:23] == '-SOLUTION/DISCONTINUITY':
break
elif go:
lines.append(line)
if line[:23] == '+SOLUTION/DISCONTINUITY':
go = True
disconts = []
for line in lines:
site = line[1:5]
code1 = line[5:8].lstrip()
point = line[8:13].lstrip()
code2 = line[14:15]
start = line[16:28]
end = line[29:41]
type = line[42:43]
info = (site, code1, point, code2, start, end, type)
disconts.append(info)
return disconts
def read_solution_epochs(file):
"""This function reads in the SOLUTION/EPOCHS block of a SINEX file.
It returns epochs, a list of tuples:
epochs = [(site, point, sol, obs, start, end, mean)]
where:
* site is the site code
* point is the site's point code
* sol is the solution number at a site/point
* obs is the observation technique
* start is the start time for the solution in YY:DOY:SECOD
* end is the end time for the solution in YY:DOY:SECOD
* mean is the mean time for the solution in YY:DOY:SECOD
:param file: the input SINEX file
:return: epochs
"""
# Read the SOLUTION/EPOCHS block into a list
lines = []
go = False
with open(file) as f:
for line in f:
if line[:16] == '-SOLUTION/EPOCHS':
break
if go and line[:8] == '*Code PT':
pass
elif go:
lines.append(line)
if line[:16] == '+SOLUTION/EPOCHS':
go = True
epochs = []
# Parse each line, create a tuple and add it to the list
for line in lines:
site = line[1:5]
point = line[6:8].lstrip()
sol = line[9:13].lstrip()
obs = line[14:15]
start = line[16:28]
end = line[29:41]
mean = line[42:55].rstrip()
info = (site, point, sol, obs, start, end, mean)
epochs.append(info)
return epochs
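# A hedged sketch tying the readers together; 'example.snx' is again just a
# placeholder path:
#
#     sites = read_sinex_sites('example.snx')
#     epochs = read_solution_epochs('example.snx')
#     matrix = read_sinex_matrix('example.snx')
#     # each reader returns a list of tuples as described in its docstring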
|
"""基本节点模块。
节点有以下几个类型:
1. Variable,变量节点。其没有父节点,作为模型权重(可训练)或输入数据(不可训练)。
2. Operator,操作节点。其表示某种运算操作,通常为计算图的中间变量。
3. LossFunction,损失函数节点。
4. Metrics,评估节点。其仅进行前向计算,用于评估模型。
"""
import abc
import numpy as np
from .graph import default_graph
class Node(abc.ABC):
"""
    Base class for computational graph nodes
"""
def __init__(self, *parents, **kargs):
        # the computational graph this node belongs to; defaults to the global default_graph
self.kargs = kargs
self.graph = kargs.get('graph', default_graph)
self.need_save = kargs.get('need_save', True)
self.gen_node_name(**kargs)
        self.parents = list(parents)  # list of parent nodes
        self.children = []  # list of child nodes
        self.value = None  # value of this node
        self.jacobi = None  # Jacobian of the result node w.r.t. this node (the gradient), kept as a numpy.matrix
        # register this node as a child of each of its parents
        for parent in self.parents:
            parent.children.append(self)
        # add this node to the computational graph
        self.graph.add_node(self)
def get_parents(self):
"""
        Return the parent nodes of this node
"""
return self.parents
def get_children(self):
"""
        Return the child nodes of this node
"""
return self.children
def gen_node_name(self, **kargs):
"""
        Generate the node name. If the user does not supply one, a name such as
        MatMul:3 is built from the node type; if name_scope is set, a name such as Hidden/MatMul:3 is used
"""
self.name = kargs.get('name', f'{self.__class__.__name__}:'
f'{self.graph.node_count()}')
if self.graph.name_scope:
self.name = f'{self.graph.name_scope}/{self.name}'
def forward(self):
"""
        Forward pass: compute this node's value; if a parent's value has not yet
        been computed, recursively call that parent's forward method
"""
for parent in self.parents:
if parent.value is None:
parent.forward()
self.compute()
@abc.abstractmethod
def compute(self):
"""
        Abstract method: compute this node's value from the values of its parents
"""
@abc.abstractmethod
def get_jacobi(self, parent):
"""
        Abstract method: compute the Jacobian of this node with respect to a given parent node
"""
def backward(self, result):
"""
        Backward pass: compute the Jacobian of the result node with respect to this node
"""
if self.jacobi is None:
if self is result:
self.jacobi = np.mat(np.eye(self.dimension()))
else:
self.jacobi = np.mat(
np.zeros((result.dimension(), self.dimension()))
)
for child in self.get_children():
if child.value is not None:
self.jacobi += child.backward(result) \
* child.get_jacobi(self)
return self.jacobi
def clear_jacobi(self):
"""
        Clear the Jacobian of the result node with respect to this node
"""
self.jacobi = None
def dimension(self):
"""
        Return the number of elements of this node's value when flattened into a vector
"""
return self.value.shape[0] * self.value.shape[1]
def shape(self):
"""
        Return the shape of this node's value as a matrix: (rows, columns)
"""
return self.value.shape
def reset_value(self, recursive=True):
"""
        Reset this node's value and, recursively, the values of all downstream nodes
"""
self.value = None
if recursive:
for child in self.children:
child.reset_value()
class Variable(Node):
"""
    Variable node: has no parent nodes
"""
def __init__(self, dim, init=False, trainable=True, **kargs):
"""
        A Variable node has no parents; the constructor takes the node's shape, whether to initialize it, and whether it is trainable
"""
super().__init__(**kargs)
self.dim = dim
        # if initialization is requested, draw the value randomly from a normal distribution
if init:
self.value = np.mat(np.random.normal(0, 0.1, self.dim))
        # whether this variable node participates in training
self.trainable = trainable
def set_value(self, value):
"""
        Assign a value to the variable
"""
assert isinstance(value, np.matrix) and value.shape == self.dim
        # this node's value has changed, so reset the values of all downstream nodes
self.reset_value()
self.value = value
def compute(self):
pass
def get_jacobi(self, parent):
pass
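# ---------------------------------------------------------------------------
# A minimal sketch, not part of the original module, showing how an operator
# could be built on top of Node. The class name "Add" and the usage below are
# illustrative assumptions; the real operator nodes live in other modules.
class Add(Node):
    """
    Example operator node: element-wise sum of two parents with equal shapes
    """
    def compute(self):
        self.value = self.parents[0].value + self.parents[1].value

    def get_jacobi(self, parent):
        # the Jacobian of an element-wise sum w.r.t. either parent is the identity
        return np.mat(np.eye(self.dimension()))

# Usage sketch:
#
#     a = Variable((2, 1), init=False, trainable=False)
#     b = Variable((2, 1), init=False, trainable=False)
#     a.set_value(np.mat([[1.0], [2.0]]))
#     b.set_value(np.mat([[3.0], [4.0]]))
#     s = Add(a, b)
#     s.forward()    # s.value is now [[4.], [6.]]
#     s.backward(s)  # Jacobian of s w.r.t. itself is the 2x2 identity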
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import update_wrapper
import logging
from django.conf import settings
from django.utils.html import strip_tags
try:
from django.utils.six.moves import urllib_parse
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
except (ImportError, AttributeError) as e: # Python 2, < Django 1.5
from urlparse import urlsplit, urlunsplit
from django.forms import Media
from django.template.response import TemplateResponse
from adminlinks.admin import AdminlinksMixin
from django.contrib.admin.options import ModelAdmin
try:
from django.contrib.admin.utils import display_for_field, unquote
except ImportError:
from django.contrib.admin.util import display_for_field, unquote
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponse, HttpResponseBadRequest, QueryDict
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
try:
import json
except ImportError: # pragma: no cover ... some older Python2 version.
from django.utils import simplejson as json
try:
from django.utils.encoding import force_text
except ImportError: # pragma: no cover ... < Django 1.5
from django.utils.encoding import force_unicode as force_text
from django.utils.translation import ugettext_lazy as _
from adminlinks.templatetags.utils import _add_link_to_context
from editregions.admin.inlines import EditRegionInline
from editregions.constants import (REQUEST_VAR_REGION, REQUEST_VAR_CT,
REQUEST_VAR_ID)
from editregions.utils.data import (get_modeladmin, get_content_type,
get_model_class, get_configuration,
attach_configuration)
from editregions.admin.changelist import EditRegionChangeList
from editregions.admin.forms import MovementForm
from editregions.admin.utils import (AdminChunkWrapper, shared_media,
guard_querystring_m,
TemplateRequestKeyValue)
from editregions.templatetags.editregion import chunk_iteration_context
from editregions.models import EditRegionChunk, EditRegionConfiguration
from editregions.text import (admin_chunktype_label, admin_summary_label,
admin_position_label, admin_modified_label,
region_v)
try:
from django.utils.text import Truncator
def truncate_words(s, num):
return Truncator(s).words(num, truncate='...')
except ImportError as e: # pragma: no cover
from django.utils.text import truncate_words
logger = logging.getLogger(__name__)
class EditRegionAdmin(ModelAdmin):
frontend_editing = True
fields = None
fieldsets = None
exclude = None
date_hierarchy = None
ordering = None
list_select_related = False
save_as = False
save_on_top = False
actions = None
change_list_template = 'admin/editregions/change_list.html'
list_display = [
# this should always be last, and not be in the list_display_links
'get_object_tools',
'get_subclass_type',
'get_subclass_summary',
]
list_display_links = ()
list_filter = [
'region',
]
def __init__(self, *args, **kwargs):
super(EditRegionAdmin, self).__init__(*args, **kwargs)
# disables the built in link building using
# EditRegionChangeList.url_for_result so that we can have useful
# links that we can customise.
self.list_display_links = self.get_list_display_links(
request=None, list_display=self.list_display,
)
def get_list_display_links(self, request, list_display):
"""
Disable the built in link building so we can have customised links
in the changelist.
"""
return (None,)
def get_list_display(self, request):
"""
A copy of the standard one, hard-copying the fields ...
"""
return self.list_display[:]
def get_changelist_link_html(self, obj, **kwargs):
wrapped_obj = AdminChunkWrapper(opts=obj._meta, obj=obj,
namespace=self.admin_site.name,
content_id=obj.content_id,
content_type=obj.content_type,
region=obj.region)
return ('<a href="{url}" data-adminlinks="autoclose" '
'class="chunktype-{app}-{model} chunk-metadata-{caller}" '
'data-no-turbolink>{data}</a>').format(
url=wrapped_obj.get_absolute_url(),
app=wrapped_obj.url_parts['app'],
model=wrapped_obj.url_parts['module'], **kwargs)
def get_subclass_type(self, obj):
"""
get the verbose name of the given object, which is likely a subclass
.. note::
By using this callable, we avoid the problem of being able to
sort by headers in the changelists (including on the change form)
:return: the subclass object's verbose name
:rtype: string
"""
modeladmin = get_modeladmin(obj)
if hasattr(modeladmin, 'get_editregions_subclass_type'):
value = modeladmin.get_editregions_subclass_type(obj=obj)
else:
value = obj._meta.verbose_name
value = strip_tags(force_text(value))
return self.get_changelist_link_html(obj, data=value,
caller='subclass')
get_subclass_type.allow_tags = True
get_subclass_type.short_description = admin_chunktype_label
def get_subclass_summary(self, obj):
"""
show a brief, HTML aware summary of the content.
.. note::
By using this callable, we avoid the problem of being able to
sort by headers in the changelists (including on the change form)
:return: short representation of the data, HTML included.
:rtype: string
"""
modeladmin = get_modeladmin(obj)
if hasattr(modeladmin, 'get_editregions_subclass_summary'):
value = modeladmin.get_editregions_subclass_summary(obj=obj)
elif hasattr(modeladmin, 'render_into_summary'):
context = chunk_iteration_context(index=0, value=obj,
iterable=(obj,))
context.update({'admin_summary': True})
value = modeladmin.render_into_summary(obj=obj, context=context)
else:
value = '[missing]'
value = strip_tags(force_text(value))
return self.get_changelist_link_html(obj, data=value,
caller='summary')
get_subclass_summary.allow_tags = True
get_subclass_summary.short_description = admin_summary_label
def get_object_tools(self, obj):
"""
Show the modifiers for this object. Currently just implements the
drag handle as per `django-treeadmin`_.
:return: the list of actions or tools available for this object
:rtype: string
"""
modeladmin = get_modeladmin(obj)
if hasattr(modeladmin, 'get_editregions_subclass_tools'):
value = modeladmin.get_editregions_subclass_tools(obj=obj)
else:
value = ''
return '<div class="chunk-object-tools">{value!s}</div>'.format(
value=value)
get_object_tools.allow_tags = True
get_object_tools.short_description = ''
# We're finished our list_display fields here.
def get_model_perms(self, request, *args, **kwargs):
"""
        Shadow method for the default ModelAdmin. Allows us to hide stuff.
By using an empty dictionary, permissions still work, but chunk administration
views are hidden from the default AdminSite index.
:param request: The WSGIRequest.
:return: Empty dictionary
"""
return {}
def get_urls(self):
# why this isn't a separate method in Django, I don't know.
from django.conf.urls import patterns, url
def wrap(view): # pragma: no cover this is from the Django admin
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
app_label = self.model._meta.app_label
if hasattr(self.model._meta, 'model_name'):
model_name = self.model._meta.model_name
else:
model_name = self.model._meta.module_name
info = (app_label, model_name)
urlpatterns = patterns('',
# parent_ct is the Django ContentType PK for
# the object the EditRegionChunks are bound to
# eg: a page, a blog post, a project.
# parent_id is the PK of the parent object in
# question. We don't know what format the PK takes
# so we accept anything.
#url(r'^(?P<parent_ct>\d+)/(?P<parent_id>.+)/$',
url(r'^$',
wrap(self.changelist_view),
name='%s_%s_changelist' % info),
# moving an object from one position to another
# potentially across regions.
url(r'^move/$',
wrap(self.move_view),
name='%s_%s_move' % info),
# this thing is needed, unfortunately, to enable
# the delete screen to work on EditRegionChunk
# subclasses.
# see https://code.djangoproject.com/ticket/20640
# As I'm not thrilled with the idea of finding
# the wrong edit screen ... we're going to
# re-point it at the history view.
url(r'^(.+)/$',
wrap(self.history_view),
name='%s_%s_change' % info),
)
return urlpatterns
urls = property(get_urls)
def move_view(self, request):
"""
Allows us to move a Chunk from one place to another.
Yes, it accepts request.GET, because I can't be bothered to monkey
patch the jQuery ajax sending to send a CSRF token. Screw it.
Data received in the request should be:
* `pk`
* `position`
* `region`
The form then handles moving everything in .save()
"""
form = MovementForm(data=request.GET, files=None, initial=None)
if form.is_valid() and self.has_change_permission(request, form.cleaned_data['pk']):
form.save()
html = self.render_changelists_for_object(
request=request, obj=form.cleaned_data['pk'].content_object)
json_data = {
'action': 'move', 'html': html,
'primary_key': form.cleaned_data['pk'].pk,
}
self.log_change(request, *form.change_message())
self.log_change(request, *form.parent_change_message())
return HttpResponse(json.dumps(json_data),
content_type='application/json')
return HttpResponseBadRequest(json.dumps(form.errors),
content_type='application/json')
def get_queryset(self, *args, **kwargs):
"""
Don't use the default queryset/manager, as it won't be our interface
to polymorphic (downcast) EditRegionChunk subclasses.
:param args: Stuff to pass through to
:meth:`~django.contrib.admin.options.BaseModelAdmin.get_ordering`
:param kwargs: Stuff to pass through to
:meth:`~django.contrib.admin.options.BaseModelAdmin.get_ordering`
:return: our EditRegionChunks, but already downcast to their final form.
:rtype: :class:`~django.db.models.query.QuerySet`
"""
qs = self.model.polymorphs.all().select_subclasses()
ordering = self.get_ordering(*args, **kwargs)
if ordering: # pragma: no cover ... I don't care, this should be fine.
qs = qs.order_by(*ordering)
return qs
queryset = get_queryset
def get_object(self, request, object_id):
"""
This overrides the default, to catch ObjectDoesNotExist, because we
can't guarantee what model is being referenced, as it's polymorphic.
"""
queryset = self.queryset(request)
try:
return queryset.get(pk=object_id)
except ObjectDoesNotExist:
return None
def get_changelist(self, *args, **kwargs):
return EditRegionChangeList
def changelist_view(self, request, extra_context=None):
parent_ct = request.GET[REQUEST_VAR_CT]
parent_id = request.GET[REQUEST_VAR_ID]
obj = get_model_class(parent_ct).objects.get(pk=parent_id)
extra_context = extra_context or {}
if request.is_ajax():
return HttpResponse(
self.render_changelists_for_object(request=request, obj=obj))
context = self.changelists_as_context_data(request, obj)
opts = self.model._meta
app_label = opts.app_label
context.update({
'module_name': force_text(opts.verbose_name_plural),
'title': _('Select %s to change') % force_text(opts.verbose_name),
'media': self.media,
'app_label': app_label,
'cl': {
'opts': {
'app_label': app_label,
'verbose_name_plural': opts.verbose_name_plural,
}
}
})
context.update(extra_context or {})
return TemplateResponse(request, self.change_list_template,
context, current_app=self.admin_site.name)
def get_changelists_for_object(self, request, obj, **kwargs):
changelists = []
if obj is not None:
logger.debug('Editing `{obj!r}`, so do '
'`get_changelists_for_object`'.format(obj=obj))
attach_configuration(obj, EditRegionConfiguration)
config = get_configuration(obj)
# Dynamic template changes ...
obj_admin = get_modeladmin(admin_namespace=self.admin_site.name,
obj=obj)
if hasattr(obj_admin, 'editregions_template_field'):
fieldname = obj_admin.editregions_template_field
template_name = request.GET.get(fieldname, None)
kv = TemplateRequestKeyValue(key=fieldname, value=template_name)
if config.is_valid_template(template_name):
logger.debug("{kv!r} was valid for this {obj!r} "
"and {modeladmin!r}".format(
kv=kv, obj=obj, modeladmin=obj_admin))
config.set_template(template_name)
# store the old get here, because it gets changed inside the region
# loops, which is a lossy process.
old_get = request.GET
# mutate the querystring and set some data onto it, which will
# be passed to the get_changelist_filters method, as well as
# being used to filter the ChangeList correctly.
# new_get = request.GET.copy()
new_get = QueryDict('', mutable=True)
new_get[REQUEST_VAR_CT] = get_content_type(obj).pk
new_get[REQUEST_VAR_ID] = obj.pk
for region in config.config:
new_get[REQUEST_VAR_REGION] = region
request.GET = new_get
our_list_display = self.list_display[:]
our_list_links = self.get_list_display_links(
request=request, list_display=our_list_display)
ChangeList = self.get_changelist(request, **kwargs)
cl = ChangeList(request=request, model=self.model,
list_display=our_list_display,
list_display_links=our_list_links,
list_filter=self.list_filter,
date_hierarchy=None, search_fields=None,
list_select_related=None, list_per_page=100,
list_max_show_all=100, list_editable=None,
model_admin=self, parent_obj=obj,
parent_conf=config)
changelists.append(cl)
# as the internal request.GET may be lossy, we restore the original
# data here.
request.GET = old_get
return changelists
def changelists_as_context_data(self, request, obj):
"""
Sets up a context which is understood by the `changelist_view` template
and EditRegionInline for displaying *the changelists only*
Also used by the move view and ChunkAdmin subclasses for rendering
out those changelists to the browser.
"""
return {
'inline_admin_formset': {
'formset': {
'region_changelists': self.get_changelists_for_object(request,
obj)
},
},
'request_is_ajax': request.is_ajax(),
}
def render_changelists_for_object(self, request, obj):
"""
Used by the move view, and ChunkAdmin subclasses to render *just the
changelists*, for returning as JSON to be rendered on the client
browser.
"""
context = self.changelists_as_context_data(request, obj)
return render_to_string(EditRegionInline.template, context)
@property
def media(self):
base_media = super(EditRegionAdmin, self).media
return base_media + shared_media
def render_into_region(self, obj, context, **kwargs):
msg = ("`render_into_region` called because the requested "
"chunk wasn't cast down - likely the model is no longer "
"enabled in the configuration.")
if settings.DEBUG:
raise NotImplementedError(msg)
logger.warning(msg)
return None
def render_into_summary(self, obj, context, **kwargs):
msg = ("`render_into_summary` called because the requested "
"chunk wasn't cast down - likely the model is no longer "
"enabled in the configuration.")
if settings.DEBUG:
raise NotImplementedError(msg)
logger.warning(msg)
return force_text(obj)
class ChunkAdmin(AdminlinksMixin):
actions = None
actions_on_top = False
actions_on_bottom = False
save_as = False
save_on_top = False
exclude = ['content_type', 'content_id', 'region', 'position']
def get_model_perms(self, request, *args, **kwargs):
"""
Shadow method for the default ModelAdmin. Allows us to hide chunks.
By using an empty dictionary, permissions still work, but chunk administration
views are hidden from the default AdminSite index.
:param request: The WSGIRequest.
:return: Empty dictionary
"""
return {}
def log_addition(self, request, object):
"""
        Logs against the Chunk, and its parent object.
"""
super(ChunkAdmin, self).log_addition(request, object)
super(ChunkAdmin, self).log_addition(request, object.content_object)
def log_change(self, request, object, message):
"""
        Logs against the Chunk, and its parent object.
"""
super(ChunkAdmin, self).log_change(request, object, message)
super(ChunkAdmin, self).log_change(request, object.content_object,
message)
def log_deletion(self, request, object, object_repr):
"""
        Logs against the Chunk, and its parent object.
"""
super(ChunkAdmin, self).log_deletion(request, object, object_repr)
super(ChunkAdmin, self).log_deletion(request, object.content_object,
object_repr)
@guard_querystring_m
def save_model(self, request, obj, form, change):
"""
Adds extra fields to the object so it's saved against the correct
content type etc.
"""
obj.content_type_id = request.GET[REQUEST_VAR_CT]
obj.content_id = request.GET[REQUEST_VAR_ID]
obj.region = request.GET[REQUEST_VAR_REGION]
# This is a new object, so let's put it in the last available position
if obj.position is None:
found = EditRegionChunk.objects.get_region_chunks(
content_type=obj.content_type, content_id=obj.content_id,
region=obj.region).count()
if found < 1:
obj.position = 0
else:
obj.position = found + 1
super(ChunkAdmin, self).save_model(request, obj, form, change)
def response_max(self, request, context):
"""
If a chunk limit has been reached,
adding a new one via `add_view` will instead return this view.
"""
possible_templates = [
'admin/editregions/limit_reached.html'
]
return render_to_response(possible_templates, context,
context_instance=RequestContext(request))
@guard_querystring_m
def add_view(self, request, *args, **kwargs):
"""
At this point, our querystring should be 'safe', and we can discover
if we need to stop early because of a chunk limit being reached.
"""
parent_id = request.GET[REQUEST_VAR_ID]
parent_ct = request.GET[REQUEST_VAR_CT]
region = request.GET[REQUEST_VAR_REGION]
# the following is all just about discovering chunk limits
parent_class = get_content_type(parent_ct).model_class()
parent_obj = parent_class.objects.get(pk=parent_id)
erc = EditRegionConfiguration(parent_obj)
available_chunks = erc.config[region]['models']
limit = available_chunks[self.model]
# now we have our possible limit, if there's a limit
# (no infinity set via None) ensure we haven't hit it yet.
if limit is not None:
logger.debug('Limit of %(limit)d found for %(cls)r in region '
'"%(region)s"' % {
'limit': limit,
'cls': self.model,
'region': region,
})
created_objs_count = (self.model.objects.filter(
content_type=parent_ct, content_id=parent_id,
region=region).only('pk').count())
already_created = max(0, created_objs_count)
if already_created >= limit:
logger.info('Already hit limit of %(limit)d, found %(exists)d '
'objects in the database' % {
'limit': limit,
'exists': already_created,
})
context = {
'found': already_created,
'limit': limit,
'region': erc.config[region]['name'],
'me': self.model._meta.verbose_name,
'parent': parent_class._meta.verbose_name,
}
return self.response_max(request, context=context)
# we haven't got a limit for this chunk type, so carry on as normal.
return super(ChunkAdmin, self).add_view(request, *args, **kwargs)
@guard_querystring_m
def change_view(self, request, *args, **kwargs):
"""
        This override only exists because I have no idea how to forcibly guard
the super() change_view without doing so.
"""
return super(ChunkAdmin, self).change_view(request, *args, **kwargs)
def maybe_fix_redirection(self, request, response, obj=None):
"""
This is basically a middleware for admin responses from add/edit
screens.
        Inspects a URL and adds in our required fields if they're not there.
eg: if a URL has no querystring, or the querystring does not contain
`content_id`, `content_type` and `region` it will attempt to insert
them, and if `_autoclose` was in the requesting URL, it should be
maintained.
"""
resp = super(ChunkAdmin, self).maybe_fix_redirection(request,
response, obj)
return_early = (
not resp.has_header('location'),
not hasattr(resp, 'redirect_parts'),
hasattr(resp, 'canonical'), # something wants to be *final*
obj is None,
)
if any(return_early):
resp['X-Chunkadmin-Response'] = 'early'
return resp
# get the modeladmin in question, from the URL provided.
func = resolve(resp.redirect_parts[2]).func
# python 3
if hasattr(func, '__closure__'):
func = func.__closure__
else: # python 2
func = func.func_closure
func = func[0].cell_contents
# it doesn't look like a chunk admin, so we can't know we need to
# redirect back to the parent.
if (not hasattr(func, 'response_max')
and not hasattr(func, 'render_into_region')):
resp['X-Chunkadmin-Response'] = 'not-chunkadmin'
return resp
# set up reasons to go back to the parent object's edit view.
redirect_to_parent_if = (
not self.wants_to_autoclose(request),
not self.wants_to_continue_editing(request)
)
# we don't want to autoclose, and we don't want to save a new
# or add another, so we're hopefully inside a bare add/change view
# so we probably ought to go back to the parent object's edit view.
if all(redirect_to_parent_if):
abuse_adminlink = _add_link_to_context(
admin_site=self.admin_site.name, request=request,
opts=obj.content_object._meta, permname='change',
url_params=[obj.content_id], query=resp.redirect_parts[3])
resp.redirect_parts = list(urlsplit(abuse_adminlink['link']))
resp['Location'] = urlunsplit(resp.redirect_parts)
resp['X-Chunkadmin-Response'] = 'redirect-to-parent'
return resp
# we either wanted to autoclose, or we wanted to continue/add another
# etc, so we don't want to redirect elsewhere, we just want to
# update the querystring with fields required by the ChunkAdmin
querystring = QueryDict(resp.redirect_parts[3], mutable=True)
# delete any values which could be wrong [but shouldn't be!]
for x in (REQUEST_VAR_REGION, REQUEST_VAR_CT, REQUEST_VAR_ID):
if x in querystring:
del querystring[x]
querystring.update({REQUEST_VAR_ID: obj.content_id,
REQUEST_VAR_CT: obj.content_type_id,
REQUEST_VAR_REGION: obj.region})
resp.redirect_parts[3] = querystring.urlencode()
resp['Location'] = urlunsplit(resp.redirect_parts)
resp['X-Chunkadmin-Response'] = 'autoclose'
return resp
@guard_querystring_m
def delete_view(self, request, object_id, extra_context=None):
"""
This override exists to guard the querystring, but also to provide
*needed data* to the available context. This is mostly used for ferrying
the parent object details to `get_response_delete_context` so that it
can render the changelists back to the client.
"""
obj = self.get_object(request, unquote(object_id))
needed_data = extra_context or {}
# Django has deprecated request.REQUEST. Sigh
found_popup_in_request = (
"_popup" in request.GET,
"_popup" in request.POST,
)
# emulate the behaviour of add/change_view
needed_data.update(is_popup=any(found_popup_in_request))
if obj is not None:
needed_data.update(gfk={'content_id': obj.content_id,
'content_type': obj.content_type,
'content_object': obj.content_object})
return super(ChunkAdmin, self).delete_view(request, object_id,
extra_context=needed_data)
def get_response_add_context(self, request, obj):
"""
Override the default contexts generated by AdminlinksMixin to add our
HTML.
"""
modeladmin = get_modeladmin(EditRegionChunk, self.admin_site.name)
changelists = modeladmin.render_changelists_for_object(
request=request, obj=obj.content_object)
context = super(ChunkAdmin, self).get_response_add_context(request, obj)
context.update(html=changelists)
return context
def get_response_change_context(self, request, obj):
"""
Override the default contexts generated by AdminlinksMixin to add our
HTML.
"""
modeladmin = get_modeladmin(EditRegionChunk, self.admin_site.name)
changelists = modeladmin.render_changelists_for_object(
request=request, obj=obj.content_object)
context = super(ChunkAdmin, self).get_response_change_context(request,
obj)
context.update(html=changelists)
return context
def get_response_delete_context(self, request, obj_id, extra_context):
"""
Override the default contexts generated by AdminlinksMixin to add our
HTML.
"""
modeladmin = get_modeladmin(EditRegionChunk, self.admin_site.name)
context = super(ChunkAdmin, self).get_response_delete_context(
request, obj_id, extra_context)
try:
changelists = modeladmin.render_changelists_for_object(
request=request, obj=extra_context['gfk']['content_object'])
context.update(html=changelists)
except KeyError as e:
# extra context didn't include gfk, or possibly content_object within
# that gfk key, either way, we now can't render the HTML for the
# client :(
pass
return context
def render_into_region(self, obj, context, **kwargs):
"""
These exist purely to avoid unexpected breakages if an admin subclass
doesn't implement them.
:param obj: The :class:`~editregions.models.EditRegionChunk` subclass
currently expecting to be rendered.
:param context: The overall template context.
:param extra: Additional data available when rendering this object,
mostly related to the current iteration state.
:return: Some output. Usually HTML for output on a page.
"""
msg = ('`render_into_region` not implemented on {0!r}'.format(
self.__class__))
if settings.DEBUG:
raise NotImplementedError(msg)
logger.warning(msg)
return None
def render_into_summary(self, obj, context, **kwargs):
"""
These exist purely to avoid unexpected breakages if an admin subclass
doesn't implement them.
:param obj: The :class:`~editregions.models.EditRegionChunk` subclass
currently expecting to be rendered.
:param context: The overall template context.
:param extra: Additional data available when rendering this summary,
mostly related to the current iteration state.
:return: Some output. Usually a text representation of the
:meth: `~editregions.admin.modeladmins.ChunkAdmin.render_into_region`
"""
msg = ('`render_into_summary` not implemented on {0!r}'.format(
self.__class__))
if settings.DEBUG:
raise NotImplementedError(msg)
logger.warning(msg)
return None
def get_editregions_subclass_tools(self, obj):
if hasattr(EditRegionChunk._meta, 'model_name'):
model_name = EditRegionChunk._meta.model_name
else:
model_name = EditRegionChunk._meta.module_name
url_to_move = '%(admin)s:%(app)s_%(chunkhandler)s_move' % {
'admin': self.admin_site.name,
'app': EditRegionChunk._meta.app_label,
'chunkhandler': model_name,
}
url_to_move2 = reverse(url_to_move)
delete_url = AdminChunkWrapper(opts=obj._meta,
namespace=self.admin_site.name,
obj=obj).get_delete_url()
value = ('<div class="drag_handle" data-pk="%(pk)s" '
'data-href="%(url)s"></div> <a class="delete_handle" '
'href="%(delete_url)s" data-adminlinks="autoclose" '
'data-no-turbolink>%(delete)s</a>' % {
'pk': obj.pk,
'url': url_to_move2,
'delete_url': delete_url,
'delete': _('Delete'),
})
return value
@property
def media(self):
media_instance = super(ChunkAdmin, self).media
return media_instance + Media(js=['editregions/js/childevents.js'])
class SupportsEditRegions(object):
editregion_template_name_suffix = '_detail'
def __init__(self, *args, **kwargs):
super(SupportsEditRegions, self).__init__(*args, **kwargs)
self.original_inlines = self.inlines[:]
def get_inline_instances(self, request, *args, **kwargs):
klass = EditRegionInline
new_inlines = []
if klass not in self.original_inlines:
new_inlines.append(klass)
self.inlines = new_inlines
return super(SupportsEditRegions, self).get_inline_instances(
request, *args, **kwargs)
def get_editregions_templates(self, obj):
opts = obj._meta
kwargs = {'app': opts.app_label, 'pk': obj.pk,
'suffix': self.editregion_template_name_suffix}
if hasattr(opts, 'model_name'):
kwargs.update(model=opts.model_name)
else:
kwargs.update(model=opts.module_name)
return (
'{app}/{model}{suffix}.{pk}.html'.format(**kwargs),
'{app}/{model}{suffix}.html'.format(**kwargs),
'{app}{suffix}.html'.format(**kwargs),
)
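    # A hedged illustration (not part of the original code) of the template
    # search order produced above; the app, model and pk values are made up:
    #
    #     for an object of app "news", model "article", pk 7 this returns
    #     ('news/article_detail.7.html',
    #      'news/article_detail.html',
    #      'news_detail.html')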
|
#coding=utf-8
'''
Created on 2016-3-10
@author: Devuser
'''
import threading
from gatesidelib.common.simplelogger import SimpleLogger
class business_driver(threading.Thread):
'''
classdocs
'''
def __init__(self,request_data):
threading.Thread.__init__(self)
self.request_data=request_data
self.thread_stop=False
def stop(self):
self.thread_stop=True
SimpleLogger.info("stop thread")
    def is_matched(self, request_data):
        '''
        Decide from the driver name whether this driver should handle the current request.
        Return value: True/False
'''
return False
|
from typing import List


class Solution:
def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:
if board[click[0]][click[1]] == 'M':
board[click[0]][click[1]] = 'X'
return board
dirs = [(-1, -1), (-1, 0), (-1, 1), (0, -1),
(0, 1), (1, -1), (1, 0), (1, 1)]
def getMinesCount(i: int, j: int) -> int:
minesCount = 0
for dx, dy in dirs:
x = i + dx
y = j + dy
if x < 0 or x == len(board) or y < 0 or y == len(board[0]):
continue
if board[x][y] == 'M':
minesCount += 1
return minesCount
def dfs(i: int, j: int) -> None:
if i < 0 or i == len(board) or j < 0 or j == len(board[0]):
return
if board[i][j] != 'E':
return
minesCount = getMinesCount(i, j)
board[i][j] = 'B' if minesCount == 0 else str(minesCount)
if minesCount == 0:
for dx, dy in dirs:
dfs(i + dx, j + dy)
dfs(click[0], click[1])
return board
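# A hedged usage sketch, not part of the original snippet; the board below is
# just an illustrative Minesweeper grid with a single mine:
if __name__ == "__main__":
    example_board = [["E", "E", "E", "E", "E"],
                     ["E", "E", "M", "E", "E"],
                     ["E", "E", "E", "E", "E"],
                     ["E", "E", "E", "E", "E"]]
    print(Solution().updateBoard(example_board, [3, 0]))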
|
import torch
import sys
import csv
import nestedtensor
import utils
import torchvision
from torch.nn import functional as F
import random
class DETRNestedTensor(object):
def __init__(self, tensors, mask):
self.tensors = tensors
self.mask = mask
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
cast_mask = self.mask.to(
*args, **kwargs) if self.mask is not None else None
return type(self)(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
@classmethod
def from_tensor_list(cls, tensor_list):
# TODO make this more general
if tensor_list[0].ndim == 3:
# TODO make it support different-sized images
max_size = tuple(max(s)
for s in zip(*[img.shape for img in tensor_list]))
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = (len(tensor_list),) + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1],
: img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return cls(tensor, mask)
# Performance tanks hard for lots of small Tensors as expected
DEVICE = torch.device('cuda')
NDIM = 256
NHEAD = 8
MODEL = torch.nn.MultiheadAttention(NDIM, NHEAD).to(DEVICE).eval()
def run_benchmark(bsz, mean_i, mean_j, var, autograd, writer):
RAND_INTS = [(int(random.gauss(mean_j, var)), int(
random.gauss(mean_i, var))) for _ in range(bsz)]
src_ = nestedtensor.nested_tensor(
[torch.randn(NDIM * i * j).float().reshape(NDIM, i, j) for (i, j) in RAND_INTS], device=DEVICE, dtype=torch.float)
src = []
for i, s in enumerate(src_):
src.append(i*len(s) + s)
detr_nt_src = DETRNestedTensor.from_tensor_list(src)
sparsity = int(detr_nt_src.decompose()[1].float().mean().item() * 10) / 10
def gen_t_loop_mha(src):
detr_nt_src = DETRNestedTensor.from_tensor_list(src)
src, mask = detr_nt_src.decompose()
src = src.flatten(2).permute(2, 0, 1).contiguous()
mask = mask.flatten(1).contiguous()
if autograd:
src.requires_grad_()
def te():
if autograd:
MODEL(src, src, src, key_padding_mask=mask,
need_weights=False)[0].sum().backward()
MODEL(src, src, src, key_padding_mask=mask,
need_weights=False)
return te
def gen_nt_mha(src):
src = nestedtensor.nested_tensor([t.flatten(1).permute(
1, 0) for t in src], device=DEVICE, dtype=torch.float, requires_grad=True)
def nt():
if autograd:
MODEL(src, src, src, need_weights=False)[0].sum().backward()
MODEL(src, src, src, need_weights=False)
return nt
result_t = {**utils.benchmark_fn(gen_t_loop_mha(src), 5.0, cuda=True), "bsz": bsz,
"sparsity": sparsity, "autograd": autograd, "var": var, "mean_i": mean_i, "mean_j": mean_j}
result_t["numel"] = sum([x.numel() for x in src_])
result_t["numel_div_avg_us"] = result_t["numel"] / result_t["avg_us"]
result_t["avg_ns_div_numel"] = result_t["avg_us"] / result_t["numel"] * 1000
writer.writerow(result_t)
result_nt = {**utils.benchmark_fn(gen_nt_mha(src), 5.0, cuda=True),
"bsz": bsz, "sparsity": 0.0, "autograd": autograd, "var": var, "mean_i": mean_i, "mean_j": mean_j}
result_nt["numel"] = sum([x.numel() for x in src_])
result_nt["numel_div_avg_us"] = result_nt["numel"] / result_nt["avg_us"]
result_nt["avg_ns_div_numel"] = result_nt["avg_us"] / result_nt["numel"] * 1000
writer.writerow(result_nt)
if __name__ == "__main__":
random.seed(1011)
torch.manual_seed(1011)
writer = csv.DictWriter(sys.stdout, fieldnames=[
"name", "avg_us", "std_us", "runs", "bsz", "sparsity",
"autograd", "var", "mean_i", "mean_j", "numel", "numel_div_avg_us",
"avg_ns_div_numel"])
writer.writeheader()
for var in [float(i) / 10 for i in range(0, 100, 50)]:
for autograd in [True, False]:
for batch_size in [2, 8, 16]:
run_benchmark(batch_size, 30, 30, var, autograd, writer)
|
import mysql.connector as mysql
from Constants import *
def sql_exec(
p_server: str,
p_database: str,
p_user: str,
p_password: str,
p_port: int,
p_sql: str,
p_result: int =1
):
"""
Execute queries in MySQL
:param p_server: server
:param p_database: db
:param p_user: login
:param p_password: password
:param p_port: port
:param p_result: flag that the output is necessary (default - 1)
"""
l_error=None
query_output=None
try:
cnct=mysql.connect(
host=p_server,
database=p_database,
user=p_user,
password=p_password,
port=p_port,
connection_timeout=30
)
except mysql.Error as e:
l_error=e.args[1]
return query_output, l_error
crsr=cnct.cursor()
try:
crsr.execute(p_sql)
if p_result==1:
query_output=crsr.fetchall()
else:
query_output=1
except mysql.Error as e:
l_error=e.args[1]
finally:
crsr.close()
cnct.close()
return query_output, l_error
C_CURRENT_TIMESTAMP_SQL="CURRENT_TIMESTAMP"
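# A hedged usage sketch, not part of the original module; host, database and
# credentials below are placeholders, not real project settings:
#
#     rows, error = sql_exec('localhost', 'test_db', 'user', 'secret', 3306,
#                            'SELECT NOW()', p_result=1)
#     if error is None:
#         print(rows)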
|
import cv2
import numpy as np
from utils import Util
import pyttsx3 as p
engine = p.init()
class Lane:
def __init__(self, path):
self.path = path
self.util = Util()
def run_img(self, path):
img = cv2.imread(path)
img = cv2.resize(img, (800, 600))
#self.detect(img)
cv2.imshow('Frame', img)
def run(self, path):
cap = cv2.VideoCapture(path)
out = cv2.VideoWriter('output.mp4',0x7634706d , 20.0, (640,480))
if (cap.isOpened() == False):
print("Error opening video stream or file")
while(cap.isOpened()):
ret, frame = cap.read()
if ret == True:
# Display the resulting frame
frame = cv2.resize(frame, (800, 600))
out.write(frame)
self.detect(frame)
cv2.imshow('Frame', frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
def detect(self, screen):
vert = np.array(
[[100, 550], [375, 350], [450, 350], [800, 550]], np.int32)
fin = self.util.edgeDetect(screen)
fin = self.util.roi(fin, [vert])
line = cv2.HoughLinesP(fin, 2, np.pi/180, 20, 7, 7)
if not(line is None):
for i in line:
cv2.line(screen, (i[0][0], i[0][1]),
(i[0][2], i[0][3]), (255, 0, 0), 10)
l1dataset = []
l2dataset = []
try:
straightxcors, straightycors = self.util.averageLanes(line)
xcors, ycors = self.util.getPoints(line)
l1dataset.append(straightxcors[0])
l1dataset.append(straightycors[0])
l2dataset.append(straightxcors[1])
            l2dataset.append(straightycors[1])
allstraightxcors = straightxcors[0] + straightxcors[1]
allstraightycors = straightycors[0] + straightycors[1]
l1m, l1b = self.util.linearRegression(l1dataset[0], l1dataset[1])
l2m, l2b = self.util.linearRegression(l2dataset[0], l2dataset[1])
allm, allb = self.util.linearRegression(
allstraightxcors, allstraightycors)
allxcor1 = int((allm * 350) + allb)
allxcor2 = int(allb)
filterl1x = []
filterl1y = []
filterl2x = []
filterl2y = []
for count, i in enumerate(ycors):
if (i*l2m + l2b < xcors[count]):
filterl2x.append(xcors[count])
filterl2y.append(i)
else:
filterl1x.append(xcors[count])
filterl1y.append(i)
l1inx1 = int((600 - l1b) / l1m)
l1inx2 = int((350-l1b) / l1m)
l2inx1 = int((600-l2b) / l2m)
l2inx2 = int((350-l2b) / l2m)
cv2.line(screen, (int(l1inx1), 600),
(int(l1inx2), 350), (0, 0, 0), 10)
cv2.line(screen, (int(l2inx1), 600),
(int(l2inx2), 350), (0, 0, 0), 10)
cv2.line(screen, (allxcor1, 600), (allxcor2,350), (255,0,0), 10)
turning = ""
results = self.util.intersection([l1m, l1b], [l2m, l2b])
if not (results is None):
if (results[0] > 400):
with open("write.txt", "w") as f:
f.write("Turn Left")
else:
with open("write.txt", "w") as f:
f.write("Turn Right")
else:
with open("write.txt", "w") as f:
f.write("Go straight")
try:
equ1, polyx1, polyy1 = self.util.polyReg(filterl2x, filterl2y)
for i in range(len(polyx1)):
if i == 0:
pass
else:
cv2.line(screen, (int(polyx1[i]), int(polyy1[i])), (int(
polyx1[i-1]), int(polyy1[i-1])), (255, 255, 0), 10)
except Exception as e:
print(e)
try:
equ2, polyx2, polyy2 = self.util.polyReg(filterl1x, filterl1y)
for i in range(len(polyx2)):
if i == 0:
pass
else:
cv2.line(screen, (int(polyx2[i]), int(polyy2[i])), (int(
polyx2[i-1]), int(polyy2[i-1])), (255, 255, 0), 10)
except:
pass
except Exception as e:
pass
return screen
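# A hedged usage sketch, not part of the original module; 'road.mp4' is a
# placeholder path for a dashcam-style video:
#
#     lane = Lane('road.mp4')
#     lane.run(lane.path)  # processes the video frame by frame and draws lanes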
|
h = float(input('enter the height: '))
l = float(input('enter the width: '))
t = (h*l)/2
print('you will need {} liters of paint'.format(t))
|
"""
Plot time for LCE, MCE, DCE and Holdout set
"""
import numpy as np
import datetime
import time
import random
import os # for displaying created PDF
import sys
sys.path.append('./../sslh')
from fileInteraction import save_csv_record
from utils import (from_dictionary_beliefs,
create_parameterized_H,
replace_fraction_of_rows,
to_centering_beliefs,
eps_convergence_linbp_parameterized,
matrix_difference,
introduce_errors,
showfig)
from estimation import estimateH, estimateH_baseline_serial, estimateH_baseline_parallel
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import collections # remove error bars
from matplotlib.pyplot import figure, xlabel, ylabel, savefig, show, xlim, ylim, xticks, grid, title
import pandas as pd
pd.set_option('display.max_columns', None) # show all columns from pandas
pd.options.mode.chained_assignment = None # default='warn'
from graphGenerator import planted_distribution_model_H
from inference import linBP_symmetric_parameterized
# -- Determine path to data *irrespective* of where the file is run from
from os.path import abspath, dirname, join
from inspect import getfile, currentframe
current_path = dirname(abspath(getfile(currentframe())))
figure_directory = join(current_path, 'figs')
data_directory = join(current_path, 'datacache')
def run(choice, create_data=False, add_data=False, show_plot=False, create_pdf=False, show_pdf=False, shorten_length=False,
show_arrows=False):
# -- Setup
CHOICE = choice
CREATE_DATA = create_data
ADD_DATA = add_data
SHOW_PLOT = show_plot
SHOW_PDF = show_pdf
CREATE_PDF = create_pdf
SHOW_STD = True ## FALSE for just scatter plot points
SHOW_ARROWS = show_arrows
# -- Default Graph parameters
rep_SameGraph = 1 # iterations on same graph
distribution = 'powerlaw'
exponent = -0.3
length = 5
variant = 1
EC = False
numberOfSplits = 1
scaling_vec = [None]*10
ymin = 0.3
ymax = 1
xmin = 1e-3
xmax = 1e3
xtick_lab = [1e-3, 0.01, 0.1, 1, 10, 100, 1000]
xtick_labels = [r'$10^{-3}$', r'$10^{-2}$', r'$10^{-1}$', r'$1$', r'$10$', r'$10^{2}$', r'$10^{3}$']
ytick_lab = np.arange(0, 1.1, 0.1)
k = 3
a = 1
rep_DifferentGraphs = 1 # iterations on different graphs
err = 0
avoidNeighbors = False
convergencePercentage_W = 0.99
facecolor_vec = ["#4C72B0", "#55A868", "#8172B2", "#C44E52", "#CCB974", "#64B5CD"]
label_vec = ['MCE', 'LCE', 'DCE', 'Holdout']
linewidth_vec = [4, 3, 1, 2, 2, 1]
# clip_ons = [True, True, True, True, True, True]
FILEZNAME = 'Fig_timing_accuracy_learning'
marker_vec = ['s', '^', 'v', 'o', 'x', '+', 'None'] #'^'
length_vec = [5]
stratified = True
f = 0.01
numMaxIt_vec = [10]*7
alpha_vec = [0] * 7
beta_vec = [0] * 7 # TODO: LinBP does not use beta. Also SSLH uses alpha, but not beta for W^row! Now fixed
gamma_vec = [0] * 7
s_vec = [0.5] * 7
# -- Main Options
if CHOICE == 1: # Main graph
n = 1000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['Holdout'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'Holdout', 'GS']
randomize_vec = [False]*3 + [True] + [None]*2
scaling_vec = [None]*2 + [10, 100] + [None]*2
splits_vec = [1, 2, 4, 8]
elif CHOICE == 2:
n = 1000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'GS']
randomize_vec = [False]*3 + [True] + [None]
scaling_vec = [None]*2 + [10, 100] + [None]
elif CHOICE == 3:
n = 1000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'GS']
randomize_vec = [False]*3 + [True] + [None]
scaling_vec = [None]*2 + [10, 100] + [None]
f = 0.02
elif CHOICE == 4: # TODO: Overnight Wolfgang
n = 1000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['Holdout'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'Holdout', 'GS']
randomize_vec = [False]*3 + [True] + [None]*2
scaling_vec = [None]*2 + [10, 100] + [None]*2
splits_vec = [1, 2, 4, 8, 16]
elif CHOICE == 5: # Toy graph with 100 nodes
n = 100
h = 3
d = 8
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['Holdout'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'Holdout', 'GS']
randomize_vec = [False]*3 + [True] + [None]*2
scaling_vec = [None]*2 + [10, 100] + [None]*2
splits_vec = [1, 2, 4, 8]
f=0.05
elif CHOICE == 6: # To be run by Prakhar on Cluster
n = 10000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['Holdout'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'GS']
randomize_vec = [False]*3 + [True] + [None]*2
scaling_vec = [None]*2 + [10, 100] + [None]*2
splits_vec = [1, 2, 4, 8]
f=0.003
xmin = 1e-2
# ymax = 0.9
ymin = 0.2
ymax = 0.9
xmin = 1e-2
xmax = 1e3
elif CHOICE == 7:
n = 1000
h = 3
d = 25
option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['Holdout'] + ['GS']
label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'Holdout', 'GS']
randomize_vec = [False]*3 + [True] + [None]*2
scaling_vec = [None]*2 + [10, 100] + [None]*2
splits_vec = [1, 2, 4, 8, 16]
f=0.009
# elif CHOICE == 8: # not working well
# n = 1000
# h = 3
# d = 25
# option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
# learning_method_vec = ['MHE'] + ['LHE'] + ['DHE'] + ['DHE'] + ['Holdout'] + ['GS']
# label_vec = ['MCE', 'LCE', 'DCE', 'DCE r', 'Holdout', 'GS']
# randomize_vec = [False]*3 + [True] + [None]*2
# scaling_vec = [None]*2 + [10, 100] + [None]*2
# splits_vec = [1, 2, 4, 8, 16]
# f=0.005
else:
raise Warning("Incorrect choice!")
csv_filename = '{}_{}.csv'.format(FILEZNAME, CHOICE)
header = ['currenttime',
'option',
'lensplit',
'f',
'accuracy',
'timetaken']
if CREATE_DATA:
save_csv_record(join(data_directory, csv_filename), header, append=False)
alpha0 = np.array([a, 1., 1.])
alpha0 = alpha0 / np.sum(alpha0)
H0 = create_parameterized_H(k, h, symmetric=True)
H0c = to_centering_beliefs(H0)
RANDOMSEED = None # For repeatability
random.seed(RANDOMSEED) # seeds some other python random generator
np.random.seed(seed=RANDOMSEED) # seeds the actually used numpy random generator; both are used and thus needed
# print("CHOICE: {}".format(CHOICE))
# -- Create data
if CREATE_DATA or ADD_DATA:
for i in range(rep_DifferentGraphs): # create several graphs with same parameters
# print("\ni: {}".format(i))
W, Xd = planted_distribution_model_H(n, alpha=alpha0, H=H0, d_out=d,
distribution=distribution,
exponent=exponent,
directed=False,
debug=False)
X0 = from_dictionary_beliefs(Xd)
for j in range(rep_SameGraph): # repeat several times for same graph
# print("j: {}".format(j))
ind = None
X1, ind = replace_fraction_of_rows(X0, 1-f, avoidNeighbors=avoidNeighbors, W=W, ind_prior=ind, stratified = stratified) # TODO: stratified sampling option = True
X2 = introduce_errors(X1, ind, err)
for option_index, (learning_method, alpha, beta, gamma, s, numMaxIt, weight, randomize, option) in \
enumerate(zip(learning_method_vec, alpha_vec, beta_vec, gamma_vec, s_vec, numMaxIt_vec, scaling_vec, randomize_vec, option_vec)):
# weight = np.array([np.power(scaling, i) for i in range(5)]) # TODO: now enough to specify weight as a scalar!
H_est_dict = {}
timeTaken_dict = {}
# -- Learning
if learning_method == 'Holdout' :
for numberOfSplits in splits_vec:
prev_time = time.time()
H_est_dict[numberOfSplits] = estimateH_baseline_serial(X2, ind, W, numMax=numMaxIt,
# ignore_rows=ind,
numberOfSplits=numberOfSplits,
# method=learning_method, variant=1, distance=length,
EC=EC,
weights=weight, alpha=alpha, beta=beta, gamma=gamma)
timeTaken = time.time() - prev_time
timeTaken_dict[numberOfSplits] = timeTaken
elif learning_method in ['LHE', 'MHE', 'DHE']: # TODO: no smartInit, just randomization as option
for length in length_vec:
prev_time = time.time()
H_est_dict[length] = estimateH(X2, W, method=learning_method, variant=1, randomize=randomize, distance=length, EC=EC, weights=weight)
timeTaken = time.time() - prev_time
timeTaken_dict[length] = timeTaken
elif learning_method == 'GS':
H_est_dict['GS'] = H0
for key in H_est_dict:
H_est = H_est_dict[key]
H2c = to_centering_beliefs(H_est)
# print("H_estimated by {} is \n".format(learning_method), H_est)
# print("H0 is \n", H0)
# print("randomize was: ", randomize)
# Propagation
X2c = to_centering_beliefs(X2, ignoreZeroRows=True) # try without
eps_max = eps_convergence_linbp_parameterized(H2c, W,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
X=X2)
eps = s * eps_max
# print("Max Eps ", eps_max)
try:
F, actualIt, actualPercentageConverged = \
linBP_symmetric_parameterized(X2, W, H2c * eps,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
numMaxIt=numMaxIt,
convergencePercentage=convergencePercentage_W,
convergenceThreshold=0.99,
debug=2)
except ValueError as e:
print(
"ERROR: {} with {}: d={}, h={}".format(e, learning_method, d, h))
else:
accuracy_X = matrix_difference(X0, F, ignore_rows=ind)
record = [str(datetime.datetime.now())]
if learning_method == 'Holdout':
text = [option,"split{}".format(key), f, accuracy_X, timeTaken_dict[key]]
elif learning_method in ['MHE', 'DHE', 'LHE']:
text = [option, "len{}".format(key), f, accuracy_X, timeTaken_dict[key]]
elif learning_method == 'GS':
text = [option, 0, f, accuracy_X, 0]
record.extend(text)
# print("option: {}, f: {}, actualIt: {}, accuracy: {}".format(option, f, actualIt, accuracy_X))
save_csv_record(join(data_directory, csv_filename), record)
# -- Read, aggregate, and pivot data for all options
df1 = pd.read_csv(join(data_directory, csv_filename))
# print("\n-- df1: (length {}):\n{}".format(len(df1.index), df1.head(15)))
# Aggregate repetitions
df2 = df1.groupby(['option', 'lensplit', 'f']).agg \
({'accuracy': [np.mean, np.std, np.size], # Multiple Aggregates
})
df2.columns = ['_'.join(col).strip() for col in df2.columns.values] # flatten the column hierarchy
df2.reset_index(inplace=True) # remove the index hierarchy
df2.rename(columns={'accuracy_size': 'count'}, inplace=True)
# print("\n-- df2 (length {}):\n{}".format(len(df2.index), df2.head(15)))
df3 = df1.groupby(['option', 'lensplit', 'f']).agg({'timetaken': [np.median] })
df3.columns = ['_'.join(col).strip() for col in df3.columns.values] # flatten the column hierarchy
df3.reset_index(inplace=True) # remove the index hierarchy
# resultdf3 = df3.sort(['timetaken'], ascending=1)
# print("\n-- df3 (length {}):\n{}".format(len(df3.index), df3.head(15)))
X_time_median_dict = {}
Y_acc_dict = {}
Y_std_dict = {}
for option in option_vec:
Y_acc_dict[option] = df2.loc[(df2['option'] == option), "accuracy_mean"].values
Y_std_dict[option] = df2.loc[(df2['option'] == option), "accuracy_std"].values
X_time_median_dict[option] = df3.loc[(df3['option'] == option), "timetaken_median"].values
# print("option: ", option)
# print("Y_acc_dict[option]: ", Y_acc_dict[option])
# print("Y_std_dict[option]: ", Y_std_dict[option])
# print("X_time_median_dict[option]: ", X_time_median_dict[option])
# -- Setup figure
fig_filename = '{}_{}.pdf'.format(FILEZNAME, CHOICE)
mpl.rc('font', **{'family': 'sans-serif', 'sans-serif': [u'Arial', u'Liberation Sans']})
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['legend.fontsize'] = 14
mpl.rcParams['grid.color'] = '777777' # grid color
mpl.rcParams['xtick.major.pad'] = 2 # padding of tick labels: default = 4
mpl.rcParams['ytick.major.pad'] = 1 # padding of tick labels: default = 4
mpl.rcParams['xtick.direction'] = 'out' # default: 'in'
mpl.rcParams['ytick.direction'] = 'out' # default: 'in'
mpl.rcParams['figure.figsize'] = [4, 4]
fig = figure()
ax = fig.add_axes([0.13, 0.17, 0.8, 0.8])
SHOW_ARROWS = True
for choice, color, learning_method, label, linewidth, marker in \
zip(option_vec, facecolor_vec, learning_method_vec, label_vec, linewidth_vec, marker_vec):
if learning_method == 'Holdout':
# Draw std
X1 = X_time_median_dict[choice]
s = X1.argsort()
X1 = X1[s]
Y1 = Y_acc_dict[choice][s]
Y2 = Y_std_dict[choice][s]
if SHOW_STD:
ax.fill_between(X1, Y1 + Y2, Y1 - Y2, facecolor=color, alpha=0.2, edgecolor=None, linewidth=0)
ax.plot(X1, Y1 + Y2, linewidth=0.5, color='0.8', linestyle='solid')
ax.plot(X1, Y1 - Y2, linewidth=0.5, color='0.8', linestyle='solid')
ax.set_ylim(bottom=ymin)
ax.plot(X1, Y1, linewidth=linewidth, color=color, linestyle='solid', label=label, zorder=20, marker='x', markersize=linewidth + 5, markeredgewidth=1)
ax.annotate(np.round(X1[1], decimals=1), xy=(X1[1], Y1[1] - 0.05), color=color, va='center', annotation_clip=False, zorder=5)
else:
ax.scatter(list(X1), list(Y1),
color=color, label=label, marker='x', s=42)
elif learning_method == 'GS':
ax.plot([1e-4, 1e4], [Y_acc_dict[choice], Y_acc_dict[choice]],
linewidth=1, color='black',
linestyle='dashed', zorder=0,
marker=None,
label=label,
)
else: # For all other
if SHOW_STD:
ax.errorbar(list(X_time_median_dict[choice]), list(Y_acc_dict[choice]), yerr=Y_std_dict[choice],
fmt='-o', linewidth=2, color=color,
label=label, marker=marker, markersize=8)
ax.annotate(np.round(X_time_median_dict[choice], decimals=2), xy=(X_time_median_dict[choice], Y_acc_dict[choice]-0.05), color=color, va='center',
annotation_clip=False, zorder=5)
else:
ax.scatter(list(X_time_median_dict[choice]), list(Y_acc_dict[choice]),
color=color, label=label, marker=marker, s=42)
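# Annotate the runtime gap between DCEr and the Holdout baseline with a double-headed arrow and the speed-up factor (opt4 = DCEr and opt5 = Holdout per the option/label vectors above).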
if SHOW_ARROWS:
dce_opt = 'opt4'
holdout_opt = 'opt5'
ax.annotate(s='', xy=(X_time_median_dict[dce_opt], Y_acc_dict[dce_opt]-0.3), xytext=(X_time_median_dict[holdout_opt][2]+0.02, Y_acc_dict[dce_opt]-0.3), arrowprops=dict(arrowstyle='<->'))
ax.annotate(str(int(np.round(X_time_median_dict[holdout_opt][2] / X_time_median_dict[dce_opt]))) + 'x', xy=((X_time_median_dict[dce_opt] + X_time_median_dict[holdout_opt][2])/100, Y_acc_dict[dce_opt]-0.28),
color='black', va='center',
# bbox = dict(boxstyle="round,pad=0.3", fc="w"),
annotation_clip=False, zorder=5)
# -- Title and legend
title(r'$\!\!\!n\!=\!{}\mathrm{{k}}, d\!=\!{}, h\!=\!{}, f\!=\!{}$'.format(int(n / 1000), d, h, f))
handles, label_vec = ax.get_legend_handles_labels()
for i, (h, learning_method) in enumerate(zip(handles, learning_method_vec)): # remove error bars in legend
if isinstance(handles[i], collections.Container):
handles[i] = handles[i][0]
# plt.legend(loc='upper left', numpoints=1, ncol=3, fontsize=8, bbox_to_anchor=(0, 0))
SHOW_STD = False
legend = plt.legend(handles, label_vec,
loc='upper right', # 'upper right'
handlelength=2,
fontsize=12,
labelspacing=0.2, # distance between label entries
handletextpad=0.3, # distance between label and the line representation
borderaxespad=0.2, # distance between legend and the outer axes
borderpad=0.3, # padding inside legend box
numpoints=1, # put the marker only once
)
if not(SHOW_STD):
legend = plt.legend(handles, label_vec,
loc='upper right', # 'upper right'
handlelength=2,
fontsize=10,
labelspacing=0.2, # distance between label entries
handletextpad=0.3, # distance between label and the line representation
borderaxespad=0.2, # distance between legend and the outer axes
borderpad=0.3, # padding inside legend box
numpoints=1, # put the marker only once
scatterpoints=1 # display only one-scatter point in legend
)
# # legend.set_zorder(1)
frame = legend.get_frame()
frame.set_linewidth(0.0)
frame.set_alpha(0.9) # 0.8
# -- Figure settings and save
plt.xscale('log')
plt.xticks(xtick_lab, xtick_labels)
plt.yticks(ytick_lab, ytick_lab)
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.1f'))
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_ylim(bottom=ymin)
grid(b=True, which='major', axis='both', alpha=0.2, linestyle='solid', linewidth=0.5) # linestyle='dashed', which='minor', axis='y',
grid(b=True, which='minor', axis='both', alpha=0.2, linestyle='solid', linewidth=0.5) # linestyle='dashed', which='minor', axis='y',
xlim(xmin, xmax)
ylim(ymin, ymax)
xlabel(r'Time Median (sec)', labelpad=0) # labelpad=0
ylabel(r'Accuracy', labelpad=0)
if CREATE_PDF:
savefig(join(figure_directory, fig_filename), format='pdf',
dpi=None,
edgecolor='w',
orientation='portrait',
transparent=False,
bbox_inches='tight',
pad_inches=0.05,
frameon=None)
if SHOW_PDF:
showfig(join(figure_directory, fig_filename))
if SHOW_PLOT:
plt.show()
if __name__ == "__main__":
run(6, show_plot=True)
|
# coding=utf-8
import enum
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Column, Integer, String, CHAR, SmallInteger, ForeignKey, Boolean, Enum
)
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = None
Session = None
class UserRole(enum.Enum):
student = 0
teacher = 1
admin = 2
int2UserRole = [UserRole.student, UserRole.teacher, UserRole.admin]
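# Lookup list translating integer role codes into UserRole members.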
class User(Base):
__tablename__ = "users"
user_id = Column(Integer, primary_key=True)
staff_id = Column(Integer)
role = Column(Enum(UserRole))
name = Column(String(32))
is_male = Column(Boolean)
password = Column(CHAR(64)) # only store sha256 hashed password
class Course(Base):
__tablename__ = "courses"
course_id = Column(Integer, primary_key=True)
course_name = Column(String(96))
start = Column(Integer)
end = Column(Integer)
class Class(Base):
__tablename__ = 'classes'
class_id = Column(Integer, primary_key=True)
class_name = Column(String(96))
weekday = Column(SmallInteger) # day of the week
start = Column(SmallInteger) # class period in which the session starts
end = Column(SmallInteger) # class period in which the session ends
teacher_id = Column(Integer, ForeignKey(User.user_id))
course_id = Column(Integer, ForeignKey(Course.course_id))
class Score(Base):
__tablename__ = 'scores'
score_id = Column(Integer, primary_key=True)
score = Column(Integer)
user_id = Column(Integer, ForeignKey(User.user_id))
class_id = Column(Integer, ForeignKey(Class.class_id))
class RelationUserClass(Base): # relation table between users (students) and classes
__tablename__ = 'rel-user-class'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey(User.user_id))
class_id = Column(Integer, ForeignKey(Class.class_id))
class CheckInCodes(Base):
__tablename__ = "check-in-codes"
code_id = Column(Integer, primary_key=True)
code = Column(SmallInteger)
class_id = Column(Integer, ForeignKey(Class.class_id))
started = Column(Boolean)
expire_at = Column(Integer)
class CheckedInStatus(enum.Enum):
awaiting = 0
normal = 1
late = 2
leave_early = 3
absent = 4
int2status = [
CheckedInStatus.awaiting,
CheckedInStatus.normal, CheckedInStatus.late,
CheckedInStatus.leave_early, CheckedInStatus.absent]
class CheckedInLogs(Base):
__tablename__ = "checked-in-logs"
log_id = Column(Integer, primary_key=True)
code_id = Column(Integer, ForeignKey(CheckInCodes.code_id))
user_id = Column(Integer, ForeignKey(User.user_id))
status = Column(Enum(CheckedInStatus))
class BelongType(enum.Enum):
CLASS = 0
COURSE = 1
class Material(Base):
__tablename__ = "materials"
file_id = Column(Integer, primary_key=True)
filename = Column(String(64))
internal_filename = Column(String(64))
size = Column(Integer)
uploaded_at = Column(Integer)
uploader_id = Column(Integer, ForeignKey(User.user_id))
belong_type = Column(Enum(BelongType))
belong_id = Column(Integer)
class Topic(Base):
__tablename__ = "topics"
topic_id = Column(Integer, primary_key=True)
title = Column(String(64))
content = Column(String(4096))
user_id = Column(Integer, ForeignKey(User.user_id))
created_at = Column(Integer)
updated_at = Column(Integer)
replies = Column(Integer)
belong_type = Column(Enum(BelongType))
belong_id = Column(Integer)
class Reply(Base):
__tablename__ = "replies"
reply_id = Column(Integer, primary_key=True)
topic_id = Column(Integer, ForeignKey(Topic.topic_id))
content = Column(String(4096))
user_id = Column(Integer, ForeignKey(User.user_id))
created_at = Column(Integer)
class Live(Base):
__tablename__ = "lives"
live_id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey(User.user_id))
title = Column(String(64))
introduction = Column(String(1024))
start = Column(Integer)
duration = Column(SmallInteger) # in mins
is_public = Column(Boolean)
is_streaming = Column(Boolean)
class RelationLiveClass(Base):
__tablename__ = "rel-live-class"
id = Column(Integer, primary_key=True)
live_id = Column(Integer, ForeignKey(Live.live_id))
class_id = Column(Integer, ForeignKey(Class.class_id))
def startup(conf):
global engine, Session
if engine and Session:
return
uri = '{dialect}+{driver}://{user}:{password}@{host}/{dbname}'.format(
**conf.db)
if conf.db.options:
# Join the option key/value pairs with '&' so the query string stays valid when several options are given.
uri += '?' + '&'.join(
'{}={}'.format(key, conf.db.options[key]) for key in conf.db.options)
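# For illustration only (hypothetical conf values), the resulting URI might look like:
# 'mysql+pymysql://user:secret@localhost/school?charset=utf8'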
engine = create_engine(uri, echo=conf.app.debug,
pool_size=conf.db.pool_size, max_overflow=5,
pool_recycle=3600)
Session = sessionmaker(bind=engine)
def init_db(conf):
startup(conf)
Base.metadata.bind = engine
Base.metadata.create_all()
if conf.app.debug:
sess = Session()
user1 = User(
staff_id=17052100,
role=UserRole.teacher,
name="陈老师",
is_male=True,
password='702ca6f0e58e2d8ddf98228d59e2efc6e19e82fd0858ee948289948f42d86232')
user2 = User(
staff_id=17052101,
role=UserRole.student,
name="陈学生",
is_male=False,
password='c203aa9c9171287d4a7356006646257866e9a5b6f2bea957ae7b0e1a30a07dfa')
user3 = User(
staff_id=10000,
role=UserRole.admin,
name="陈管理员",
is_male=True,
password='bede0c4938ce392d4a96b9743140e64954505b1973b08e386e74ac5bc963980d')
sess.add_all([user1, user2, user3])
sess.commit()
course1 = Course(
course_name="高等数学",
start=1552706506,
end=1553806506)
course2 = Course(
course_name="离散数学",
start=1552704506,
end=1553816506)
sess.add_all([course1, course2])
sess.commit()
class11 = Class(class_name="周一上午", weekday=1, start=3, end=5,
teacher_id=1, course_id=1)
class12 = Class(class_name="周三下午", weekday=3, start=6, end=8,
teacher_id=1, course_id=1)
class21 = Class(class_name="周二上午", weekday=2, start=1, end=2,
teacher_id=1, course_id=2)
class22 = Class(class_name="周三下午", weekday=3, start=6, end=7,
teacher_id=1, course_id=2)
sess.add_all([class11, class12, class21, class22])
sess.commit()
score1 = Score(score=95, user_id=2, class_id=1)
score2 = Score(score=98, user_id=2, class_id=3)
sess.add_all([score1, score2])
sess.commit()
rel1 = RelationUserClass(user_id=2, class_id=1)
sess.add_all([rel1])
sess.commit()
code1 = CheckInCodes(
code=1234, class_id=1, started=1, expire_at=1552707168)
sess.add_all([code1])
sess.commit()
log1 = CheckedInLogs(code_id=1, user_id=2, status=CheckedInStatus.late)
sess.add_all([log1])
sess.commit()
material1 = Material(
filename="高等数学课程培养方案.doc",
internal_filename="no such a file",
size=0,
uploaded_at=1552707319,
uploader_id=1,
belong_type=BelongType.COURSE,
belong_id=1)
material2 = Material(
filename="第一章.ppt",
internal_filename="no such a file",
size=0,
uploaded_at=1552707390,
uploader_id=1,
belong_type=BelongType.CLASS,
belong_id=1)
sess.add_all([material1, material2])
sess.commit()
topic1 = Topic(
title="高数好难啊",
content="如题,真的好难啊",
user_id=2,
created_at=1552707468,
updated_at=1552707582,
replies=2,
belong_type=BelongType.COURSE,
belong_id=1)
sess.add_all([topic1])
sess.commit()
reply1 = Reply(
topic_id=1,
content="附议!好难!",
user_id=2,
created_at=1552707554)
reply2 = Reply(
topic_id=1,
content="你挂了",
user_id=1,
created_at=1552707582)
sess.add_all([reply1, reply2])
sess.commit()
sess.close()
|
"""
Write a function that finds the smaller of two given numbers.
>>> min2(3, 1)
1
>>> min2(-1, 2)
-1
"""
def min2(a, b):
raise Exception('Not implemented')
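# A minimal implementation satisfying the doctests above could be, for example:
# return a if a <= b else b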
|
import urllib2
import pdb
pdb.set_trace()
authinfo = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(authinfo)
urllib2.install_opener(opener)
f = urllib2.urlopen('http://mail.google.com/a/spasticssocietyofkarnataka.org/#inbox')
print f.info()
|
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
"The ToExactListOf unit test."
__version__ = "$Revision: #1 $"
#===========================================================================
# Reqmtypes.ed imports. Do not modify these.
import unittest
#===========================================================================
# Place all imports after here.
#
import os
import mplStyle.types.convert as cvt
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class TesttoExactListOf( unittest.TestCase ):
"""ToExactListOf module."""
#-----------------------------------------------------------------------
def setUp( self ):
"""This method is called before any tests are run."""
# You may place initialization code here.
#-----------------------------------------------------------------------
def tearDown( self ):
"""This method is called after all tests are run."""
pass
#=======================================================================
# Add tests methods below.
# Any method whose name begins with 'test' will be run by the framework.
#=======================================================================
def testToExactListOf( self ):
"""Test the ToExactListOf converter."""
# Test the case with only one element.
converter = cvt.toExactListOf
cvtList = [ cvt.Converter( cvt.toType, float, allowNone=False ) ]
d1 = "10"
right = [ 10 ]
result = converter( [ d1 ], cvtList )
self.assertEqual( right, result,
"Incorrect conversion of list with one element." )
self.assertRaises( Exception, converter, d1, cvtList, name='value',
msg="Use non-list argument should be an error." )
self.assertRaises( Exception, converter, [ d1, d1 ], cvtList,
name='value',
msg="Use list with wrong size should be an error." )
self.assertRaises( Exception, converter, [ "foo bar" ], cvtList,
name='value',
msg="Use list with wrong type should be an error." )
self.assertRaises( Exception, converter, None, cvtList, name='value',
msg="Use None with AllowNone=False should be an error." )
# Test the case with multiple elements.
cvtList = [ cvt.Converter( cvt.toType, float ),
cvt.Converter( cvt.toType, str ) ]
s1 = "test"
f1 = "123"
right = [ 123.0, "test" ]
result = converter( [ f1, s1 ], cvtList )
self.assertEqual( right, result,
"Incorrect conversion of list with multiple elements." )
result = converter( ( f1, s1 ), cvtList )
self.assertEqual( right, result, "Incorrect conversion of tuple." )
self.assertRaises( Exception, converter, s1, cvtList,
msg="Use non-list argument should be an error." )
self.assertRaises( Exception, converter, [ s1 ], cvtList,
msg="Use list with wrong size should be an error." )
self.assertRaises( Exception, converter, [ s1, f1 ], cvtList,
msg="Use list with wrong order should be an error." )
self.assertRaises( Exception, converter, None, cvtList,
msg="Use None with AllowNone=False should be an error." )
#-----------------------------------------------------------------------
def testAllowNone( self ):
"""Test allow none."""
converter = cvt.toExactListOf
cvtList = [ cvt.Converter( cvt.toType, float ) ]
result = converter( None, cvtList, allowNone=True )
self.assertEqual( None, result, "Incorrect conversion of none." )
s1 = "123"
right = [ 123 ]
result = converter( [ s1 ], cvtList, allowNone=True )
self.assertEqual( right, result, "Incorrect conversion with allow none." )
#===========================================================================
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
from oneflow import nn
import os
import numpy as np
import oneflow.unittest
class TestModule(nn.Module):
def forward(self, x):
sbp_1ds = [
flow.sbp.broadcast,
flow.sbp.partial_sum,
flow.sbp.split(0),
flow.sbp.split(1),
flow.sbp.split(2),
flow.sbp.split(3),
]
y = x
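# Exhaustively sweep every ordered pair of 2-D SBP signatures, i.e. all 6**4 (sbp1, sbp2) -> (sbp3, sbp4) transitions.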
for sbp1 in sbp_1ds:
for sbp2 in sbp_1ds:
for sbp3 in sbp_1ds:
for sbp4 in sbp_1ds:
# (2, 2) -> (2, 2)
x = x.to_global(sbp=[sbp1, sbp2])
x = x.to_global(sbp=[sbp3, sbp4])
return x
class _TestGraph(nn.Graph):
def __init__(self, model):
super().__init__()
self.model = model
def build(self, x):
x = self.model(x)
return x
@flow.unittest.skip_unless_1n4d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestLazyAllSbpCombinationTesting(flow.unittest.TestCase):
def test_lazy_boxing_2d_all_combination(test_case):
model = TestModule()
graph = _TestGraph(model)
x = flow.ones(
4,
4,
4,
4,
sbp=[flow.sbp.broadcast, flow.sbp.broadcast],
placement=flow.placement(
type="cuda", ranks=np.array(range(4)).reshape(2, 2)
),
)
y = graph(x)
if __name__ == "__main__":
unittest.main()
|
"""
Parse denovo vars and use only epilepsy genes.
"""
import pandas, sys
dat_file, xls_file, out_file = sys.argv[1:]
df_pre = pandas.read_excel(xls_file)
genes = set(df_pre['Gene Symbol'].values)
cols = ['Chr', 'Position', 'Variant']
denovo_df = pandas.read_csv(dat_file, dtype={'Position':int}, skiprows=0, header=1, sep='\t')
def filter_genes(row):
# Gene filter is currently disabled and every row passes; the intended check was: row['Gene'] in genes
return True
new_data =[]
for row in denovo_df[denovo_df.apply(filter_genes, axis=1)][cols].itertuples():
ref, alt = row.Variant.split('>')
ls = (row.Chr, row.Position, '', ref, alt, '.', '.')
new_data.append(ls)
idx = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER']
new_df = pandas.DataFrame(new_data, columns=idx)
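# Write a minimal VCFv4.0 file: the fileformat header line followed by de-duplicated, position-sorted, tab-separated records.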
with open(out_file, 'w') as fout:
print('##fileformat=VCFv4.0', file=fout)
new_df.drop_duplicates().sort_values(by=['#CHROM', 'POS'], ascending=[True, True]).to_csv(fout, index=False, sep='\t')
|
#coding: utf-8
'''
Universidade Federal de Pernambuco -- UFPE (http://www.ufpe.br)
Centro de Informatica -- CIn (http://www.cin.ufpe.br)
B.Sc. in Information Systems
IF969 -- Algorithms and Data Structures
Author: Marcos Antonio Tavares Galvão
Email: matg@cin.ufpe.br
License: The MIT License (MIT)
Copyright(c) 2018 Marcos Antonio Tavares Galvão
'''
from Documentos import Documents
from linked_list import Lista
from trie import Trie
import os
from memory_profiler import profile
from cProfile import Profile
class Corpus:
'''
Class representing a Corpus, which is made up of several Documents from the same path.
'''
def __init__(self, directory):
'''
Receives a directory as a parameter, building an index of the files present in that path.
'''
self.__directory = directory
self.__all_documents, self.__trie_words_documents = Corpus.__create_documents(self, self.__directory)
#self.__trie_words_documents = Corpus.__create_trie(self)
self.__number_of_documents = len(self.__all_documents)
def __str__(self):
'''
Returns a string representation of the list containing all the documents.
'''
return str(self.__all_documents)
def __repr__(self):
'''
Returns a valid expression for instantiating a Corpus identical to this one.
'''
return 'Corpus("{0}")'.format(self.__directory)
def __iter__(self):
'''
Returns the iterator over the list of Documents.
'''
return iter(self.__all_documents)
def __getitem__(self, key):
'''
Returns the item indexed by the given key.
'''
return self.__all_documents[key]
def __len__(self):
'''
Returns the number of Documents that make up the Corpus.
'''
return self.__number_of_documents
def __create_documents(self, directory):
'''
Receives a directory, instantiates a Documents() object for each file in it and adds its n-gram words to the trie.
'''
archives = os.listdir(directory)
documents = Lista()
temp = Trie()
for archive_name in archives:
doc = Documents(directory+archive_name)
documents.anexar(doc)
for ngram in doc.ngrams:
aux_words = ngram.sequence
string = ''
for k in aux_words:
string += k + ' '
temp.add(string,doc)
return documents, temp
def check_plagiarism(self, document, limit):
'''
Receives a Documents object and a plagiarism threshold, runs the plagiarism check and
returns a list containing the files that are possible sources of the plagiarism.
'''
base_documents = {}
for doc in self.__all_documents:
base_documents[doc] = 0
for ngram in document.ngrams:
aux_words = ngram.sequence
string = ''
for k in aux_words:
string += k + ' '
docs_base = self.__trie_words_documents.search(string)
if docs_base is not None:
for doc in docs_base:
base_documents[doc] += 1
final_list = Lista()
for pair in base_documents.items():
restraint = pair[1]/len(pair[0])
if restraint >= limit:
final_list.anexar(pair[0].local)
return final_list
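# Hypothetical usage sketch (paths and threshold are illustrative):
# corpus = Corpus('./corpus_docs/')
# suspects = corpus.check_plagiarism(Documents('./suspect.txt'), 0.5)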
@property
def directory(self):
'''
Returns the directory from which the Corpus was built.
'''
return self.__directory
|
#from distutils.core import setup
import os
from setuptools import setup
from setuptools.command.install import install
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class MyInstall(install):
def run(self):
install.run(self)
#print("\n\n\n\nI did it!!!!\n\n\n\n")
cmd = 'sudo activate-global-python-argcomplete'
print(cmd)
os.system(cmd)
setup(
name="pymake2",
version = "0.5.33",
author = "Saud Wasly",
author_email = "saudalwasli@gmail.com",
description = ("pymake2 is a simple Python-based make system. It brings simplicity and flexibility of Python language to makefiles."),
license = "MIT",
keywords = "make makefile build",
url = "https://bitbucket.org/saudalwasly/pymake2/src",
install_requires=["argcomplete", "sarge"],
packages=['pymake2'],
scripts=['pymake2/pymake2', 'pymake2/pmake2'],
#data_files = [('', ['__init__.py', 'pymake2', 'make.py', 'utility.py', 'makefile_template.py'])] ,
long_description=read('README.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
cmdclass={'install': MyInstall}
)
|
import uuid
from styleguide_example.users.models import BaseUser
def auth_user_get_jwt_secret_key(user: BaseUser) -> str:
return str(user.jwt_key)
def auth_jwt_response_payload_handler(token, user=None, request=None, issued_at=None):
"""
Default implementation. Add whatever suits you here.
"""
return {"token": token}
def auth_logout(user: BaseUser) -> BaseUser:
user.jwt_key = uuid.uuid4()
user.full_clean()
user.save(update_fields=["jwt_key"])
return user
|
# Copyright 2016 Hewlett Packard Enterprise Development LP.
# Copyright 2016 Universidade Federal de Campina Grande
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import importutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.oneview import common
from ironic.drivers.modules.oneview import deploy_utils
from ironic import objects
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
oneview_models = importutils.try_import('oneview_client.models')
@mock.patch.object(common, 'get_oneview_client', spec_set=True, autospec=True)
class OneViewDeployUtilsTestCase(db_base.DbTestCase):
def setUp(self):
super(OneViewDeployUtilsTestCase, self).setUp()
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
mgr_utils.mock_the_extension_manager(driver='fake_oneview')
self.driver = driver_factory.get_driver('fake_oneview')
self.node = obj_utils.create_test_node(
self.context, driver='fake_oneview',
properties=db_utils.get_test_oneview_properties(),
driver_info=db_utils.get_test_oneview_driver_info(),
)
self.info = common.get_oneview_info(self.node)
# Tests for prepare
def test_prepare_node_is_in_use_by_oneview(self, mock_get_ov_client):
"""`prepare` behavior when the node already has a Profile on OneView.
"""
oneview_client = mock_get_ov_client()
fake_server_hardware = oneview_models.ServerHardware()
fake_server_hardware.server_profile_uri = "/any/sp_uri"
oneview_client.get_server_hardware.return_value = fake_server_hardware
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = True
task.node.driver_info = driver_info
task.node.provision_state = states.DEPLOYING
self.assertRaises(
exception.InstanceDeployFailure,
deploy_utils.prepare,
oneview_client,
task
)
@mock.patch.object(objects.Node, 'save')
def test_prepare_node_is_successfuly_allocated_to_ironic(
self, mock_node_save, mock_get_ov_client
):
"""`prepare` behavior when the node is free from OneView standpoint.
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = None
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.provision_state = states.DEPLOYING
deploy_utils.prepare(oneview_client, task)
self.assertTrue(oneview_client.clone_template_and_apply.called)
self.assertTrue(oneview_client.get_server_profile_from_hardware.called)
# Tests for tear_down
def test_tear_down(self, mock_get_ov_client):
"""`tear_down` behavior when node already has Profile applied
"""
sp_uri = '/rest/server-profiles/1234556789'
ov_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
ov_client = mock_get_ov_client.return_value
ov_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = \
'/rest/server-profiles/1234556789'
task.node.driver_info = driver_info
self.assertTrue(
'applied_server_profile_uri' in task.node.driver_info
)
deploy_utils.tear_down(ov_client, task)
self.assertFalse(
'applied_server_profile_uri' in task.node.driver_info
)
self.assertTrue(
ov_client.delete_server_profile.called
)
# Tests for prepare_cleaning
@mock.patch.object(objects.Node, 'save')
def test_prepare_cleaning_when_node_does_not_have_sp_applied(
self, mock_node_save, mock_get_ov_client
):
"""`prepare_cleaning` behavior when node is free
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = None
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
deploy_utils.prepare_cleaning(oneview_client, task)
self.assertTrue(oneview_client.clone_template_and_apply.called)
@mock.patch.object(objects.Node, 'save')
def test_prepare_cleaning_when_node_has_sp_applied(
self, mock_node_save, mock_get_ov_client
):
"""`prepare_cleaning` behavior when node already has Profile applied
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = 'same/sp_applied'
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = 'same/sp_applied'
task.node.driver_info = driver_info
deploy_utils.prepare_cleaning(oneview_client, task)
self.assertFalse(oneview_client.clone_template_and_apply.called)
def test_prepare_cleaning_node_is_in_use_by_oneview(
self, mock_get_ov_client
):
"""`prepare_cleaning` behavior when node has Server Profile on OneView
"""
oneview_client = mock_get_ov_client()
fake_server_hardware = oneview_models.ServerHardware()
fake_server_hardware.server_profile_uri = "/any/sp_uri"
oneview_client.get_server_hardware.return_value = fake_server_hardware
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = True
task.node.driver_info = driver_info
task.node.provision_state = states.DEPLOYING
self.assertRaises(
exception.NodeCleaningFailure,
deploy_utils.prepare_cleaning,
oneview_client,
task
)
# Tests for tear_down_cleaning
def test_tear_down_cleaning(self, mock_get_ov_client):
"""Checks if Server Profile was deleted and its uri removed
"""
sp_uri = '/rest/server-profiles/1234556789'
ov_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = sp_uri
ov_client = mock_get_ov_client.return_value
ov_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = \
'/rest/server-profiles/1234556789'
task.node.driver_info = driver_info
self.assertIn('applied_server_profile_uri', task.node.driver_info)
deploy_utils.tear_down_cleaning(ov_client, task)
self.assertNotIn('applied_server_profile_uri',
task.node.driver_info)
self.assertTrue(ov_client.delete_server_profile.called)
# Tests for is_node_in_use_by_oneview
def test_is_node_in_use_by_oneview(self, mock_get_ov_client):
"""Node has a Server Profile applied by a third party user.
"""
oneview_client = mock_get_ov_client()
fake_server_hardware = oneview_models.ServerHardware()
fake_server_hardware.server_profile_uri = "/any/sp_uri"
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = True
task.node.driver_info = driver_info
self.assertTrue(
deploy_utils.is_node_in_use_by_oneview(oneview_client,
task.node)
)
def test_is_node_in_use_by_oneview_no_server_profile(
self, mock_get_ov_client
):
"""Node has no Server Profile.
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = None
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertFalse(
deploy_utils.is_node_in_use_by_oneview(oneview_client,
task.node)
)
def test_is_node_in_use_by_oneview_same_server_profile_applied(
self, mock_get_ov_client
):
"""Node's Server Profile uri is the same applied by ironic.
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = 'same/applied_sp_uri/'
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = 'same/applied_sp_uri/'
task.node.driver_info = driver_info
self.assertFalse(
deploy_utils.is_node_in_use_by_oneview(oneview_client,
task.node)
)
# Tests for is_node_in_use_by_ironic
def test_is_node_in_use_by_ironic(self, mock_get_ov_client):
"""Node has a Server Profile applied by ironic.
"""
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = "same/applied_sp_uri/"
ov_client = mock_get_ov_client.return_value
ov_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = True
driver_info['applied_server_profile_uri'] = 'same/applied_sp_uri/'
task.node.driver_info = driver_info
self.assertTrue(
deploy_utils.is_node_in_use_by_ironic(ov_client, task.node)
)
def test_is_node_in_use_by_ironic_no_server_profile(
self, mock_get_ov_client
):
"""Node has no Server Profile.
"""
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = None
ov_client = mock_get_ov_client.return_value
ov_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertFalse(
deploy_utils.is_node_in_use_by_ironic(ov_client, task.node)
)
# Tests for _add_applied_server_profile_uri_field
def test__add_applied_server_profile_uri_field(self, mock_get_ov_client):
"""Checks if applied_server_profile_uri was added to driver_info.
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
task.node.driver_info = driver_info
fake_server_profile = oneview_models.ServerProfile()
fake_server_profile.uri = 'any/applied_sp_uri/'
self.assertNotIn('applied_server_profile_uri',
task.node.driver_info)
deploy_utils._add_applied_server_profile_uri_field(
task.node,
fake_server_profile
)
self.assertIn('applied_server_profile_uri', task.node.driver_info)
# Tests for _del_applied_server_profile_uri_field
def test__del_applied_server_profile_uri_field(self, mock_get_ov_client):
"""Checks if applied_server_profile_uri was removed from driver_info.
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/'
task.node.driver_info = driver_info
self.assertIn('applied_server_profile_uri', task.node.driver_info)
deploy_utils._del_applied_server_profile_uri_field(task.node)
self.assertNotIn('applied_server_profile_uri',
task.node.driver_info)
# Tests for allocate_server_hardware_to_ironic
@mock.patch.object(objects.Node, 'save')
def test_allocate_server_hardware_to_ironic(
self, mock_node_save, mock_get_ov_client
):
"""Checks if a Server Profile was created and its uri is in driver_info.
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = None
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
deploy_utils.allocate_server_hardware_to_ironic(
oneview_client, task.node, 'serverProfileName'
)
self.assertTrue(oneview_client.clone_template_and_apply.called)
self.assertIn('applied_server_profile_uri', task.node.driver_info)
@mock.patch.object(objects.Node, 'save')
@mock.patch.object(deploy_utils,
'_del_applied_server_profile_uri_field')
def test_allocate_server_hardware_to_ironic_node_has_server_profile(
self, mock_delete_applied_sp, mock_node_save, mock_get_ov_client
):
"""Tests server profile allocation when applied_server_profile_uri exists.
This test consider that no Server Profile is applied on the Server
Hardware but the applied_server_profile_uri remained on the node. Thus,
the conductor should remove the value and apply a new server profile to
use the node.
"""
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = None
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/'
task.node.driver_info = driver_info
deploy_utils.allocate_server_hardware_to_ironic(
oneview_client, task.node, 'serverProfileName'
)
self.assertTrue(mock_delete_applied_sp.called)
# Tests for deallocate_server_hardware_from_ironic
@mock.patch.object(objects.Node, 'save')
def test_deallocate_server_hardware_from_ironic(
self, mock_node_save, mock_get_ov_client
):
oneview_client = mock_get_ov_client()
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = 'any/applied_sp_uri/'
oneview_client.get_server_hardware_by_uuid.return_value = fake_sh
mock_get_ov_client.return_value = oneview_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/'
task.node.driver_info = driver_info
deploy_utils.deallocate_server_hardware_from_ironic(
oneview_client, task.node
)
self.assertTrue(oneview_client.delete_server_profile.called)
self.assertTrue(
'applied_server_profile_uri' not in task.node.driver_info
)
@mock.patch.object(objects.Node, 'save')
def test_deallocate_server_hardware_from_ironic_missing_profile_uuid(
self, mock_node_save, mock_get_ov_client
):
"""Test for case when server profile application fails.
Due to an error when applying Server Profile in OneView,
the node will have no Server Profile uuid in the
'applied_server_profile_uri' namespace. When the method
tested is called without Server Profile uuid, the client
will raise a ValueError when trying to delete the profile,
this error is converted to an OneViewError.
"""
ov_client = mock_get_ov_client.return_value
fake_sh = oneview_models.ServerHardware()
fake_sh.server_profile_uri = 'any/applied_sp_uri/'
ov_client.get_server_hardware_by_uuid.return_value = fake_sh
ov_client.delete_server_profile.side_effect = ValueError
mock_get_ov_client.return_value = ov_client
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['applied_server_profile_uri'] = 'any/applied_sp_uri/'
task.node.driver_info = driver_info
self.assertRaises(
exception.OneViewError,
deploy_utils.deallocate_server_hardware_from_ironic,
ov_client,
task.node
)
self.assertTrue(ov_client.delete_server_profile.called)
self.assertTrue(
'applied_server_profile_uri' in task.node.driver_info
)
|
'''Getting into the application'''
from flask import Flask
import os
from app import create_app
app = create_app()
if __name__ == '__main__':
app.run()
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class AccessTokenTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.access_tokens.create(identity="identity", factor_type="push")
values = {'Identity': "identity", 'FactorType': "push", }
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/AccessTokens',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "YKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"entity_identity": "ff483d1ff591898a9942916050d2ca3f",
"factor_type": "push",
"factor_friendly_name": "John Doe iPhone",
"token": "eyJ6aXAiOiJERUYiLCJraWQiOiJTQVNfUzNfX19LTVNfdjEiLCJjdHkiOiJ0d2lsaW8tZnBhO3Y9MSIsImVuYyI6IkEyNTZHQ00iLCJhbGciOiJkaXIifQ..qjltWfIgQaTwp2De.81Z_6W4kR-hdlAUvJQCbwS8CQ7QAoFRkOvNMoySEj8zEB4BAY3MXhPARfaK4Lnr4YceA2cXEmrzPKQ7bPm0XZMGYm1fqLYzAR8YAqUetI9WEdQLFytg1h4XnJnXhgd99eNXsLkpKHhsCnFkchV9eGpRrdrfB0STR5Xq0fdakomb98iuIFt1XtP0_iqxvxQZKe1O4035XhK_ELVwQBz_qdI77XRZBFM0REAzlnEOe61nOcQxkaIM9Qel9L7RPhcndcCPFAyYjxo6Ri5c4vOnszLDiHmeK9Ep9fRE5-Oz0px0ZEg_FgTUEPFPo2OHQj076H1plJnFr-qPINDJkUL_i7loqG1IlapOi1JSlflPH-Ebj4hhpBdMIcs-OX7jhqzmVqkIKWkpPyPEmfvY2-eA5Zpoo08YpqAJ3G1l_xEcHl28Ijkefj1mdb6E8POx41skAwXCpdfIbzWzV_VjFpmwhacS3JZNt9C4hVG4Yp-RGPEl1C7aJHRIUavAmoRHaXbfG20zzv5Zu0P5PcopDszzoqVfZpzc.GCt35DWTurtP-QaIL5aBSQ",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AccessTokens/YKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.access_tokens.create(identity="identity", factor_type="push")
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.access_tokens("YKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/AccessTokens/YKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "YKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"entity_identity": "ff483d1ff591898a9942916050d2ca3f",
"factor_type": "push",
"factor_friendly_name": "John Doe iPhone",
"token": "eyJ6aXAiOiJERUYiLCJraWQiOiJTQVNfUzNfX19LTVNfdjEiLCJjdHkiOiJ0d2lsaW8tZnBhO3Y9MSIsImVuYyI6IkEyNTZHQ00iLCJhbGciOiJkaXIifQ..qjltWfIgQaTwp2De.81Z_6W4kR-hdlAUvJQCbwS8CQ7QAoFRkOvNMoySEj8zEB4BAY3MXhPARfaK4Lnr4YceA2cXEmrzPKQ7bPm0XZMGYm1fqLYzAR8YAqUetI9WEdQLFytg1h4XnJnXhgd99eNXsLkpKHhsCnFkchV9eGpRrdrfB0STR5Xq0fdakomb98iuIFt1XtP0_iqxvxQZKe1O4035XhK_ELVwQBz_qdI77XRZBFM0REAzlnEOe61nOcQxkaIM9Qel9L7RPhcndcCPFAyYjxo6Ri5c4vOnszLDiHmeK9Ep9fRE5-Oz0px0ZEg_FgTUEPFPo2OHQj076H1plJnFr-qPINDJkUL_i7loqG1IlapOi1JSlflPH-Ebj4hhpBdMIcs-OX7jhqzmVqkIKWkpPyPEmfvY2-eA5Zpoo08YpqAJ3G1l_xEcHl28Ijkefj1mdb6E8POx41skAwXCpdfIbzWzV_VjFpmwhacS3JZNt9C4hVG4Yp-RGPEl1C7aJHRIUavAmoRHaXbfG20zzv5Zu0P5PcopDszzoqVfZpzc.GCt35DWTurtP-QaIL5aBSQ",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AccessTokens/YKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.access_tokens("YKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
|
# -*- coding: utf-8 -*-
import logging
import unittest
import time
from uuid import uuid4, UUID
from cassandra.query import dict_factory
from caes.client import CassandraClient
from caes.test.utils import random_string
logging.basicConfig(level=logging.DEBUG)
class CassandraClientTestCase(unittest.TestCase):
def tearDown(self):
query = """
DROP KEYSPACE %s
""" % self.keyspace
session = self.cclient._cluster.connect(self.keyspace)
session.execute(query)
session.shutdown()
def setUp(self):
self.keyspace = random_string().lower()
self.data_column_family = random_string().lower()
self.cclient = CassandraClient(self.keyspace, self.data_column_family)
session = self.cclient._cluster.connect()
query = """
CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
""" % self.keyspace
session.execute(query)
session.shutdown()
session = self.cclient._cluster.connect(self.keyspace)
query = """
CREATE TABLE %s (
%s int,
timestamp int,
%s uuid,
PRIMARY KEY(%s, timestamp)
);
""" % (self.cclient._timeseries_column_family,
self.cclient._timeseries_id_field_name,
self.cclient._data_id_field_name,
self.cclient._timeseries_id_field_name)
session.execute(query)
query = """
CREATE TABLE %s (
%s uuid,
vint int,
vstring text,
PRIMARY KEY(%s)
);
""" % (self.data_column_family,
self.cclient._data_id_field_name,
self.cclient._data_id_field_name)
session.execute(query)
query = """
CREATE TABLE vint_by_did (
%s uuid,
vint int,
PRIMARY KEY(%s, vint)
);
""" % (self.cclient._data_id_field_name,
self.cclient._data_id_field_name)
session.execute(query)
session.shutdown()
def test_write_doc(self):
data = dict(vint=1, vstring="Hi")
did = uuid4()
timestamp = int(time.time())
self.cclient.write([(data, did, timestamp)])
self.cclient.flush()
query = """
SELECT *
FROM %s
WHERE did = ?
""" % self.data_column_family
session = self.cclient._cluster.connect(self.keyspace)
session.row_factory = dict_factory
prepared = session.prepare(query)
results = session.execute(prepared, (did,))
session.shutdown()
self.assertNotEqual(0, len(results))
self.assertDictContainsSubset(data, results[0])
def test_write_doc_none(self):
did = uuid4()
timestamp = int(time.time())
self.cclient.write([(None, did, timestamp)])
self.cclient.flush()
query = """
SELECT *
FROM %s
WHERE did = ?
""" % self.data_column_family
session = self.cclient._cluster.connect(self.keyspace)
session.row_factory = dict_factory
prepared = session.prepare(query)
results = session.execute(prepared, (did,))
session.shutdown()
self.assertEqual(0, len(results))
def test_extra_insert_query(self):
data = dict(vint=1, vstring="Hi")
did = uuid4()
timestamp = int(time.time())
self.cclient._insert_query = """
INSERT INTO vint_by_did (did, vint)
VALUES (%(did)s, %(vint)s)
"""
self.cclient.write([(data, did, timestamp)])
self.cclient.flush()
query = """
SELECT *
FROM vint_by_did
WHERE did = ?
"""
session = self.cclient._cluster.connect(self.keyspace)
session.row_factory = dict_factory
prepared = session.prepare(query)
results = session.execute(prepared, (did,))
session.shutdown()
print(results)
self.assertEqual(1, len(results))
self.assertEqual(data.get('vint'), results[0].get('vint'))
self.assertEqual(did, results[0].get('did'))
def test_latest(self):
session = self.cclient._cluster.connect(self.keyspace)
params = dict(keyspace=self.keyspace,
ts_family=self.cclient._timeseries_column_family,
dt_family=self.cclient._data_column_family,
ts_id_name=self.cclient._timeseries_id_field_name,
did_name=self.cclient._data_id_field_name,
ts_field_name=self.cclient._timestamp_field_name,
data_columns=None,
data_values=None)
data1 = dict(vint=2, vstring="Hello")
did1 = uuid4()
t1 = int(time.time())
kv1 = zip(*data1.iteritems())
params['data_columns'] = ", ".join(kv1[0])
params['data_values'] = ", ".join("?" for _ in range(len(kv1[1])))
query = """
BEGIN BATCH
INSERT INTO %(ts_family)s (%(ts_id_name)s, %(ts_field_name)s, %(did_name)s) VALUES (?, ?, ?)
INSERT INTO %(dt_family)s (%(did_name)s, %(data_columns)s) VALUES (?, %(data_values)s)
APPLY BATCH;
""" % params
prepared = session.prepare(query)
session.execute(prepared, (0, t1, did1) + (did1, ) + tuple(kv1[1]))
time.sleep(1)
tm = int(time.time())
time.sleep(1)
data2 = dict(vint=1, vstring="Bye")
did2 = uuid4()
t2 = int(time.time())
kv2 = zip(*data2.iteritems())
params['data_columns'] = ", ".join(kv1[0])
params['data_values'] = ", ".join("?" for _ in range(len(kv2[1])))
query = """
BEGIN BATCH
INSERT INTO %(ts_family)s (%(ts_id_name)s, %(ts_field_name)s, %(did_name)s) VALUES (?, ?, ?)
INSERT INTO %(dt_family)s (%(did_name)s, %(data_columns)s) VALUES (?, %(data_values)s)
APPLY BATCH;
""" % params
prepared = session.prepare(query)
session.execute(prepared, (0, t2, did2) + (did2, ) + tuple(kv2[1]))
self.cclient.flush()
results3 = self.cclient.latest(t2 + 1)
results2 = self.cclient.latest(t2)
resultsm = self.cclient.latest(tm)
results1 = self.cclient.latest(t1)
self.assertEqual(len(results1), 2)
self.assertEqual(len(resultsm), 1)
self.assertEqual(len(results2), 1)
self.assertEqual(len(results3), 0)
self.assertIn(did1, [did for _, did, _ in results1])
self.assertNotIn(did1, [did for _, did, _ in resultsm])
self.assertNotIn(did1, [did for _, did, _ in results2])
self.assertNotIn(did1, [did for _, did, _ in results3])
self.assertIn(did2, [did for _, did, _ in results1])
self.assertIn(did2, [did for _, did, _ in resultsm])
self.assertIn(did2, [did for _, did, _ in results2])
self.assertEqual(len(results3), 0)
session.shutdown()
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CassandraClientTestCase)
|
from .packet_meta_type import PacketMetaType
from .tlv_type import TLVType
from .packet_tlv_type import PacketTLVType
|
#!/usr/bin/env PYTHONIOENCODING=UTF-8 python3
# <bitbar.title>Icinga2 Advanced</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author>Sebastian Czoch</bitbar.author>
# <bitbar.author.github>SebastianCzoch</bitbar.author.github>
# <bitbar.desc>Icinga2 monitoring BitBar plugin</bitbar.desc>
# <bitbar.image>https://github.com/SebastianCzoch/icinga2-bitbar/blob/master/assets/bitbar2.png?raw=true</bitbar.image>
# <bitbar.dependencies>python3,requests</bitbar.dependencies>
# <bitbar.abouturl>https://github.com/SebastianCzoch/icinga2-bitbar</bitbar.abouturl>
import requests, json, os, sys
from requests.packages.urllib3.exceptions import InsecureRequestWarning
config = {
"icinga2_address": "https://example.com",
"icinga2_port": 5665,
"icinga2_user": "root",
"icinga2_password": "admin",
"verify_ssl": False,
}
STATE_SERVICE_OK = 0
STATE_SERVICE_WARNING = 1
STATE_SERVICE_CRITICAL = 2
STATE_SERVICE_UNKNOWN = 3
STATE_HOST_UP = 0
STATE_HOST_DOWN = 1
SCRIPT_PATH = os.path.realpath(__file__)
COLOR_OK = "#009933"
COLOR_WARNING = "#ff9900"
COLOR_UNKNOWN = "#660066"
COLOR_CRITICAL = "#ff0000"
if not config["verify_ssl"]:
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def __get_hosts():
return __make_get("/v1/objects/hosts")
def __get_services():
return __make_get("/v1/objects/services")
def __make_post(path, data=None):
return __make_request(method="POST", path=path, data=data)
def __make_get(path):
return __make_request(path=path)
def __exit_with_error(e):
print("ERROR | color=red")
print(e)
sys.exit(1)
def __make_request(method="GET", path="/", data=None):
try:
params = {**config, **{"path": path}}
headers = {"Accept": "application/json"}
if method == "GET":
headers = {**headers, **{"X-HTTP-Method-Override": "GET"}}
r = requests.post(
url="{icinga2_address}:{icinga2_port}{path}".format(**params),
headers=headers,
verify=config["verify_ssl"],
auth=(config["icinga2_user"], config["icinga2_password"]),
json=data,
)
if r.status_code != 200:
__exit_with_error(r)
return r.json()["results"]
except Exception as e:
__exit_with_error(e)
def __filter_by_state(objects, state):
return [i for i in objects if i["attrs"]["state"] == state]
def __filter_by_ack(objects, is_acked):
return [i for i in objects if i["attrs"]["acknowledgement"] == int(is_acked)]
def __get_color_for_item(item):
if item["type"] == "Service":
if item["attrs"]["state"] == STATE_SERVICE_CRITICAL:
return COLOR_CRITICAL
if item["attrs"]["state"] == STATE_SERVICE_UNKNOWN:
return COLOR_UNKNOWN
if item["attrs"]["state"] == STATE_SERVICE_WARNING:
return COLOR_WARNING
if item["type"] == "Host":
if item["attrs"]["state"] == STATE_HOST_DOWN:
return COLOR_CRITICAL
return COLOR_OK
def __print_service(item):
print(
"{} - {} | color={}".format(
item["attrs"]["display_name"],
item["attrs"]["host_name"],
__get_color_for_item(item),
)
)
print("--{}".format(item["attrs"]["last_check_result"]["output"]))
print(
'--Acknowledge | bash={} param2=ack param3=service param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
print(
'--Check now | bash={} param2=recheck param3=service param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
def __print_service_acked(item):
print(
"--{} - {} | color={}".format(
item["attrs"]["display_name"],
item["attrs"]["host_name"],
__get_color_for_item(item),
)
)
print("----{}".format(item["attrs"]["last_check_result"]["output"]))
print(
'----Remove acknowledgement | bash={} param2=remove_ack param3=service param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
print(
'----Check now | bash={} param2=recheck param3=service param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
def __print_host(item):
print(
"{} | color={}".format(
item["attrs"]["display_name"], __get_color_for_item(item)
)
)
print("--{}".format(item["attrs"]["last_check_result"]["output"]))
print(
'--Acknowledge | bash={} param2=ack param3=host param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
print(
'--Check now | bash={} param2=recheck param3=host param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
def __print_host_acked(item):
print(
"--{} | color={}".format(
item["attrs"]["display_name"], __get_color_for_item(item)
)
)
print("----{}".format(item["attrs"]["last_check_result"]["output"]))
print(
'----Remove acknowledgement | bash={} param2=remove_ack param3=host param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
print(
'----Check now | bash={} param2=recheck param3=host param4="{}" terminal=false refresh=true'.format(
SCRIPT_PATH, item["attrs"]["__name"]
)
)
def __recheck(name, kind):
    __make_post("/v1/actions/reschedule-check?{}={}".format(kind, name))
def __ack(name, kind):
    __make_post(
        "/v1/actions/acknowledge-problem?{}={}".format(kind, name),
        data={"author": config["icinga2_user"], "comment": " "},
    )
def __remove_ack(name, kind):
    __make_post("/v1/actions/remove-acknowledgement?{}={}".format(kind, name))
if "ack" in sys.argv:
__ack(sys.argv[3], sys.argv[2])
sys.exit(0)
if "remove_ack" in sys.argv:
__remove_ack(sys.argv[3], sys.argv[2])
sys.exit(0)
if "recheck" in sys.argv:
__recheck(sys.argv[3], sys.argv[2])
sys.exit(0)
hosts = __get_hosts()
services = __get_services()
critical_services = __filter_by_ack(
__filter_by_state(services, STATE_SERVICE_CRITICAL), False
)
critical_services_acked = __filter_by_ack(
__filter_by_state(services, STATE_SERVICE_CRITICAL), True
)
unknown_services = __filter_by_ack(
__filter_by_state(services, STATE_SERVICE_UNKNOWN), False
)
unknown_services_acked = __filter_by_ack(
__filter_by_state(services, STATE_SERVICE_UNKNOWN), True
)
warning_services = __filter_by_ack(
__filter_by_state(services, STATE_SERVICE_WARNING), False
)
warning_services_acked = __filter_by_ack(
__filter_by_state(services, STATE_SERVICE_WARNING), True
)
down_hosts = __filter_by_ack(__filter_by_state(hosts, STATE_HOST_DOWN), False)
down_hosts_acked = __filter_by_ack(__filter_by_state(hosts, STATE_HOST_DOWN), True)
main_color = COLOR_OK
if len(warning_services) > 0:
main_color = COLOR_WARNING
if len(unknown_services) > 0:
main_color = COLOR_UNKNOWN
if len(critical_services) + len(down_hosts) > 0:
main_color = COLOR_CRITICAL
print(
"D: {} C: {} U: {} W: {} | color={}".format(
len(down_hosts),
len(critical_services),
len(unknown_services),
len(warning_services),
main_color,
)
)
print("---")
print("Hosts: {}".format(len(hosts)))
print("Services: {}".format(len(services)))
# Hosts down
if len(down_hosts) + len(down_hosts_acked):
print("---")
print("Host problems")
[__print_host(s) for s in down_hosts]
if len(down_hosts_acked) != 0:
print("Acknowledged hosts down {}".format(len(down_hosts_acked)))
[__print_host_acked(s) for s in down_hosts_acked]
if (
len(critical_services)
+ len(critical_services_acked)
+ len(unknown_services)
+ len(unknown_services_acked)
+ len(warning_services)
+ len(warning_services_acked)
> 0
):
print("---")
print("Service problems")
# Critical services
if len(critical_services) + len(critical_services_acked):
[__print_service(s) for s in critical_services]
if len(critical_services_acked) != 0:
print("Acknowledged critical services {}".format(len(critical_services_acked)))
[__print_service_acked(s) for s in critical_services_acked]
# Unknown services
if len(unknown_services) + len(unknown_services_acked):
[__print_service(s) for s in unknown_services]
if len(unknown_services_acked) != 0:
print("Acknowledged unknown services {}".format(len(unknown_services_acked)))
[__print_service_acked(s) for s in unknown_services_acked]
# Warning services
if len(warning_services) + len(warning_services_acked):
[__print_service(s) for s in warning_services]
if len(warning_services_acked) != 0:
print("Acknowledged warning services {}".format(len(warning_services_acked)))
[__print_service_acked(s) for s in warning_services_acked]
print("---")
print("Refresh | refresh=true")
|
#!/usr/bin/env python
import asyncio
import websockets
async def consumer_handler():
async with websockets.connect('ws://shell.127-0-0-101.nip.io') as websocket:
async for data in websocket:
print(f'Received: {data}')
asyncio.get_event_loop().run_until_complete(consumer_handler())
|
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required
@login_required
def afterLogin(request):
# Get user object after login
user = request.user
# Redirect to different pages depending on the user role
if user.hasRole('ROLE_ADMIN'):
# Redirect to the wealthbot admin page
redirectUrl = 'rx_admin_homepage'
else:
if user.hasRole('ROLE_RIA') or user.hasRole('ROLE_RIA_USER'):
# Redirect to the RIA page
if ('wealthbot_ria_view_client_id' in request.session):
redirectUrl = 'rx_ria_dashboard_show_client'
else:
redirectUrl = getRouteForRia(user)
elif user.hasRole('ROLE_CLIENT'):
# Redirect to the client page
redirectUrl = getRouteForClient(user=user, session=request.session)
elif user.hasRole('ROLE_SLAVE_CLIENT'):
# Redirect to the client page
redirectUrl = getSessionRedirectUrl(request.session)
if redirectUrl is not None:
removeSessionRedirectUrl(request.session)
else:
redirectUrl = 'rx_client_dashboard'
else:
# Redirect to the landing page for anonymous user
redirectUrl = 'rx_user_homepage'
return redirect(redirectUrl)
def getRouteForRia(user):
return 'rx_ria_dashboard'
def getRouteForClient(user, session):
# Check if session has stored redirectUrl
redirectUrl = getSessionRedirectUrl(session)
if redirectUrl is not None:
removeSessionRedirectUrl(session)
return redirectUrl
# Return the redirect label from the given registration_step
if hasattr(user, 'profile'):
return {
0 : 'rx_client_profile_step_one',
1 : 'rx_client_profile_step_two',
2 : 'rx_client_profile_step_three',
3 : 'rx_client_portfolio',
4 : 'rx_client_portfolio',
5 : 'rx_client_transfer',
6 : 'rx_client_transfer',
7 : 'rx_client_dashboard',
}.get(user.profile.registration_step, 'rx_user_homepage')
return 'rx_client_profile_step_one'
def getSessionRedirectUrl(session):
if 'redirect_url' in session:
return session['redirect_url']
def removeSessionRedirectUrl(session):
del session['redirect_url']
|
from __future__ import division
from ..base import BaseScoreType
from .util import _filter_y_pred
class DetectionBaseScoreType(BaseScoreType):
"""Common abstract base type for detection scores.
Implements `__call__` by selecting all prediction detections with
a confidence higher than `conf_threshold`. It assumes that the child
class implements `detection_score`.
"""
conf_threshold = 0.5
def __call__(self, y_true, y_pred, conf_threshold=None):
if conf_threshold is None:
conf_threshold = self.conf_threshold
y_pred_above_confidence = _filter_y_pred(y_pred, conf_threshold)
return self.detection_score(y_true, y_pred_above_confidence)
class DetectionBaseIOUScoreType(DetectionBaseScoreType):
def __init__(self, name=None, precision=3, conf_threshold=0.5,
minipatch=None, iou_threshold=0.5):
if name is None:
self.name = '{name}(IOU={iou_threshold})'.format(
name=self._name, iou_threshold=iou_threshold)
else:
self.name = name
self.precision = precision
self.conf_threshold = conf_threshold
self.minipatch = minipatch
self.iou_threshold = iou_threshold
|
"""Phil's pyGame Utilities
"""
__version__ = '0.14'
# vim: set filetype=python sts=4 sw=4 noet si :
|
import requests
import emoji
from datetime import datetime
import datetime as dt
import discord
from discord import Embed
from django.utils import timezone
from asgiref.sync import sync_to_async
from src.utils import (
createReminder, deleteReminder, getFutureEvents, modifyReminder, displayResult, _asChannel as _, parseVote)
from src.settings import UNSPLASH_API
from decorators.log_this import log_this
from decorators.requires_parameters import requires_parameters
from exceptions.bad_format_exception import BadFormatException
from singleton.command_registry import CommandRegistry
from singleton.cog import ReminderCog
from singleton.constants import Constants
registry = CommandRegistry.getInstance()
constants = Constants.getInstance()
@requires_parameters(nb_parameters=5)
@log_this
@registry.register(
command="addreminder",
description="Adds a reminder",
help="date(jj/mm/yyyy) hour(HH:MM) name(no space) duration(HH:MM) peopletoremind(@role)"
)
async def addReminder(parameters, channel):
"""Adds a reminder in the database
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
try:
start_time = datetime.strptime(
"{} {}".format(parameters[0], parameters[1]), "%d/%m/%Y %H:%M"
)
name = parameters[2].lower()
hours, minutes = parameters[3].split(":")
duration = dt.timedelta(hours=int(hours), minutes=int(minutes))
    except ValueError as e:
        await channel.send("Valeur pas valide : {}".format(e))
        return
people_to_remind = " ".join(parameters[4:])
start_time = timezone.make_aware(start_time)
await sync_to_async(createReminder)(
name=name,
start_time=start_time,
duration=duration,
people_to_remind=people_to_remind,
channel_id=_(channel).id,
server_id=_(channel).guild.id,
)
message = "Bert a ajouté l'évenement **{}** le **{}** (pour {})".format(
name, start_time.strftime("%d/%m/%Y à %H:%M"), people_to_remind
)
await channel.send(message)
@requires_parameters
@log_this
@registry.register(
command="delreminder",
description="Deletes a reminder",
help="name"
)
async def delReminder(parameters, channel):
"""Deletes a reminder
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
name = parameters[0]
guild_id = _(channel).guild.id
result = await sync_to_async(deleteReminder)(name, guild_id)
await displayResult(channel, result)
@requires_parameters(nb_parameters=3)
@log_this
@registry.register(
command="modreminder",
description="Modifies a field of a reminder",
help="name parameter(name|start_date|duration|channel|allow_dp) new_value"
)
async def modReminder(parameters, channel):
"""Modifies the selected field from a reminder
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
name = parameters[0]
guild_id = _(channel).guild.id
field = parameters[1]
value = " ".join(parameters[2:])
result = await sync_to_async(modifyReminder)(
name=name, server_id=guild_id, field=field, value=value
)
await displayResult(channel, result)
@requires_parameters
@log_this
@registry.register(
command="deathping",
description="Pings a person every two seconds until stopped",
help="[@someone]"
)
async def deathping(parameters, channel):
"""Launches a deathping on the given user (The bot will ping the user every two seconds)
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
uids = parameters
for uid in uids:
if uid.startswith("<") and uid.endswith(">"):
ReminderCog.getInstance().toBePinged.append((uid, _(channel).id))
await channel.send(f"Gonna ping the shit out of {uid}\n{constants.deathping_gif}")
@requires_parameters
@log_this
@registry.register(
command="stopping",
description="Stops pinging a person",
help="[@someone]"
)
async def stopping(parameters, channel):
"""Stops the deathping on the selected user
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
uids = parameters
cog = ReminderCog.getInstance()
for uid in uids:
uid = uid.replace("!", "")
if uid.startswith("<") and uid.endswith(">"):
if (uid, _(channel).id) in cog.toBePinged:
del cog.toBePinged[cog.toBePinged.index((uid, _(channel).id))]
await channel.send(f"Stopping to ping the shit out of {uid}")
else:
await channel.send(
f"{uid} is not in the list of person to deathing in this channel"
)
@log_this
@registry.register(
command="getfuture",
description="Shows a list of future reminders",
help="[hours|days|weeks] value"
)
async def getFuture(parameters, channel):
"""Returns the future events occuring in the given period of time
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
if len(parameters) == 0:
field = "days"
value = "7"
else:
field = parameters[0]
value = parameters[1]
if not value.isdigit():
await channel.send(f"La valeur {value} doit être chiffre")
return
value = int(value)
future_events = await sync_to_async(getFutureEvents)(
name=field, value=value, guild=_(channel).guild.id
)
for event in future_events:
await channel.send(
f"```Événement : {event['name']}\n Début : {event['start_time']}\n Durée : {event['duration']}\n\n```"
)
if len(future_events) == 0:
await channel.send("Bert a pas trouvé événements dans période donnée")
@log_this
@registry.register(
command="morsty",
description="? ? ?",
help="? ? ?"
)
async def morsty(_parameters, channel):
"""Morsty's a mystery"""
await channel.send(
"""```
___
.-9 9 `\\ Is it
=(:(::)= ; Binary ?
Who |||| \\
am |||| `-.
I ? ,\\|\\| `,
/ \\ What's life ?
; `'---.,
| `\\
; / |
Is it \\ | /
morse ? ) \\ __,.--\\ /
.-' \\,..._\\ \\` .-' .-'
`-=`` `: | /-/-/`
`.__/
```"""
)
@log_this
@registry.register(
command="help",
description="Prints help messages",
help="[command]"
)
async def hjelp(parameters, channel):
"""Displays help messages on the commands
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
if len(parameters) >= 1:
for command in parameters:
if (commandWrapper := registry.get(command)) is not None:
await channel.send(embed=commandWrapper.asEmbed())
else:
await channel.send(f"Commande '{command}' pas dans aide de bert")
else:
help_msg = Embed(title="Help", description="Help for the functions")
for command in registry.commands:
help_msg.add_field(**command.asEmbedPart)
await channel.send(embed=help_msg)
@requires_parameters
@log_this
@registry.register(
command="pic",
description="Shows a random image with the given tags",
help="[tag]"
)
async def pic(parameters, channel):
"""Shows a random pic using the given tag
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
query = " ".join(parameters)
payload = {"client_id": UNSPLASH_API, "query": query}
response = requests.get("https://api.unsplash.com/photos/random", params=payload)
response = response.json()
author = response['user']['name']
author_url = f"https://unsplash.com/@{response['user']['username']}?utm_source=Bert&utm_medium=referral"
unsplash_url = "https://unsplash.com/?utm_source=Bert&utm_medium=referral"
em = discord.Embed(
title=response["alt_description"],
description=f"Picture by [{author}]({author_url}) on [Unsplash]({unsplash_url})",
)
em.set_image(url=response["urls"]["small"])
em.set_author(
name=response["user"]["name"],
url=f"https://unsplash.com/@{response['user']['username']}?utm_source=Bert&utm_medium=referral",
)
await channel.send(embed=em)
@requires_parameters
@log_this
@registry.register(
command="vote",
description="Proposes a vote with the given options",
help="\"Question\" \"proposition 0\" ... \"proposition 10\""
)
async def vote(parameters, channel):
"""Creates a vote embed
Args:
parameters (list): The list of parameters required for the command to work
channel (discord.channel): The channel in which the command has been done
"""
word_num = [
"zero", "one", "two", "three", "four",
"five", "six", "seven", "eight", "nine",
]
word_emojis = [f":keycap_{x}:" for x in range(10)]
try:
parsed = parseVote(parameters, not type(channel) is discord.channel)
except BadFormatException as e:
await channel.send(str(e))
return
question = parsed[0]
responses = parsed[1:]
em = discord.Embed(
title=question,
description="React to this message to vote",
)
for i, response in enumerate(responses):
em.add_field(name=response, value=f":{word_num[i]}:")
message = await channel.send(embed=em)
for i in range(len(responses)):
await message.add_reaction(emoji.emojize(word_emojis[i]))
|
# coding: utf-8
import os
import wfile
import wdfproc
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.font_manager._rebuild() # clear the font cache
plt.rcParams['font.family'] = 'IPAGothic' # use the installed IPA Gothic font
import pydotplus
from sklearn.tree import export_graphviz, plot_tree
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
#from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
##################################################
# Remove the columns in the given list
##################################################
def remove_cols(df, remove_columns):
for remove_col in remove_columns:
remove_cols = [col for col in df.columns if(remove_col in col)]
df = df.drop(columns=remove_cols)
return df
##################################################
# Fetch the ground weather data
##################################################
def get_ground_weather():
    # Get the current working directory
    cwd = os.getcwd()
    # Load the ground weather data
ground_weather_csv = 'ground_weather.csv'
if os.path.isfile(ground_weather_csv):
ground_df = pd.read_csv(ground_weather_csv, index_col=0, parse_dates=[1])
else:
ground_dir = os.path.join(cwd, 'ground_weather')
ground_df = wfile.get_ground_weather(ground_dir)
ground_df.to_csv(ground_weather_csv)
    # Convert weather symbols to numeric values
    ground_df = wdfproc.convert_symbol_to_number(ground_df)
    # Drop columns containing NaN from the ground weather data
    ground_df = wdfproc.drop_ground(ground_df)
    ground_df.to_csv('ground2.csv')
    # Convert wind speed and direction to numeric vectors
    ground_df = wdfproc.convert_wind_to_vector_ground(ground_df)
    ground_df.to_csv('ground3.csv')
    # Convert weather descriptions to integers
    ground_df = wdfproc.convert_weather_to_interger(ground_df)
    ground_df.to_csv('ground4.csv')
    # Convert cloud cover to floating point
    ground_df = wdfproc.convert_cloud_volume_to_float(ground_df)
    ground_df.to_csv('ground5.csv')
    # Classify the weather using the given boundary values
    # - Mito is split into 3 classes, the other stations into ○ classes
weather_cols = [col for col in ground_df.columns if('天気' in col)]
weather_cols.pop( weather_cols.index('Mito_天気') )
ground_df = wdfproc.replace_weather(
ground_df, columns=weather_cols,
mode=wdfproc.WeatherConvertMode.Coarse)
ground_df.to_csv('ground6.csv')
ground_df = wdfproc.replace_weather(ground_df, columns=['Mito_天気'])
ground_df.to_csv('ground7.csv')
    # Convert floating point columns to 32-bit
    ground_df = wdfproc.type_to_float32(ground_df)
    ground_df.to_csv('ground8.csv')
    # Remove unneeded columns
ground_df = remove_cols(
ground_df,
# [ '現地気圧', '海面気圧', '気温', '露点温度', '蒸気圧', '日照時間',
# '降雪', '積雪', '雲量', '視程', '全天日射', '降水量', '風速' ]
[ '現地気圧', '海面気圧', '気温', '露点温度', '蒸気圧', '日照時間',
'降雪', '積雪', '雲量', '視程', '全天日射', '降水量' ]
)
print(ground_df.info())
return ground_df
##################################################
# Fetch the upper-air weather data
##################################################
def get_highrise_weather():
    # Get the current working directory
    cwd = os.getcwd()
    # Load the upper-air weather data
highrise_weather_csv = 'highrise_weather.csv'
if os.path.isfile(highrise_weather_csv):
highrise_df = pd.read_csv(highrise_weather_csv, index_col=0, parse_dates=[1])
else:
highrise_dir = os.path.join(cwd, 'highrise_weather')
highrise_df = wfile.get_highrise_weather(highrise_dir)
highrise_df.to_csv(highrise_weather_csv)
    # Remove unneeded data from the upper-air weather data
    #highrise_df = wdfproc.drop_higirise(highrise_df)
    #highrise_df.to_csv('highrise2.csv')
    # Convert wind speed and direction to numeric vectors
    highrise_df = wdfproc.convert_wind_to_vector_highrise(highrise_df)
    highrise_df.to_csv('highrise2.csv')
    # Convert floating point columns to 32-bit
    highrise_df = wdfproc.type_to_float32(highrise_df)
    highrise_df.to_csv('highrise3.csv')
    # Remove unneeded columns
highrise_df = remove_cols(
highrise_df,
[ '高度', '1000', '925', '900', '800', '600', '400']
)
print(highrise_df.info())
return highrise_df
##################################################
# Build the training data
##################################################
def make_training_data(df, y_name):
    df = df.drop(columns=['時', '日付'])
    data_x = df.drop(columns=[y_name, ])
    data_y = df[y_name]
    # Drop the last (most recent) row from the X data
    data_x = data_x.iloc[:-1,]
    # Drop the first (oldest) row from the Y data
data_y = data_y.iloc[1:,]
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, shuffle=True)
return train_x, train_y, test_x, test_y
##################################################
# Show feature importances
##################################################
def show_importance_of_feature(model, train_x):
importances = model.feature_importances_
columns = train_x.columns
feature_importances = pd.DataFrame(importances, index=train_x.columns, columns=['Importance'])
feature_importances = feature_importances.sort_values(by='Importance', ascending=False)
feature_importances.plot(kind='bar', figsize=(20,20))
plt.savefig('feature_importances.png', bbox_inches='tight')
feature_importances.to_csv('feature_importances.csv')
##################################################
# Print accuracy scores
##################################################
def print_accuracy(test_y, pred_y):
    # Print the overall accuracy
acc = accuracy_score(test_y, pred_y)
print('Score:{0:.4f}'.format(acc))
idx_sunny = np.where(test_y.values == 0)[0]
    acc_sunny = accuracy_score(test_y.iloc[idx_sunny], pred_y[idx_sunny])
    print('Score(sunny):{0:.4f}'.format(acc_sunny))
idx_cloudy = np.where(test_y.values == 1)[0]
acc_cloudy= accuracy_score(test_y.iloc[idx_cloudy], pred_y[idx_cloudy])
print('Score(cloudy):{0:.4f}'.format(acc_cloudy))
idx_rain = np.where(test_y.values == 2)[0]
acc_rain= accuracy_score(test_y.iloc[idx_rain], pred_y[idx_rain])
print('Score(rain):{0:.4f}'.format(acc_rain))
##################################################
# Main
##################################################
if __name__ == '__main__':
    # Load the ground weather data
    gdf = get_ground_weather()
    # Load the upper-air weather data
    hdf = get_highrise_weather()
    # Merge the ground and upper-air weather data
    df = pd.merge(gdf, hdf, on=('日付','時'))
    # Replace NaN values
    df = df.fillna(-9999)
    # Build the training and test data
    train_x, train_y, test_x, test_y = make_training_data(df, 'Mito_天気')
    # Create a random forest classifier
    model = RandomForestClassifier(n_estimators=1000, max_depth=20, random_state=1)
    # Train on the training data
    model.fit(train_x, train_y)
    # Visualize the feature importances
    show_importance_of_feature(model, train_x)
    # Predict on the test data
    pred_y = model.predict(test_x)
    # Print the accuracy
    print_accuracy(test_y, pred_y)
dot_data = export_graphviz(
model.estimators_[0],
feature_names=train_x.columns,
class_names=['Sunny', 'Cloud', 'Rain', 'Other'],
filled=True,
rounded=True)
graph = pydotplus.graph_from_dot_data( dot_data )
graph.write_png('tree_graphviz.png')
#fig = plt.figure(figsize=(100, 50))
#ax = fig.add_subplot()
#plot_tree(
# model.estimators_[0],
# feature_names=train_x.columns,
# ax=ax,
# class_names=['Sunny', 'Cloud', 'Rain', 'Other'],
# filled=True
#)
#plt.savefig('tree_plt.png', bbox_inches='tight')
|
from zeus.utils.trees import build_tree
def test_build_tree():
test_names = [
"foo.bar.bar",
"foo.bar.biz",
"foo.biz",
"blah.brah",
"blah.blah.blah",
]
result = build_tree(test_names, min_children=2)
assert sorted(result) == ["blah", "foo"]
result = build_tree(test_names, min_children=2, parent="foo")
assert sorted(result) == ["foo.bar", "foo.biz"]
result = build_tree(test_names, min_children=2, parent="foo.biz")
assert result == set()
|
"""
1820. Maximum Number of Accepted Invitations
Medium
There are m boys and n girls in a class attending an upcoming party.
You are given an m x n integer matrix grid, where grid[i][j] equals 0 or 1. If grid[i][j] == 1, then that means the ith boy can invite the jth girl to the party. A boy can invite at most one girl, and a girl can accept at most one invitation from a boy.
Return the maximum possible number of accepted invitations.
Example 1:
Input: grid = [[1,1,1],
[1,0,1],
[0,0,1]]
Output: 3
Explanation: The invitations are sent as follows:
- The 1st boy invites the 2nd girl.
- The 2nd boy invites the 1st girl.
- The 3rd boy invites the 3rd girl.
Example 2:
Input: grid = [[1,0,1,0],
[1,0,0,0],
[0,0,1,0],
[1,1,1,0]]
Output: 3
Explanation: The invitations are sent as follows:
-The 1st boy invites the 3rd girl.
-The 2nd boy invites the 1st girl.
-The 3rd boy invites no one.
-The 4th boy invites the 2nd girl.
Constraints:
grid.length == m
grid[i].length == n
1 <= m, n <= 200
grid[i][j] is either 0 or 1.
"""
from typing import List
class Solution:
def maximumInvitations(self, grid: List[List[int]]) -> int:
M, N = len(grid), len(grid[0])
matching = [-1] * N # girls' mate
def dfs(node, seen):
for nei in range(N): # ask each girl
if grid[node][nei] and not seen[nei]:
seen[nei] = True # mark her as asked
if matching[nei] == -1 or dfs(matching[nei], seen):
matching[nei] = node
return True
return False
res = 0
for i in range(M):
seen = [False] * N
if dfs(i, seen):
res += 1
return res
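# Quick sanity check (added for illustration; the grids are the two examples from
# the problem statement above, both of which should yield 3 accepted invitations).
if __name__ == "__main__":
    solver = Solution()
    print(solver.maximumInvitations([[1, 1, 1], [1, 0, 1], [0, 0, 1]]))  # expected: 3
    print(solver.maximumInvitations(
        [[1, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [1, 1, 1, 0]]))  # expected: 3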
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddlerec.core.metrics import PrecisionRecall
import paddle
import paddle.fluid as fluid
def calc_precision(tp_count, fp_count):
if tp_count > 0.0 or fp_count > 0.0:
return tp_count / (tp_count + fp_count)
return 1.0
def calc_recall(tp_count, fn_count):
if tp_count > 0.0 or fn_count > 0.0:
return tp_count / (tp_count + fn_count)
return 1.0
def calc_f1_score(precision, recall):
if precision > 0.0 or recall > 0.0:
return 2 * precision * recall / (precision + recall)
return 0.0
def get_states(idxs, labels, cls_num, weights=None, batch_nums=1):
ins_num = idxs.shape[0]
# TP FP TN FN
states = np.zeros((cls_num, 4)).astype('float32')
for i in range(ins_num):
w = weights[i] if weights is not None else 1.0
idx = idxs[i][0]
label = labels[i][0]
if idx == label:
states[idx][0] += w
for j in range(cls_num):
states[j][2] += w
states[idx][2] -= w
else:
states[label][3] += w
states[idx][1] += w
for j in range(cls_num):
states[j][2] += w
states[label][2] -= w
states[idx][2] -= w
return states
def compute_metrics(states, cls_num):
total_tp_count = 0.0
total_fp_count = 0.0
total_fn_count = 0.0
macro_avg_precision = 0.0
macro_avg_recall = 0.0
for i in range(cls_num):
total_tp_count += states[i][0]
total_fp_count += states[i][1]
total_fn_count += states[i][3]
macro_avg_precision += calc_precision(states[i][0], states[i][1])
macro_avg_recall += calc_recall(states[i][0], states[i][3])
metrics = []
macro_avg_precision /= cls_num
macro_avg_recall /= cls_num
metrics.append(macro_avg_precision)
metrics.append(macro_avg_recall)
metrics.append(calc_f1_score(macro_avg_precision, macro_avg_recall))
micro_avg_precision = calc_precision(total_tp_count, total_fp_count)
metrics.append(micro_avg_precision)
micro_avg_recall = calc_recall(total_tp_count, total_fn_count)
metrics.append(micro_avg_recall)
metrics.append(calc_f1_score(micro_avg_precision, micro_avg_recall))
return np.array(metrics).astype('float32')
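def _metrics_sanity_check():
    """Illustrative helper (added for clarity, not part of the original test suite).
    With perfect predictions every class accumulates only TP and TN, so all six
    reported values (macro/micro precision, recall and F1) should equal 1.0.
    """
    idxs = np.array([[0], [1], [2]])
    labels = np.array([[0], [1], [2]])
    states = get_states(idxs, labels, cls_num=3)
    print(compute_metrics(states, cls_num=3))  # -> [1. 1. 1. 1. 1. 1.]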
class TestPrecisionRecall(unittest.TestCase):
def setUp(self):
self.ins_num = 64
self.cls_num = 10
self.batch_nums = 3
self.datas = []
self.states = np.zeros((self.cls_num, 4)).astype('float32')
for i in range(self.batch_nums):
probs = np.random.uniform(0, 1.0, (self.ins_num,
self.cls_num)).astype('float32')
idxs = np.array(np.argmax(
probs, axis=1)).reshape(self.ins_num, 1).astype('int32')
labels = np.random.choice(range(self.cls_num),
self.ins_num).reshape(
(self.ins_num, 1)).astype('int32')
self.datas.append((probs, labels))
states = get_states(idxs, labels, self.cls_num)
self.states = np.add(self.states, states)
self.metrics = compute_metrics(self.states, self.cls_num)
self.place = fluid.core.CPUPlace()
def build_network(self):
predict = fluid.data(
name="predict",
shape=[-1, self.cls_num],
dtype='float32',
lod_level=0)
label = fluid.data(
name="label", shape=[-1, 1], dtype='int32', lod_level=0)
precision_recall = PrecisionRecall(
input=predict, label=label, class_num=self.cls_num)
return precision_recall
def test_forward(self):
precision_recall = self.build_network()
metrics = precision_recall.get_result()
fetch_vars = []
metric_keys = []
for item in metrics.items():
fetch_vars.append(item[1])
metric_keys.append(item[0])
exe = fluid.Executor(self.place)
exe.run(fluid.default_startup_program())
for i in range(self.batch_nums):
outs = exe.run(
fluid.default_main_program(),
feed={'predict': self.datas[i][0],
'label': self.datas[i][1]},
fetch_list=fetch_vars,
return_numpy=True)
outs = dict(zip(metric_keys, outs))
self.assertTrue(np.allclose(outs['[TP FP TN FN]'], self.states))
self.assertTrue(np.allclose(outs['precision_recall_f1'], self.metrics))
def test_exception(self):
self.assertRaises(Exception, PrecisionRecall)
self.assertRaises(
Exception,
PrecisionRecall,
input=self.datas[0][0],
label=self.datas[0][1],
class_num=self.cls_num)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
'''Run trac connecting through a unix stream socket using fcgi
'''
import os
import sys
if len(sys.argv) != 2 :
print('%s unix:/path/to/socket.sock' % (sys.argv[0]))
exit(1)
sockaddr = sys.argv[1]
#os.environ['TRAC_ENV'] = '/home/trac/instance'
try:
from trac.web.main import dispatch_request
import trac.web._fcgi
fcgiserv = trac.web._fcgi.WSGIServer(dispatch_request,
bindAddress = sockaddr, umask = 7)
fcgiserv.run()
except SystemExit:
raise
except Exception, e:
print 'Content-Type: text/plain\r\n\r\n',
print 'Oops...'
print
print 'Trac detected an internal error:'
print
print e
print
import traceback
import StringIO
tb = StringIO.StringIO()
traceback.print_exc(file=tb)
print tb.getvalue()
|
import pandas as pd
import pickle
import re
dict_file = "data2.txt"
dictionary = {}
CURRENT_KEY = ""
process_started = False
antonyms_started = False
output_file = "../textplainer/data/fallows.dat"
# Some entries contain multiple words that should have independent entries
# For the moment we will use the first key
key_split_pattern = " or "
# NOTES
# There are multiple keys that contain phrasal verbs, or additional grammatical
# notes in the key field. We are ignoring them for the moment, which will render
# the corresponding entry inaccessible by word lookup. There are 20-odd entries
# like this, e.g. carry on, fawn upon, pluck up.
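# Illustrative sketch of the line layout this parser assumes (reconstructed from the
# handling of "KEY:", "SYN:" and "ANT:" below; not copied from data2.txt itself):
#   KEY: abandon \v\ [See LEAVE]
#   SYN: desert, forsake, relinquish,
#   ANT: keep, retain, pursue.
# "KEY:" starts a new record, a "\x\" marker carries the part of speech, "[See X]"
# becomes a cross reference, and SYN/ANT lines (or bare continuation lines) extend
# the current synonym or antonym list.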
##################################################################
def initialise_new_record(content):
global CURRENT_KEY
global antonyms_started
mykey, ref, pos = extract_key_ref_pos(content)
if len(mykey.split(key_split_pattern))>1:
mykey = mykey.split(key_split_pattern)[0]
if pos == "":
pos = 'x'
CURRENT_KEY = mykey + "_" + pos
antonyms_started = False
dictionary[CURRENT_KEY] = {"SYN":[], "ANT":[], "REF":ref, "POS":pos}
##################################################################
def finalise():
with open(output_file, 'wb') as f:
pickle.dump(dictionary, f)
exit(1)
##################################################################
def clean(x):
    temp = re.sub(r'\{\[(.*)\]\?\}', r'\1', x)
    temp2 = re.sub(r'\.', '', temp)
    temp3 = re.sub('=', '', temp2)
    return temp3
##################################################################
def update_content(starter, content, stripped_line):
if starter == "SYN:":
update_synonyms(content)
elif starter == "ANT:":
update_antonyms(content)
else:
if antonyms_started :
update_antonyms(stripped_line)
else:
update_synonyms(stripped_line)
##################################################################
def update_synonyms(content):
update_dictionary(content,"SYN")
##################################################################
def update_antonyms(content):
global antonyms_started
antonyms_started = True
update_dictionary(content,"ANT")
##################################################################
def update_dictionary(content,section):
record = dictionary[CURRENT_KEY]
newwds = content.split(",")
newwds = list(map(str.strip,newwds))
if "" in newwds:
newwds.remove("")
curr = record[section]
curr.extend(newwds)
record[section] = curr
dictionary[CURRENT_KEY] = record
##################################################################
def extract_key_ref_pos(content):
pos = ""
refs = []
res = re.findall(r"\\[a-zA-Z]+\.?\\", content)
if len(res) != 0:
pos = res[0]
pos = pos[1]
temp = re.sub(r"\\[a-zA-Z]+\.?\\", '', content).strip()
res = re.findall(r"\[[^\]]*\]", temp)
for r in res:
ref = re.sub(r"\[see", '', r, flags=re.IGNORECASE).strip()
ref = re.sub(r"\]", '', ref).strip()
refl = ref.split(" and ")
refs.extend(refl)
keys = re.sub(r"\[.*\]", '', temp).strip()
new_key = keys.split(",")[0].strip()
if len(new_key) == 0:
if len(refs) > 0:
new_key = refs[0]
return new_key, refs, pos
##################################################################
def replace_refs(content):
return re.sub(r"\[see (.*)\]", '\\1', content, flags=re.IGNORECASE).strip()
##################################################################
with open(dict_file, "r") as f:
for line in f:
#print("PROCESSING:", line)
stripped_line = line.strip()
stripped_line = clean(stripped_line)
stripped_line = re.sub("_", ' ', stripped_line)
starter = stripped_line[0:4]
content = stripped_line[5:]
if starter == "KEY:":
process_started = True
initialise_new_record(content.lower())
elif process_started:
content = replace_refs(content).lower()
stripped_line = replace_refs(stripped_line).lower()
update_content(starter, content, stripped_line)
# NOW SAVE THE GENERATED DATA STRUCTURE
finalise()
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Helper functions used by the Monorail servlet base class."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import calendar
import datetime
import logging
import urllib
from framework import framework_bizobj
from framework import framework_helpers
from framework import permissions
from framework import template_helpers
from framework import urls
from framework import xsrf
_ZERO = datetime.timedelta(0)
class _UTCTimeZone(datetime.tzinfo):
"""UTC"""
def utcoffset(self, _dt):
return _ZERO
def tzname(self, _dt):
return "UTC"
def dst(self, _dt):
return _ZERO
_UTC = _UTCTimeZone()
def GetBannerTime(timestamp):
"""Converts a timestamp into EZT-ready data so it can appear in the banner.
Args:
timestamp: timestamp expressed in the following format:
[year,month,day,hour,minute,second]
e.g. [2009,3,20,21,45,50] represents March 20 2009 9:45:50 PM
Returns:
EZT-ready data used to display the time inside the banner message.
"""
if timestamp is None:
return None
ts = datetime.datetime(*timestamp, tzinfo=_UTC)
return calendar.timegm(ts.timetuple())
def AssertBasePermissionForUser(user, user_view):
"""Verify user permissions and state.
Args:
user: user_pb2.User protocol buffer for the user
user_view: framework.views.UserView for the user
"""
if permissions.IsBanned(user, user_view):
raise permissions.BannedUserException(
'You have been banned from using this site')
def AssertBasePermission(mr):
"""Make sure that the logged in user can view the requested page.
Args:
mr: common information parsed from the HTTP request.
Returns:
Nothing
Raises:
BannedUserException: If the user is banned.
    PermissionException: If the user does not have permission to view.
"""
AssertBasePermissionForUser(mr.auth.user_pb, mr.auth.user_view)
if mr.project_name and not CheckPerm(mr, permissions.VIEW):
logging.info('your perms are %r', mr.perms)
raise permissions.PermissionException(
'User is not allowed to view this project')
def CheckPerm(mr, perm, art=None, granted_perms=None):
"""Convenience method that makes permission checks easier.
Args:
mr: common information parsed from the HTTP request.
perm: A permission constant, defined in module framework.permissions
art: Optional artifact pb
granted_perms: optional set of perms granted specifically in that artifact.
Returns:
A boolean, whether the request can be satisfied, given the permission.
"""
return mr.perms.CanUsePerm(
perm, mr.auth.effective_ids, mr.project,
permissions.GetRestrictions(art), granted_perms=granted_perms)
def CheckPermForProject(mr, perm, project, art=None):
"""Convenience method that makes permission checks for projects easier.
Args:
mr: common information parsed from the HTTP request.
perm: A permission constant, defined in module framework.permissions
project: The project to enforce permissions for.
art: Optional artifact pb
Returns:
A boolean, whether the request can be satisfied, given the permission.
"""
perms = permissions.GetPermissions(
mr.auth.user_pb, mr.auth.effective_ids, project)
return perms.CanUsePerm(
perm, mr.auth.effective_ids, project, permissions.GetRestrictions(art))
def ComputeIssueEntryURL(mr, config):
"""Compute the URL to use for the "New issue" subtab.
Args:
mr: commonly used info parsed from the request.
config: ProjectIssueConfig for the current project.
Returns:
A URL string to use. It will be simply "entry" in the non-customized
    case. Otherwise it will be a fully qualified URL that includes some
query string parameters.
"""
if not config.custom_issue_entry_url:
return '/p/%s/issues/entry' % (mr.project_name)
base_url = config.custom_issue_entry_url
sep = '&' if '?' in base_url else '?'
token = xsrf.GenerateToken(
mr.auth.user_id, '/p/%s%s%s' % (mr.project_name, urls.ISSUE_ENTRY, '.do'))
role_name = framework_helpers.GetRoleName(mr.auth.effective_ids, mr.project)
continue_url = urllib.quote(framework_helpers.FormatAbsoluteURL(
mr, urls.ISSUE_ENTRY + '.do'))
return '%s%stoken=%s&role=%s&continue=%s' % (
base_url, sep, urllib.quote(token),
urllib.quote(role_name or ''), continue_url)
def IssueListURL(mr, config, query_string=None):
"""Make an issue list URL for non-members or members."""
url = '/p/%s%s' % (mr.project_name, urls.ISSUE_LIST)
if query_string:
url += '?' + query_string
elif framework_bizobj.UserIsInProject(mr.project, mr.auth.effective_ids):
if config and config.member_default_query:
url += '?q=' + urllib.quote_plus(config.member_default_query)
return url
|
import sys
ips = [line.strip() for line in open("ips.txt", "r")]
n = len(ips)
ins = [line.split("\t")[0] for line in ips]
outs = [line.split("\t")[1] for line in ips]
id = int(sys.argv[1])
port = int(sys.argv[2])
protocols = ["Star", "SiloGC", "TwoPLGC"]
def get_cmd(n, i):
cmd = ""
for j in range(n):
if j > 0:
cmd += ";"
if id == j:
cmd += ins[j] + ":" + str(port+i)
else:
cmd += outs[j] + ":" + str(port+i)
return cmd
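# Illustrative note (assumes ips.txt holds one "<internal_ip>\t<external_ip>" line per
# server): for id == 0 with two servers, get_cmd(2, 0) yields
# "<internal_ip_0>:<port>;<external_ip_1>:<port>", i.e. the local node binds its
# internal address while every remote node is reached via its external address.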
ix = 0
for protocol in protocols:
cmd = get_cmd(n, ix)
ix += 1
print('./bench_ycsb --logtostderr=1 --id=%d --servers="%s" --protocol=%s --partition_num=%d --threads=12 --partitioner=hash2 --read_write_ratio=90 --cross_ratio=10 --batch_flush=200' % (id, cmd, protocol, 12*n))
for protocol in protocols:
cmd = get_cmd(n, ix)
ix += 1
print('./bench_tpcc --logtostderr=1 --id=%d --servers="%s" --protocol=%s --partition_num=%d --threads=12 --partitioner=hash2 --query=mixed --neworder_dist=10 --payment_dist=15' % (id, cmd, protocol, 12*n))
|
import operator
a = 100
b = 1
print('a=',a)
print('b=',b)
print(operator.iadd(a, b))
print('a=',a)
print('b=',b)
print(operator.add(a, b))
print('a=',a)
print('b=',b)
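# Added note: for immutable ints operator.iadd(a, b) simply returns a + b and leaves
# a unchanged, which is why the values printed above never change. The in-place
# behaviour only becomes visible with mutable objects such as lists:
lst = [1, 2]
print(operator.iadd(lst, [3]))  # [1, 2, 3] -- lst itself is extended in place
print('lst=', lst)              # [1, 2, 3]
print(operator.add(lst, [4]))   # [1, 2, 3, 4] -- a new list; lst stays unchanged
print('lst=', lst)              # [1, 2, 3]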
|
class User():
def __init__(self, username, password, followers=0, following=0):
self.username = username
self.password = password
self.followers = int(followers)
self.following = int(following)
def get_username(self):
return self.username
def get_password(self):
return self.password
def get_followers(self):
return self.followers
def get_following(self):
return self.following
def __class__(self):
return User
|
#!/usr/bin/env python3
""" rrrw_instr.py
Implementation of RRRW format instructions.
"""
from pyvex.lifting.util import Type, Instruction
import bitstring
from .rtl import extend_to_32_bits, sign_extend_2
from .logger import log_this
class RRRW_Instructions(Instruction):
""" RRRW instructions:
- Insert Bit Field instruction.
op = 0x57
op2 = 0x00 (3-bit)
User Status Flags: no change.
- Insert Mask instruction:
op = 0x57
op2 = 0x01 (3-bit)
User Status Flags: no change.
- Extract Bit Field:
op = 0x57
op2 = 0x02 (3-bit)
User Status Flags: no change.
- Extract Bit Field Unsigned:
op = 0x57
op2 = 0x03 (3-bit)
User Status Flags: no change.
"""
name = 'RRRW_Instructions'
op = "{0}{1}".format(bin(5)[2:].zfill(4), bin(7)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
a = int(tmp[20:24].hex, 16)
b = int(tmp[16:20].hex, 16)
w = int(tmp[11:16].bin.zfill(8), 2)
op2 = int(tmp[8:11].bin, 2)
d = int(tmp[4:8].hex, 16)
c = int(tmp[:4].hex, 16)
if op2 == 0:
self.name = "RRRW_INSERT"
elif op2 == 1:
self.name = "RRRW_IMASK"
elif op2 == 2:
self.name = "RRPW_EXTR"
elif op2 == 3:
self.name = "RRPW_EXTR.U"
else:
self.name = "UNKNOWN"
data = {"a": a,
"b": b,
"c": c,
"w": w,
"d": d,
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_dst_reg(self):
return "d{0}".format(self.data['c'])
def get_d_d(self):
return self.get("d{0}".format(self.data['d']), Type.int_32)
def get_d_b(self):
return self.get("d{0}".format(self.data['b']), Type.int_32)
def get_d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
return self.get_d_a(), self.get_d_b(), self.get_d_d()
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
d_d = args[2]
pos = (d_d & 0x1f).cast_to(Type.int_8)
width = self.data["w"]
# undefined result if (pos + width) > 32
cond_undefined = extend_to_32_bits(((pos.cast_to(Type.int_32) + width) >> 5) == 0)
result = ""
if self.data['op2'] == 0: # INSERT
mask = (((2**width) - 1) << pos).cast_to(Type.int_32)
result = ((d_a & ~mask) | ((d_b << pos) & mask)) & cond_undefined
elif self.data['op2'] == 1: # IMASK
const_1 = self.constant(1, Type.int_32)
result_1 = ((const_1 << width)-1) << pos
result_2 = d_b << pos
result_1 = result_1 & cond_undefined
result_2 = result_2 & cond_undefined
self.put(result_1, "d{0}".format(self.data['c']+1))
self.put(result_2, "d{0}".format(self.data['c']))
elif self.data['op2'] == 2: # EXTR
mask = (1 << width) - 1
result = sign_extend_2((d_a >> pos) & mask, width) & cond_undefined
elif self.data['op2'] == 3: # EXTR.U
mask = (1 << width) - 1
result = ((d_a >> pos) & mask) & cond_undefined
return result
def commit_result(self, res):
if self.data['op2'] != 1: # IMASK PUTs its results itself.
self.put(res, self.get_dst_reg())
|
import tensorflow as tf
import numpy as np
from numpy import newaxis
from PIL import Image
import sys
import logging
import visualize_data
import matplotlib.pyplot as plt
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_score, recall_score
from nms import nms
import cv2
import meanAP
import os
import os.path as osp
from smooth_L1 import smooth_L1, decode_smooth_L1
class PixorModel(object):
def __init__(self, flags):
self.flags = flags
NUM_EPOCHS = flags.num_epochs
BATCH_SIZE = flags.batch_size
TILE_SIZE = flags.tile_size
IMAGE_SIZE = (TILE_SIZE, TILE_SIZE, 3)
LOGFILE_NAME = flags.logfile_name
NUM_CLASSES = flags.num_classes
BATCH_SIZE = flags.batch_size
GPU = flags.gpu
DATA_FILE_NAME = flags.data_path
TRAIN_BASE_PATH = os.path.join('..', DATA_FILE_NAME, 'pixor', 'train')
VAL_BASE_PATH = os.path.join('..', DATA_FILE_NAME, 'pixor', 'val')
VAL_LEN = len(os.listdir(VAL_BASE_PATH))
TILE_SIZE = flags.tile_size
#Initialize expected input for images
self.x = tf.placeholder(tf.float32, shape=(None, TILE_SIZE, TILE_SIZE, 3), name='x')
#Initialize holder for per-pixel bounding boxes
self.y_box = tf.placeholder(tf.float32, shape=(None, TILE_SIZE, TILE_SIZE, 6), name='y_box')
#Initialize holder for per-pixel labels
self.y_class = tf.placeholder(tf.int32, shape=(None, TILE_SIZE, TILE_SIZE, 1), name='y_class')
# two convolutional layers, 3x3, 32 filters
conv1 = tf.layers.conv2d(inputs=self.x, filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1, filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)
# resnet block 1
block1_shortcut = conv2
block1_shortcut_proj = tf.layers.conv2d(inputs=block1_shortcut, filters=96, kernel_size=1, strides=2, padding='same', activation=tf.nn.relu)
block1_1 = tf.layers.conv2d(inputs=conv2, filters=24, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu)
block1_2 = tf.layers.conv2d(inputs=block1_1, filters=24, kernel_size=3, padding='same', activation=tf.nn.relu)
block1_3 = tf.layers.conv2d(inputs=block1_2, filters=96, kernel_size=3, padding='same', activation=tf.nn.relu)
block1_out = block1_3 + block1_shortcut_proj
# resnet block 2 [Compressed from original version for now]
block2_shortcut = block1_out
block2_shortcut_proj = tf.layers.conv2d(inputs=block2_shortcut, filters=192, kernel_size=1, strides=2, padding='same', activation=tf.nn.relu)
block2_1 = tf.layers.conv2d(inputs=block1_out, filters=48, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu)
block2_2 = tf.layers.conv2d(inputs=block2_1, filters=48, kernel_size=3, padding='same', activation=tf.nn.relu)
block2_3 = tf.layers.conv2d(inputs=block2_2, filters=192, kernel_size=3, padding='same', activation=tf.nn.relu)
block2_out = block2_3 + block2_shortcut_proj
# skip connection from this output
skip_block2 = block2_out
# resnet block 3 [Compressed from original version for now]
block3_shortcut = block2_out
block3_shortcut_proj = tf.layers.conv2d(inputs=block3_shortcut, filters=256, kernel_size=1, strides=2, padding='same', activation=tf.nn.relu)
block3_1 = tf.layers.conv2d(inputs=block2_out, filters=64, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu)
block3_2 = tf.layers.conv2d(inputs=block3_1, filters=64, kernel_size=3, padding='same', activation=tf.nn.relu)
block3_3 = tf.layers.conv2d(inputs=block3_2, filters=256, kernel_size=3, padding='same', activation=tf.nn.relu)
block3_out = block3_3 + block3_shortcut_proj
# skip connection from this output
skip_block3 = block3_out
# resnet block 4
block4_shortcut = block3_out
block4_shortcut_proj = tf.layers.conv2d(inputs=block4_shortcut, filters=384, kernel_size=1, strides=2, padding='same', activation=tf.nn.relu)
block4_1 = tf.layers.conv2d(inputs=block3_out, filters=96, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu)
block4_2 = tf.layers.conv2d(inputs=block4_1, filters=96, kernel_size=3, padding='same', activation=tf.nn.relu)
block4_3 = tf.layers.conv2d(inputs=block4_2, filters=384, kernel_size=3, padding='same', activation=tf.nn.relu)
block4_out = block4_3 + block4_shortcut_proj
# one convolutional layer, 1x1, 196 filters
prep_upsampling = tf.layers.conv2d(inputs=block4_out, filters=196, kernel_size=1, activation=tf.nn.relu)
upsample1 = self.conv2d_transpose(input=prep_upsampling, filter_size=3, out_channels=128, stride=2, activation=tf.nn.relu)
# postprocessing to add skip connection after upsample 6
# [1x1, 128 channel convolution on skip ;; then add]
processed_skip_block3 = tf.layers.conv2d(inputs=skip_block3, filters=128, kernel_size=1, padding='same', activation=tf.nn.relu)
skipped_upsample1 = upsample1 + processed_skip_block3
# upsample 7, 96 filters, x2
upsample2 = self.conv2d_transpose(input=skipped_upsample1, filter_size=3, out_channels=96, stride=2, activation=tf.nn.relu)
# postprocessing to add skip connection after upsample 7
# [1x1, 96 channel convolution on skip ;; then add]
processed_skip_block2 = tf.layers.conv2d(inputs=skip_block2, filters=96, kernel_size=1, padding='same', activation=tf.nn.relu)
skipped_upsample2 = upsample2 + processed_skip_block2
# PLACEHOLDER UPSAMPLING
temp_final_upsample = self.conv2d_transpose(input=skipped_upsample2, filter_size=3, out_channels=96, stride=4, activation=tf.nn.relu)
# HEADER NETWORK
# four convolutional layers, 3x3, 96 filters
header1 = tf.layers.conv2d(inputs=temp_final_upsample, filters=96, kernel_size=3, padding='same', activation=tf.nn.relu)
header2 = tf.layers.conv2d(inputs=header1, filters=96, kernel_size=3, padding='same', activation=tf.nn.relu)
header3 = tf.layers.conv2d(inputs=header2, filters=96, kernel_size=3, padding='same', activation=tf.nn.relu)
header4 = tf.layers.conv2d(inputs=header3, filters=96, kernel_size=3, padding='same', activation=tf.nn.relu)
# one convolutional layer, 3x3, 1 filter
self.output_class = tf.layers.conv2d(inputs=header4, filters=NUM_CLASSES, kernel_size=3, padding='same', name='output_class')
# one convolutional layer, 3x3, 6 filters
self.output_box = tf.layers.conv2d(inputs=header4, filters=6, kernel_size=3, padding='same', name='output_box')
# print('self.output_box.shape', self.output_box.shape) #(?, 224, 224, 6)
self.get_loss()
self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.pixor_loss)
self.decode_train_step = tf.train.AdamOptimizer(1e-4).minimize(self.decode_pixor_loss)
def get_loss(self):
pos_weight = 1
neg_weight = 1
class_loss_result = self.custom_cross_entropy(class_labels=self.y_class, unnormalized_class_preds=self.output_class, class_weights=(pos_weight, neg_weight))
self.class_loss = 10 * class_loss_result
smooth_L1_loss = 100 * smooth_L1(box_labels=self.y_box, box_preds=self.output_box, class_labels=self.y_class)
self.decoded_output = visualize_data.tf_pixor_to_corners(self.output_box)
# print('decoded_output.shape', decoded_output.shape) # (?, 224, 224, 4, 2)
self.decoded_labels = visualize_data.tf_pixor_to_corners(self.y_box)
self.decode_loss = 100 * decode_smooth_L1(box_labels=self.decoded_labels, box_preds=self.decoded_output, class_labels=self.y_class)
self.box_loss = smooth_L1_loss
self.pixor_loss = self.class_loss + self.box_loss
self.decode_pixor_loss = self.class_loss + self.decode_loss
return self.box_loss, self.pixor_loss, self.decode_loss, self.decode_pixor_loss
# alpha is the weight of the less frequent class
def custom_cross_entropy(self, class_labels, unnormalized_class_preds, class_weights, alpha=0.25, gamma=2.0):
squeezed_y = tf.squeeze(class_labels, -1)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=unnormalized_class_preds, labels=squeezed_y)
classify_loss = tf.reduce_mean(loss)
return classify_loss
""" Standard transposed convolutional layer."""
    def conv2d_transpose(self, input, filter_size, out_channels, stride, activation=None):
return tf.layers.conv2d_transpose(inputs=input, filters=out_channels,
kernel_size=filter_size, strides=stride, padding='same', activation=activation)
def train_one_epoch(self, epoch, sess, batch_size, train_path):
per_epoch_train_loss = 0
per_epoch_box_loss = 0
per_epoch_class_loss = 0
TRAIN_LEN = len(os.listdir(os.path.join(train_path, 'images')))
batch_indices = np.arange(TRAIN_LEN)
np.random.shuffle(batch_indices)
# RIGHT NOW IF DOESN'T PERFECTLY DIVIDE IT DOESN'T COVER REMAINING, MIGHT WANT TO CHANGE THIS
num_batches = TRAIN_LEN // batch_size
# num_batches = 1
for batch_number in range(0, num_batches):
start_idx = batch_number * batch_size
end_idx = start_idx + batch_size
batch_images, batch_boxes, batch_classes = get_batch(start_idx, self.flags, train_path)
_, b_loss, c_loss, batch_train_loss, box_preds, unnorm_class_preds = \
sess.run([self.decode_train_step, self.decode_loss, self.class_loss, self.decode_pixor_loss, self.output_box, self.output_class],
feed_dict =
{self.x: batch_images,
self.y_box: batch_boxes,
self.y_class: batch_classes})
per_epoch_train_loss += batch_train_loss
per_epoch_box_loss += b_loss
per_epoch_class_loss += c_loss
return box_preds, unnorm_class_preds, per_epoch_train_loss, per_epoch_box_loss, per_epoch_class_loss
def evaluate(self, sess, val_base_path):
val_images, val_boxes, val_classes = get_batch(0, self.flags, val_base_path)
val_loss, box_preds, unnorm_class_preds = sess.run([self.decode_pixor_loss, self.output_box, self.output_class], feed_dict = {self.x: val_images, self.y_box: val_boxes, self.y_class: val_classes})
return val_loss, box_preds, unnorm_class_preds, val_classes
    def evaluate_one(self, sess, val_base_path):
        p = osp.join(val_base_path, 'images', '1.jpg')
        im = Image.open(p)
        im_arr = np.array(im)
        # Normalize with the dataset statistics saved alongside the training data
        mean = np.load('mean.npy')
        std = np.load('std.npy')
        im_arr = (im_arr - mean) / std
        val_classes = np.load(osp.join(val_base_path, 'class_annotations', '1.npy'))
        if(len(val_classes.shape) == 2):
            val_classes = val_classes[:,:,newaxis]
        # Load the per-pixel box annotations for the same tile
        val_boxes = np.load(osp.join(val_base_path, 'box_annotations', '1.npy'))
        # Add a batch dimension so the arrays match the placeholder shapes
        im_arr = im_arr[newaxis, ...]
        val_boxes = val_boxes[newaxis, ...]
        val_classes = val_classes[newaxis, ...]
        val_loss, box_preds, unnorm_class_preds = sess.run([self.decode_pixor_loss, self.output_box, self.output_class], feed_dict = {self.x: im_arr, self.y_box: val_boxes, self.y_class: val_classes})
        return val_loss, box_preds, unnorm_class_preds, val_classes
def get_tile_and_label(index, flags, norm=True):
"""
Method 2)
Gets the tile and label associated with data index.
Returns:
(tile_array, dictionary_of_buildings)
"""
DATA_FILE_NAME = flags.data_path
TRAIN_BASE_PATH = os.path.join('..', DATA_FILE_NAME, 'pixor', 'train')
path=TRAIN_BASE_PATH
mean = np.load('mean.npy')
std = np.load('std.npy')
train_mean = np.load('train_mean.npy')
train_std = np.load('train_std.npy')
# Open the jpeg image and save as numpy array
p = osp.join(path, 'images', f'{index}.jpg')
im = Image.open(p)
im_arr = np.array(im)
im_arr = (im_arr - mean) / std
class_annotation = np.load(osp.join(path, 'class_annotations', f'{index}.npy'))
if(len(class_annotation.shape) == 2):
class_annotation = class_annotation[:,:,newaxis]
# Open the json file and parse into dictionary of index -> buildings pairs
box_annotation = np.load(osp.join(path, 'box_annotations', f'{index}.npy'))
# normalizing the positive labels if norm=True
if norm:
clipped = np.clip(class_annotation, 0, 1)
box_annotation = clipped * (box_annotation - train_mean)/train_std + (1 - clipped) * box_annotation
return im_arr, box_annotation, class_annotation
def get_batch(start_index, flags, path='', norm=True):
"""
Method 3)
Gets batch of tiles and labels associated with data start_index.
Returns:
[(tile_array, list_of_buildings), ...]
"""
# DATA_FILE_NAME = flags.data_path
# TRAIN_BASE_PATH = os.path.join('..', DATA_FILE_NAME, 'pixor', 'train')
BATCH_SIZE = flags.batch_size
TILE_SIZE = flags.tile_size
# path = TRAIN_BASE_PATH
p = osp.join(path, 'images')
length = len(os.listdir(p))
batch_indices = np.arange(length)
batch_images = np.zeros((BATCH_SIZE, TILE_SIZE, TILE_SIZE, 3))
batch_boxes = np.zeros((BATCH_SIZE, TILE_SIZE, TILE_SIZE, 6))
batch_classes = np.zeros((BATCH_SIZE, TILE_SIZE, TILE_SIZE, 1))
for i in range(start_index, start_index + BATCH_SIZE):
batch_images[i % BATCH_SIZE], batch_boxes[i % BATCH_SIZE], batch_classes[i % BATCH_SIZE] = get_tile_and_label(batch_indices[i], flags, norm=norm)
return batch_images, batch_boxes, batch_classes
|
from tensorflow.keras import layers
from catekitten.base import SequenceEncoderBase
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.linear_model import LogisticRegression
from scipy import sparse
import tensorflow as tf
import numpy as np
class NbSvmClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, C=1.0, dual=False, n_jobs=1):
self.C = C
self.dual = dual
self.n_jobs = n_jobs
def predict(self, x):
# Verify that model has been fit
check_is_fitted(self, ['_r', '_clf'])
return self._clf.predict(x.multiply(self._r))
def predict_proba(self, x):
# Verify that model has been fit
check_is_fitted(self, ['_r', '_clf'])
return self._clf.predict_proba(x.multiply(self._r))
def fit(self, x, y):
# Check that X and y have correct shape
x, y = check_X_y(x, y, accept_sparse=True)
if isinstance(x, np.ndarray):
x = sparse.csr_matrix(x)
def pr(x, y_i, y):
p = x[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
self._r = sparse.csr_matrix(np.log(pr(x,1,y) / pr(x,0,y)))
x_nb = x.multiply(self._r)
self._clf = LogisticRegression(C=self.C, dual=self.dual, n_jobs=self.n_jobs).fit(x_nb, y)
return self
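# A minimal, hedged usage sketch for NbSvmClassifier (not part of the original
# module). The toy corpus, labels and TfidfVectorizer settings below are
# assumptions chosen purely for illustration.
def _demo_nbsvm():
    """Fit the NB-SVM style classifier on a tiny TF-IDF matrix and predict."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    texts = ["good product", "great quality", "bad service", "terrible support"]
    labels = np.array([1, 1, 0, 0])
    # Vectorize to a sparse TF-IDF matrix, then fit and predict on the same data.
    x = TfidfVectorizer().fit_transform(texts)
    clf = NbSvmClassifier(C=1.0).fit(x, labels)
    return clf.predict(x)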
class BidirectionalCNN(SequenceEncoderBase):
def __init__(self, num_filters=64, dropout_rate=0.5):
"""Modified version of Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 64)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
**cnn_kwargs: Additional args for building the `Conv1D` layer.
"""
super(BidirectionalCNN, self).__init__(dropout_rate)
self.num_filters = num_filters
def build_model(self, x):
x = layers.Bidirectional(layers.GRU(self.num_filters, return_sequences=True))(x)
x = layers.Conv1D(64, kernel_size=3, padding="valid", kernel_initializer="glorot_uniform")(x)
x = layers.GlobalMaxPooling1D()(x)
return x
class YoonKimCNNv3(SequenceEncoderBase):
def __init__(self, num_filters=128, filter_sizes=[2, 3, 4, 5], dropout_rate=0.5, **conv_kwargs):
"""Modified version of Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 64)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
**cnn_kwargs: Additional args for building the `Conv1D` layer.
"""
super(YoonKimCNNv3, self).__init__(dropout_rate)
self.num_filters = num_filters
self.filter_sizes = filter_sizes
self.conv_kwargs = conv_kwargs
def build_model(self, x):
maxpooled_tensors = []
avgpooled_tensors = []
for filter_size in self.filter_sizes:
x_i = layers.Conv1D(self.num_filters, filter_size,
use_bias=False, **self.conv_kwargs,
name="conv%s" % filter_size)(x)
x_i = layers.ELU(name="elu%s" % filter_size)(x_i)
x_i = layers.BatchNormalization(name="bn%s" % filter_size)(x_i)
x_m = layers.GlobalMaxPooling1D(name="global_maxpool%s" % filter_size)(x_i)
x_a = layers.GlobalAveragePooling1D(name="global_avgpool%s" % filter_size)(x_i)
maxpooled_tensors.append(x_m)
avgpooled_tensors.append(x_a)
x_m = layers.concatenate(maxpooled_tensors, axis=-1)
x_a = layers.concatenate(avgpooled_tensors, axis=-1)
x = layers.concatenate([x_m, x_a], axis=-1)
return x
class YoonKimCNNv2(SequenceEncoderBase):
def __init__(self, num_filters=128, filter_sizes=[2, 3, 4, 5], dropout_rate=0.5, **conv_kwargs):
"""Modified version of Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 64)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
**cnn_kwargs: Additional args for building the `Conv1D` layer.
"""
super(YoonKimCNNv2, self).__init__(dropout_rate)
self.num_filters = num_filters
self.filter_sizes = filter_sizes
self.conv_kwargs = conv_kwargs
def build_model(self, x):
pooled_tensors = []
for filter_size in self.filter_sizes:
x_i = layers.Conv1D(self.num_filters, filter_size,
use_bias=False, **self.conv_kwargs,
name="conv%s" % filter_size)(x)
x_i = layers.ELU(name="elu%s" % filter_size)(x_i)
x_i = layers.BatchNormalization(name="bn%s" % filter_size)(x_i)
x_i = layers.GlobalMaxPooling1D(name="global_maxpool%s" % filter_size)(x_i)
pooled_tensors.append(x_i)
x = pooled_tensors[0] if len(self.filter_sizes) == 1 else layers.concatenate(pooled_tensors, axis=-1)
return x
class YoonKimCNN(SequenceEncoderBase):
def __init__(self, num_filters=128, filter_sizes=[3, 4, 5], dropout_rate=0.5, **conv_kwargs):
"""Modified version of Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 64)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
**cnn_kwargs: Additional args for building the `Conv1D` layer.
"""
super(YoonKimCNN, self).__init__(dropout_rate)
self.num_filters = num_filters
self.filter_sizes = filter_sizes
self.conv_kwargs = conv_kwargs
def build_model(self, x):
pooled_tensors = []
for filter_size in self.filter_sizes:
x_i = layers.Conv1D(self.num_filters, filter_size, use_bias=False, **self.conv_kwargs)(x)
x_i = layers.ELU()(x_i)
x_i = layers.BatchNormalization()(x_i)
x_i = layers.GlobalMaxPooling1D()(x_i)
pooled_tensors.append(x_i)
x = pooled_tensors[0] if len(self.filter_sizes) == 1 else layers.concatenate(pooled_tensors, axis=-1)
return x
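# Hedged wiring sketch (not part of the original module): plug the encoder onto an
# embedded sequence with the Keras functional API. The vocabulary size, sequence
# length and embedding width below are assumptions, and SequenceEncoderBase's own
# entry points are bypassed in favour of calling build_model directly.
#
#   inputs = layers.Input(shape=(100,), dtype='int32')
#   embedded = layers.Embedding(input_dim=5000, output_dim=128)(inputs)
#   encoded = YoonKimCNN(num_filters=64, filter_sizes=[3, 4, 5]).build_model(embedded)
#   outputs = layers.Dense(1, activation='sigmoid')(encoded)
#   model = tf.keras.Model(inputs, outputs)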
class TextLab(SequenceEncoderBase):
def __init__(self, num_filters=128, filter_sizes=[3, 4, 5], dropout_rate=0.5, **conv_kwargs):
"""Modified version of Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf
Args:
num_filters: The number of filters to use per `filter_size`. (Default value = 64)
filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
**cnn_kwargs: Additional args for building the `Conv1D` layer.
"""
super(TextLab, self).__init__(dropout_rate)
self.filter_sizes = filter_sizes
self.conv_kwargs = conv_kwargs
self.num_filters = num_filters
def conv_block(self, x, num_filters, num_kernels=2):
bypass = x
for _ in range(num_kernels):
x = layers.Conv1D(num_filters, 3, padding='same',
activation='relu', use_bias=False, **self.conv_kwargs)(x)
x = layers.BatchNormalization()(x)
x = layers.Dropout(self.dropout_rate)(x)
x = layers.Add()([bypass, x])
return x
def build_model(self, x):
pooled_tensors = []
with tf.variable_scope("encoder"):
for filter_size in self.filter_sizes:
x_i = layers.Conv1D(self.num_filters, filter_size, use_bias=False, **self.conv_kwargs)(x)
x_i = layers.BatchNormalization()(x_i)
x_i = layers.GlobalMaxPooling1D()(x_i)
pooled_tensors.append(x_i)
x = pooled_tensors[0] if len(self.filter_sizes) == 1 else layers.concatenate(pooled_tensors, axis=-1)
with tf.variable_scope("decoder"):
x = self.conv_block(x, self.num_filters)
return x
|
"""
Definition of a DataGenerator that creates data batches from DataLoader
"""
import math
import numpy as np
from .funcs.funcs_varr import unpack_varr_arrays
from .idata_generator import IDataGenerator
class DataGenerator(IDataGenerator):
"""Primary `vlne` DataGenerator that batches data from a `IDataLoader`.
This generator takes an instance of `IDataLoader` and specification of
input/target variables and creates batches of data based on them.
Batches can be retrieved with __getitem__ method that takes batch
index as an input.
Parameters
----------
data_loader : `IDataLoader`
`IDataLoader` which will be used to retrieve values of variables.
batch_size : int
Size of the batches to be generated.
max_prongs : int or None, optional
If `max_prongs` is not None, then the number of 2D and 3D prongs will
be truncated by `max_prongs`. Default: None.
vars_input_slice : list of str or None, optional
Names of slice level input variables in `data_loader`.
If None no slice level inputs will be generated. Default: None.
vars_input_png3d : list of str or None, optional
Names of 3d prong level input variables in `data_loader`.
If None no 3d prong level inputs will be generated. Default: None.
vars_input_png2d : list of str or None, optional
Names of 2d prong level input variables in `data_loader`.
If None no 2d prong level inputs will be generated. Default: None.
var_target_total : str or None, optional
Name of the variable in `data_loader` that holds total energy of
the event (e.g. neutrino energy).
If None, no total energy target will be generated. Default: None
var_target_primary : str or None, optional
Name of the variable in `data_loader` that holds primary energy of
the event (e.g. lepton energy).
If None, no primary energy target will be generated. Default: None
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
data_loader,
batch_size = 1024,
max_prongs = None,
vars_input_slice = None,
vars_input_png3d = None,
vars_input_png2d = None,
var_target_total = None,
var_target_primary = None,
):
super(DataGenerator, self).__init__()
self._data_loader = data_loader
self._batch_size = batch_size
self._max_prongs = max_prongs
self._vars_input_slice = vars_input_slice
self._vars_input_png3d = vars_input_png3d
self._vars_input_png2d = vars_input_png2d
self._var_target_total = var_target_total
self._var_target_primary = var_target_primary
def get_scalar_data(self, variables, index):
"""Generate batch of scalar data.
Parameters
----------
variables : list of str
            List of variable names whose values will be joined into a batch.
index : int or list of int or None
Index that defines slice of values to be used when generating
batch. If None, all available values will be joined into a batch.
Returns
-------
ndarray, shape (N_SAMPLE, len(variables))
            Values of `variables` sliced by `index` and batched together.
"""
result = np.empty((len(index), len(variables)), dtype = np.float32)
for idx,vname in enumerate(variables):
result[:, idx] = self._data_loader.get(vname, index)
return result
def get_varr_data(self, variables, index, max_prongs = None):
"""Generate batch of variable length arrays (prong) data.
        All variable length arrays will be batched together into a fixed
        size `np.ndarray`. Missing variable length values will be padded
        with `np.nan`.
Parameters
----------
variables : list of str
            List of variable names whose values will be joined into a batch.
index : int or list of int or None
Index that defines slice of values to be used when generating
batch. If None, all available values will be joined into a batch.
max_prongs : int or None, optional
If `max_prongs` is not None, then the variable length dimension
will be truncated by `max_prongs`.
Returns
-------
ndarray, shape (N_SAMPLE, N_VARR, len(variables))
            Values of `variables` sliced by `index` and batched together.
Second dimension goes along the variable length axis.
See Also
--------
unpack_varr_arrays
"""
result = unpack_varr_arrays(
self._data_loader, variables, index, max_prongs
)
return result
def get_data(self, index):
"""Generate batch of inputs and targets.
Only variables from vars_input_* will be used to generate input
batches. Similarly, only variables from var_target_* will be used
to generate target batches.
Parameters
----------
index : int or list of int or None
Index of the `IDataLoader` this generator holds that specifies
slice of values to be batched together.
If None, all available values will be batched.
Returns
-------
(inputs, targets)
Dictionaries of input and target batches.
See Also
--------
DataGenerator.__getitem__
"""
inputs = {}
targets = {}
if self._vars_input_slice is not None:
inputs['input_slice'] = self.get_scalar_data(
self._vars_input_slice, index
)
if self._vars_input_png3d is not None:
inputs['input_png3d'] = self.get_varr_data(
self._vars_input_png3d, index, self._max_prongs
)
if self._vars_input_png2d is not None:
inputs['input_png2d'] = self.get_varr_data(
self._vars_input_png2d, index, self._max_prongs
)
if self._var_target_total is not None:
targets['target_total'] = self.get_scalar_data(
[ self._var_target_total ], index
)
if self._var_target_primary is not None:
targets['target_primary'] = self.get_scalar_data(
[ self._var_target_primary ], index
)
return (inputs, targets)
def __len__(self):
return math.ceil(len(self._data_loader) / self._batch_size)
@property
def weights(self):
return np.ones(len(self._data_loader))
def __getitem__(self, index):
start = index * self._batch_size
end = min((index + 1) * self._batch_size, len(self._data_loader))
data_slice = np.arange(start, end)
batch_data = self.get_data(data_slice)
batch_weights = np.ones(end - start)
return batch_data + ( [batch_weights, ] * len(batch_data[1]), )
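# Hedged usage sketch (not part of the original module): iterate over batches
# produced by the generator. `loader` must implement the IDataLoader interface
# referenced above, and the variable names below are illustrative only.
#
#   gen = DataGenerator(
#       loader,
#       batch_size=256,
#       vars_input_slice=['calE', 'nHit'],
#       var_target_total='trueE',
#   )
#   for batch_idx in range(len(gen)):
#       inputs, targets, weights = gen[batch_idx]
#       # inputs['input_slice'] has shape (batch, 2), targets['target_total']
#       # has shape (batch, 1), and weights is a list with one array per target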
|
from werkzeug.exceptions import BadRequest
class RequestDictionary:
""" Provides an interface to an http request """
def __init__(self, request, optional_request=False):
self.requestDict = self.derive(request, optional_request)
def get_value(self, value):
""" Returns value for specified key """
if value not in self.requestDict:
raise ValueError(value + " not found")
return self.requestDict[value]
def exists(self, value):
""" Returns True if key is in request json """
if value not in self.requestDict:
return False
return True
def to_string(self):
return str(self.requestDict)
@staticmethod
def derive(request, optional_request=False):
"""Check request header to determine where to fetch arguments from.
Raise exceptions. @todo - replace this whole class with standard flask
HTTP argument handling"""
try:
if "Content-Type" not in request.headers:
raise ValueError("Must include Content-Type header")
content_type = request.headers['Content-Type']
# Allowing extra content after application/json for firefox
# compatibility
if request.is_json:
result = request.get_json()
if not isinstance(result, dict):
# @todo: this shouldn't be a type error
raise TypeError(
"Failed to create a dictionary out of json")
return result
elif content_type == "application/x-www-form-urlencoded":
return request.form
else:
raise ValueError("Invalid Content-Type : " + content_type)
except BadRequest as br:
if optional_request:
return {}
else:
raise br
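# Hedged usage sketch (not part of the original module), inside a Flask view
# function; the 'name' key is illustrative only:
#
#   from flask import request
#
#   request_dict = RequestDictionary(request)
#   if request_dict.exists('name'):
#       name = request_dict.get_value('name')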
|
import subprocess
import time
from languages.main import install_language
from ides.main import install_editors
from databases.main import install_databases
from browsers.main import install_browsers
print('Welcome to Linux setup tool')
print('We shall start by updating and upgrading your system.')
# subprocess.call('sudo apt update && upgrade', shell=True)
# print('All Done.')
# time.sleep(1)
# subprocess.call('clear', shell=True)
def printColumns(nested_list):
col_width = max(len(word) for row in nested_list for word in row) + 2
for row in nested_list:
print(''.join(word.ljust(col_width) for word in row))
def install_packages(function):
try:
selected_packages = raw_input(
            'Select numerical value(s) separated by spaces. Leave empty if none needed: ')
    except NameError:  # raw_input is undefined on Python 3
selected_packages = input(
            'Select numerical value(s) separated by spaces. Leave empty if none needed: ')
if selected_packages:
function(selected_packages)
return 0
programming_languages = [['[1] Java', '[2] PHP'], [
'[3] Node.js', '[4] Python3']]
print('=========================================')
print('Select Programming Languages To Install')
print('=========================================')
printColumns(programming_languages)
print('\n')
install_packages(install_language)
text_editors = [['[1] VS Code', '[2] Sublime Text'], [
'[3] Atom', '[4] Android Studio'], ['[5] PyCharm', '[6] WebStorm']]
print('=========================================')
print('Select Text Editors/IDEs To Install')
print('=========================================')
printColumns(text_editors)
print('\n')
install_packages(install_editors)
databases = [['[1] MySQL', '[2] PostgreSQL'], ['[3] Mongo']]
print('=========================================')
print('Select Databases To Install')
print('=========================================')
printColumns(databases)
print('\n')
install_packages(install_databases)
web_browsers = [['[1] Chrome', '[2] Chromium'], [
'[3] Opera', '[4] Firefox'], ['[5] Brave']]
print('=========================================')
print('Select Web Browsers To Install')
print('=========================================')
printColumns(web_browsers)
print('\n')
install_packages(install_browsers)
|
from .csv_exporter import CSVExporter
from .ec_exporter import ECExporter
from .ecms_exporter import ECMSExporter
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
import numpy as np
def linear(x, a=0.4, b=1.2):
return a*x + b
arg_parser = ArgumentParser(description='create linear data with noise')
arg_parser.add_argument('n', type=int, default=5, help='number of points')
arg_parser.add_argument('--x_min', type=float, default=0.0,
help='minimum x-value')
arg_parser.add_argument('--x_max', type=float, default=1.0,
help='maximum x-value')
arg_parser.add_argument('--a', type=float, default=0.4,
help='slope in a*x + b')
arg_parser.add_argument('--b', type=float, default=1.2,
help='intercept in a*x + b')
arg_parser.add_argument('--sigma', type=float, default=0.25,
help='noise level')
arg_parser.add_argument('--out', help='name of output file')
options = arg_parser.parse_args()
names = 'x,y'
x = np.linspace(options.x_min, options.x_max, options.n)
f = np.vectorize(linear)
y = f(x, options.a, options.b) + options.sigma*np.random.randn(len(x))
if options.out:
out = open(options.out, 'w')
else:
out = sys.stdout
out.write(names + '\n')
for i in range(len(x)):
out.write('{x:.7e},{y:.7e}\n'.format(x=x[i], y=y[i]))
if options.out:
out.close()
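# Hedged usage sketch (not part of the original script): generate 10 noisy points
# on y = 0.5*x + 2.0 over [0.0, 5.0] and write them to data.csv. The script file
# name below is an assumption; the flags match the ArgumentParser defined above.
#
#   python create_linear_data.py 10 --x_min 0.0 --x_max 5.0 \
#       --a 0.5 --b 2.0 --sigma 0.1 --out data.csv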
|
"""
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from byceps.database import db
from byceps.services.authorization import service as authorization_service
from byceps.services.user import service as user_service
from byceps.services.verification_token import (
service as verification_token_service,
)
from tests.helpers import http_client
@pytest.fixture(scope='module')
def user1(make_user):
return make_user(email_address='user1@mail.test', initialized=False)
@pytest.fixture(scope='module')
def user2(make_user):
return make_user(initialized=False)
@pytest.fixture(scope='module')
def user3(make_user):
return make_user(email_address='user3@mail.test', initialized=True)
@pytest.fixture(scope='module')
def user4(make_user):
return make_user(initialized=True)
@pytest.fixture(scope='module')
def user5(make_user):
return make_user(email_address='user5@mail.test', initialized=True)
@pytest.fixture
def role(admin_app, site, user1, user2):
role = authorization_service.create_role('board_user', 'Board User')
yield role
for user in user1, user2:
authorization_service.deassign_all_roles_from_user(user.id)
authorization_service.delete_role(role.id)
def test_valid_token(site_app, user1, role):
user_id = user1.id
user_before = user_service.get_db_user(user_id)
assert not user_before.email_address_verified
assert not user_before.initialized
token = create_verification_token(user_id, 'user1@mail.test')
# -------------------------------- #
response = confirm(site_app, token)
# -------------------------------- #
assert response.status_code == 302
user_after = user_service.get_db_user(user_id)
assert user_after.email_address_verified
assert user_after.initialized
assert get_role_ids(user_id) == {'board_user'}
def test_unknown_token(site_app, site, user2, role):
user_id = user2.id
user_before = user_service.get_db_user(user_id)
assert not user_before.initialized
unknown_token = 'wZdSLzkT-zRf2x2T6AR7yGa3Nc_X3Nn3F3XGPvPtOhw'
# -------------------------------- #
response = confirm(site_app, unknown_token)
# -------------------------------- #
assert response.status_code == 404
user_after = user_service.get_db_user(user_id)
assert not user_after.initialized
assert get_role_ids(user_id) == set()
def test_initialized_user(site_app, user3, role):
user_id = user3.id
user_before = user_service.get_db_user(user_id)
assert not user_before.email_address_verified
assert user_before.initialized
token = create_verification_token(user_id, 'user3@mail.test')
# -------------------------------- #
response = confirm(site_app, token)
# -------------------------------- #
assert response.status_code == 302
user_after = user_service.get_db_user(user_id)
assert user_after.email_address_verified
assert user_after.initialized
def test_account_without_email_address(site_app, site, user4, role):
user_id = user4.id
user_with_email_address = user_service.get_db_user(user_id)
user_with_email_address.email_address = None
db.session.commit()
user_before = user_service.get_db_user(user_id)
assert user_before.email_address is None
assert not user_before.email_address_verified
assert user_before.initialized
token = create_verification_token(user_id, 'user4@mail.test')
# -------------------------------- #
response = confirm(site_app, token)
# -------------------------------- #
assert response.status_code == 302
user_after = user_service.get_db_user(user_id)
assert not user_after.email_address_verified
def test_different_user_and_token_email_addresses(site_app, site, user5, role):
user_id = user5.id
user_before = user_service.get_db_user(user_id)
assert not user_before.email_address_verified
assert user_before.initialized
token = create_verification_token(user_id, 'user5@mail-other.test')
# -------------------------------- #
response = confirm(site_app, token)
# -------------------------------- #
assert response.status_code == 302
user_after = user_service.get_db_user(user_id)
assert not user_after.email_address_verified
# helpers
def confirm(app, token):
url = f'/users/email_address/confirmation/{token}'
with http_client(app) as client:
return client.get(url)
def get_role_ids(user_id):
return authorization_service.find_role_ids_for_user(user_id)
def create_verification_token(user_id, email_address):
token = verification_token_service.create_for_email_address_confirmation(
user_id, email_address
)
return token.token
|
import sys
import os
from unittest.mock import Mock
sys.modules["logger"] = Mock()
sys.path.append(os.path.join(os.path.dirname(__file__), "../sesmanworker"))
|
from data_processing import *
# 5-cluster CIFAR10 subsets at noise levels 2.0, 3.0 and 4.0
for noise in [2.0, 3.0, 4.0]:
    create_and_store_CIFARdataset('./', np.array([0, 1, 2, 3, 4]),
                                  32, 1000, noise, '/home/nilay/GANMM-master/data')
print("Generating 5 clusters CIFAR10 done!")
# 7-cluster CIFAR10 subsets at noise levels 0.0 through 4.0
for noise in [0.0, 1.0, 2.0, 3.0, 4.0]:
    create_and_store_CIFARdataset('./', np.array([0, 1, 2, 3, 4, 5, 6]),
                                  32, 1000, noise, '/home/nilay/GANMM-master/data')
print("Generating 7 clusters CIFAR10 done!")
# 10-cluster CIFAR10 subsets at noise levels 0.0 through 4.0
for noise in [0.0, 1.0, 2.0, 3.0, 4.0]:
    create_and_store_CIFARdataset('./', np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
                                  32, 1000, noise, '/home/nilay/GANMM-master/data')
print("Generating 10 clusters CIFAR10 done!")
|
import tensorflow as tf
@tf.function
def default_transform_targets(current_state, next_state):
"""
    This is the default transform-targets function. It preprocesses the targets of the
    network before the dynamics function is trained on the inputs and targets.
    The default transform is (target = next_state - current_state).
    Parameters
    ----------
current_state: tf.float32
The current_state has a shape of (Batch X dim_S)
next_state: tf.float32
The next_state has a shape of (Batch X dim_S)
"""
return next_state - current_state
@tf.function
def default_inverse_transform_targets(current_state, delta):
"""
    This is the default inverse transform-targets function. It reverses the target
    preprocessing of the dynamics function to recover the absolute next state from
    the predicted delta. The default inverse transform is
    (next_state = delta + current_state).
    Parameters
    ----------
current_state: tf.float32
The current_state has a shape of (Batch X dim_S)
delta: tf.float32
        The delta has a shape of (Batch X dim_S), which is equivalent to the target of the network.
"""
return delta + current_state
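# A hedged sanity-check sketch (not part of the original module): the two default
# transforms invert each other, so applying the inverse transform to the delta
# recovers the next state. The example tensors below are illustrative only.
def _demo_transform_roundtrip():
    """Return the maximum deviation between the recovered and true next state."""
    current_state = tf.constant([[0.0, 1.0, 2.0]])
    next_state = tf.constant([[0.5, 1.5, 2.5]])
    delta = default_transform_targets(current_state, next_state)
    recovered = default_inverse_transform_targets(current_state, delta)
    # should be ~0 up to floating point error
    return tf.reduce_max(tf.abs(recovered - next_state))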
|
import numpy as np
import pickle
import os
class data_reader():
def __init__(self):
if os.path.exists('./data.pkl'):
print('Loading from pickle')
data = pickle.load(open('./data.pkl','rb'))
self.adj, self.paper_category, self.features = data
print('Loaded')
else:
print('No pkl found, generating data...')
num_id = 0
explored_categories = {}
paper_id = {}
paper_category = {}
features = []
f = open('./cora/cora.content')
for i in f:
i = i.strip()
i = i.split('\t')
pprid = int(i[0])
category = i[-1]
feature = i[1:-1]
feature = [float(item) for item in feature]
feature = np.float32(feature)
features.append(feature)
paper_id[pprid] = num_id
if not category in explored_categories:
num = len(explored_categories)
print(category)
explored_categories[category] = num
paper_category[num_id] = explored_categories[category]
num_id+=1
citation = []
f = open('./cora/cora.cites')
for i in f:
i = i.strip()
i = i.split('\t')
cited_ppr = int(i[0])
cited_id = paper_id[cited_ppr]
citing_ppr = int(i[1])
citing_id = paper_id[citing_ppr]
citation.append([cited_id, citing_id])
self.max_id = num_id
self.paper_category = paper_category
self.citation = citation
self.adj = self.get_adj_mtx()
self.features = np.float32(features)
data = [self.adj, self.paper_category, self.features]
pickle.dump( data, open('./data.pkl','wb'))
print('Dump finished')
def get_adj_mtx(self):
mtx = np.zeros([self.max_id, self.max_id], np.float32)
for i in self.citation:
mtx[i[0], i[1]] = 1
mtx[i[1], i[0]] = 1
return mtx
def process_data(self, one_hot=True):
import random
random.seed(2019)
indices = list(range(2708))
self.indices = random.sample(indices, 140)
self.labels = [self.paper_category[i] for i in self.indices]
if one_hot:
self.labels = [np.eye(7)[i] for i in self.labels]
self.indices = [[i] for i in self.indices]
def get_data(self):
category = [self.paper_category[i] for i in range(2708)]
return self.features, self.adj, self.indices, self.labels, category
if __name__=='__main__':
dt = data_reader()
print(dt.adj.shape)
cate = dt.paper_category
a = {}
for k in cate:
a[cate[k]] = 1
print(len(a))
|
import unittest
from deepnox.auth.credentials import Credentials
class CredentialsTestCase(unittest.TestCase):
def test___init__(self):
self.assertIsInstance(Credentials(), Credentials)
# class BasicAuthTestCase(unittest.TestCase):
# def test___init__(self):
# self.assertRaises(TypeError, lambda: BasicAuth())
# self.assertRaises(TypeError, lambda: BasicAuth("username"))
# self.assertRaises(TypeError, lambda: BasicAuth(password="password"))
# self.assertIsInstance(BasicAuth("username", "password"), BasicAuth)
# self.assertIsInstance(BasicAuth("username", "password", encoding="utf8"), BasicAuth)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#
# Spectral estimation with (multi-)tapered FFT
#
# Builtin/3rd party package imports
import numpy as np
from scipy import signal
# local imports
from ._norm_spec import _norm_spec, _norm_taper
def mtmfft(data_arr,
samplerate,
nSamples=None,
taper="hann",
taper_opt=None,
demean_taper=False):
"""
(Multi-)tapered fast Fourier transform. Returns
full complex Fourier transform for each taper.
    Multi-tapering is only supported with Slepian windows (`taper="dpss"`).
Parameters
----------
data_arr : (N,) :class:`numpy.ndarray`
Uniformly sampled multi-channel time-series data
The 1st dimension is interpreted as the time axis
samplerate : float
Samplerate in Hz
nSamples : int or None
Absolute length of the (potentially to be padded) signals
or `None` for no padding (`N` is the number of samples)
taper : str or None
Taper function to use, one of `scipy.signal.windows`
Set to `None` for no tapering.
taper_opt : dict or None
Additional keyword arguments passed to the `taper` function.
For multi-tapering with ``taper='dpss'`` set the keys
`'Kmax'` and `'NW'`.
For further details, please refer to the
`SciPy docs <https://docs.scipy.org/doc/scipy/reference/signal.windows.html>`_
demean_taper : bool
Set to `True` to perform de-meaning after tapering
Returns
-------
ftr : 3D :class:`numpy.ndarray`
Complex output has shape ``(nTapers x nFreq x nChannels)``.
freqs : 1D :class:`numpy.ndarray`
Array of Fourier frequencies
Notes
-----
For a (MTM) power spectral estimate average the absolute squared
transforms across tapers:
``Sxx = np.real(ftr * ftr.conj()).mean(axis=0)``
The FFT result is normalized such that this yields the power
spectral density. For a clean harmonic and a Fourier frequency bin
width of `dF` this will give a peak power of `A**2 / 2 * dF`,
    with `A` as harmonic amplitude.
"""
# attach dummy channel axis in case only a
# single signal/channel is the input
if data_arr.ndim < 2:
data_arr = data_arr[:, np.newaxis]
signal_length = data_arr.shape[0]
if nSamples is None:
nSamples = signal_length
nChannels = data_arr.shape[1]
freqs = np.fft.rfftfreq(nSamples, 1 / samplerate)
nFreq = freqs.size
# no taper is boxcar
if taper is None:
taper = 'boxcar'
if taper_opt is None:
taper_opt = {}
taper_func = getattr(signal.windows, taper)
# only really 2d if taper='dpss' with Kmax > 1
# here we take the actual signal lengths!
windows = np.atleast_2d(taper_func(signal_length, **taper_opt))
# normalize window
windows = _norm_taper(taper, windows, nSamples)
# Fourier transforms (nTapers x nFreq x nChannels)
ftr = np.zeros((windows.shape[0], nFreq, nChannels), dtype='complex64')
for taperIdx, win in enumerate(windows):
win = np.tile(win, (nChannels, 1)).T
win *= data_arr
# de-mean again after tapering - needed for Granger!
if demean_taper:
win -= win.mean(axis=0)
ftr[taperIdx] = np.fft.rfft(win, n=nSamples, axis=0)
ftr[taperIdx] = _norm_spec(ftr[taperIdx], nSamples, samplerate)
return ftr, freqs
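# A hedged usage sketch (not part of the original module): compute a multi-taper
# power spectral density estimate following the Notes section of the docstring.
# The sampling rate, test frequency and dpss parameters below are assumptions
# chosen purely for illustration.
def _demo_mtmfft_psd():
    """Return (freqs, Sxx) for a 30 Hz test sinusoid sampled at 1 kHz."""
    fs = 1000.0
    t = np.arange(0, 1, 1 / fs)
    sig = np.cos(2 * np.pi * 30 * t)
    ftr, freqs = mtmfft(sig, fs, taper="dpss", taper_opt={"Kmax": 5, "NW": 3})
    # average the absolute squared transforms across tapers to get the PSD
    Sxx = np.real(ftr * ftr.conj()).mean(axis=0)
    return freqs, Sxx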
|
import streamlit as st
import spacy
import base64
import pandas as pd
import jsonlines
from pymongo import MongoClient
HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def setup_mongo():
client = MongoClient('mongodb://localhost:27017/')
db = client['gsr']
coll = db['prod_dec_2020_2']
return coll
def visualize(coll):
coder = st.selectbox("Select your port/ID number",
[9015, 9016, 9017, 9019, 9020, 9022, 9023, 9025])
coder = int(coder)
#st.markdown("Total sentences in collection: {}".format(coll.count()))
assigned = coll.count({"assigned_annotators": {"$in" : [coder]}})
completed = coll.count({"coders": {"$in" : [coder]}})
st.markdown("Sentences assigned to {}: {}".format(coder, assigned))
st.markdown("Sentences completed by {}: {}".format(coder, completed))
st.markdown("Progress:")
try:
prog = completed/assigned
except ZeroDivisionError:
prog = 0
st.progress(prog)
st.title('Annotation progress')
st.markdown("Check your annotation progress by selecting your port/ID number")
coll = setup_mongo()
visualize(coll)
|
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from launchdarkly_api.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from launchdarkly_api.model.access_denied_reason_rep import AccessDeniedReasonRep
from launchdarkly_api.model.access_denied_rep import AccessDeniedRep
from launchdarkly_api.model.access_rep import AccessRep
from launchdarkly_api.model.access_token_post import AccessTokenPost
from launchdarkly_api.model.action_input_rep import ActionInputRep
from launchdarkly_api.model.action_output_rep import ActionOutputRep
from launchdarkly_api.model.all_variations_summary import AllVariationsSummary
from launchdarkly_api.model.approval_condition_input_rep import ApprovalConditionInputRep
from launchdarkly_api.model.approval_condition_output_rep import ApprovalConditionOutputRep
from launchdarkly_api.model.approval_settings import ApprovalSettings
from launchdarkly_api.model.audit_log_entry_listing_rep import AuditLogEntryListingRep
from launchdarkly_api.model.audit_log_entry_listing_rep_collection import AuditLogEntryListingRepCollection
from launchdarkly_api.model.audit_log_entry_rep import AuditLogEntryRep
from launchdarkly_api.model.authorized_app_data_rep import AuthorizedAppDataRep
from launchdarkly_api.model.big_segment_target import BigSegmentTarget
from launchdarkly_api.model.branch_collection_rep import BranchCollectionRep
from launchdarkly_api.model.branch_rep import BranchRep
from launchdarkly_api.model.clause import Clause
from launchdarkly_api.model.client_side_availability import ClientSideAvailability
from launchdarkly_api.model.client_side_availability_post import ClientSideAvailabilityPost
from launchdarkly_api.model.condition_base_output_rep import ConditionBaseOutputRep
from launchdarkly_api.model.condition_input_rep import ConditionInputRep
from launchdarkly_api.model.condition_output_rep import ConditionOutputRep
from launchdarkly_api.model.confidence_interval_rep import ConfidenceIntervalRep
from launchdarkly_api.model.conflict import Conflict
from launchdarkly_api.model.conflict_output_rep import ConflictOutputRep
from launchdarkly_api.model.copied_from_env import CopiedFromEnv
from launchdarkly_api.model.create_copy_flag_config_approval_request_request import CreateCopyFlagConfigApprovalRequestRequest
from launchdarkly_api.model.create_flag_config_approval_request_request import CreateFlagConfigApprovalRequestRequest
from launchdarkly_api.model.custom_properties import CustomProperties
from launchdarkly_api.model.custom_property import CustomProperty
from launchdarkly_api.model.custom_role import CustomRole
from launchdarkly_api.model.custom_role_post import CustomRolePost
from launchdarkly_api.model.custom_role_post_data import CustomRolePostData
from launchdarkly_api.model.custom_roles import CustomRoles
from launchdarkly_api.model.custom_workflow_input_rep import CustomWorkflowInputRep
from launchdarkly_api.model.custom_workflow_meta import CustomWorkflowMeta
from launchdarkly_api.model.custom_workflow_output_rep import CustomWorkflowOutputRep
from launchdarkly_api.model.custom_workflow_stage_meta import CustomWorkflowStageMeta
from launchdarkly_api.model.custom_workflows_listing_output_rep import CustomWorkflowsListingOutputRep
from launchdarkly_api.model.default_client_side_availability_post import DefaultClientSideAvailabilityPost
from launchdarkly_api.model.defaults import Defaults
from launchdarkly_api.model.dependent_flag import DependentFlag
from launchdarkly_api.model.dependent_flag_environment import DependentFlagEnvironment
from launchdarkly_api.model.dependent_flags_by_environment import DependentFlagsByEnvironment
from launchdarkly_api.model.derived_attribute import DerivedAttribute
from launchdarkly_api.model.destination import Destination
from launchdarkly_api.model.destination_post import DestinationPost
from launchdarkly_api.model.destinations import Destinations
from launchdarkly_api.model.environment import Environment
from launchdarkly_api.model.environment_post import EnvironmentPost
from launchdarkly_api.model.execution_output_rep import ExecutionOutputRep
from launchdarkly_api.model.experiment_allocation_rep import ExperimentAllocationRep
from launchdarkly_api.model.experiment_enabled_period_rep import ExperimentEnabledPeriodRep
from launchdarkly_api.model.experiment_environment_setting_rep import ExperimentEnvironmentSettingRep
from launchdarkly_api.model.experiment_info_rep import ExperimentInfoRep
from launchdarkly_api.model.experiment_metadata_rep import ExperimentMetadataRep
from launchdarkly_api.model.experiment_rep import ExperimentRep
from launchdarkly_api.model.experiment_results_rep import ExperimentResultsRep
from launchdarkly_api.model.experiment_stats_rep import ExperimentStatsRep
from launchdarkly_api.model.experiment_time_series_slice import ExperimentTimeSeriesSlice
from launchdarkly_api.model.experiment_time_series_variation_slice import ExperimentTimeSeriesVariationSlice
from launchdarkly_api.model.experiment_time_series_variation_slices import ExperimentTimeSeriesVariationSlices
from launchdarkly_api.model.experiment_totals_rep import ExperimentTotalsRep
from launchdarkly_api.model.expiring_user_target_error import ExpiringUserTargetError
from launchdarkly_api.model.expiring_user_target_get_response import ExpiringUserTargetGetResponse
from launchdarkly_api.model.expiring_user_target_item import ExpiringUserTargetItem
from launchdarkly_api.model.expiring_user_target_patch_response import ExpiringUserTargetPatchResponse
from launchdarkly_api.model.extinction import Extinction
from launchdarkly_api.model.extinction_collection_rep import ExtinctionCollectionRep
from launchdarkly_api.model.extinction_list_post import ExtinctionListPost
from launchdarkly_api.model.feature_flag import FeatureFlag
from launchdarkly_api.model.feature_flag_body import FeatureFlagBody
from launchdarkly_api.model.feature_flag_config import FeatureFlagConfig
from launchdarkly_api.model.feature_flag_scheduled_change import FeatureFlagScheduledChange
from launchdarkly_api.model.feature_flag_scheduled_changes import FeatureFlagScheduledChanges
from launchdarkly_api.model.feature_flag_status import FeatureFlagStatus
from launchdarkly_api.model.feature_flag_status_across_environments import FeatureFlagStatusAcrossEnvironments
from launchdarkly_api.model.feature_flag_statuses import FeatureFlagStatuses
from launchdarkly_api.model.feature_flags import FeatureFlags
from launchdarkly_api.model.flag_config_approval_request_response import FlagConfigApprovalRequestResponse
from launchdarkly_api.model.flag_config_approval_requests_response import FlagConfigApprovalRequestsResponse
from launchdarkly_api.model.flag_copy_config_environment import FlagCopyConfigEnvironment
from launchdarkly_api.model.flag_copy_config_post import FlagCopyConfigPost
from launchdarkly_api.model.flag_global_attributes_rep import FlagGlobalAttributesRep
from launchdarkly_api.model.flag_listing_rep import FlagListingRep
from launchdarkly_api.model.flag_scheduled_changes_input import FlagScheduledChangesInput
from launchdarkly_api.model.flag_status_rep import FlagStatusRep
from launchdarkly_api.model.flag_summary import FlagSummary
from launchdarkly_api.model.forbidden_error_rep import ForbiddenErrorRep
from launchdarkly_api.model.form_variable_config import FormVariableConfig
from launchdarkly_api.model.hunk_rep import HunkRep
from launchdarkly_api.model.instruction import Instruction
from launchdarkly_api.model.instructions import Instructions
from launchdarkly_api.model.integration_metadata import IntegrationMetadata
from launchdarkly_api.model.integration_status import IntegrationStatus
from launchdarkly_api.model.invalid_request_error_rep import InvalidRequestErrorRep
from launchdarkly_api.model.ip_list import IpList
from launchdarkly_api.model.json_patch import JSONPatch
from launchdarkly_api.model.last_seen_metadata import LastSeenMetadata
from launchdarkly_api.model.link import Link
from launchdarkly_api.model.member import Member
from launchdarkly_api.model.member_data_rep import MemberDataRep
from launchdarkly_api.model.member_permission_grant_summary_rep import MemberPermissionGrantSummaryRep
from launchdarkly_api.model.member_summary_rep import MemberSummaryRep
from launchdarkly_api.model.member_team_summary_rep import MemberTeamSummaryRep
from launchdarkly_api.model.members import Members
from launchdarkly_api.model.method_not_allowed_error_rep import MethodNotAllowedErrorRep
from launchdarkly_api.model.metric_collection_rep import MetricCollectionRep
from launchdarkly_api.model.metric_listing_rep import MetricListingRep
from launchdarkly_api.model.metric_post import MetricPost
from launchdarkly_api.model.metric_rep import MetricRep
from launchdarkly_api.model.metric_seen import MetricSeen
from launchdarkly_api.model.modification import Modification
from launchdarkly_api.model.multi_environment_dependent_flag import MultiEnvironmentDependentFlag
from launchdarkly_api.model.multi_environment_dependent_flags import MultiEnvironmentDependentFlags
from launchdarkly_api.model.new_member_form import NewMemberForm
from launchdarkly_api.model.new_member_form_list_post import NewMemberFormListPost
from launchdarkly_api.model.not_found_error_rep import NotFoundErrorRep
from launchdarkly_api.model.parent_resource_rep import ParentResourceRep
from launchdarkly_api.model.patch_failed_error_rep import PatchFailedErrorRep
from launchdarkly_api.model.patch_operation import PatchOperation
from launchdarkly_api.model.patch_segment_instruction import PatchSegmentInstruction
from launchdarkly_api.model.patch_segment_request import PatchSegmentRequest
from launchdarkly_api.model.patch_with_comment import PatchWithComment
from launchdarkly_api.model.permission_grant_collection_rep import PermissionGrantCollectionRep
from launchdarkly_api.model.permission_grant_input import PermissionGrantInput
from launchdarkly_api.model.permission_grant_rep import PermissionGrantRep
from launchdarkly_api.model.post_approval_request_apply_request import PostApprovalRequestApplyRequest
from launchdarkly_api.model.post_approval_request_review_request import PostApprovalRequestReviewRequest
from launchdarkly_api.model.post_flag_scheduled_changes_input import PostFlagScheduledChangesInput
from launchdarkly_api.model.prerequisite import Prerequisite
from launchdarkly_api.model.project import Project
from launchdarkly_api.model.project_listing_rep import ProjectListingRep
from launchdarkly_api.model.project_post import ProjectPost
from launchdarkly_api.model.projects import Projects
from launchdarkly_api.model.pub_nub_detail_rep import PubNubDetailRep
from launchdarkly_api.model.put_branch import PutBranch
from launchdarkly_api.model.rate_limited_error_rep import RateLimitedErrorRep
from launchdarkly_api.model.reference_rep import ReferenceRep
from launchdarkly_api.model.relay_auto_config_collection_rep import RelayAutoConfigCollectionRep
from launchdarkly_api.model.relay_auto_config_post import RelayAutoConfigPost
from launchdarkly_api.model.relay_auto_config_rep import RelayAutoConfigRep
from launchdarkly_api.model.repository_collection_rep import RepositoryCollectionRep
from launchdarkly_api.model.repository_post import RepositoryPost
from launchdarkly_api.model.repository_rep import RepositoryRep
from launchdarkly_api.model.resource_access import ResourceAccess
from launchdarkly_api.model.resource_id_response import ResourceIDResponse
from launchdarkly_api.model.review_output_rep import ReviewOutputRep
from launchdarkly_api.model.review_response import ReviewResponse
from launchdarkly_api.model.rollout import Rollout
from launchdarkly_api.model.root_response import RootResponse
from launchdarkly_api.model.rule import Rule
from launchdarkly_api.model.schedule_condition_input_rep import ScheduleConditionInputRep
from launchdarkly_api.model.schedule_condition_output_rep import ScheduleConditionOutputRep
from launchdarkly_api.model.sdk_list_rep import SdkListRep
from launchdarkly_api.model.sdk_version_list_rep import SdkVersionListRep
from launchdarkly_api.model.sdk_version_rep import SdkVersionRep
from launchdarkly_api.model.segment_body import SegmentBody
from launchdarkly_api.model.segment_metadata import SegmentMetadata
from launchdarkly_api.model.segment_user_list import SegmentUserList
from launchdarkly_api.model.segment_user_state import SegmentUserState
from launchdarkly_api.model.series_list_rep import SeriesListRep
from launchdarkly_api.model.series_metadata_rep import SeriesMetadataRep
from launchdarkly_api.model.series_time_slice_rep import SeriesTimeSliceRep
from launchdarkly_api.model.source_flag import SourceFlag
from launchdarkly_api.model.stage_input_rep import StageInputRep
from launchdarkly_api.model.stage_output_rep import StageOutputRep
from launchdarkly_api.model.statement import Statement
from launchdarkly_api.model.statement_post import StatementPost
from launchdarkly_api.model.statement_post_data import StatementPostData
from launchdarkly_api.model.statement_post_list import StatementPostList
from launchdarkly_api.model.statement_rep import StatementRep
from launchdarkly_api.model.statistic_collection_rep import StatisticCollectionRep
from launchdarkly_api.model.statistic_rep import StatisticRep
from launchdarkly_api.model.statistics_root import StatisticsRoot
from launchdarkly_api.model.status_conflict_error_rep import StatusConflictErrorRep
from launchdarkly_api.model.subject_data_rep import SubjectDataRep
from launchdarkly_api.model.target import Target
from launchdarkly_api.model.target_resource_rep import TargetResourceRep
from launchdarkly_api.model.team_collection_rep import TeamCollectionRep
from launchdarkly_api.model.team_patch_input import TeamPatchInput
from launchdarkly_api.model.team_post_input import TeamPostInput
from launchdarkly_api.model.team_rep import TeamRep
from launchdarkly_api.model.title_rep import TitleRep
from launchdarkly_api.model.token import Token
from launchdarkly_api.model.token_data_rep import TokenDataRep
from launchdarkly_api.model.tokens import Tokens
from launchdarkly_api.model.unauthorized_error_rep import UnauthorizedErrorRep
from launchdarkly_api.model.url_matchers import UrlMatchers
from launchdarkly_api.model.url_post import UrlPost
from launchdarkly_api.model.user import User
from launchdarkly_api.model.user_attribute_names_rep import UserAttributeNamesRep
from launchdarkly_api.model.user_flag_setting import UserFlagSetting
from launchdarkly_api.model.user_flag_settings import UserFlagSettings
from launchdarkly_api.model.user_record import UserRecord
from launchdarkly_api.model.user_record_rep import UserRecordRep
from launchdarkly_api.model.user_segment import UserSegment
from launchdarkly_api.model.user_segment_rule import UserSegmentRule
from launchdarkly_api.model.user_segments import UserSegments
from launchdarkly_api.model.users import Users
from launchdarkly_api.model.value_put import ValuePut
from launchdarkly_api.model.variation import Variation
from launchdarkly_api.model.variation_or_rollout_rep import VariationOrRolloutRep
from launchdarkly_api.model.variation_summary import VariationSummary
from launchdarkly_api.model.versions_rep import VersionsRep
from launchdarkly_api.model.webhook import Webhook
from launchdarkly_api.model.webhook_post import WebhookPost
from launchdarkly_api.model.webhooks import Webhooks
from launchdarkly_api.model.weighted_variation import WeightedVariation
|
"""
An extension of urlfetch that keeps cookies between redirects
From: http://everydayscripting.blogspot.com/2009/08/google-app-engine-cookie-handling-with.html
"""
import urllib
import urllib2
import Cookie
from google.appengine.api import urlfetch
class URLOpener:
def __init__(self):
self.cookie = Cookie.SimpleCookie()
def open(self, url, data=None):
if data is None:
method = urlfetch.GET
else:
method = urlfetch.POST
while url is not None:
response = urlfetch.fetch(url=url,
payload=data,
method=method,
headers=self._getHeaders(self.cookie),
allow_truncated=False,
follow_redirects=False,
deadline=10
)
data = None # Next request will be a get, so no need to send the data again.
method = urlfetch.GET
self.cookie.load(response.headers.get('set-cookie', '')) # Load the cookies from the response
url = response.headers.get('location')
return response
def _getHeaders(self, cookie):
headers = {
'Cookie': self._makeCookieHeader(cookie)
}
return headers
def _makeCookieHeader(self, cookie):
cookieHeader = ""
for value in cookie.values():
cookieHeader += "%s=%s; " % (value.key, value.value)
return cookieHeader
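# Hedged usage sketch (not part of the original module): POST a login form, then
# reuse the collected cookies on a follow-up GET. The URLs and form fields below
# are illustrative only.
#
#   opener = URLOpener()
#   opener.open('http://example.com/login',
#               data=urllib.urlencode({'user': 'alice', 'password': 'secret'}))
#   # cookies set anywhere along the redirect chain are kept in opener.cookie
#   profile_page = opener.open('http://example.com/profile')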
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TemplateInfo:
# name should be the same as template file name (no extension)
name: str
# name of the file to create
file_name: Optional[str]
# if set to true a directory will be created
is_batch: bool = field(default=True)
# suffix appended to class names
suffix: str = field(default='')
@dataclass
class Suffixes:
component = 'Component'
page = 'Page'
reducer = 'Reducer'
service = 'Service'
state = 'State'
@dataclass
class TemplateOptions:
shared_component: TemplateInfo
function_component: TemplateInfo
ts_function_component: TemplateInfo
class_component: TemplateInfo
styles: TemplateInfo
css_module: TemplateInfo
readme: TemplateInfo
test: TemplateInfo
state: TemplateInfo
actions: TemplateInfo
ts_actions: TemplateInfo
ts_state: TemplateInfo
ts_connected_component: TemplateInfo
ts_reducer: TemplateInfo
reducer: TemplateInfo
service: TemplateInfo
ts_styles: TemplateInfo
def __init__(self, folder_name: str):
self.shared_component = TemplateInfo(
name='shared-component',
file_name=None
)
self.ts_state = TemplateInfo(
name='ts-state',
file_name=None
)
self.ts_reducer = TemplateInfo(
name='ts-reducer',
file_name=f'{folder_name}.reducer.ts',
)
self.function_component = TemplateInfo(
name='function-component',
file_name=f'{folder_name}.component.jsx',
suffix=Suffixes.component
)
self.class_component = TemplateInfo(
name='class-component',
file_name=f'{folder_name}.component.jsx',
suffix=Suffixes.component
)
self.styles = TemplateInfo(name='styles', file_name=f'{folder_name}.styles.js')
self.ts_styles = TemplateInfo(name='ts-styles', file_name=f'{folder_name}.styles.ts')
self.css_module = TemplateInfo(name='css-module', file_name=f'{folder_name}.styles.scss')
self.readme = TemplateInfo(
name='readme',
file_name=f'{folder_name}.readme.md',
suffix=Suffixes.component
)
self.test = TemplateInfo(
name='test',
file_name=f'{folder_name}.test.jsx',
suffix=Suffixes.component
)
self.state = TemplateInfo(
name='state',
file_name=f'{folder_name}.state.js',
suffix=Suffixes.state
)
self.actions = TemplateInfo(name='actions', file_name=f'{folder_name}.actions.js')
self.reducer = TemplateInfo(
name='reducer',
file_name=f'{folder_name}.reducer.js',
suffix=Suffixes.reducer
)
self.service = TemplateInfo(
name='service',
file_name=f'{folder_name}.service.js',
suffix=Suffixes.service,
is_batch=False
)
self.ts_function_component = TemplateInfo(
name='ts-function-component',
file_name=f'{folder_name}.component.tsx',
suffix=Suffixes.component,
is_batch=True
)
self.ts_connected_component = TemplateInfo(
name='ts-connected-component',
file_name=f'{folder_name}.page.tsx',
suffix=Suffixes.page,
is_batch=True
)
self.ts_actions = TemplateInfo(
name='ts-actions',
file_name=f'{folder_name}.actions.ts'
)
self.ts_state = TemplateInfo(
name='ts-state',
file_name=f'{folder_name}.state.ts'
)
def get_suffix(self, template_name):
try:
template = self.get_template_info(template_name)
return template.suffix
        except AttributeError:
return ""
def get_template_info(self, template_name) -> Optional[TemplateInfo]:
try:
return self.__getattribute__(template_name)
        except AttributeError:
return None
def get_batch_templates_dictionary(self):
return {
'shared-component': [
self.function_component,
self.styles,
self.readme,
self.test
],
'class-component': [
self.class_component,
self.css_module
],
'function-component': [
self.function_component,
self.css_module
],
'ts-function-component': [
self.ts_function_component,
self.ts_styles
],
'ts-connected-component': [
self.ts_connected_component,
self.ts_styles
],
'ts-state': [
self.ts_actions,
self.ts_state,
self.ts_reducer
],
'state': [
self.state,
self.reducer,
self.actions
],
'service': [
self.service
]
}
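# Hedged usage sketch (not part of the original module): resolve templates for a
# hypothetical "user-profile" component.
#
#   options = TemplateOptions('user-profile')
#   info = options.get_template_info('ts_function_component')
#   # info.file_name -> 'user-profile.component.tsx', info.suffix -> 'Component'
#   batch = options.get_batch_templates_dictionary()['ts-function-component']
#   # batch -> [ts_function_component info, ts_styles info]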
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
from awkward._v2._typetracer import UnknownLength
typetracer = ak._v2._typetracer.TypeTracer.instance()
def test_getitem_at():
concrete = ak._v2.contents.NumpyArray(np.arange(2 * 3 * 5).reshape(2, 3, 5) * 0.1)
abstract = ak._v2.contents.NumpyArray(concrete.raw(typetracer))
assert concrete.shape == (2, 3, 5)
assert abstract.shape[1:] == (3, 5)
assert abstract[0].shape[1:] == (5,)
assert abstract[0][0].shape[1:] == ()
assert abstract.form == concrete.form
assert abstract.form.type == concrete.form.type
assert abstract[0].form == concrete[0].form
assert abstract[0].form.type == concrete[0].form.type
def test_EmptyArray():
a = ak._v2.contents.emptyarray.EmptyArray()
assert a.typetracer.form == a.forget_length().form
def test_NumpyArray():
a = ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64)
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
b = ak._v2.contents.numpyarray.NumpyArray(
np.arange(2 * 3 * 5, dtype=np.int64).reshape(2, 3, 5)
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
assert b.forget_length().data.shape[1:] == (3, 5)
def test_RegularArray_NumpyArray():
# 6.6 is inaccessible
a = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
),
3,
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
b = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.emptyarray.EmptyArray(), 0, zeros_length=10
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
def test_ListArray_NumpyArray():
# 200 is inaccessible in stops
# 6.6, 7.7, and 8.8 are inaccessible in content
a = ak._v2.contents.listarray.ListArray(
ak._v2.index.Index(np.array([4, 100, 1], dtype=np.int64)),
ak._v2.index.Index(np.array([7, 100, 3, 200], dtype=np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])
),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_ListOffsetArray_NumpyArray():
# 6.6 and 7.7 are inaccessible
a = ak._v2.contents.listoffsetarray.ListOffsetArray(
ak._v2.index.Index(np.array([1, 4, 4, 6])),
ak._v2.contents.numpyarray.NumpyArray([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_RecordArray_NumpyArray():
# 5.5 is inaccessible
a = ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4])),
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
),
],
["x", "y"],
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
# 5.5 is inaccessible
b = ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4])),
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
),
],
None,
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
c = ak._v2.contents.recordarray.RecordArray([], [], 10)
assert c.typetracer.form == c.forget_length().form
assert c.forget_length().length is UnknownLength
d = ak._v2.contents.recordarray.RecordArray([], None, 10)
assert d.typetracer.form == d.forget_length().form
assert d.forget_length().length is UnknownLength
def test_IndexedArray_NumpyArray():
# 4.4 is inaccessible; 3.3 and 5.5 appear twice
a = ak._v2.contents.indexedarray.IndexedArray(
ak._v2.index.Index(np.array([2, 2, 0, 1, 4, 5, 4])),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_IndexedOptionArray_NumpyArray():
# 1.1 and 4.4 are inaccessible; 3.3 appears twice
a = ak._v2.contents.indexedoptionarray.IndexedOptionArray(
ak._v2.index.Index(np.array([2, 2, -1, 1, -1, 5, 4])),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_ByteMaskedArray_NumpyArray():
# 2.2, 4.4, and 6.6 are inaccessible
a = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([1, 0, 1, 0, 1], dtype=np.int8)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
valid_when=True,
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
# 2.2, 4.4, and 6.6 are inaccessible
b = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([0, 1, 0, 1, 0], dtype=np.int8)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
valid_when=False,
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
def test_BitMaskedArray_NumpyArray():
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
a = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=True,
length=13,
lsb_order=False,
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
b = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=False,
length=13,
lsb_order=False,
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
c = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=True,
length=13,
lsb_order=True,
)
assert c.typetracer.form == c.forget_length().form
assert c.forget_length().length is UnknownLength
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
d = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=False,
length=13,
lsb_order=True,
)
assert d.typetracer.form == d.forget_length().form
assert d.forget_length().length is UnknownLength
def test_UnmaskedArray_NumpyArray():
a = ak._v2.contents.unmaskedarray.UnmaskedArray(
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64)
)
)
assert a.typetracer.form == a.form
assert a.typetracer.form.type == a.form.type
assert len(a) == 4
assert a[2] == 2.2
assert a[-2] == 2.2
assert type(a[2]) is np.float64
with pytest.raises(IndexError):
a[4]
with pytest.raises(IndexError):
a[-5]
assert isinstance(a[2:], ak._v2.contents.unmaskedarray.UnmaskedArray)
assert a[2:][0] == 2.2
assert len(a[2:]) == 2
with pytest.raises(IndexError):
a["bad"]
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_UnionArray_NumpyArray():
# 100 is inaccessible in index
# 1.1 is inaccessible in contents[1]
a = ak._v2.contents.unionarray.UnionArray(
ak._v2.index.Index(np.array([1, 1, 0, 0, 1, 0, 1], dtype=np.int8)),
ak._v2.index.Index(np.array([4, 3, 0, 1, 2, 2, 4, 100])),
[
ak._v2.contents.numpyarray.NumpyArray(np.array([1, 2, 3])),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5])),
],
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_RegularArray_RecordArray_NumpyArray():
# 6.6 is inaccessible
a = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
3,
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
b = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.recordarray.RecordArray(
[ak._v2.contents.emptyarray.EmptyArray()], ["nest"]
),
0,
zeros_length=10,
)
assert b.typetracer.form == b.form
assert b.typetracer.form.type == b.form.type
assert len(b["nest"]) == 10
assert b.typetracer["nest"].form == b["nest"].form
assert isinstance(b["nest"][5], ak._v2.contents.emptyarray.EmptyArray)
assert b.typetracer["nest"][5].form == b["nest"][5].form
assert len(b["nest"][5]) == 0
assert isinstance(b["nest"][7:], ak._v2.contents.regulararray.RegularArray)
assert b.typetracer["nest"][7:].form == b["nest"][7:].form
assert len(b["nest"][7:]) == 3
assert len(b["nest"][7:100]) == 3
with pytest.raises(IndexError):
b["nest"]["bad"]
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
def test_ListArray_RecordArray_NumpyArray():
# 200 is inaccessible in stops
# 6.6, 7.7, and 8.8 are inaccessible in content
a = ak._v2.contents.listarray.ListArray(
ak._v2.index.Index(np.array([4, 100, 1])),
ak._v2.index.Index(np.array([7, 100, 3, 200])),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])
)
],
["nest"],
),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_ListOffsetArray_RecordArray_NumpyArray():
# 6.6 and 7.7 are inaccessible
a = ak._v2.contents.listoffsetarray.ListOffsetArray(
ak._v2.index.Index(np.array([1, 4, 4, 6])),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
[6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]
)
],
["nest"],
),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_IndexedArray_RecordArray_NumpyArray():
# 4.4 is inaccessible; 3.3 and 5.5 appear twice
a = ak._v2.contents.indexedarray.IndexedArray(
ak._v2.index.Index(np.array([2, 2, 0, 1, 4, 5, 4])),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_IndexedOptionArray_RecordArray_NumpyArray():
# 1.1 and 4.4 are inaccessible; 3.3 appears twice
a = ak._v2.contents.indexedoptionarray.IndexedOptionArray(
ak._v2.index.Index(np.array([2, 2, -1, 1, -1, 5, 4])),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_ByteMaskedArray_RecordArray_NumpyArray():
# 2.2, 4.4, and 6.6 are inaccessible
a = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([1, 0, 1, 0, 1], dtype=np.int8)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
valid_when=True,
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
# 2.2, 4.4, and 6.6 are inaccessible
b = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([0, 1, 0, 1, 0], dtype=np.int8)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
valid_when=False,
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
def test_BitMaskedArray_RecordArray_NumpyArray():
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
a = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
False,
True,
False,
True,
]
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=True,
length=13,
lsb_order=False,
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
b = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=False,
length=13,
lsb_order=False,
)
assert b.typetracer.form == b.forget_length().form
assert b.forget_length().length is UnknownLength
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
c = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=True,
length=13,
lsb_order=True,
)
assert c.typetracer.form == c.forget_length().form
assert c.forget_length().length is UnknownLength
# 4.0, 5.0, 6.0, 7.0, 2.2, 4.4, and 6.6 are inaccessible
d = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
],
dtype=np.uint8,
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=False,
length=13,
lsb_order=True,
)
assert d.typetracer.form == d.forget_length().form
assert d.forget_length().length is UnknownLength
def test_UnmaskedArray_RecordArray_NumpyArray():
a = ak._v2.contents.unmaskedarray.UnmaskedArray(
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64)
)
],
["nest"],
)
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
def test_UnionArray_RecordArray_NumpyArray():
# 100 is inaccessible in index
# 1.1 is inaccessible in contents[1]
a = ak._v2.contents.unionarray.UnionArray(
ak._v2.index.Index(np.array([1, 1, 0, 0, 1, 0, 1], dtype=np.int8)),
ak._v2.index.Index(np.array([4, 3, 0, 1, 2, 2, 4, 100])),
[
ak._v2.contents.recordarray.RecordArray(
[ak._v2.contents.numpyarray.NumpyArray(np.array([1, 2, 3]))], ["nest"]
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5])
)
],
["nest"],
),
],
)
assert a.typetracer.form == a.forget_length().form
assert a.forget_length().length is UnknownLength
|
from kevin.developing.executor import Executor
from .along_axis import get_executor_ls_by_block_along_axis
def get_executor_ls_by_block_of_all(factory, chunk_step, need_to_generate, **kwargs):
"""
通过调用 verification.Factory 中的 generate_by_block() 函数,
来对整个矩阵,
生成一系列的执行器 executor_ls,
每个执行器在被 executor() 调用后都将返回一个数据集
参数:
factory: verification.Factory 实例
chunk_step: 每个分块的大小
need_to_generate: 需要生成的字段
(参见 Factory.generate_by_block() 中的介绍)
"""
    width = len(factory.paras["features"])  # width of the matrix
    assert width > 0, \
        "Error: the length of features in the input factory should be larger than 0!"
    # number of blocks along each axis
chunk_nums = (width - 1) // chunk_step + 1
"""
执行器
"""
executor_ls, size_ls = [], []
    # For complete blocks (excluding the blocks in the last row and last column),
    # treat each whole block as one dataset:
    #   dataset_size = chunk_step * chunk_step
for i in range(chunk_nums - 1):
i_0 = i * chunk_step
i_1 = i_0 + chunk_step
for j in range(chunk_nums - 1):
j_0 = j * chunk_step
j_1 = j_0 + chunk_step
            # compute this block
paras = dict(i_0=i_0, i_1=i_1, j_0=j_0, j_1=j_1,
pick_triangle=False, need_to_generate=need_to_generate)
executor_ls.append(Executor(func=factory.generate_by_block,
kwargs=paras))
size_ls.append(factory.cal_size_of_block(**paras))
    # For the last row:
    # each complete rectangle satisfies
    #   chunk_step * chunk_step < dataset_size <= chunk_step * (chunk_step + 1)
    # Note:
    # the last rectangle may be incomplete (i.e. its size falls below the lower bound);
    # in that case it is merged with the head of the last column below into one dataset.
    # size_upper: assumed per-dataset upper bound, taken from kwargs if provided,
    # otherwise inferred from the size constraints documented here.
    size_upper = kwargs.get("size_upper", chunk_step * (chunk_step + 1))
executor_ls_temp, size_ls_temp = get_executor_ls_by_block_along_axis(factory,
i_0=(chunk_nums - 1) * chunk_step,
i_1=width,
j_0=0,
j_1=width,
axis_to_split="j", size_upper=size_upper,
need_to_generate=need_to_generate)
executor_ls.extend(executor_ls_temp)
size_ls.extend(size_ls_temp)
    # For the last column:
    # the first rectangle is merged with the incomplete leftover from the row above into
    # one dataset; its number of rows is derived from the size of the missing part so that
    #   chunk_step * chunk_step <= dataset_size <= chunk_step * (chunk_step + 1)
    # Subsequent rectangles adjust their number of rows to stay as close to the upper
    # bound as possible; each complete rectangle satisfies
    #   chunk_step * chunk_step < dataset_size <= chunk_step * (chunk_step + 1)
    # and the last, possibly incomplete, rectangle satisfies
    #   dataset_size <= chunk_step * (chunk_step + 1)
    # This picks up the leftover block produced by the row pass above.
if chunk_nums > 1:
executor_ls_temp, size_ls_temp = get_executor_ls_by_block_along_axis(factory,
i_0=0,
i_1=(chunk_nums - 1) * chunk_step,
j_0=(chunk_nums - 1) * chunk_step,
j_1=width,
axis_to_split="i", size_upper=size_upper,
need_to_generate=need_to_generate,
pre_executor=executor_ls.pop(-1),
pre_size=size_ls.pop(-1))
executor_ls.extend(executor_ls_temp)
size_ls.extend(size_ls_temp)
    # In summary:
    # every dataset except the last one satisfies
    #   chunk_step * chunk_step <= dataset_size <= chunk_step * (chunk_step + 1)
    # while the last dataset may be incomplete, with
    #   0 < dataset_size <= chunk_step * (chunk_step + 1)
return executor_ls, size_ls
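# A minimal usage sketch (illustrative only: `factory` must be a pre-built
# verification.Factory and the "scores" field name is hypothetical):
#
#     executor_ls, size_ls = get_executor_ls_by_block_of_all(
#         factory=factory, chunk_step=1000, need_to_generate=["scores"])
#     for executor, size in zip(executor_ls, size_ls):
#         dataset = executor()  # each call materializes one block-shaped dataset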
|
"""
Regnet - from paper: Designing Network Design Spaces - https://arxiv.org/pdf/2003.13678.pdf
Implementation of paradigm described in paper published by Facebook AI Research (FAIR)
@author: Signatrix GmbH
Code taken from: https://github.com/signatrix/regnet - MIT Licence
"""
import numpy as np
import torch.nn as nn
from math import sqrt
from super_gradients.training.models.sg_module import SgModule
from super_gradients.training.utils.regularization_utils import DropPath
from super_gradients.training.utils.utils import get_param
class Head(nn.Module): # From figure 3
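    """Classification head (figure 3): global average pooling, dropout, and a fully-connected classifier."""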
def __init__(self, num_channels, num_classes, dropout_prob):
super(Head, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(output_size=1)
self.dropout = nn.Dropout(p=dropout_prob)
self.fc = nn.Linear(num_channels, num_classes)
def forward(self, x):
x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.fc(x)
return x
class Stem(nn.Module): # From figure 3
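    """Network stem (figure 3): a stride-2 3x3 convolution followed by batch norm and ReLU."""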
def __init__(self, in_channels, out_channels):
super(Stem, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.rl = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.rl(x)
return x
class XBlock(nn.Module): # From figure 4
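    """
    Residual bottleneck block (figure 4): 1x1 conv -> 3x3 group conv (with stride) ->
    optional squeeze-and-excitation -> 1x1 conv, added to a (projected) shortcut,
    with optional drop-path regularization before the final ReLU.
    """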
def __init__(self, in_channels, out_channels, bottleneck_ratio, group_width, stride, se_ratio=None, droppath_prob=0.):
super(XBlock, self).__init__()
inter_channels = int(out_channels // bottleneck_ratio)
groups = int(inter_channels // group_width)
self.conv_block_1 = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU()
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(inter_channels, inter_channels, kernel_size=3, stride=stride, groups=groups, padding=1,
bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU()
)
if se_ratio is not None:
se_channels = in_channels // se_ratio
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(output_size=1),
nn.Conv2d(inter_channels, se_channels, kernel_size=1, bias=True),
nn.ReLU(),
nn.Conv2d(se_channels, inter_channels, kernel_size=1, bias=True),
nn.Sigmoid(),
)
else:
self.se = None
self.conv_block_3 = nn.Sequential(
nn.Conv2d(inter_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels)
)
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
else:
self.shortcut = None
self.drop_path = DropPath(drop_prob=droppath_prob)
self.rl = nn.ReLU()
def forward(self, x):
x1 = self.conv_block_1(x)
x1 = self.conv_block_2(x1)
if self.se is not None:
x1 = x1 * self.se(x1)
x1 = self.conv_block_3(x1)
if self.shortcut is not None:
x2 = self.shortcut(x)
else:
x2 = x
x1 = self.drop_path(x1)
x = self.rl(x1 + x2)
return x
class Stage(nn.Module): # From figure 3
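    """A stage (figure 3): a sequence of XBlocks in which only the first block applies the stride and channel change."""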
def __init__(self, num_blocks, in_channels, out_channels, bottleneck_ratio, group_width, stride, se_ratio,
droppath_prob):
super(Stage, self).__init__()
self.blocks = nn.Sequential()
self.blocks.add_module("block_0",
XBlock(in_channels, out_channels, bottleneck_ratio, group_width, stride, se_ratio,
droppath_prob))
for i in range(1, num_blocks):
self.blocks.add_module("block_{}".format(i),
XBlock(out_channels, out_channels, bottleneck_ratio, group_width, 1, se_ratio,
droppath_prob))
def forward(self, x):
x = self.blocks(x)
return x
class AnyNetX(SgModule):
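    """AnyNetX (figure 3): a stem followed by a configurable sequence of stages and, unless backbone_mode is set, a classification head."""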
def __init__(self, ls_num_blocks, ls_block_width, ls_bottleneck_ratio, ls_group_width, stride, num_classes,
se_ratio, backbone_mode, dropout_prob=0., droppath_prob=0., input_channels=3):
super(AnyNetX, self).__init__()
verify_correctness_of_parameters(ls_num_blocks, ls_block_width, ls_bottleneck_ratio, ls_group_width)
self.net = nn.Sequential()
self.backbone_mode = backbone_mode
prev_block_width = 32
self.net.add_module("stem", Stem(in_channels=input_channels, out_channels=prev_block_width))
for i, (num_blocks, block_width, bottleneck_ratio, group_width) in enumerate(zip(ls_num_blocks, ls_block_width,
ls_bottleneck_ratio,
ls_group_width)):
self.net.add_module("stage_{}".format(i),
Stage(num_blocks, prev_block_width, block_width, bottleneck_ratio, group_width, stride,
se_ratio, droppath_prob))
prev_block_width = block_width
        # IN BACKBONE MODE - DO NOT ADD THE HEAD (AVG_POOL + FC)
if not self.backbone_mode:
self.net.add_module("head", Head(ls_block_width[-1], num_classes, dropout_prob))
self.initialize_weight()
self.ls_block_width = ls_block_width
self.dropout_prob = dropout_prob
def initialize_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
def forward(self, x):
x = self.net(x)
return x
def replace_head(self, new_num_classes=None, new_head=None):
if new_num_classes is None and new_head is None:
raise ValueError("At least one of new_num_classes, new_head must be given to replace output layer.")
if new_head is not None:
self.net.head = new_head
else:
self.net.head = Head(self.ls_block_width[-1], new_num_classes, self.dropout_prob)
class RegNetX(AnyNetX):
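    """
    RegNetX: an AnyNetX whose per-stage block widths and depths are derived from the
    design-space parameters (initial_width, slope, quantized_param, network_depth)
    via the linear parameterization and log-space quantization of equations 2-3 in the paper.
    """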
def __init__(self, initial_width, slope, quantized_param, network_depth, bottleneck_ratio, group_width,
stride, arch_params, se_ratio=None, input_channels=3):
# We need to derive block width and number of blocks from initial parameters.
parameterized_width = initial_width + slope * np.arange(network_depth) # From equation 2
parameterized_block = np.log(parameterized_width / initial_width) / np.log(quantized_param) # From equation 3
parameterized_block = np.round(parameterized_block)
quantized_width = initial_width * np.power(quantized_param, parameterized_block)
# We need to convert quantized_width to make sure that it is divisible by 8
quantized_width = 8 * np.round(quantized_width / 8)
        ls_block_width, ls_num_blocks = np.unique(quantized_width.astype(int), return_counts=True)
        # At this point, for each stage, the block width calculated above may be incompatible
        # with the group width due to the bottleneck ratio, so we need to adjust the former.
        # Group width can be swapped with the number of groups, since their product is the block width.
ls_group_width = np.array([min(group_width, block_width // bottleneck_ratio) for block_width in ls_block_width])
ls_block_width = np.round(ls_block_width // bottleneck_ratio / group_width) * group_width
ls_bottleneck_ratio = [bottleneck_ratio for _ in range(len(ls_block_width))]
# GET THE BACKBONE MODE FROM arch_params IF EXISTS - O.W. - SET AS FALSE
backbone_mode = get_param(arch_params, 'backbone_mode', False)
dropout_prob = get_param(arch_params, 'dropout_prob', 0.)
droppath_prob = get_param(arch_params, 'droppath_prob', 0.)
        super(RegNetX, self).__init__(ls_num_blocks, ls_block_width.astype(int).tolist(), ls_bottleneck_ratio,
ls_group_width.tolist(), stride, arch_params.num_classes, se_ratio, backbone_mode,
dropout_prob, droppath_prob, input_channels)
class RegNetY(RegNetX):
# RegNetY = RegNetX + SE
def __init__(self, initial_width, slope, quantized_param, network_depth, bottleneck_ratio, group_width,
stride, arch_params, se_ratio, input_channels=3):
super(RegNetY, self).__init__(initial_width,
slope,
quantized_param,
network_depth,
bottleneck_ratio,
group_width,
stride,
arch_params,
se_ratio, input_channels)
def verify_correctness_of_parameters(ls_num_blocks, ls_block_width, ls_bottleneck_ratio, ls_group_width):
"""VERIFY THAT THE GIVEN PARAMETERS FIT THE SEARCH SPACE DEFINED IN THE REGNET PAPER"""
err_message = 'Parameters don\'t fit'
assert len(set(ls_bottleneck_ratio)) == 1, f"{err_message} AnyNetXb"
assert len(set(ls_group_width)) == 1, f"{err_message} AnyNetXc"
assert all(i <= j for i, j in zip(ls_block_width, ls_block_width[1:])) is True, f"{err_message} AnyNetXd"
if len(ls_num_blocks) > 2:
assert all(i <= j for i, j in zip(ls_num_blocks[:-2], ls_num_blocks[1:-1])) is True, f"{err_message} AnyNetXe"
# For each stage & each layer, number of channels (block width / bottleneck ratio) must be divisible by group width
for block_width, bottleneck_ratio, group_width in zip(ls_block_width, ls_bottleneck_ratio, ls_group_width):
assert int(block_width // bottleneck_ratio) % group_width == 0
class CustomRegNet(RegNetX):
def __init__(self, arch_params):
"""All parameters must be provided in arch_params other than SE"""
super().__init__(initial_width=arch_params.initial_width,
slope=arch_params.slope,
quantized_param=arch_params.quantized_param,
network_depth=arch_params.network_depth,
bottleneck_ratio=arch_params.bottleneck_ratio,
group_width=arch_params.group_width,
stride=arch_params.stride,
arch_params=arch_params,
se_ratio=arch_params.se_ratio if hasattr(arch_params, 'se_ratio') else None,
input_channels=get_param(arch_params, 'input_channels', 3))
class NASRegNet(RegNetX):
def __init__(self, arch_params):
"""All parameters are provided as a single structure list: arch_params.structure"""
structure = arch_params.structure
super().__init__(initial_width=structure[0],
slope=structure[1],
quantized_param=structure[2],
network_depth=structure[3],
bottleneck_ratio=structure[4],
group_width=structure[5],
stride=structure[6],
se_ratio=structure[7] if structure[7] > 0 else None,
arch_params=arch_params)
class RegNetY200(RegNetY):
def __init__(self, arch_params):
super().__init__(24, 36, 2.5, 13, 1, 8, 2, arch_params, 4)
class RegNetY400(RegNetY):
def __init__(self, arch_params):
super().__init__(48, 28, 2.1, 16, 1, 8, 2, arch_params, 4)
class RegNetY600(RegNetY):
def __init__(self, arch_params):
super().__init__(48, 33, 2.3, 15, 1, 16, 2, arch_params, 4)
class RegNetY800(RegNetY):
def __init__(self, arch_params):
super().__init__(56, 39, 2.4, 14, 1, 16, 2, arch_params, 4)
|
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
"""Using optparse with single-letter options.
"""
#end_pymotw_header
import optparse
parser = optparse.OptionParser()
parser.add_option('-v', action="count",
dest='verbosity', default=1)
parser.add_option('-q', action='store_const',
const=0, dest='verbosity')
options, args = parser.parse_args()
print(options.verbosity)
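# Sample runs (the filename optparse_count.py is illustrative):
#   $ python optparse_count.py        -> 1   (the default)
#   $ python optparse_count.py -vv    -> 3   (each -v increments the count)
#   $ python optparse_count.py -q     -> 0   (store_const resets verbosity to 0)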
|
from lti_app.request_forms import BaseRequestForm as BRF
class AssignmentRequestForm(BRF):
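    """
    Schema for assignment requests: each attribute maps a form field to its expected type,
    whether it is required, its default value, and an optional 'get' callable
    (e.g. BRF.get_boolean_from_checkbox) used to parse the raw submitted value.
    """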
course_id = {'type': str, 'required': True}
assignment_id = {'type': str, 'required': True}
assignment_type = {'type': str}
reference = {'type': str}
excerpt = {'type': str}
supporting_excerpts = {'type': str}
model_answers = {'type': str}
# General settings
rubric = {'type': str, 'default': None}
graded_confirmation_text = {'type': str}
max_attempts = {'type': int, 'default': 3}
show_excerpt = {'type': bool, 'get': BRF.get_boolean_from_checkbox}
show_retry_button = {'type': bool, 'get': BRF.get_boolean_from_checkbox}
# Checks
citation_check = {'type': bool, 'get': BRF.get_boolean_from_checkbox}
grammar_check = {'type': bool, 'get': BRF.get_boolean_from_checkbox}
plagiarism_check = {'type': bool, 'get': BRF.get_boolean_from_checkbox}
academic_style_check = {'type': bool, 'get': BRF.get_boolean_from_checkbox}
semantics_check = {'type': int}
def __init__(self, form_data):
BRF.__init__(self, form_data)
|