blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
293fa724238f069bbc6baeb29ccc88ff5522b35d | 68aa8afaae7429ea8e1760e4483ddc4cde90fe45 | /benchmark/addressbook.proto.py | 7311a99f5cdc479b9b5d120dfae92406b1e32d0e | [
"BSD-2-Clause"
] | permissive | amluto/capnpc-python-cpp | 0329e9eb99f9283a023d0bc9c911fec2696e88a6 | 2fe61781f512d4f7ae0bdc5c0ece8672b24274f5 | refs/heads/master | 2021-01-18T10:01:48.233946 | 2013-08-28T21:46:53 | 2013-08-28T21:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | from __future__ import print_function
import addressbook_pb2 as addressbook
import os
print = lambda *x: x
def writeAddressBook(fd):
    """Populate a sample protobuf AddressBook (Alice + Bob) and write its
    serialized bytes to the open file object *fd*."""
    book = addressbook.AddressBook()

    alice = book.person.add()
    alice.id = 123
    alice.name = 'Alice'
    alice.email = 'alice@example.com'
    mobile = alice.phone.add()
    mobile.number = "555-1212"
    mobile.type = addressbook.Person.MOBILE

    bob = book.person.add()
    bob.id = 456
    bob.name = 'Bob'
    bob.email = 'bob@example.com'
    for number, kind in (("555-4567", addressbook.Person.HOME),
                         ("555-7654", addressbook.Person.WORK)):
        entry = bob.phone.add()
        entry.number = number
        entry.type = kind

    fd.write(book.SerializeToString())
def printAddressBook(fd):
    """Deserialize an AddressBook from *fd* and dump each person with all
    of their phone entries (note: module-level `print` is a no-op lambda)."""
    book = addressbook.AddressBook()
    book.ParseFromString(fd.read())
    for entry in book.person:
        print(entry.name, ':', entry.email)
        for ph in entry.phone:
            print(ph.type, ':', ph.number)
        print()
if __name__ == '__main__':
    # Benchmark loop: repeatedly serialize the sample book to disk and
    # parse it back.
    for _ in range(10000):
        # Bug fixes: SerializeToString() yields raw bytes, so the file must
        # be opened in *binary* mode ('w'/'r' fails on Python 3 and corrupts
        # data on Windows); 'with' also closes the handles, which the
        # original never did (20000 leaked file objects per run).
        with open('example', 'wb') as f:
            writeAddressBook(f)
        with open('example', 'rb') as f:
            printAddressBook(f)
        os.remove('example')
| [
"github@jparyani.com"
] | github@jparyani.com |
f75b7cea7eec79e082a31dcc921f3ddc5b829510 | 4774250f47b717f78f5be54b152bfc0fd46e49b3 | /Planet Class.py | f476aedd5a763a55a0d0799dcaaaf07717eb9f63 | [] | no_license | LuckySalmon/SolarSystemSimulation | dfb6641aff8127a87260565eed4a1994cf46d494 | 7c3917674ad2d5b70c2d768e7e6a98a2a140a9f7 | refs/heads/master | 2022-08-12T18:23:53.437959 | 2020-05-02T23:21:43 | 2020-05-02T23:21:43 | 260,792,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | import random, math
# SBC is the Stefan Boltzmann constant, used in calculating luminosity.
# Units: W * m^-2 * K^-4.
SBC = 5.670367 * (10 ** -8)
# g is the gravitational constant
# Units: m^3 * kg^-1 * s^-2.  NOTE(review): not referenced anywhere below.
g=6.674 * (10 ** -11)
# Name can be changed later.
# Velocity will later be changed to only allow stable orbits.
# Habitable will later be changed to depend on the star's habitable zone.
class Planet:
    """A randomly generated planet orbiting a given star.

    Mass/radius are in Earth units and distance in AU (per the inline
    comments); velocity, mass and radius are randomized on construction,
    only the orbital distance is caller-controlled.
    """

    def __init__(self, sun, name, distance):
        self.name = name
        self.sun = sun
        #self.distanceFromStar = float(random.random()*10) # measured in AU
        self.distanceFromStar = distance  # measured in AU
        self.velocity = float(random.random()*10) # measured in years
        self.mass = float(random.random()*10) # measured in Earths
        self.radius = float(random.random()*10) # measured in Earths
        # NOTE(review): radius could in principle be 0.0 (random.random()
        # may return 0), which would make the density division blow up.
        self.volume = ((4/3)*math.pi*(self.radius**3))
        self.density = (self.mass)/self.volume
        # Bug fix: the original referenced an undefined global `star`
        # (NameError on every construction) and assigned a *local*
        # `isHabitable` in the True branch instead of the attribute.
        # Use the star passed in and always set the attribute.
        if sun.habitableZoneInner < self.distanceFromStar < sun.habitableZoneOuter:
            self.isHabitable = True
        else:
            self.isHabitable = False
        self.gravity = "placeholder" # input the gravity calculation
class Star:
    """A randomly generated star.

    Radius is in km and temperature in kelvin, both randomized on
    construction; surface area, luminosity and habitable-zone bounds are
    derived from them.
    """

    def __init__(self):
        self.name = "Sun"
        self.radius = float(random.randint(10000, 7000000)) # measured in km
        self.temperature = random.randint(2000, 27000) # measured in kelvin
        # Map surface temperature to an approximate visible color band.
        if self.temperature <= 3500:
            self.color = "Red"
        elif self.temperature > 3500 and self.temperature <= 5000:
            self.color = "Orange"
        elif self.temperature > 5000 and self.temperature <= 8000:
            self.color = "Yellow"
        elif self.temperature > 8000 and self.temperature <= 17500:
            # Bug fix: this branch duplicated "Yellow" (copy-paste error);
            # stars in this temperature band are conventionally white.
            self.color = "White"
        else:
            self.color = "Blue"
        # luminosity is used in calculating the habitable zone of a star
        # (Stefan-Boltzmann law: L = SBC * A * T^4).
        self.surfaceArea = 4*math.pi*self.radius ** 2
        self.luminosity = SBC*self.surfaceArea*(self.temperature ** 4)
        # THIS IS INCORRECT (original author's note): the rule of thumb is
        # r = sqrt(L / L_sun) in AU, but luminosity here is in raw watts
        # with radius in km, so the units are inconsistent.  Behavior is
        # kept as-is because Planet.isHabitable depends on these values.
        self.habitableZoneInner = math.sqrt(self.luminosity)*0.95
        self.habitableZoneOuter = math.sqrt(self.luminosity)*1.37
| [
"LuckySalmon@users.noreply.github.com"
] | LuckySalmon@users.noreply.github.com |
13dd0603c64d79f959ef65d4b02c4e97751e6004 | 19a6255cd379a3bd446f022e408e6cc46f32746b | /venv/Scripts/easy_install-script.py | c8194e216a42d8c1f63e1cc8dc562e7f78aea851 | [] | no_license | Li-Rui-QI/practice | 6a78826d254f618e5b40ef85670d6f00aa3ea7e1 | 3830e4b3f6c328f486c5c2f82f31f9038b0acc55 | refs/heads/master | 2022-11-19T23:56:34.633858 | 2020-07-22T18:02:11 | 2020-07-22T18:02:11 | 279,687,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #!C:\liruiqi\college3\practice\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated console-script shim created by setuptools; not meant to
# be edited by hand.
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so setuptools resolves the
    # console-script name correctly on all platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Dispatch to the easy_install entry point declared in setuptools'
    # metadata and exit with its return code.
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"li995186428@gmail.com"
] | li995186428@gmail.com |
529c3a6e35142acdd0bfc6805bdf3d08e6bb3cfb | d3ad5201a6479e7522e87598b50600468a1a3c97 | /workers/proxies.py | c20818e6d77437bff982605e2db13d1327de43ea | [] | no_license | ludalex/storechecker.io | ba736e9869018db699d2351adb1683c600d069af | c396d34dc57068058ae88427e36b059f824f43e5 | refs/heads/master | 2021-01-23T22:38:28.426417 | 2014-11-15T13:27:33 | 2014-11-15T13:27:33 | 6,759,656 | 2 | 0 | null | 2014-11-15T13:24:48 | 2012-11-19T12:00:10 | PHP | UTF-8 | Python | false | false | 2,252 | py | from datetime import datetime
import sqlite3
from requests import session, __version__
from requests.exceptions import ConnectionError, ProxyError, Timeout
from bs4 import BeautifulSoup
import re
# Scrape the hidemyass proxy list (pages 1-11) and refresh the local
# `proxies` table with the rows found.
conn = sqlite3.connect('/var/www/n4checker/db/storechecker.db')
db = conn.cursor()
db.execute("delete from proxies")
conn.commit()

with session() as c:
    for page in range(1, 12):
        hma = c.get("http://hidemyass.com/proxy-list/" + str(page))
        soup = BeautifulSoup(hma.text)
        hma_proxies = []
        trs = soup.findAll("tr")
        trs.pop(0)  # drop the table header row
        for tr in trs:
            tds = tr.findAll("td")
            ips = tds[1]
            port = tds[2].renderContents()
            proxyCountry = tds[3].find("span").text.strip()
            # Speed / connection time are encoded as CSS width percentages.
            proxySpeed_style = tds[4].find("div").find("div")['style']
            proxySpeed = re.findall(r'width\:(.*?)\%;', proxySpeed_style)[0].strip()
            proxyConnectionTime_style = tds[5].find("div").find("div")['style']
            proxyConnectionTime = re.findall(r'width\:(.*?)\%;', proxyConnectionTime_style)[0].strip()
            proxy_type = tds[6].renderContents()  # renamed: `type` shadowed the builtin
            # The site obfuscates IPs with CSS classes styled display:none;
            # collect those classes and strip the matching elements.
            classesToStrip = []
            style = ips.find("style")
            for styleLine in style.renderContents().split('\n'):
                if styleLine:
                    if "display:none" in styleLine or "display: none" in styleLine:
                        classesToStrip.append(re.findall('.(.*?){', styleLine))
            [s.extract() for s in ips.select('[style~="display:none"]')]
            for classToStrip in classesToStrip:
                [s.extract() for s in ips.select('[class~="' + classToStrip[0] + '"]')]
            [s.extract() for s in ips.select('style')]
            ip_raw = ips.text
            # Bug fix: the original used `is not 'socks4/5'`, an identity
            # comparison against a string literal, which is not a reliable
            # equality test; use `!=`.
            if proxy_type != 'socks4/5':
                ip = str(ip_raw.replace(" ", ""))
                port = port.replace("\n", "")
                country = str(proxyCountry)
                speed = proxySpeed
                connectionTime = proxyConnectionTime
                dateTime = datetime.now()
                db.execute("INSERT INTO proxies VALUES (?,?,?,?,?,?,?,'hma','','')",
                           (ip, port, proxy_type, country, speed, connectionTime, dateTime))
                print(ip + " added")

# Commit everything scraped.  NOTE(review): the mangled original placed
# this commit ambiguously inside the loops; committing once at the end
# yields the same final database state.
conn.commit()
#return hma_proxies
| [
"ludalex@gmail.com"
] | ludalex@gmail.com |
feab3ebba8930e7e527605d29f696b086b58d027 | 4c3094a869f59be8836993469b28f088fef9fff1 | /Questions/Q_093_RentalCarLocations.py | 35739a04cd88935d0ee54e3e84963fad486f00b2 | [] | no_license | Bharadwaja92/DataInterviewQuestions | d885d40da4d546a164eee37e7250ddb519fc8954 | 5b002f34c3b1440f4347a098f7ce1db84fc80e7f | refs/heads/master | 2022-11-06T08:57:49.283013 | 2020-06-22T09:10:40 | 2020-06-22T09:10:40 | 269,247,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | """""""""
Suppose you're working for a car rental company, looking to model potential location distribution of their
cars at major airports. The company operates in LA, SF, and San Jose.
Customers regularly pickup a car in one of these 3 cities and drop it off in another.
The company is looking to compute how likely it is that a given car will end up in a given city.
You can model this as a Markov chain (where each time step corresponds to a new customer taking the car).
The transition probabilities of the company's car allocation by city is as follows:
SF | LA | San Jose
0.6 0.1 0.3 | SF
0.2 0.8 0.3 | LA
0.2 0.1 0.4 | San Jose
As shown, the probability a car stays in SF is 0.6, the probability it moves from SF to LA is 0.2,
SF to San Jose is 0.2, etc.
Using the information above, determine the probability a car will start in SF but move to LA right after.
"""
| [
"saibharadwaj.kh@gaiansolutions.com"
] | saibharadwaj.kh@gaiansolutions.com |
a1b04624df6910adad210fe98bb6ade2e31d986b | b772048db1d84de6071dcb3978b6f548d2b42ae4 | /tests/test_ner.py | 25161ef7c203bccec745b1000a646113cac4af98 | [
"BSD-2-Clause"
] | permissive | yutanakamura-tky/MedNER-J | 46ca13d87b6c4977b4042915ff2105ab4dc62d88 | a0c68a32553bbbdb9f5ae5fd41584198951bc14c | refs/heads/master | 2023-08-21T23:05:22.645001 | 2021-08-10T02:34:45 | 2021-08-10T02:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | import unittest
from medner_j import Ner
class TestNer(unittest.TestCase):
    """Regression tests for Ner.predict in XML and dict output formats."""

    @classmethod
    def setUpClass(cls):
        # The pretrained model is expensive to load, so load it once for
        # the whole test class.
        cls.model = Ner.from_pretrained(model_name="BERT", normalizer="dict")
        # Raw input sentences and their expected annotations.  These are
        # exact-match fixtures and must stay byte-identical.
        cls.examples = ['それぞれの関節に関節液貯留は見られなかった', 'その後、左半身麻痺、CTにて右前側頭葉の出血を認める。 ']
        cls.xmls = ['それぞれの関節に<CN value="かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留">関節液貯留</CN>は見られなかった', 'その後、<C value="ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺">左半身麻痺</C>、CTにて右前側頭葉の<C value="しゅっけつ;icd=R58;lv=S/freq=高;出血">出血</C>を認める。 ']
        cls.dicts = [
            [{"span": (8, 13), "type": "CN", "disease":"関節液貯留", "norm":"かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留"}],
            [{"span": (4, 9), "type": "C", "disease": "左半身麻痺", "norm": "ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺"}, {"span": (20, 22), "type": "C", "disease": "出血", "norm": "しゅっけつ;icd=R58;lv=S/freq=高;出血"}]
        ]

    def test_xml(self):
        # XML markup is the default output format.
        results = self.model.predict(self.examples)
        self.assertEqual(results, self.xmls)

    def test_dict(self):
        # Structured (span/type/disease/norm) output format.
        results = self.model.predict(self.examples, output_format="dict")
        self.assertEqual(results, self.dicts)

    @classmethod
    def tearDownClass(cls):
        # Release the model and fixtures explicitly.
        del cls.model
        del cls.examples
        del cls.xmls
        del cls.dicts
| [
"suzzz428@gmail.com"
] | suzzz428@gmail.com |
d1cd60b8ac3a89b9dd0b4a456d9c166b93f4ffe5 | 67c5269fa4720cf728d4c1dd572c09d5e4e7a321 | /convert_mcnp71.py | db687aef0e14ec73a1479e0f9dc3959d89a76938 | [] | no_license | SamPUG/data | cff882327f5fe79ce2c2fca70d217173300c4f85 | 457755083bb8e05e58bbc3765f52bf8c756abb9c | refs/heads/master | 2020-12-19T14:57:12.806099 | 2020-03-06T08:30:47 | 2020-03-06T08:30:47 | 235,767,080 | 0 | 0 | null | 2020-02-25T14:43:04 | 2020-01-23T09:58:38 | Python | UTF-8 | Python | false | false | 4,330 | py | #!/usr/bin/env python3
import argparse
from collections import defaultdict
from pathlib import Path
import sys
import openmc.data
# Make sure Python version is sufficient
assert sys.version_info >= (3, 6), "Python 3.6+ is required"

description = """
Convert ENDF/B-VII.1 ACE data from the MCNP6 distribution into an HDF5 library
that can be used by OpenMC. This assumes that you have a directory containing
subdirectories 'endf71x' and 'ENDF71SaB'. Optionally, if a recent photoatomic
library (e.g., eprdata14) is available, it can also be converted using the
--photon argument.
"""


class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    # Combines "show argument defaults" with "keep the raw description
    # formatting" for --help output.
    pass


parser = argparse.ArgumentParser(
    description=description,
    formatter_class=CustomFormatter
)
parser.add_argument('-d', '--destination', type=Path, default=Path('mcnp_endfb71'),
                    help='Directory to create new library in')
parser.add_argument('--libver', choices=['earliest', 'latest'],
                    default='earliest', help="Output HDF5 versioning. Use "
                    "'earliest' for backwards compatibility or 'latest' for "
                    "performance")
parser.add_argument('-p', '--photon', type=Path,
                    help='Path to photoatomic data library (eprdata12 or later)')
parser.add_argument('mcnpdata', type=Path,
                    help='Directory containing endf71x and ENDF71SaB')
args = parser.parse_args()

# Check arguments to make sure they're valid
assert args.mcnpdata.is_dir(), 'mcnpdata argument must be a directory'
if args.photon is not None:
    assert args.photon.is_file(), 'photon argument must be an existing file'
# Get a list of all ACE files (continuous-energy neutron + thermal S(a,b))
endf71x = list(args.mcnpdata.glob('endf71x/*/*.7??nc'))
endf71sab = list(args.mcnpdata.glob('ENDF71SaB/*.??t'))

# Check for fixed H1 files and remove old ones if present
hydrogen = args.mcnpdata / 'endf71x' / 'H'
if (hydrogen / '1001.720nc').is_file():
    for i in range(10, 17):
        endf71x.remove(hydrogen / f'1001.7{i}nc')

# There's a bug in H-Zr at 1200 K
thermal = args.mcnpdata / 'ENDF71SaB'
endf71sab.remove(thermal / 'h-zr.27t')

# Check for updated TSL files and remove old ones if present.
# Each tuple is (material, suffix of the fixed file, suffixes to drop).
checks = [
    ('sio2', 10, range(20, 37)),
    ('u-o2', 30, range(20, 28)),
    ('zr-h', 30, range(20, 28))
]
for material, good, bad in checks:
    if (thermal / f'{material}.{good}t').is_file():
        for suffix in bad:
            f = thermal / f'{material}.{suffix}t'
            if f.is_file():
                endf71sab.remove(f)

# Group together tables for the same nuclide (one HDF5 file per stem,
# multiple temperatures each)
tables = defaultdict(list)
for p in sorted(endf71x + endf71sab):
    tables[p.stem].append(p)

# Create output directory if it doesn't exist
(args.destination / 'photon').mkdir(parents=True, exist_ok=True)

library = openmc.data.DataLibrary()
for name, paths in sorted(tables.items()):
    # Convert first temperature for the table; thermal S(a,b) tables end
    # in 't', everything else is incident-neutron data.
    p = paths[0]
    print(f'Converting: {p}')
    if p.name.endswith('t'):
        data = openmc.data.ThermalScattering.from_ace(p)
    else:
        data = openmc.data.IncidentNeutron.from_ace(p, 'mcnp')

    # For each higher temperature, add cross sections to the existing table
    for p in paths[1:]:
        print(f'Adding: {p}')
        if p.name.endswith('t'):
            data.add_temperature_from_ace(p)
        else:
            data.add_temperature_from_ace(p, 'mcnp')

    # Export HDF5 file
    h5_file = args.destination / f'{data.name}.h5'
    print(f'Writing {h5_file}...')
    data.export_to_hdf5(h5_file, 'w', libver=args.libver)

    # Register with library
    library.register_file(h5_file)

# Handle photoatomic data (optional --photon library)
if args.photon is not None:
    lib = openmc.data.ace.Library(args.photon)
    for table in lib.tables:
        # Convert first temperature for the table
        print(f'Converting: {table.name}')
        data = openmc.data.IncidentPhoton.from_ace(table)

        # Export HDF5 file
        h5_file = args.destination / 'photon' / f'{data.name}.h5'
        print(f'Writing {h5_file}...')
        data.export_to_hdf5(h5_file, 'w', libver=args.libver)

        # Register with library
        library.register_file(h5_file)

# Write cross_sections.xml
library.export_to_xml(args.destination / 'cross_sections.xml')
| [
"paul.k.romano@gmail.com"
] | paul.k.romano@gmail.com |
3e7f64e403c62cb91b90b2d22413862faa747cfd | 0fc78cbe3dcf339faf5302de82db8e81ae4a952a | /task3_snkrs/spiders/neiman_spider.py | ad904a7a1fcc7eef5ca7216a6f973c731e28d06f | [] | no_license | waseesoft/task3_snkrs | 6e9de5a292d28dbe3a0986167da6d2a6f696a182 | 15e14d01135f0f9208e5a6c8aa6c96b9c866c909 | refs/heads/master | 2021-06-24T06:41:59.354166 | 2021-04-01T21:46:22 | 2021-04-01T21:46:22 | 201,049,018 | 0 | 0 | null | 2019-10-24T13:42:33 | 2019-08-07T12:46:46 | Python | UTF-8 | Python | false | false | 8,301 | py | import json
from scrapy.spiders import CrawlSpider, Rule, Request
from scrapy.linkextractors import LinkExtractor
from task3_snkrs.items import NeimanItem
class NeimanSpider(CrawlSpider):
    """Crawl spider for neimanmarcus.com: follows category/pagination links
    and parses product pages out of the embedded `#state` JSON blob."""
    name = 'neimanmarcus'
    HTTPS = 'https:'
    custom_settings = {
        'USER_AGENT': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36"
    }
    allowed_domains = [
        'neimanmarcus.com',
    ]
    start_urls = [
        'https://www.neimanmarcus.com/en-cn/index.jsp',
    ]
    # Session token sent with the initial request.  These opaque blobs are
    # site-issued values; do not edit them.
    cookies = {
        'tms_data': '{DT-2017.03}a3HwxssoiZzaMm5Pj2Bv6OVfV52gp3oiNhZzH+Q1Et/Dqvj4UZJdUi3o2xJa8'
                    'M7HpxN6YGZIZum7DZENeq2mpKWsMMyjHzZzskVuLSi5glDPBfxqHllZ2ATy6SBwEqoLuEFuFVN'
                    'hhw9H6USiUQk2MevVCpxX43W932GrkdrWBWOZ10w0I54Cujvasl9kPL6NE/N0GxD1AkccaD6JWG'
                    'dOXiXFLSyAYhxfiwLqbrXsVfFrMH6XHHgvBstXkyq9kUG4IChvW47uZiQ+jAxwXSW/Ntm2X9NpzG'
                    'mhOp+i/CGDKbq9ExXV4hL92pOz47MfElVC5s91r6+5gB7jaH62Nnzt8A+kYcGo1PzCSEFeBvbKmXd'
                    '/UQaNS9npeuy296A5gmaaUtWQgp+J9A91MzoIpTo5PZ5CkCwIllUtuyVNcy/XxtjRjozw2k36quitU'
                    'KtOqIE3Y0di38hvqLx5Y9ZS5tqi127/sj1E0AwyB5IGnP2vpuheaKsICNkiPPIWc4FBYlN49JWVRHlm'
                    'o0ApsItKZCQgjHCozMyntDUHvtbH7bIeXeTIcxia6/Zss4sz+jgsQh8t3+ggHCty76ZxrT9Kwrb54rEX'
                    'GkSanU9W5IyiJmrYcCb84IbHXsPw/eJjp7UjP2C0uMV5NDEbxpFJYdZLkGGuHy9dZx5h3XINJorm2r8XN'
                    'iYZtbheJvfkxkpM3pXdsG9RarRp52UEcsPVsJreUHygoLJF8DI6A/P9G5bkAZUWmUdOkpejNE6nWFn/wzW'
                    'tTk6XH2F/FHK8yYGl7vu/Zdrvu3XaUnmOliCgqKZJulwli6EMjFh+oo57Qu8k3q/+NQ7lfO18FeTD8flEte'
                    '4D9CEnqWgTRKmcnqcbvrE8LHY4MRgvFWT6EVUPA/rTo2wx9+qojGkfbwWrNA+L/0ojXjIvddFI+4AoKTsKA'
                    '63gqYmoRcYrbu6OGXSrlzuVvTaxKE+qzGBxEF9Sb96krdEeD0fWUQ==',
    }
    # NOTE(review): `cook` appears unused; `c` (below) is the value re-sent
    # on every product request by add_cookie_in_req.
    cook = {
        'tms_data': '{DT-2017.03}a3HwxssoiZzaMm5Pj2Bv6L13Gv8Ad/WZkJm2zLBiJ22quEI2eCcOen+zdhEJJvHeeOOXi+MO99UjG2/1D+hl4AXI0xqxMcBJcNKRoDmB8W5Ptb0z0I9kIPIlYImXaHDdOdwGiYZVK7VYetLzT9+AlvcXAgQLwm8YRoSydQX9y/iR/GCfWSi0wro7/kwt7J5Bi/FFkjSxBX6XHCkgroK52hUKgW/YC1MZ5sJseydRx1IoiHRiDZy5ztxXq6ZzvseaBT8nS56U9EH3crgXmw7726TvadPC383EPCcEAJZTuPTQi1SjH0Vww5owXy0GVtVTHgQUbpz2HR2jg/liv8BYRgT2uIMscZUHtj+3+LXEgL6h5VNpAM7BXr6dpAo87UmpKZAaZhUufcW4Hj6OhLjcc5Ae8ZOY/g3Ei3DxvzB6aoaOI4FwvVc1FhRMcX3UGkXfsYvcNKgQnb6ELb7f4yJm9mSzR3oVmqDMXFPe1HnsR95VAvDwEZlbY18XLrU5bGYP4J/0xyiH9OE+PfysOstZsnMZxsPhNo1VZiWNo5S8enqzFf7dhClsTL5qAjscfQTNv1JIrXORKfF6DcBf6i/91Q8zGK1KAKTv37mIV3btzFSeNu7fPUOTtBM/TFgJzzGLe7AYYInEvPqfxx0yQ+d5xzRk7NNsgoykAQK83uIbFnVuJmCggiAv7tabslD7R/ZCKyfdbvFE1siLr8Lhn00KWEBdt9hvDuoEV1DiP+oaNg7B5sIaxERI55GR2VgK/9C0UqiFyyO95itCF45/y/ruyNakse0Ttc80Q/BXLhImKOOi5HrGsbxf+PEuy5H84QG5/6EhXwB4UpWJQc82EysqOlMBhT/Jya6TmzWB9Ztp6jH4a2Wox15pF6VYlVHKTbLEIjmMZm1x+b3GYJaY0NPLNV8jeFLpB3Tbs9RoUsHPbuN1gR29OXRa7GfW2oCg6AHm0/shfgNgeMf+9AsLDt7Mhg=='
    }
    c = '{DT-2017.03}a3HwxssoiZzaMm5Pj2Bv6L13Gv8Ad/WZkJm2zLBiJ22quEI2eCcOen+zdhEJJvHeeOOXi+MO99UjG2/1D+hl4AXI0xqxMcBJcNKRoDmB8W5Ptb0z0I9kIPIlYImXaHDdOdwGiYZVK7VYetLzT9+AlvcXAgQLwm8YRoSydQX9y/iR/GCfWSi0wro7/kwt7J5Bi/FFkjSxBX6XHCkgroK52hUKgW/YC1MZ5sJseydRx1IoiHRiDZy5ztxXq6ZzvseaBT8nS56U9EH3crgXmw7726TvadPC383EPCcEAJZTuPTQi1SjH0Vww5owXy0GVtVTHgQUbpz2HR2jg/liv8BYRgT2uIMscZUHtj+3+LXEgL6h5VNpAM7BXr6dpAo87UmpKZAaZhUufcW4Hj6OhLjcc5Ae8ZOY/g3Ei3DxvzB6aoaOI4FwvVc1FhRMcX3UGkXfsYvcNKgQnb6ELb7f4yJm9mSzR3oVmqDMXFPe1HnsR95VAvDwEZlbY18XLrU5bGYP4J/0xyiH9OE+PfysOstZsnMZxsPhNo1VZiWNo5S8enqzFf7dhClsTL5qAjscfQTNv1JIrXORKfF6DcBf6i/91Q8zGK1KAKTv37mIV3btzFSeNu7fPUOTtBM/TFgJzzGLe7AYYInEvPqfxx0yQ+d5xzRk7NNsgoykAQK83uIbFnVuJmCggiAv7tabslD7R/ZCKyfdbvFE1siLr8Lhn00KWEBdt9hvDuoEV1DiP+oaNg7B5sIaxERI55GR2VgK/9C0UqiFyyO95itCF45/y/ruyNakse0Ttc80Q/BXLhImKOOi5HrGsbxf+PEuy5H84QG5/6EhXwB4UpWJQc82EysqOlMBhT/Jya6TmzWB9Ztp6jH4a2Wox15pF6VYlVHKTbLEIjmMZm1x+b3GYJaY0NPLNV8jeFLpB3Tbs9RoUsHPbuN1gR29OXRa7GfW2oCg6AHm0/shfgNgeMf+9AsLDt7Mhg=='

    def start_requests(self):
        """Issue the initial requests with the session cookie attached."""
        for url in self.start_urls:
            yield Request(url, callback=self.parse, cookies=self.cookies)

    # CSS selectors for listing/pagination links to follow.
    listings_css = [
        '.arrow-button--right',
        '.menu-wrapper a',
    ]

    rules = [
        Rule(LinkExtractor(restrict_css=listings_css)),
        Rule(LinkExtractor(restrict_css='.product-thumbnail__link'), callback='parse_product',
             process_request='add_cookie_in_req', follow=True),
    ]

    def parse_product(self, response):
        """Continue the crawl from a product page and emit its items."""
        yield from super().parse(response)
        yield from self.get_products(response)

    def add_cookie_in_req(self, request):
        # Re-attach the session token on every followed product request.
        request.cookies['tms_data'] = self.c
        return request

    def get_raw_product(self, response):
        """Return the page's embedded `#state` JSON blob as a dict."""
        return json.loads(response.css('#state::text').get())

    def get_products(self, response):
        """Build one NeimanItem per product id found in the page state.

        Single products live under productCatalog.product; grouped/adorned
        products under productCatalog.group.childProducts[id].
        """
        raw = self.get_raw_product(response)
        p_info = raw['utag']['product']['productInfo']
        products, prices, = [], []
        p_ids = p_info['product_id']
        brands = p_info['product_brand']
        old_price_flags = p_info['product_pricing_adornment_flag']
        for i, values in enumerate(zip(p_ids, p_info['product_name'], brands,
                                       p_info['product_price'], old_price_flags)):
            product_id, name, brand, price, old_price_flag = values
            url, description, currency = '', '', ''
            images, old_prices = [], []
            item = NeimanItem()
            item['product_id'] = product_id
            item['name'] = name
            # brand may be a shared scalar or a per-product list.
            item['brand'] = brand if isinstance(brands, list) else brands
            if old_price_flag == 'true' or (old_price_flag == 'false' and len(p_ids) > 1):
                # Grouped product (or one with price adornments).
                p = raw['productCatalog']['group']['childProducts'][product_id]
                url = p['linkedData']['url']
                currency = p['price']['currencyCode']
                description = p['linkedData']['description']
                images += self.get_media_images(p) + self.get_images(p)
                old_prices = [e['price'] for e in p['price'].get('adornments', []) if e['price'] != price]
            elif old_price_flag == 'false' and len(p_ids) == 1:
                # Plain single product.
                p = raw['productCatalog']['product']
                currency = p['price']['currencyCode']
                raw_data = p['linkedData']
                description = raw_data['description']
                url = raw_data['url']
                images += self.get_media_images(p) + self.get_images(p)
            item['url'] = url
            item['image_urls'] = list(set(images))
            item['description'] = description
            # item['headers'] = response.headers
            products.append(item)
            prices.append(
                {
                    'price': price,
                    'old_prices': old_prices,
                    'currency': currency,
                },
            )
        self.get_skus(raw['utag']['product']['productAnalytics'], products, prices)
        return products

    def get_images(self, raw_product):
        """Collect image URLs from every color option of the product."""
        urls = []
        raw_urls = raw_product['options']['productOptions']
        for raw in raw_urls:
            if raw.get('label') != 'color':
                continue
            for value in raw.get('values'):
                urls += self.get_media_images(value)
        return urls

    def get_media_images(self, raw_media):
        """Return scheme-qualified medium-size URLs (main + alternates)."""
        urls = []
        media = raw_media.get('media', {})
        alternates = media.get('alternate', {})
        url = media.get('main', {}).get('medium', {}).get('url')
        if url:
            urls.append(self.HTTPS + url)
        for e in alternates:
            url = alternates[e].get('medium', {}).get('url')
            if url:
                urls.append(self.HTTPS + url)
        return urls

    def get_skus(self, product_analytics, products, prices):
        """Attach a per-sku dict to each product item.

        NOTE(review): `sku = prices[i]` aliases the same dict for every sku
        of a product, so all sku entries share (and overwrite) one
        availability/color/size — looks unintended; confirm before relying
        on per-sku values.
        """
        for i, e in enumerate(product_analytics['details']['products']):
            skus = {}
            for s in e['skus']:
                sku = prices[i]
                sku['availability'] = s['inStock']
                sku['color'] = s['color']
                sku['size'] = s.get('size', 'one-size')
                skus[s['id']] = sku
            products[i].update(
                {
                    'skus': skus,
                },
            )
| [
"mrarslan101@gmail.com"
] | mrarslan101@gmail.com |
a7d0d5f5718864db29eb82f729063d29482ad9a9 | 812521a7dc172551572b36ee1ecca4bd76e6f8b2 | /tests/test_flask_reverse_proxy_middleware_path_prefix.py | d9a7c1e9df33f5f4002294c8a974b22147f4b0d9 | [
"OGL-UK-3.0"
] | permissive | rachmadaniHaryono/flask-reverse-proxy-fix | 8b1128646431706218fe52007c3282ab6996221a | a6a9b6e91d9e2152a1f34109249905ab609377a5 | refs/heads/master | 2023-01-23T08:18:00.977832 | 2023-01-21T02:33:38 | 2023-01-21T02:33:38 | 239,060,743 | 1 | 3 | null | 2020-02-08T03:04:11 | 2020-02-08T03:04:10 | null | UTF-8 | Python | false | false | 1,797 | py | import unittest
from http import HTTPStatus
from flask_reverse_proxy_fix.middleware import ReverseProxyPrefixFix
from app import create_app_with_middleware, create_app_without_middleware
class FlaskReverseProxyMiddlewarePathPrefixTestCase(unittest.TestCase):
    """Verify that ReverseProxyPrefixFix rewrites self links when installed
    and that links are untouched without the middleware."""

    def setUp(self):
        pass

    def tearDown(self):
        self.app_context.pop()

    def _push_app(self, app):
        # Shared fixture wiring: activate the app context and test client.
        self.app = app
        self.app_context = app.app_context()
        self.app_context.push()
        self.client = app.test_client()

    def _fetch_sample(self):
        response = self.client.get(
            '/sample',
            base_url='http://localhost:9000'
        )
        return response, response.get_json()

    def _assert_self_link(self, response, payload, expected_url):
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertIn('links', payload.keys())
        self.assertIn('self', payload['links'].keys())
        self.assertEqual(expected_url, payload['links']['self'])

    def test_with_prefix(self):
        self._push_app(create_app_with_middleware())
        self.app.config['REVERSE_PROXY_PATH'] = '/foo'
        ReverseProxyPrefixFix(self.app)
        response, payload = self._fetch_sample()
        self._assert_self_link(response, payload, 'http://localhost:9000/test/sample')

    def test_without_prefix(self):
        self._push_app(create_app_without_middleware())
        response, payload = self._fetch_sample()
        self._assert_self_link(response, payload, 'http://localhost:9000/sample')
| [
"felix@felixfennell.co.uk"
] | felix@felixfennell.co.uk |
16ac483d71ea83b3969b16eb429e6f84f5674a9f | 34d5a8760dc3cfe71aab7a6f70d61eb319308b1e | /student/affaircourse/migrations/0005_studentcourse_cournum.py | 35a061c3294f36d3577218c73b734652921aa47b | [] | no_license | Lmagicport/Student-affairs | a4c5a9e0d92967b5ebbf926fee98ea786692cc27 | 007f35f0d30e16635e9a386fb95351895f5b092a | refs/heads/main | 2023-02-01T06:25:10.583127 | 2020-12-15T08:33:34 | 2020-12-15T08:33:34 | 307,300,571 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Generated by Django 3.1.2 on 2020-11-19 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add StudentCourse.CourNum."""

    # Must run after the ispass migration of the same app.
    dependencies = [
        ('affaircourse', '0004_studentcourse_ispass'),
    ]

    operations = [
        migrations.AddField(
            model_name='studentcourse',
            name='CourNum',
            # preserve_default=False records that 0 was a one-off value
            # for backfilling existing rows, not a lasting model default.
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
| [
"2034913866@qq.com"
] | 2034913866@qq.com |
ba45777ebf476d635254faf1c942e070055b6fc5 | c463e77c3d76e6b4810e202541d3f3f7f91bcf60 | /build/PCL-ROS-cluster-Segmentation/cmake/sensor_stick-genmsg-context.py | 31a011a3d2b1087f74bbb8bde784bccea1893805 | [] | no_license | MGRNascimento/Tese | 18087ee59dfee96ee000c9f16c646d1750174285 | bf78d417849a74d9c5a520d40dcbebeadf084706 | refs/heads/master | 2020-06-23T13:57:01.699657 | 2019-10-23T21:47:19 | 2019-10-23T21:47:19 | 198,638,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# Autogenerated message/service context consumed by genmsg's CMake macros;
# values are semicolon-separated lists as CMake expects.
messages_str = "/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/DetectedObjectsArray.msg;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/DetectedObject.msg;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg/SegmentedClustersArray.msg"
services_str = "/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/srv/GetNormals.srv"
pkg_name = "sensor_stick"
dependencies_str = "std_msgs;sensor_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "sensor_stick;/home/miguel/catkin_ws/src/PCL-ROS-cluster-Segmentation/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'  # evaluates to False: no static sources
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"relvas.cvg@gmail.com"
] | relvas.cvg@gmail.com |
08455de6972b66d2d49d9e1f4df28605236f0491 | 73c0d0b755182ca5ae182637677aefb3f2f572b6 | /scoring_program/libscores.py | 96829266b92e2701193ec70504993fc5e5b1afea | [] | no_license | Kihansi95/ChallengePredictSales | 1c52b253f86d55188b60119713b7604bec970e75 | 6e3f602b41c736c51115cc74946c6b6a1cf8ea0b | refs/heads/master | 2020-05-01T10:31:13.318397 | 2019-05-05T22:32:55 | 2019-05-05T22:32:55 | 177,422,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,028 | py | # Score library for NUMPY arrays
# ChaLearn AutoML challenge
# For regression:
# solution and prediction are vectors of numerical values of the same dimension
# For classification:
# solution = array(p,n) of 0,1 truth values, samples in lines, classes in columns
# prediction = array(p,n) of numerical scores between 0 and 1 (analogous to probabilities)
# Isabelle Guyon and Arthur Pesah, ChaLearn, August-November 2014
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
import os
from sys import stderr
from sys import version
import numpy as np
import scipy as sp
from sklearn import metrics
from sklearn.preprocessing import *
# Shorthand for writing progress/error text to stderr.
swrite = stderr.write
from os import getcwd as pwd
# pip >= 10 moved get_installed_distributions into pip._internal.
try:
    from pip._internal.utils.misc import get_installed_distributions
except ImportError: # pip<10
    from pip import get_installed_distributions
from glob import glob
import platform
import psutil

# Platform-specific path separator (Windows vs POSIX).
# NOTE(review): os.sep already provides this; kept for compatibility.
if (os.name == "nt"):
    filesep = '\\'
else:
    filesep = '/'
# ========= Useful functions ==============
def read_array(filename):
    '''Load a whitespace-delimited numeric file as a 2-d float array.

    genfromtxt returns a 1-d vector for single-column files, so those are
    promoted to column vectors to keep callers working with 2-d data.
    '''
    data = np.genfromtxt(filename, dtype=float)
    return data.reshape(-1, 1) if data.ndim == 1 else data
def sanitize_array(array):
    '''Replace NaN and Inf entries in-place (there should not be any!).

    +Inf becomes the largest finite value, -Inf the smallest finite value,
    and NaN the midpoint of the finite range.  Returns the modified array.
    '''
    a = np.ravel(array)
    # Bug fix: the previous np.max/np.min included the infinities
    # themselves, so an array containing +Inf produced maxi == Inf and the
    # replacement was a no-op (and mid became NaN/Inf).  Restrict the
    # statistics to finite entries, as the original (commented-out)
    # nanmax/nanmin version intended.
    finite = a[np.isfinite(a)]
    if finite.size == 0:
        # No finite anchor available; fall back to zeroing everything bad.
        array[~np.isfinite(array)] = 0.0
        return array
    maxi = float(np.max(finite))
    mini = float(np.min(finite))
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
def normalize_array(solution, prediction):
    '''Use min and max of solution as scaling factors to normalize
    prediction, then threshold it to [0, 1]; binarize solution to {0, 1}.

    This lets classification scores be applied uniformly.  Properly
    formatted classification inputs/outputs pass through unchanged.
    '''
    flat = np.ravel(solution)  # work on a 1-d view for the statistics
    hi = np.max(flat)
    lo = np.min(flat)
    if hi == lo:
        print('Warning, cannot normalize')
        return [solution, prediction]
    span = hi - lo
    midpoint = (hi + lo) / 2.
    # Binarize a copy of the solution around its midpoint.
    binarized = np.copy(solution)
    binarized[solution >= midpoint] = 1
    binarized[solution < midpoint] = 0
    # Rescale a copy of the predictions into [0, 1] and clip overshoot
    # (only takes effect when the solution was not already in {0, 1}).
    scaled = (np.copy(prediction) - float(lo)) / float(span)
    np.clip(scaled, 0, 1, out=scaled)
    # Make probabilities smoother
    # scaled = np.power(scaled, (1./10))
    return [binarized, scaled]
def binarize_predictions(array, task='binary.classification'):
    ''' Turn predictions into decisions {0,1} by selecting the class with largest
    score for multiclass problems and thresholding at 0.5 for other cases.'''
    decisions = np.zeros(array.shape)
    if task == 'multiclass.classification' and array.shape[1] > 1:
        # One-hot encode the per-row argmax (first maximum wins on ties,
        # matching np.argmax semantics).
        winners = np.argmax(array, axis=1)
        decisions[np.arange(array.shape[0]), winners] = 1
    else:
        # Binary / multilabel: simple 0.5 threshold, column by column.
        decisions[array >= 0.5] = 1
    return decisions
def acc_stat(solution, prediction):
    ''' Return accuracy statistics TN, FP, TP, FN.
    Assumes that solution and prediction are binary 0/1 vectors.

    Uses the builtin sum on purpose: for 2-d inputs it sums over rows,
    yielding per-column counts; float inputs yield float counts.
    '''
    neg_sol = 1 - solution
    neg_pred = 1 - prediction
    TN = sum(np.multiply(neg_sol, neg_pred))
    FN = sum(np.multiply(solution, neg_pred))
    TP = sum(np.multiply(solution, prediction))
    FP = sum(np.multiply(neg_sol, prediction))
    return (TN, FP, TP, FN)
def tiedrank(a):
    ''' Return the ranks (with base 1) of a list resolving ties by averaging.
    This works for numpy arrays.

    Equivalent in spirit to scipy.stats.rankdata(a, method='average').
    '''
    m = len(a)
    # Sort a in ascending order (sa=sorted vals, i=indices)
    i = a.argsort()
    sa = a[i]
    # Find unique values
    uval = np.unique(a)
    # Test whether there are ties
    R = np.arange(m, dtype=float) + 1  # Ranks with base 1
    if len(uval) != m:
        # Average the ranks for the ties
        oldval = sa[0]
        newval = sa[0]
        k0 = 0  # start index of the current run of equal values
        for k in range(1, m):
            newval = sa[k]
            if newval == oldval:
                # moving average: fold rank R[k] into the running mean of the
                # tied run [k0, k] without accumulating rounding error
                R[k0:k + 1] = R[k - 1] * (k - k0) / (k - k0 + 1) + R[k] / (k - k0 + 1)
            else:
                k0 = k;
                oldval = newval
    # Invert the index: S[j] is the rank of original element a[j]
    S = np.empty(m)
    S[i] = R
    return S
def mvmean(R, axis=0):
    ''' Moving average to avoid rounding errors. A bit slow, but...
    Computes the mean along the given axis, except if this is a vector, in which case the mean is returned.
    Does NOT flatten.

    Fix: Python 3 compatibility. `reduce` is a builtin only in Python 2, and
    `np.array(map(...))` wraps the iterator instead of materializing it under
    Python 3; functools.reduce and list comprehensions behave identically on
    both interpreters.
    '''
    from functools import reduce  # builtin in Py2, functools on both
    if len(R.shape) == 0: return R
    # Incremental (running) mean of a 1-d sequence.
    average = lambda x: reduce(
        lambda i, j: (0, (j[0] / (j[0] + 1.)) * i[1] + (1. / (j[0] + 1)) * j[1]),
        enumerate(x))[1]
    R = np.array(R)
    if len(R.shape) == 1: return average(R)
    if axis == 1:
        # Mean of each row.
        return np.array([average(row) for row in R])
    else:
        # Mean of each column.
        return np.array([average(col) for col in R.transpose()])
# ======= Default metrics ========
def bac_binary(solution, prediction):
    ''' Balanced accuracy, binary-classification flavour of bac_metric. '''
    return bac_metric(solution, prediction, task='binary.classification')
def bac_multiclass(solution, prediction):
    ''' Balanced accuracy, multiclass flavour of bac_metric. '''
    return bac_metric(solution, prediction, task='multiclass.classification')
def bac_multilabel(solution, prediction):
    ''' Balanced accuracy, multilabel flavour of bac_metric. '''
    return bac_metric(solution, prediction, task='multilabel.classification')
def auc_binary(solution, prediction):
    ''' Area under ROC curve, binary flavour of auc_metric. '''
    return auc_metric(solution, prediction, task='binary.classification')
def auc_multilabel(solution, prediction):
    ''' Area under ROC curve, multilabel flavour of auc_metric. '''
    return auc_metric(solution, prediction, task='multilabel.classification')
def pac_binary(solution, prediction):
    ''' Probabilistic accuracy, binary flavour of pac_metric. '''
    return pac_metric(solution, prediction, task='binary.classification')
def pac_multiclass(solution, prediction):
    ''' Probabilistic accuracy, multiclass flavour of pac_metric. '''
    return pac_metric(solution, prediction, task='multiclass.classification')
def pac_multilabel(solution, prediction):
    ''' Probabilistic accuracy, multilabel flavour of pac_metric. '''
    return pac_metric(solution, prediction, task='multilabel.classification')
def f1_binary(solution, prediction):
    ''' F1 score, binary flavour of f1_metric. '''
    return f1_metric(solution, prediction, task='binary.classification')
def f1_multilabel(solution, prediction):
    ''' F1 score, multilabel flavour of f1_metric. '''
    return f1_metric(solution, prediction, task='multilabel.classification')
def abs_regression(solution, prediction):
    ''' Normalized mean absolute error (regression), see a_metric. '''
    return a_metric(solution, prediction, task='regression')
def r2_regression(solution, prediction):
    ''' R-squared regression score, see r2_metric. '''
    return r2_metric(solution, prediction, task='regression')
# ======= Pre-made metrics ========
### REGRESSION METRICS (work on raw solution and prediction)
# These can be computed on all solutions and predictions (classification included)
def r2_metric(solution, prediction, task='regression'):
    ''' 1 - Mean squared error divided by variance '''
    residual = solution - prediction
    centered = solution - mvmean(solution)
    # Column-wise 1 - MSE/Var, then averaged over columns.
    ratio = mvmean(residual ** 2) / mvmean(centered ** 2)
    return mvmean(1 - ratio)
def a_metric(solution, prediction, task='regression'):
    ''' 1 - Mean absolute error divided by mean absolute deviation '''
    abs_err = mvmean(np.abs(solution - prediction))   # mean absolute error
    abs_dev = mvmean(np.abs(solution - mvmean(solution)))  # mean absolute deviation
    return mvmean(1 - abs_err / abs_dev)
### END REGRESSION METRICS
### CLASSIFICATION METRICS (work on solutions in {0, 1} and predictions in [0, 1])
# These can be computed for regression scores only after running normalize_array
def bac_metric(solution, prediction, task='binary.classification'):
    ''' Compute the normalized balanced accuracy. The binarization and
    the normalization differ for the multi-label and multi-class case.

    solution: binary 0/1 matrix (samples x labels)
    prediction: scores in [0, 1], same shape
    returns: scalar in (-inf, 1]; 0 for random guessing, 1 for perfect.
    '''
    label_num = solution.shape[1]
    score = np.zeros(label_num)
    # Threshold (binary/multilabel) or argmax (multiclass) the predictions.
    bin_prediction = binarize_predictions(prediction, task)
    # Per-column confusion counts.
    [tn, fp, tp, fn] = acc_stat(solution, bin_prediction)
    # Bounding to avoid division by 0
    eps = 1e-15
    tp = sp.maximum(eps, tp)
    pos_num = sp.maximum(eps, tp + fn)
    tpr = tp / pos_num  # true positive rate (sensitivity)
    if (task != 'multiclass.classification') or (label_num == 1):
        tn = sp.maximum(eps, tn)
        neg_num = sp.maximum(eps, tn + fp)
        tnr = tn / neg_num  # true negative rate (specificity)
        bac = 0.5 * (tpr + tnr)
        base_bac = 0.5  # random predictions for binary case
    else:
        # Multiclass: balanced accuracy reduces to the mean per-class recall.
        bac = tpr
        base_bac = 1. / label_num  # random predictions for multiclass case
    bac = mvmean(bac)  # average over all classes
    # Normalize: 0 for random, 1 for perfect
    score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac))
    return score
def pac_metric(solution, prediction, task='binary.classification'):
    ''' Probabilistic Accuracy based on log_loss metric.
    We assume the solution is in {0, 1} and prediction in [0, 1].
    Otherwise, run normalize_array.

    Returns a scalar normalized so that predicting the class prior scores 0
    and a perfect prediction scores 1.
    '''
    debug_flag = False
    [sample_num, label_num] = solution.shape
    if label_num == 1: task = 'binary.classification'
    eps = 1e-15
    the_log_loss = log_loss(solution, prediction, task)
    # Compute the base log loss (using the prior probabilities)
    pos_num = 1. * sum(solution)  # builtin sum -> per-column counts; float conversion!
    frac_pos = pos_num / sample_num  # prior proba of positive class
    the_base_log_loss = prior_log_loss(frac_pos, task)
    # Alternative computation of the same thing (slower)
    # Should always return the same thing except in the multi-label case
    # For which the analytic solution makes more sense
    if debug_flag:
        base_prediction = np.empty(prediction.shape)
        for k in range(sample_num): base_prediction[k, :] = frac_pos
        base_log_loss = log_loss(solution, base_prediction, task)
        diff = np.array(abs(the_base_log_loss - base_log_loss))
        if len(diff.shape) > 0: diff = max(diff)
        if (diff) > 1e-10:
            print('Arrggh {} != {}'.format(the_base_log_loss, base_log_loss))
    # Exponentiate to turn into an accuracy-like score.
    # In the multi-label case, we need to average AFTER taking the exp
    # because it is an NL operation
    pac = mvmean(np.exp(-the_log_loss))
    base_pac = mvmean(np.exp(-the_base_log_loss))
    # Normalize: 0 for random, 1 for perfect
    score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
    return score
def f1_metric(solution, prediction, task='binary.classification'):
    ''' Compute the normalized f1 measure. The binarization differs
    for the multi-label and multi-class case.
    A non-weighted average over classes is taken.
    The score is normalized.'''
    label_num = solution.shape[1]
    score = np.zeros(label_num)
    bin_prediction = binarize_predictions(prediction, task)
    # Per-column confusion counts on the binarized predictions.
    [tn, fp, tp, fn] = acc_stat(solution, bin_prediction)
    # Bounding to avoid division by 0
    eps = 1e-15
    true_pos_num = sp.maximum(eps, tp + fn)
    found_pos_num = sp.maximum(eps, tp + fp)
    tp = sp.maximum(eps, tp)
    tpr = tp / true_pos_num  # true positive rate (recall)
    ppv = tp / found_pos_num  # positive predictive value (precision)
    arithmetic_mean = 0.5 * sp.maximum(eps, tpr + ppv)
    # Harmonic mean: f1 = 2*tpr*ppv/(tpr+ppv), written via the arithmetic mean
    f1 = tpr * ppv / arithmetic_mean
    # Average over all classes
    f1 = mvmean(f1)
    # Normalize: 0 for random, 1 for perfect
    if (task != 'multiclass.classification') or (label_num == 1):
        # How to choose the "base_f1"?
        # For the binary/multilabel classification case, one may want to predict all 1.
        # In that case tpr = 1 and ppv = frac_pos. f1 = 2 * frac_pos / (1+frac_pos)
        # frac_pos = mvmean(solution.ravel())
        # base_f1 = 2 * frac_pos / (1+frac_pos)
        # or predict random values with probability 0.5, in which case
        # base_f1 = 0.5
        # the first solution is better only if frac_pos > 1/3.
        # The solution in which we predict according to the class prior frac_pos gives
        # f1 = tpr = ppv = frac_pos, which is worse than 0.5 if frac_pos<0.5
        # So, because the f1 score is used if frac_pos is small (typically <0.1)
        # the best is to assume that base_f1=0.5
        base_f1 = 0.5
        # For the multiclass case, this is not possible (though it does not make much sense to
        # use f1 for multiclass problems), so the best would be to assign values at random to get
        # tpr=ppv=frac_pos, where frac_pos=1/label_num
    else:
        base_f1 = 1. / label_num
    score = (f1 - base_f1) / sp.maximum(eps, (1 - base_f1))
    return score
def auc_metric(solution, prediction, task='binary.classification'):
    ''' Normalized Area under ROC curve (AUC).
    Return Gini index = 2*AUC-1 for binary classification problems.
    Should work for a vector of binary 0/1 (or -1/1)"solution" and any discriminant values
    for the predictions. If solution and prediction are not vectors, the AUC
    of the columns of the matrices are computed and averaged (with no weight).
    The same for all classification problems (in fact it treats well only the
    binary and multilabel classification problems).'''
    # auc = metrics.roc_auc_score(solution, prediction, average=None)
    # There is a bug in metrics.roc_auc_score: auc([1,0,0],[1e-10,0,0]) incorrect
    label_num = solution.shape[1]
    auc = np.empty(label_num)
    for k in range(label_num):
        # Rank-based AUC (Mann-Whitney U statistic), robust to ties.
        r_ = tiedrank(prediction[:, k])
        s_ = solution[:, k]
        if sum(s_) == 0: print('WARNING: no positive class example in class {}'.format(k + 1))
        npos = sum(s_ == 1)
        nneg = sum(s_ < 1)
        auc[k] = (sum(r_[s_ == 1]) - npos * (npos + 1) / 2) / (nneg * npos)
        # print('AUC[%d]=' % k + '%5.2f' % auc[k])
    # Gini index: rescale from [0, 1] AUC to [-1, 1].
    return 2 * mvmean(auc) - 1
### END CLASSIFICATION METRICS
# ======= Specialized scores ========
# We run all of them for all tasks even though they don't make sense for some tasks
def nbac_binary_score(solution, prediction):
    ''' Normalized balanced accuracy for binary and multilabel classification '''
    return bac_metric(solution, prediction, task='binary.classification')
def nbac_multiclass_score(solution, prediction):
    ''' Normalized balanced accuracy for multiclass classification '''
    return bac_metric(solution, prediction, task='multiclass.classification')
def npac_binary_score(solution, prediction):
    ''' Normalized probabilistic accuracy (PAC) for binary and multilabel classification '''
    return pac_metric(solution, prediction, task='binary.classification')
def npac_multiclass_score(solution, prediction):
    ''' Normalized probabilistic accuracy (PAC) for multiclass classification '''
    return pac_metric(solution, prediction, task='multiclass.classification')
def f1_binary_score(solution, prediction):
    ''' Normalized F1 score for binary and multilabel classification '''
    return f1_metric(solution, prediction, task='binary.classification')
def f1_multiclass_score(solution, prediction):
    ''' Normalized F1 score for multiclass classification '''
    return f1_metric(solution, prediction, task='multiclass.classification')
def log_loss(solution, prediction, task='binary.classification'):
    ''' Log loss for binary and multiclass.

    Returns a per-column vector of losses for binary/multilabel tasks and a
    single summed scalar for the multiclass task, so callers can normalize
    each column separately.
    '''
    [sample_num, label_num] = solution.shape
    eps = 1e-15
    pred = np.copy(prediction)  # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num > 1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k, :] /= sp.maximum(norma[k], eps)
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case
    # Bounding of predictions to avoid log(0),1/0,...
    pred = sp.minimum(1 - eps, sp.maximum(eps, pred))
    # Compute the log loss
    pos_class_log_loss = - mvmean(sol * np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num == 1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = - mvmean((1 - sol) * np.log(1 - pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss)
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing to do is AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on
    else:
        # For the multiclass case the probabilities in one line add up one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss)
        # print('multiclass {}'.format(log_loss))
    return log_loss
def prior_log_loss(frac_pos, task='binary.classification'):
    ''' Baseline log loss obtained by always predicting the class prior.
    For multiple classes or labels, return the values for each column. '''
    eps = 1e-15
    frac_pos_ = sp.maximum(eps, frac_pos)
    if (task != 'multiclass.classification'):  # binary case
        frac_neg = 1 - frac_pos
        frac_neg_ = sp.maximum(eps, frac_neg)
        # Entropy of the prior: -p*log(p) - (1-p)*log(1-p), per column.
        pos_class_log_loss_ = - frac_pos * np.log(frac_pos_)
        neg_class_log_loss_ = - frac_neg * np.log(frac_neg_)
        base_log_loss = pos_class_log_loss_ + neg_class_log_loss_
        # base_log_loss = mvmean(base_log_loss)
        # print('binary {}'.format(base_log_loss))
        # In the multilabel case, the right thing to do is AVERAGE, not sum.
        # We return all the scores so we can normalize correctly later on
    else:  # multiclass case
        fp = frac_pos_ / sum(frac_pos_)  # Need to renormalize the lines in multiclass case
        # Only ONE label is 1 in the multiclass case active for each line
        pos_class_log_loss_ = - frac_pos * np.log(fp)
        base_log_loss = np.sum(pos_class_log_loss_)
    return base_log_loss
# sklearn implementations for comparison
def log_loss_(solution, prediction):
    ''' sklearn log loss, kept for cross-checking the local implementation. '''
    return metrics.log_loss(solution, prediction)
def r2_score_(solution, prediction):
    ''' sklearn R^2 score, kept for cross-checking the local implementation. '''
    return metrics.r2_score(solution, prediction)
def a_score_(solution, prediction):
    ''' sklearn-based normalized absolute error (1 - MAE/MAD). '''
    mad = float(mvmean(abs(solution - mvmean(solution))))
    return 1 - metrics.mean_absolute_error(solution, prediction) / mad
def auc_score_(solution, prediction):
    ''' sklearn ROC AUC averaged over columns (known tie bug, see auc_metric). '''
    auc = metrics.roc_auc_score(solution, prediction, average=None)
    return mvmean(auc)
### SOME I/O functions
def ls(filename):
    ''' Return the alphabetically sorted list of paths matching a glob pattern. '''
    matches = glob(filename)
    matches.sort()
    return matches
def write_list(lst):
    ''' Write each item of lst on its own line via swrite.
    NOTE(review): swrite is defined elsewhere in this file -- presumably a
    stderr/stdout writer; confirm at its definition site. '''
    for item in lst:
        swrite(item + "\n")
def mkdir(d):
    ''' Create directory d (including parents) if it does not already exist.

    Fix: the previous exists()-then-makedirs() pair had a TOCTOU race -- a
    concurrent process creating d between the two calls made makedirs raise.
    Attempting the creation and ignoring the already-exists error is safe.
    '''
    try:
        os.makedirs(d)
    except OSError:
        # Already exists (possibly created concurrently): keep the original
        # best-effort behaviour. Re-raise genuine failures (e.g. permission).
        if not os.path.isdir(d):
            raise
def get_info(filename):
    ''' Get all information {attribute = value} pairs from the public.info file.
    Values that look like integers are converted to int. '''
    info = {}
    with open(filename, "r") as info_file:
        for line in info_file:
            # Each line has the form: key = value (possibly quoted).
            key, value = line.strip("'").split(" = ")
            cleaned = value.rstrip().strip("'").strip(' ')
            # if we have a number, we want it to be an integer
            info[key] = int(cleaned) if cleaned.isdigit() else cleaned
    return info
def show_io(input_dir, output_dir):
    ''' Show directory structure and inputs and outputs to scoring program.
    Purely diagnostic: dumps listings and metadata via swrite (defined
    elsewhere in this file). '''
    swrite('\n=== DIRECTORIES ===\n\n')
    # Show this directory
    swrite("-- Current directory " + pwd() + ":\n")
    write_list(ls('.'))
    write_list(ls('./*'))
    write_list(ls('./*/*'))
    swrite("\n")
    # List input and output directories
    swrite("-- Input directory " + input_dir + ":\n")
    write_list(ls(input_dir))
    write_list(ls(input_dir + '/*'))
    write_list(ls(input_dir + '/*/*'))
    write_list(ls(input_dir + '/*/*/*'))
    swrite("\n")
    swrite("-- Output directory " + output_dir + ":\n")
    write_list(ls(output_dir))
    write_list(ls(output_dir + '/*'))
    swrite("\n")
    # write meta data to stderr
    swrite('\n=== METADATA ===\n\n')
    swrite("-- Current directory " + pwd() + ":\n")
    try:
        # NOTE(review): yaml.load without a Loader is unsafe on untrusted
        # input and deprecated in newer PyYAML -- consider yaml.safe_load.
        metadata = yaml.load(open('metadata', 'r'))
        for key, value in metadata.items():
            swrite(key + ': ')
            swrite(str(value) + '\n')
    except:
        # Best-effort: missing/unreadable metadata is not fatal.
        swrite("none\n");
    swrite("-- Input directory " + input_dir + ":\n")
    try:
        metadata = yaml.load(open(os.path.join(input_dir, 'metadata'), 'r'))
        for key, value in metadata.items():
            swrite(key + ': ')
            swrite(str(value) + '\n')
        swrite("\n")
    except:
        swrite("none\n");
def show_version(scoring_version):
    ''' Python version and library versions '''
    swrite('\n=== VERSIONS ===\n\n')
    # Scoring program version
    swrite("Scoring program version: " + str(scoring_version) + "\n\n")
    # Python version ("version" is defined elsewhere in this file)
    swrite("Python version: " + version + "\n\n")
    # Give information on the version installed
    swrite("Versions of libraries installed:\n")
    # NOTE(review): under Python 3 map() is lazy, so this line would print
    # nothing there; works as intended on Python 2 only.
    map(swrite, sorted(["%s==%s\n" % (i.key, i.version) for i in lib()]))
def show_platform():
    ''' Show information on platform (OS, hardware, memory) via swrite. '''
    swrite('\n=== SYSTEM ===\n\n')
    try:
        # platform.linux_distribution raises on non-Linux / newer Pythons.
        linux_distribution = platform.linux_distribution()
    except:
        linux_distribution = "N/A"
    swrite("""
dist: %s
linux_distribution: %s
system: %s
machine: %s
platform: %s
uname: %s
version: %s
mac_ver: %s
memory: %s
number of CPU: %s
""" % (
        str(platform.dist()),
        linux_distribution,
        platform.system(),
        platform.machine(),
        platform.platform(),
        platform.uname(),
        platform.version(),
        platform.mac_ver(),
        psutil.virtual_memory(),
        str(psutil.cpu_count())
    ))
def compute_all_scores(solution, prediction):
    ''' Compute all the scores and return them as a dict.
    Scores that fail (e.g. a metric inapplicable to the task) are reported
    with the sentinel value -0.999999 rather than aborting. '''
    missing_score = -0.999999
    scoring = {'BAC (multilabel)': nbac_binary_score,
               'BAC (multiclass)': nbac_multiclass_score,
               'F1  (multilabel)': f1_binary_score,
               'F1  (multiclass)': f1_multiclass_score,
               'Regression ABS  ': a_metric,
               'Regression R2   ': r2_metric,
               'AUC (multilabel)': auc_metric,
               'PAC (multilabel)': npac_binary_score,
               'PAC (multiclass)': npac_multiclass_score}
    # Normalize/sanitize inputs
    [csolution, cprediction] = normalize_array(solution, prediction)
    solution = sanitize_array(solution);
    prediction = sanitize_array(prediction)
    # Compute all scores
    score_names = sorted(scoring.keys())
    scores = {}
    for key in score_names:
        scoring_func = scoring[key]
        try:
            # Regression metrics run on the raw arrays; classification
            # metrics on the normalized/binarized versions.
            if key == 'Regression R2   ' or key == 'Regression ABS  ':
                scores[key] = scoring_func(solution, prediction)
            else:
                scores[key] = scoring_func(csolution, cprediction)
        except:
            # Deliberate best-effort: any failing metric gets the sentinel.
            scores[key] = missing_score
    return scores
def write_scores(fp, scores):
    ''' Write each score to the open file pointer fp, echoing it to stdout. '''
    for key, value in scores.items():
        fp.write("%s --> %s\n" % (key, value))
        print(key + " --> " + str(value))
def show_all_scores(solution, prediction):
    ''' Compute and display all the scores for debug purposes'''
    for key, value in compute_all_scores(solution, prediction).items():
        print(key + " --> " + str(value))
############################### TEST PROGRAM ##########################################
if __name__ == "__main__":
    # Self-test / demo driver: exercises every metric on hand-built solution
    # and prediction matrices and prints the resulting scores for inspection.
    # This shows a bug in metrics.roc_auc_score
    # print('\n\nBug in sklearn.metrics.roc_auc_score:')
    # print('auc([1,0,0],[1e-10,0,0])=1')
    # print('Correct (ours): ' +str(auc_metric(np.array([[1,0,0]]).transpose(),np.array([[1e-10,0,0]]).transpose())))
    # print('Incorrect (sklearn): ' +str(metrics.roc_auc_score(np.array([1,0,0]),np.array([1e-10,0,0]))))
    # This checks the binary and multi-class cases are well implemented
    # In the 2-class case, all results should be identical, except for f1 because
    # this is a score that is not symmetric in the 2 classes.
    eps = 1e-15
    # ---- Binary classification scenarios ----
    print('\n\nBinary score verification:')
    print('\n\n==========================')
    sol0 = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
    comment = ['PERFECT']
    Pred = [sol0]
    Sol = [sol0]
    comment.append('ANTI-PERFECT, very bad for r2_score')
    Pred.append(1 - sol0)
    Sol.append(sol0)
    comment.append('UNEVEN PROBA, BUT BINARIZED VERSION BALANCED (bac and auc=0.5)')
    Pred.append(
        np.array([[0.7, 0.3], [0.4, 0.6], [0.49, 0.51], [0.2, 0.8]]))  # with only 2 columns, pac is not 0 in the uni-column case
    Sol.append(sol0)
    comment.append('PROBA=0.5, TIES BROKEN WITH SMALL VALUE TO EVEN THE BINARIZED VERSION')
    Pred.append(
        np.array([[0.5 + eps, 0.5 - eps], [0.5 - eps, 0.5 + eps], [0.5 + eps, 0.5 - eps], [0.5 - eps, 0.5 + eps]]))
    Sol.append(sol0)
    comment.append('PROBA=0.5, TIES NOT BROKEN (bad for f1 score)')
    Pred.append(np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]))
    Sol.append(sol0)
    sol1 = np.array([[1, 0], [0, 1], [0, 1]])
    comment.append('EVEN PROBA, but wrong PAC prior because uneven number of samples')
    Pred.append(np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]))
    Sol.append(sol1)
    comment.append(
        'Correct PAC prior; score generally 0. But 100% error on positive class because of binarization so f1 (1 col) is at its worst.')
    p = len(sol1)
    Pred.append(np.array([sum(sol1) * 1. / p] * p))
    Sol.append(sol1)
    comment.append('All positive')
    Pred.append(np.array([[1, 1], [1, 1], [1, 1]]))
    Sol.append(sol1)
    comment.append('All negative')
    Pred.append(np.array([[0, 0], [0, 0], [0, 0]]))
    Sol.append(sol1)
    # Run each binary scenario on both the 2-column and 1-column encodings.
    for k in range(len(Sol)):
        sol = Sol[k]
        pred = Pred[k]
        print('****** ({}) {} ******'.format(k, comment[k]))
        print('------ 2 columns ------')
        show_all_scores(sol, pred)
        print('------ 1 column  ------')
        sol = np.array([sol[:, 0]]).transpose()
        pred = np.array([pred[:, 0]]).transpose()
        show_all_scores(sol, pred)
    # ---- Multiclass classification scenarios ----
    print('\n\nMulticlass score verification:')
    print('\n\n==========================')
    sol2 = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
    comment = ['Three classes perfect']
    Pred = [sol2]
    Sol = [sol2]
    comment.append('Three classes all wrong')
    Pred.append(np.array([[0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]))
    Sol.append(sol2)
    comment.append('Three classes equi proba')
    Pred.append(np.array([[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]]))
    Sol.append(sol2)
    comment.append('Three classes some proba that do not add up')
    Pred.append(np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1], [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]]))
    Sol.append(sol2)
    comment.append('Three classes predict prior')
    Pred.append(np.array([[0.75, 0.25, 0.], [0.75, 0.25, 0.], [0.75, 0.25, 0.], [0.75, 0.25, 0.]]))
    Sol.append(sol2)
    for k in range(len(Sol)):
        sol = Sol[k]
        pred = Pred[k]
        print('****** ({}) {} ******'.format(k, comment[k]))
        show_all_scores(sol, pred)
    # ---- Multilabel scenarios: identical labels in every column ----
    print('\n\nMulti-label score verification: 1) all identical labels')
    print('\n\n=======================================================')
    print('\nIt is normal that for more then 2 labels the results are different for the multiclass scores.')
    print('\nBut they should be indetical for the multilabel scores.')
    num = 2
    sol = np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0]])
    sol3 = sol[:, 0:num]
    if num == 1:
        sol3 = np.array([sol3[:, 0]]).transpose()
    comment = ['{} labels perfect'.format(num)]
    Pred = [sol3]
    Sol = [sol3]
    comment.append('All wrong, in the multi-label sense')
    Pred.append(1 - sol3)
    Sol.append(sol3)
    comment.append('All equi proba: 0.5')
    sol = np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]])
    if num == 1:
        Pred.append(np.array([sol[:, 0]]).transpose())
    else:
        Pred.append(sol[:, 0:num])
    Sol.append(sol3)
    comment.append('All equi proba, prior: 0.25')
    sol = np.array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]])
    if num == 1:
        Pred.append(np.array([sol[:, 0]]).transpose())
    else:
        Pred.append(sol[:, 0:num])
    Sol.append(sol3)
    comment.append('Some proba')
    sol = np.array([[0.2, 0.2, 0.2], [0.8, 0.8, 0.8], [0.9, 0.9, 0.9], [0.7, 0.7, 0.7]])
    if num == 1:
        Pred.append(np.array([sol[:, 0]]).transpose())
    else:
        Pred.append(sol[:, 0:num])
    Sol.append(sol3)
    comment.append('Invert both solution and prediction')
    if num == 1:
        Pred.append(np.array([sol[:, 0]]).transpose())
    else:
        Pred.append(sol[:, 0:num])
    Sol.append(1 - sol3)
    for k in range(len(Sol)):
        sol = Sol[k]
        pred = Pred[k]
        print('****** ({}) {} ******'.format(k, comment[k]))
        show_all_scores(sol, pred)
    # ---- Multilabel scenarios: distinct labels per column ----
    print('\n\nMulti-label score verification:')
    print('\n\n==========================')
    sol4 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1]])
    comment = ['Three labels perfect']
    Pred = [sol4]
    Sol = [sol4]
    comment.append('Three classes all wrong, in the multi-label sense')
    Pred.append(1 - sol4)
    Sol.append(sol4)
    comment.append('Three classes equi proba')
    Pred.append(np.array([[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]]))
    Sol.append(sol4)
    comment.append('Three classes some proba that do not add up')
    Pred.append(np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1], [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]]))
    Sol.append(sol4)
    comment.append('Three classes predict prior')
    Pred.append(np.array([[0.25, 0.25, 0.5], [0.25, 0.25, 0.5], [0.25, 0.25, 0.5], [0.25, 0.25, 0.5]]))
    Sol.append(sol4)
    for k in range(len(Sol)):
        sol = Sol[k]
        pred = Pred[k]
        print('****** ({}) {} ******'.format(k, comment[k]))
        show_all_scores(sol, pred)
| [
"ndh3395@gmail.com"
] | ndh3395@gmail.com |
075b9c4afc1dee1fe7094ba2c41a89815e17e658 | a0e4be557dd32d6a89ea3d86166e23d4c6de3101 | /6.00.1x_scripts/Week 4/Problem Set 4/ps4a_wordgame.py | 70e23ae820dce6c9e9deadde6e4856e28f2501a2 | [
"Giftware"
] | permissive | acpfog/python | 65d8016d115ad5ae62314ed809375b93fb692bf6 | 1ec01bf1c7feeaa420fe08b5936c53b8d0e03879 | refs/heads/master | 2018-12-07T02:43:18.127737 | 2018-09-11T06:34:07 | 2018-09-11T06:34:07 | 32,280,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,505 | py | #
# A Word Game: Part a
#
# This game is a lot like Scrabble or Words With Friends, if you've played those.
# Letters are dealt to players, who then construct one or more words out of their letters.
# Each valid word receives a score, based on the length of the word and the letters in that word.
#
# The rules of the game are as follows:
#
# Dealing
# A player is dealt a hand of n letters chosen at random (assume n=7 for now).
#
# The player arranges the hand into as many words as they want out of the letters, using each letter at most once.
#
# Some letters may remain unused (these won't be scored).
#
# Scoring
# The score for the hand is the sum of the scores for each word formed.
#
# The score for a word is the sum of the points for letters in the word, multiplied by the length of the word,
# plus 50 points if all n letters are used on the first word created.
#
# Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.
# We have defined the dictionary SCRABBLE_LETTER_VALUES that maps each lowercase letter to its Scrabble letter value.
#
# For example, 'weed' would be worth 32 points ((4+1+1+2) for the four letters, then multiply by len('weed') to get (4+1+1+2)*4 = 32).
# Be sure to check that the hand actually has 1 'w', 2 'e's, and 1 'd' before scoring the word!
#
# As another example, if n=7 and you make the word 'waybill' on the first try, it would be worth 155 points
# (the base score for 'waybill' is (4+1+4+3+1+1+1)*7=105, plus an additional 50 point bonus for using all n letters).
#
import random
import string
# Letter pools used when dealing a hand.
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
# Default number of letters dealt to a player.
HAND_SIZE = 7
# Scrabble point value of each lowercase letter.
SCRABBLE_LETTER_VALUES = {
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# Word list file: one lowercase word per line.
WORDLIST_FILENAME = "words.txt"
def loadWords():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.

    Reads WORDLIST_FILENAME from the current working directory; raises
    IOError if the file is missing. (Python 2 code: print statements,
    unbuffered open mode argument.)
    """
    print "Loading word list from file..."
    # inFile: file
    inFile = open(WORDLIST_FILENAME, 'r', 0)
    # wordList: list of strings
    wordList = []
    for line in inFile:
        wordList.append(line.strip().lower())
    print " ", len(wordList), "words loaded."
    return wordList
def getFrequencyDict(sequence):
    """
    Returns a dictionary where the keys are elements of the sequence
    and the values are integer counts, for the number of times that
    an element is repeated in the sequence.

    sequence: string or list
    return: dictionary
    """
    # freq: element -> number of occurrences
    freq = {}
    for element in sequence:
        if element in freq:
            freq[element] += 1
        else:
            freq[element] = 1
    return freq
#
# Scoring a word
#
def getWordScore(word, n):
    """
    Returns the score for a word. Assumes the word is a valid word.

    The score for a word is the sum of the points for letters in the
    word, multiplied by the length of the word, PLUS 50 points if all n
    letters are used on the first turn.

    Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
    worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)

    word: string (lowercase letters)
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    returns: int >= 0
    """
    letter_points = sum(SCRABBLE_LETTER_VALUES[ch] for ch in word)
    # Bonus applies only to a non-empty word that uses all n letters.
    bonus = 50 if word and len(word) == n else 0
    return letter_points * len(word) + bonus
#
# Make sure you understand how this function works and what it does!
#
def displayHand(hand):
    """
    Displays the letters currently in the hand.

    For example:
    >>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
    Should print out something like:
       a x x l l l e
    The order of the letters is unimportant.

    hand: dictionary (string -> int)
    """
    # Python 2 print statements; trailing comma suppresses the newline.
    for letter in hand.keys():
        for j in range(hand[letter]):
            print letter,  # print all on the same line
    print  # print an empty line
#
# Make sure you understand how this function works and what it does!
#
def dealHand(n):
    """
    Returns a random hand containing n lowercase letters.
    At least n/3 the letters in the hand should be VOWELS.

    Hands are represented as dictionaries. The keys are
    letters and the values are the number of times the
    particular letter is repeated in that hand.

    n: int >= 0
    returns: dictionary (string -> int)
    """
    hand={}
    # Python 2 integer division: n/3 vowels, the rest consonants.
    numVowels = n / 3
    for i in range(numVowels):
        x = VOWELS[random.randrange(0,len(VOWELS))]
        hand[x] = hand.get(x, 0) + 1
    for i in range(numVowels, n):
        x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
        hand[x] = hand.get(x, 0) + 1
    return hand
#
# Update a hand by removing letters
#
def updateHand(hand, word):
    """
    Assumes that 'hand' has all the letters in word.
    In other words, this assumes that however many times
    a letter appears in 'word', 'hand' has at least as
    many of that letter in it.

    Updates the hand: uses up the letters in the given word
    and returns the new hand, without those letters in it.

    Has no side effects: does not modify hand.

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    new_hand = dict(hand)
    for ch in word:
        remaining = new_hand.get(ch, 0)
        # A letter not in the hand is recorded with a zero count
        # (matching the original behaviour) rather than going negative.
        new_hand[ch] = remaining - 1 if remaining else 0
    return new_hand
#
# Test word validity
#
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.

    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if word not in wordList:
        return False
    # Consume letters from a scratch copy so the caller's hand is untouched.
    available = hand.copy()
    for ch in word:
        if available.get(ch, 0) == 0:
            return False
        available[ch] -= 1
    return True
#
# Playing a hand
#
def calculateHandlen(hand):
    """
    Returns the length (number of letters) in the current hand.

    hand: dictionary (string-> int)
    returns: integer
    """
    # Zero-count entries contribute nothing, so a plain sum is equivalent
    # to skipping them explicitly.
    return sum(hand.values())
def playHand(hand, wordList, n):
    """
    Allows the user to play the given hand, as follows:
    * The hand is displayed.
    * The user may input a word or a single period (the string ".")
    to indicate they're done playing
    * Invalid words are rejected, and a message is displayed asking
    the user to choose another word until they enter a valid word or "."
    * When a valid word is entered, it uses up letters from the hand.
    * After every valid word: the score for that word is displayed,
    the remaining letters in the hand are displayed, and the user
    is asked to input another word.
    * The sum of the word scores is displayed when the hand finishes.
    * The hand finishes when there are no more unused letters or the user
    inputs a "."
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    n: integer (HAND_SIZE; i.e., hand size required for additional points)
    """
    # NOTE(review): Python 2 syntax (print statement, raw_input); displayHand()
    # and getWordScore() are presumably defined earlier in this module -- they
    # are not visible in this section.
    # BEGIN PSEUDOCODE <-- Remove this comment when you code this function; do your coding within the pseudocode (leaving those comments in-place!)
    # Keep track of the total score
    total_score = 0
    # As long as there are still letters left in the hand:
    while calculateHandlen(hand) > 0:
        # Display the hand
        print "Current Hand: ",
        displayHand(hand)
        # Ask user for input
        word = str(raw_input("Enter word, or a \".\" to indicate that you are finished: ")).lower()
        # If the input is a single period:
        if word == ".":
            # End the game (break out of the loop)
            print "Goodbye!",
            break
        # Otherwise (the input is not a single period):
        else:
            # If the word is not valid:
            if not isValidWord(word, hand, wordList):
                # Reject invalid word (print a message followed by a blank line)
                print "Invalid word, please try again.\n"
            # Otherwise (the word is valid):
            else:
                # Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
                word_score = getWordScore(word, n)
                total_score += word_score
                print "\"%s\" earned %s points. Total: %s points\n" % ( word, word_score, total_score )
                # Update the hand
                hand = updateHand(hand, word)
    # Game is over (user entered a '.' or ran out of letters), so tell user the total score
    if calculateHandlen(hand) == 0:
        print "Run out of letters.",
        print "Total score: %s points.\n" % total_score
#
# Playing a game
#
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'n', let the user play a new (random) hand.
* If the user inputs 'r', let the user play the last hand again.
* If the user inputs 'e', exit the game.
* If the user inputs anything else, tell them their input was invalid.
2) When done playing the hand, repeat from step 1
"""
while True:
action = str(raw_input("Enter n to deal a new hand, r to replay the last hand, or e to end game: ")).lower()
if action == "n":
hand = dealHand(HAND_SIZE)
playHand(hand, wordList, HAND_SIZE)
elif action == "r":
if 'hand' in locals():
playHand(hand, wordList, HAND_SIZE)
else:
print "You have not played a hand yet. Please play a new hand first!\n"
elif action == "e":
return
else:
print "Invalid command."
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
    # loadWords() is presumably defined earlier in this module (not visible
    # here); builds the valid-word list once for the whole session
    wordList = loadWords()
    playGame(wordList)
| [
"acpfog@gmail.com"
] | acpfog@gmail.com |
7b6403c7efbad9fe1289f6a2236850d7a726f626 | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/modules/awall.py | ca3979593c598ecae378543075eff676aa1be9d1 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 15 | py | system/awall.py | [
"test@burdo.fr"
] | test@burdo.fr |
2dd90c00d550fb8a4a73f533eae06e6108524325 | 11cff97013030eb41bf5afdb08a0c3c640f8c961 | /Tensorflow/roi-pooling/code/neptune_handler.py | f783c2c9ab1f3906b7e8c27cbb5d5a1647b679de | [] | no_license | mellophi/Codes | 86dcc5c45591f97bdaca79f81fc988954841dd5a | 110fe75961354c259e31cb7cb9a9b5fda32b70bc | refs/heads/master | 2022-11-20T19:53:21.498960 | 2018-02-18T04:32:47 | 2018-02-18T04:32:47 | 118,418,556 | 0 | 1 | null | 2022-10-29T00:45:51 | 2018-01-22T06:55:33 | Jupyter Notebook | UTF-8 | Python | false | false | 2,742 | py | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from builtins import object
from deepsense import neptune
import numpy as np
import cv2
from PIL import Image
class NeptuneHandler(object):
    """Thin wrapper around the deepsense neptune job API for experiment monitoring.

    Reads hyper-parameters from the neptune context and creates the numeric
    channels, charts and image channels that the training loop reports into.
    """
    def __init__(self, num_channel_names, charts_desc, im_channel_names):
        self.ctx = neptune.Context()
        # hyper-parameters forwarded from the neptune job configuration
        self.learn_rate = self.ctx.params.learning_rate
        self.num_epochs = int(self.ctx.params.num_epochs)
        self.roi_folder = self.ctx.params.roidb
        self.im_folder = self.ctx.params.im_folder
        self.pretrained_path = self.ctx.params.pretrained_path
        self.create_numeric_channels(num_channel_names, self.ctx)
        self.create_charts(charts_desc, self.ctx)
        self.create_image_channels(im_channel_names, self.ctx)
    def create_numeric_channels(self, channel_names, ctx):
        """Create one NUMERIC channel per name, kept in input order."""
        channels = []
        for name in channel_names:
            channels.append(ctx.job.create_channel(name=name, channel_type=neptune.ChannelType.NUMERIC))
        self.numerical_channels = channels
    def create_charts(self, charts_desc, ctx):
        """Create one chart per numeric channel; charts_desc[i] is (chart_name, series_name)."""
        charts = []
        for idx in range(len(self.numerical_channels)):
            chart_name = charts_desc[idx][0]
            series_name = charts_desc[idx][1]
            charts.append(ctx.job.create_chart(name=chart_name,
                                               series={series_name: self.numerical_channels[idx]}))
        self.charts = charts
    def send_to_neptune(self, time_point, values):
        """Send values[i] to the i-th numeric channel at x == time_point."""
        for idx in range(len(self.numerical_channels)):
            self.numerical_channels[idx].send(x=time_point, y=values[idx])
    def create_image_channels(self, channel_names, ctx):
        """Create one IMAGE channel per name, kept in input order."""
        channels = []
        for name in channel_names:
            channels.append(ctx.job.create_channel(name=name, channel_type=neptune.ChannelType.IMAGE))
        self.im_channels = channels
    def send_image_with_proposals(self, time_step, im, proposals, shape, rois=False):
        """Draw the proposal boxes onto a fixed 340x150 thumbnail of im and send it.

        Each proposal is indexed as (x1, y1, x2, y2); shape[0]/shape[1] are
        treated as the source height/width when rescaling the boxes --
        presumably the original image size (confirm against the caller).
        rois=True sends to channel 0 ('all the RoIs'), else channel 1.
        """
        width = 340
        height = 150
        # resize first, then scale the float image up to uint8 pixel values
        thumb = np.uint8(cv2.resize(im, (width, height)) * 255.)
        for box in proposals:
            x1 = int(width * box[0] / float(shape[1]))
            y1 = int(height * box[1] / float(shape[0]))
            x2 = int(width * box[2] / float(shape[1]))
            y2 = int(height * box[3] / float(shape[0]))
            cv2.rectangle(thumb, (x1, y1), (x2, y2), (255, 0, 0), 1)
        pil_image = Image.fromarray(thumb)
        if rois:
            payload = neptune.Image(name='all the RoIs', description='region proposals', data=pil_image)
            self.im_channels[0].send(x=time_step, y=payload)
        else:
            payload = neptune.Image(name='chosen RoIs', description='object detections', data=pil_image)
            self.im_channels[1].send(x=time_step, y=payload)
| [
"ayon01051998@gmail.com"
] | ayon01051998@gmail.com |
ef19beb3ab81659669a7ebc9612b50f9315b39a6 | 429d014690e2e2f7ea49ac7a2e0fa01455a4125e | /katas/scraping/get_honor.py | b6974268c09fa9df7f80fab797afaa1a87d2f6af | [] | no_license | harrietty/python-katas | 4c2b12c7a29d9103b9d3eeffc1aa38f5273ab62f | 07957c3c5a1a35a9f2359f02f43e433fddbb8de6 | refs/heads/master | 2021-05-14T12:20:39.361071 | 2018-01-27T13:42:51 | 2018-01-27T13:42:51 | 116,405,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # Returns the honour for a specific username
import requests
from bs4 import BeautifulSoup
def get_honor(username):
    """Fetch the given user's codewars.com profile page and return their honor as an int."""
    URL = 'https://www.codewars.com/users/{}'.format(username)
    r = requests.get(URL)
    page = BeautifulSoup(r.text, 'html.parser')
    # presumably the honor stats live in the first 'stat-row' block -- verify
    # against the live page markup, which can change without notice
    stats_area = page.find_all(attrs={'class': 'stat-row'})[0]
    honor_section = stats_area.find_all('b')
    # the text following the second <b> tag is parsed as the honor number;
    # thousands separators are stripped before int() conversion
    return int(honor_section[1].next_sibling.replace(',', '')) | [
"harriethryder@gmail.com"
] | harriethryder@gmail.com |
8fa27adb7b645a2f9b9ee0039cbef6dac5e7a346 | cfb66405a8aa30401c4c9b9cdb5f40edc3cda4a1 | /Layout/MainWindow.py | 6e703100858bf8bd974a878b76b75390e1523897 | [] | no_license | stinger000/CopterHack2020_IR_LED_Desktop_GUI | da42e978ab4eb8ee137ce07a25373c46719e3580 | 88c2ce10e3ba566a210d9e3a9c277770f2d388b6 | refs/heads/master | 2023-03-25T04:56:57.216682 | 2021-03-19T17:44:47 | 2021-03-19T17:44:47 | 300,357,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,750 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui\MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI definition for the 'Easy Race' main window.

    Do not edit by hand -- per the file header, changes are lost when pyuic5
    is re-run on the .ui file.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets and wire the designer-defined connections."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(398, 420)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # serial-port selection row
        self.comboSerial = QtWidgets.QComboBox(self.centralwidget)
        self.comboSerial.setGeometry(QtCore.QRect(0, 20, 231, 22))
        self.comboSerial.setObjectName("comboSerial")
        self.btnConnect = QtWidgets.QPushButton(self.centralwidget)
        self.btnConnect.setGeometry(QtCore.QRect(250, 20, 75, 23))
        self.btnConnect.setObjectName("btnConnect")
        # lap-data list
        self.listData = QtWidgets.QListWidget(self.centralwidget)
        self.listData.setGeometry(QtCore.QRect(20, 180, 256, 192))
        self.listData.setObjectName("listData")
        # large stopwatch display
        self.labelClock = QtWidgets.QLabel(self.centralwidget)
        self.labelClock.setGeometry(QtCore.QRect(20, 80, 141, 51))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.labelClock.setFont(font)
        self.labelClock.setObjectName("labelClock")
        self.btnStart = QtWidgets.QPushButton(self.centralwidget)
        self.btnStart.setGeometry(QtCore.QRect(20, 150, 75, 23))
        self.btnStart.setObjectName("btnStart")
        self.btnStop = QtWidgets.QPushButton(self.centralwidget)
        self.btnStop.setGeometry(QtCore.QRect(120, 150, 75, 23))
        self.btnStop.setObjectName("btnStop")
        # race-mode options (Free Mode is the default)
        self.radioFreeMode = QtWidgets.QRadioButton(self.centralwidget)
        self.radioFreeMode.setGeometry(QtCore.QRect(290, 220, 91, 31))
        self.radioFreeMode.setChecked(True)
        self.radioFreeMode.setObjectName("radioFreeMode")
        self.radioLapsMode = QtWidgets.QRadioButton(self.centralwidget)
        self.radioLapsMode.setGeometry(QtCore.QRect(290, 250, 91, 31))
        self.radioLapsMode.setObjectName("radioLapsMode")
        self.checkFirstLap = QtWidgets.QCheckBox(self.centralwidget)
        self.checkFirstLap.setGeometry(QtCore.QRect(290, 190, 101, 21))
        self.checkFirstLap.setObjectName("checkFirstLap")
        self.spinLaps = QtWidgets.QSpinBox(self.centralwidget)
        self.spinLaps.setEnabled(True)
        self.spinLaps.setGeometry(QtCore.QRect(290, 290, 71, 22))
        self.spinLaps.setMinimum(1)
        self.spinLaps.setObjectName("spinLaps")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # checking Free Mode hides the laps spin box (toggled bool -> setHidden)
        self.radioFreeMode.toggled['bool'].connect(self.spinLaps.setHidden)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible (translatable) widget texts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Easy Race"))
        self.btnConnect.setText(_translate("MainWindow", "Connect"))
        self.labelClock.setText(_translate("MainWindow", "00:00:00.000"))
        self.btnStart.setText(_translate("MainWindow", "Start"))
        self.btnStop.setText(_translate("MainWindow", "Stop"))
        self.radioFreeMode.setText(_translate("MainWindow", "Free Mode"))
        self.radioLapsMode.setText(_translate("MainWindow", "Laps Mode"))
        self.checkFirstLap.setText(_translate("MainWindow", "Start on first lap"))
| [
"bigbani1334@gmail.com"
] | bigbani1334@gmail.com |
022c0ad1b16fcf2c7fcb93d178db8aeacd866d1b | 2e83e004d8a69a773d1e305152edd16e4ea35ed8 | /students/humberto_gonzalez/session04/mailroom.py | d46c459282d38bc0a37ac96b496c40d1524f327a | [] | no_license | UWPCE-PythonCert-ClassRepos/SP_Online_PY210 | 9b170efbab5efedaba8cf541e8fc42c5c8c0934d | 76224d0fb871d0bf0b838f3fccf01022edd70f82 | refs/heads/master | 2021-06-16T20:14:29.754453 | 2021-02-25T23:03:19 | 2021-02-25T23:03:19 | 161,077,720 | 19 | 182 | null | 2021-02-25T23:03:19 | 2018-12-09T20:18:25 | Python | UTF-8 | Python | false | false | 4,004 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:53:34 2019
@author: humberto gonzalez
"""
import sys
import tempfile
import os
import operator
# donation history keyed by donor name; each value is the list of individual gift amounts
donor_db = {"Tyrod Taylor": [1000.00, 45.50],
            "Jarvis Landry": [150.25],
            "Philip Rivers": [650.23, 40.87, 111.32],
            "Melvin Gordon": [1677.25, 4300.23, 10532.00],
            "Mike Williams": [230.56, 12.45, 11.00],
            "Antonio Brown": [100.00, 88.88]
            }
# NOTE(review): snapshot of the donor names taken once at import time -- it is
# NOT refreshed when donors are added at runtime (send_thank_you only updates
# donor_db), so membership tests against this list can go stale
donors = list(donor_db.keys())
# multi-line main-menu text; also used directly as the input() prompt in main()
main_prompt = "\n".join(("Welcome to the Mailroom!",
                         "Please choose from below options:",
                         "1 - Send a Thank You",
                         "2 - Create a report",
                         "3 - Send letters to all donors",
                         "4 - Exit Program",
                         ">>> "))
def create_report_db(db):
    '''Summarize the donor database into report rows sorted by total given (descending).

    db: dict mapping donor name -> list of donation amounts
    returns: list of (name, [total, count, average]) tuples, biggest total first
    '''
    summary = {}
    for name, gifts in db.items():
        total = round(sum(gifts), 2)
        count = len(gifts)
        summary[name] = [total, count, round(total / count, 2)]
    # the [total, ...] lists compare element-wise, so this sorts by total first
    return sorted(summary.items(), key=lambda entry: entry[1], reverse=True)
def send_thank_you():
    '''Prompt for a donor and a donation amount, record the donation and print a thank-you note.

    Entering "quit" at either prompt returns to the main menu without recording anything.
    '''
    donor_name = input('What is the full name of the donor you would like to thank? ')
    if donor_name.lower() == "quit":
        # return to the caller's menu loop; the original recursed into main(),
        # growing the stack on every quit
        return
    # BUG FIX: membership must be checked against donor_db itself. The module
    # level 'donors' list is a snapshot taken at import time, so testing against
    # it wiped out the donation history of any donor added during this session.
    if donor_name not in donor_db:
        donor_db[donor_name] = []
    donation = input('What was the donation amount? ')
    if donation.lower() == "quit":
        return
    try:
        amount = float(donation)
    except ValueError:
        # a non-numeric amount previously crashed the program with ValueError
        print('Sorry, that is not a valid donation amount.')
        return
    donor_db[donor_name].append(amount)
    print()
    print()
    print(f'Dear {donor_name},\n Thank you for your generous donation of ${donation}')
def create_report():
    '''Print a formatted report of the current donor database, sorted by total given.'''
    header = '{:20} | {:^10} | {:^10} | {:^10} |'.format("Donor Name",
                                                         "Total Given", "Num Gifts", "Average Gift")
    print(header)
    print('-' * 64)
    row_format = "{:20} ${:>10} {:>10} ${:>10}"
    # create_report_db yields (name, [total, count, average]) rows, biggest total first
    for donor, (total, num_gifts, average) in create_report_db(donor_db):
        print(row_format.format(donor, total, num_gifts, average))
def send_letters():
    '''Write a thank-you letter for every donor as a .txt file into a temp "Letters to Donors" folder.

    NOTE(review): only the donor's FIRST recorded donation is mentioned in the
    letter -- presumably the latest (or the total) was intended; confirm before
    changing the behavior.
    '''
    formatter = '''Dear {},\n Thank you for your generous donation of ${}. \n Your donation will be put to great use. \n Sincerely, \n -The Organization'''
    path = os.path.join(tempfile.gettempdir(), "Letters to Donors")
    # BUG FIX: os.mkdir() raised FileExistsError on every run after the first;
    # makedirs(exist_ok=True) makes the function safely re-runnable
    os.makedirs(path, exist_ok=True)
    for donor in donor_db:
        letter_path = os.path.join(path, donor.replace(' ', '_') + '.txt')
        # 'with' closes the file on exit; the original's explicit close() was redundant
        with open(letter_path, 'w') as file:
            file.write(formatter.format(donor, donor_db[donor][0]))
    print('Letters have been created and saved to \n a new folder in your temp directory')
def exit_program():
    '''Say goodbye and terminate the whole program by raising SystemExit.'''
    print("Bye!")
    sys.exit()
def main():
    '''Show the main menu in a loop and dispatch to the selected feature until the user exits.'''
    # the menu never changes, so build the dispatch table once instead of on
    # every iteration of the loop
    menu_options = {"1": send_thank_you,
                    "2": create_report,
                    "3": send_letters,
                    "4": exit_program}
    while True:
        response = input(main_prompt)  # continuously collect user selection
        if response not in menu_options:
            print()
            print("Please select one of the available options")
            print("You will be returned to the main menu")
            # BUG FIX: the original recursed into main() here (unbounded stack
            # growth on repeated bad input) and would then still have fallen
            # through to call menu_options.get(response)() -- i.e. None() -- if
            # the recursion ever returned; 'continue' does the right thing.
            continue
        menu_options[response]()
if __name__ == "__main__":
    # entry-point guard: keeps the menu loop from starting when this module is imported
    main()
| [
"Humberto.gonzalezj@gmail.com"
] | Humberto.gonzalezj@gmail.com |
30728e1d5147122974d91d2d0ec16a80eed21091 | 06f05e8a3786349edee85a3feb06b5fe8e881c5a | /assets/misc/ExcelHandler.py | 4be1ee7fac62cd65b664c361400c50354f464b64 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | UbiCastTeam/mediasite_client | fcb3b20c8d70d170d2acac562ff8166e4bd8383a | 04f398c0a1dd2ab70272b346065db4e63dfb38fb | refs/heads/master | 2023-07-05T18:23:35.439153 | 2023-06-30T14:53:51 | 2023-06-30T15:10:23 | 329,262,964 | 0 | 0 | MIT | 2021-06-28T16:21:42 | 2021-01-13T09:53:23 | null | UTF-8 | Python | false | false | 867 | py | # Referenced from:
# https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch12s08.html
from xml.sax import ContentHandler, parse
class ExcelHandler(ContentHandler):
    """SAX handler collecting cell texts from an XML spreadsheet (Cell/Row/Table elements).

    After parsing, self.tables is a list of tables, each a list of rows, each a
    list of cell strings.  NOTE(review): super().__init__() is not called, as in
    the Python Cookbook original this was taken from.
    """
    def __init__(self):
        self.chars = [ ]      # text fragments of the cell currently being read
        self.cells = [ ]      # finished cells of the current row
        self.rows = [ ]       # finished rows of the current table
        self.tables = [ ]     # finished tables of the whole document
    def characters(self, content):
        # SAX may deliver a cell's text in several chunks; buffer them per cell
        self.chars.append(content)
    def startElement(self, name, atts):
        # opening an element starts a fresh accumulator one level down
        if name=="Cell":
            self.chars = [ ]
        elif name=="Row":
            self.cells=[ ]
        elif name=="Table":
            self.rows = [ ]
    def endElement(self, name):
        # closing an element flushes its accumulator into the parent container
        if name=="Cell":
            self.cells.append(''.join(self.chars))
        elif name=="Row":
            self.rows.append(self.cells)
        elif name=="Table":
            self.tables.append(self.rows) | [
"d33bs@users.noreply.github.com"
] | d33bs@users.noreply.github.com |
af80ef60a13d2e3be5f9fadcee5100682e0db9d3 | 8fb567f62189d1669439c797e19502dd560e4fb6 | /relief_canvas.py | 2c563e55e99831bf3b00d56fbf734106ef3bc6f7 | [] | no_license | AndiEcker/kivy_playground | 22cabf8f77fb2087d2f8c4f3719a4c3d367d1874 | d86c736c752b08172760d2ea145527df4eb58199 | refs/heads/master | 2021-07-06T23:51:29.284502 | 2021-05-05T22:02:03 | 2021-05-05T22:02:03 | 236,343,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,125 | py | """ relief canvas """
from typing import Any, Callable, Tuple, Union
from kivy.factory import Factory
from kivy.graphics import Color, Line
from kivy.graphics.instructions import InstructionGroup
from kivy.lang import Builder
from kivy.properties import NumericProperty, ObjectProperty
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from ae.gui_app import id_of_flow
from ae.kivy_app import FlowButton, FlowToggler, KivyMainApp
DEF_NUM_PROP_VAL = "99px"   # fallback value for invalid NumericProperty inputs

# arc start/end angles (degrees) splitting an ellipse into a top-left and a
# bottom-right relief half -- presumably tuned for the light-from-top-left look
ANGLE_BEG = 87
ANGLE_END = 267

ColorRGB = Tuple[float, float, float] #: color with Red, Green and Blue parts between 0.0 and 1.0
ColorRGBA = Tuple[float, float, float, float] #: ink is rgb color and alpha
ColorOrInk = Union[ColorRGB, ColorRGBA] #: color or ink type
ReliefColors = Union[Tuple[ColorRGB, ColorRGB], Tuple] #: tuple of (top, bottom) relief colors or empty tuple
ReliefBrightness = Tuple[float, float] #: top and bottom brightness/darken factor


def relief_colors(color_or_ink: ColorOrInk = (0, 0, 0), darken_factors: ReliefBrightness = (0.6, 0.3)) -> ReliefColors:
    """ derive the (top, bottom) relief line colors from a base color.

    The base color is first brightened until its strongest R/G/B part reaches
    1.0 (a pure black base becomes white), then each darken factor is applied
    to produce the top and bottom shades.

    :param color_or_ink:    base color (RGB or RGBA).  A zero alpha disables
                            the relief: an empty tuple gets returned.
    :param darken_factors:  darken factor for (1) the top and (2) the bottom
                            relief color.
    :return:                ((top RGB), (bottom RGB)) or () if alpha is zero.
    """
    if len(color_or_ink) > 3 and not color_or_ink[3]:
        return ()
    brightest = max(color_or_ink[:3])
    if brightest == 0:      # guard zero division for a black/default base color
        base = (1.0, 1.0, 1.0)
    else:
        scale = 1 / brightest
        base = tuple(part * scale for part in color_or_ink[:3])
    return tuple(tuple(part * factor for part in base) for factor in darken_factors)
class ReliefCanvas:  # (Widget): # also works without Widget/any ancestor
    """ relief behavior mix-in: draws pseudo-3D borders onto the widget's canvas.after.

    Square and/or ellipse reliefs -- each with an inner and an outer variant --
    are rendered as stacks of progressively more transparent lines.  The drawing
    is rebuilt whenever pos/size or one of the relief_* properties changes.
    An empty colors tuple or a zero line count disables the respective variant.
    """
    relief_ellipse_inner_colors: ReliefColors = ObjectProperty(())
    relief_ellipse_inner_lines: NumericProperty = NumericProperty('6sp')
    relief_ellipse_inner_offset: NumericProperty = NumericProperty('1sp')
    relief_ellipse_outer_colors: ReliefColors = ObjectProperty(())
    relief_ellipse_outer_lines: NumericProperty = NumericProperty('6sp')
    relief_square_inner_colors: ReliefColors = ObjectProperty(())
    relief_square_inner_lines: NumericProperty = NumericProperty('3sp')
    relief_square_inner_offset: NumericProperty = NumericProperty('1sp')
    relief_square_outer_colors: ReliefColors = ObjectProperty(())
    relief_square_outer_lines: NumericProperty = NumericProperty('3sp')

    _relief_graphic_instructions: InstructionGroup     # cached canvas.after instruction group

    # attributes provided by the class to be mixed into
    x: float
    y: float
    width: float
    height: float
    canvas: Any
    bind: Any

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # any geometry or relief-property change triggers a full redraw
        self.bind(pos=self._relief_refresh)
        self.bind(size=self._relief_refresh)
        self.bind(relief_ellipse_inner_colors=self._relief_refresh)
        self.bind(relief_ellipse_inner_lines=self._relief_refresh)
        self.bind(relief_ellipse_inner_offset=self._relief_refresh)
        self.bind(relief_ellipse_outer_colors=self._relief_refresh)
        self.bind(relief_ellipse_outer_lines=self._relief_refresh)
        self.bind(relief_square_inner_colors=self._relief_refresh)
        self.bind(relief_square_inner_lines=self._relief_refresh)
        self.bind(relief_square_inner_offset=self._relief_refresh)
        self.bind(relief_square_outer_colors=self._relief_refresh)
        self.bind(relief_square_outer_lines=self._relief_refresh)
        self._relief_graphic_instructions = InstructionGroup()

    def _relief_refresh(self, *_args):
        """ pos/size or relief property changed event handler: rebuild all relief drawings. """
        if self._relief_graphic_instructions.length():
            self.canvas.after.remove(self._relief_graphic_instructions)
            self._relief_graphic_instructions.clear()
        add = self._relief_graphic_instructions.add
        pos_size = (self.x, self.y, self.width, self.height)
        # draw order kept from the original: ellipse inner/outer, square inner/outer
        if self.relief_ellipse_inner_colors and self.relief_ellipse_inner_lines:
            self._relief_draw(add, self.relief_ellipse_inner_colors, int(self.relief_ellipse_inner_lines),
                              int(self.relief_ellipse_inner_offset), True, True, pos_size)
        if self.relief_ellipse_outer_colors and self.relief_ellipse_outer_lines:
            self._relief_draw(add, self.relief_ellipse_outer_colors, int(self.relief_ellipse_outer_lines),
                              0, False, True, pos_size)
        if self.relief_square_inner_colors and self.relief_square_inner_lines:
            self._relief_draw(add, self.relief_square_inner_colors, int(self.relief_square_inner_lines),
                              int(self.relief_square_inner_offset), True, False, pos_size)
        if self.relief_square_outer_colors and self.relief_square_outer_lines:
            self._relief_draw(add, self.relief_square_outer_colors, int(self.relief_square_outer_lines),
                              0, False, False, pos_size)
        if self._relief_graphic_instructions.length():
            self.canvas.after.add(self._relief_graphic_instructions)

    def _relief_draw(self, add_instruction: Callable, colors: ReliefColors, lines: int, offset: int,
                     inward: bool, ellipse: bool, pos_size: Tuple[float, float, float, float]):
        """ add the graphic instructions for one relief variant.

        Consolidates the four near-identical drawing methods of the original
        (ellipse/square x inner/outer) into one parameterized helper.

        :param add_instruction: callable adding one graphic instruction (Color/Line).
        :param colors:          (top, bottom) relief colors.
        :param lines:           number of stacked lines to draw.
        :param offset:          gap between widget border and first line (inner variants only).
        :param inward:          True to draw inside the widget border, False outside.
        :param ellipse:         True for ellipse arcs, False for square polylines.
        :param pos_size:        widget (x, y, width, height).
        """
        top_color, bottom_color = colors
        wid_x, wid_y, wid_width, wid_height = pos_size
        for line in range(1, lines + 1):
            alpha = 0.9 - (line / lines) * 0.81     # fade out towards the last line
            delta = line + offset if inward else -line
            x1 = wid_x + delta
            y1 = wid_y + delta
            width = wid_width - 2 * delta
            height = wid_height - 2 * delta
            if ellipse:
                add_instruction(Color(*top_color, alpha))       # top left arc
                add_instruction(Line(ellipse=[x1, y1, width, height, ANGLE_END, 360 + ANGLE_BEG]))
                add_instruction(Color(*bottom_color, alpha))    # bottom right arc
                add_instruction(Line(ellipse=[x1, y1, width, height, ANGLE_BEG, ANGLE_END]))
            else:
                x2 = x1 + width
                y2 = y1 + height
                add_instruction(Color(*top_color, alpha))       # top left polyline
                add_instruction(Line(points=[x1, y1, x1, y2, x2, y2]))
                add_instruction(Color(*bottom_color, alpha))    # bottom right polyline
                add_instruction(Line(points=[x1, y1, x2, y1, x2, y2]))
class ReliefLabel(ReliefCanvas, Label):
    """ kivy Label with the ReliefCanvas relief-border mix-in. """
class ReliefButton(ReliefCanvas, Button):
    """ kivy Button with the ReliefCanvas relief-border mix-in. """
class ReliefFlowButton(ReliefCanvas, FlowButton):
    """ ae FlowButton with the ReliefCanvas relief-border mix-in. """
class ReliefFlowToggler(ReliefCanvas, FlowToggler):
    """ ae FlowToggler (toggle button) with the ReliefCanvas relief-border mix-in. """
if __name__ == '__main__':
Builder.load_string("""\
#: import relief_colors relief_canvas.relief_colors
#: import ERR relief_canvas.DEF_NUM_PROP_VAL
<ReliefHelpToggler@ReliefCanvas+HelpToggler>:
<Main@FloatLayout>:
BoxLayout:
orientation: 'vertical'
padding: 3
spacing: 6
BoxLayout:
padding: 3
spacing: 6
size_hint_y: None
height: 90
#HelpToggler: # needed for to run kivy_app
ReliefHelpToggler:
relief_square_outer_colors:
relief_colors((1, 1, 0) if app.help_layout else (1, 0, 1), darken_factors=(.81, .51))
relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text)
relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text)
relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text)
FlowButton:
text: "toggle theme"
on_release: app.main_app.change_app_state('light_theme', not app.app_states['light_theme'])
BoxLayout:
orientation: 'vertical'
size_hint_x: 3
BoxLayout:
ImageLabel:
text: "lines"
ImageLabel:
text: "outer"
ImageLabel:
text: "inner"
ImageLabel:
text: "offset"
BoxLayout:
ImageLabel:
text: "ellipse"
FlowInput:
id: ell_out
text: "6sp"
background_color:
(1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1)
FlowInput:
id: ell_inn
text: "3sp"
background_color:
(1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1)
FlowInput:
id: ell_off
text: "1sp"
background_color:
(1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1)
BoxLayout:
ImageLabel:
text: "square"
FlowInput:
id: squ_out
text: "6sp"
background_color:
(1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1)
FlowInput:
id: squ_inn
text: "3sp"
background_color:
(1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1)
FlowInput:
id: squ_off
text: "1sp"
background_color:
(1, 0, 0, .3) if app.main_app.correct_num_prop_value(self.text) == ERR else (1, 1, 1, 1)
BoxLayout:
padding: app.main_app.correct_num_prop_value(ell_out.text)
spacing: 69
ReliefFlowButton:
text: "FlowDarkFac36"
ellipse_fill_ink: .6, .3, .3, 1
on_ellipse_fill_ink: print("INK changed", args)
#relief_ellipse_outer_lines: '9sp'
#relief_ellipse_inner_lines: '6sp'
relief_ellipse_inner_colors: relief_colors(self.ellipse_fill_ink, darken_factors=(0.3, 0.6))
on_relief_ellipse_inner_colors: print("COL changed", args)
relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text)
relief_ellipse_outer_colors: relief_colors(self.ellipse_fill_ink)
on_relief_ellipse_outer_colors: print("COL changed", args)
relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text)
relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text)
size_hint: 1, 1
on_release: app.main_app.toggle_color_picker(self, 'ellipse_fill_ink')
ReliefFlowButton:
text: "Flow"
ellipse_fill_ink: .7, .7, .3, 1
relief_ellipse_inner_colors: relief_colors(self.ellipse_fill_ink)
relief_ellipse_outer_colors: relief_colors(self.ellipse_fill_ink)
relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text)
relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text)
relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text)
size_hint: 1, 1
on_release: app.main_app.toggle_color_picker(self, 'ellipse_fill_ink')
ReliefFlowButton:
text: "0 alpha"
ellipse_fill_ink: .4, .7, .7, 0 # the 0 alpha is preventing relief
relief_ellipse_colors: relief_colors(self.ellipse_fill_ink)
relief_ellipse_outer_lines: app.main_app.correct_num_prop_value(ell_out.text)
relief_ellipse_inner_lines: app.main_app.correct_num_prop_value(ell_inn.text)
relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text)
square_fill_ink: .6, .6, .6, .6
size_hint: None, 1
width: self.height
on_release: app.main_app.toggle_color_picker(self, 'ellipse_fill_ink')
ReliefFlowToggler:
text: "Toggler"
ellipse_fill_ink: .42, .63, .93, 1
relief_ellipse_inner_colors: relief_colors(self.ellipse_fill_ink, darken_factors=(0.3, 0.6))
relief_ellipse_inner_lines:
app.main_app.correct_num_prop_value(ell_inn.text if self.state == 'down' else '18sp')
relief_ellipse_outer_colors: relief_colors(self.ellipse_fill_ink)
relief_ellipse_outer_lines:
app.main_app.correct_num_prop_value(ell_out.text if self.state == 'down' else '12sp')
relief_ellipse_inner_offset: app.main_app.correct_num_prop_value(ell_off.text)
size_hint: None, 1
width: self.height
on_state: print("Ellipse Toggler state change", args)
BoxLayout:
padding: app.main_app.correct_num_prop_value(squ_out.text)
spacing: 69
ReliefLabel:
text: "kivy label"
color: (0, 0, 0, 1) if app.app_states['light_theme'] else (1, 1, 1, 1)
relief_square_inner_colors: relief_colors((1, 1, 1), darken_factors=(0.3, 0.6))
relief_square_outer_colors: relief_colors((1, 1, 1))
relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text)
relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text)
relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text)
ReliefButton:
text: "kivy button"
color: (0, 0, 0, 1) if app.app_states['light_theme'] else (1, 1, 1, 1)
relief_square_inner_colors: relief_colors((1, 1, 0), darken_factors=(0.3, 0.6))
relief_square_outer_colors: relief_colors((0, 0, 1))
relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text)
relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text)
relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text)
ReliefFlowButton:
text: "flow button"
square_fill_ink: .42, .63, .93, 1
relief_square_inner_colors: relief_colors(self.square_fill_ink, darken_factors=(0.3, 0.6))
relief_square_outer_colors: relief_colors(self.square_fill_ink)
relief_square_outer_lines: app.main_app.correct_num_prop_value(squ_out.text)
relief_square_inner_lines: app.main_app.correct_num_prop_value(squ_inn.text)
relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text)
size_hint: 1, 1
on_release: app.main_app.toggle_color_picker(self)
ReliefFlowToggler:
text: "flow toggler"
square_fill_ink: .42, .63, .93, 1
relief_square_inner_colors: relief_colors(self.square_fill_ink, darken_factors=(0.3, 0.6))
relief_square_outer_colors: relief_colors(self.square_fill_ink)
relief_square_inner_lines:
app.main_app.correct_num_prop_value(squ_inn.text) if self.state == 'down' else '18sp'
relief_square_outer_lines:
app.main_app.correct_num_prop_value(squ_out.text) if self.state == 'down' else '9sp'
relief_square_inner_offset: app.main_app.correct_num_prop_value(squ_off.text)
size_hint: 1, 1
on_release: app.main_app.toggle_color_picker(self)
<ColorPickerDD@FlowDropDown>:
ColorPicker:
id: col_pic
size_hint_y: None
height: self.width
on_color: app.main_app.debug_print("PIC changed", args)
""")
Factory.register('ReliefCanvas', ReliefCanvas)
class NumPropTester(Widget):
    """ test NumericProperty values """
    num_prop = NumericProperty()  # probe property: assigning an invalid value raises ValueError
class ReliefCanvasApp(KivyMainApp):
    """Demo app driving the relief-button kv rules and a shared color picker dropdown."""
    # Lazily-created ColorPicker widget and its dropdown container
    # (both None while the picker is closed).
    color_picker: Any = None
    color_dropdown: Any = None
    @staticmethod
    def correct_num_prop_value(num_prop_value: Union[str, int, float]) -> Union[str, int, float]:
        """ test if num_prop_value has a valid/assignable NumericProperty value and if not correct it to 21sp """
        # Probe-assign onto a throw-away widget: Kivy raises ValueError for
        # strings that are neither numbers nor metric-suffixed numbers.
        wid = NumPropTester()
        try:
            wid.num_prop = num_prop_value
        except ValueError:
            print(f"ReliefCanvasApp.correct_num_prop_value() got invalid numeric property value '{num_prop_value}'")
            return DEF_NUM_PROP_VAL
        return num_prop_value
    def debug_print(self, *args, **kwargs):
        """ added to find out why the color got lightened when opening color picker dropdown. """
        print("APP_DEBUG_PRINT", args, kwargs)
    def toggle_color_picker(self, wid, color_name='square_fill_ink'):
        """ show or hide color picker"""
        print("TOGGLE COLOR PICKER", getattr(wid, color_name), self.color_picker)
        # Open only if the dropdown exists AND is currently attached to a widget.
        is_open = self.color_dropdown and self.color_dropdown.attach_to
        if is_open:
            self.color_dropdown.dismiss()
        if self.color_dropdown:
            # Unbind before discarding so a stale binding cannot keep
            # writing picker colors into the previously-attached widget.
            self.color_picker.unbind(color=wid.setter(color_name))
            self.color_picker = None
            self.color_dropdown = None
        if not is_open:
            self.color_dropdown = Factory.ColorPickerDD()
            self.change_flow(id_of_flow('suggest'))
            self.color_dropdown.open(wid)
            self.color_picker = self.color_dropdown.ids.col_pic
            # Seed the picker with the widget's current color, then mirror
            # every picker change back into wid.<color_name>.
            self.color_picker.color = getattr(wid, color_name)
            self.color_picker.bind(color=wid.setter(color_name))
# Instantiate and start the Kivy application (blocks until the window closes).
ReliefCanvasApp().run_app()
| [
"aecker2@gmail.com"
] | aecker2@gmail.com |
d7366a977e0afa6e57c3e3e0f85d765de473224f | a00443d297229f1631eb14b2fa5359b37ed6ace7 | /Scripts/pip3.7-script.py | 93e6c18ca6cae0ed63e4add0a85b977ad98f32a9 | [] | no_license | ugoodumegwu/my-first-blog | 9e4f0df4fe971246515e827b362bbfef09f8813f | ce891b3a62efb5c5933bd65db14a25dc186114b4 | refs/heads/master | 2020-06-11T23:28:19.434299 | 2019-06-28T11:32:08 | 2019-06-28T11:32:08 | 194,120,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\user\PycharmProjects\anywhere\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
# NOTE: this wrapper is auto-generated by setuptools (see the
# EASY-INSTALL-ENTRY-SCRIPT header above); do not edit it by hand.
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip sees its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"ugoodumegwu@gmail.com"
] | ugoodumegwu@gmail.com |
3f124ab243ee4915437638872d37c259c7a7ea28 | e3396ba55ba855e146963aeb4733f0ce2725892b | /result/145/ant.py | a5e2a21c04735fe5cf42d467a81de5c361fc8e03 | [] | no_license | masashihamaguchi/contest-2018-GOMOKU | 2ad93794a72370801bbcfd628cf4ed03a87c4313 | e6b2fd0aa1b4095cfd962078ed2ec69068e929c5 | refs/heads/master | 2021-09-23T03:24:59.725681 | 2018-09-20T08:01:13 | 2018-09-20T08:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,774 | py | #!/usr/bin/python3
N = 15
# The main routine of AI.
# input: str[N][N] field : state of the field.
# output: int[2] : where to put a stone in this turn.
def Think(field):
    """Choose the cell to play this turn.

    Priority: (1) a cell that completes five of our own stones, (2) a cell
    that blocks an opponent line of five, otherwise (3) the candidate with
    the best line-count score, falling back to the cell nearest the center.
    NOTE(review): `field` is mutated as a scratch pad; cells are restored to
    '.' except on the early-return paths — the caller exits right after, so
    this is harmless here, but confirm before reusing Think elsewhere.
    """
    CENTER = (int(N / 2), int(N / 2))
    best_position = (0, 0)
    canMaxStone = 0
    for i in range(N):
        for j in range(N):
            if field[i][j] != '.':
                continue
            position = (i, j)
            # Assume to put a stone on (i, j).
            field[i][j] = 'O'
            # count = longest own line through (i, j); lineCounts = product score.
            count, lineCounts = CanHaveFiveStones(field, position)
            #DebugPrint('I have a winning choice at (%d, %d)' %(i, j))
            if count >= 5:
                return position
            elif lineCounts > canMaxStone:
                best_position = position
                canMaxStone = lineCounts
                # return position
            # Now assume the opponent plays here instead; block a win-in-one.
            field[i][j] = 'X'
            if OppHaveNumStones(field, position, 5):
                DebugPrint('I have a winning choice at (%d, %d)' % (i, j))
                return position
            field[i][j] = '.'
            # With no scored candidate yet, prefer cells closer to the center.
            if GetDistance(best_position, CENTER) > GetDistance(position, CENTER) and canMaxStone == 0:
                best_position = position
    return best_position
def OppHaveNumStones(field, position, num):
    """Return True when the opponent has a run of at least `num` stones through `position`."""
    # Same direction order as before so short-circuiting is unchanged.
    directions = ((1, 1), (1, 0), (1, -1), (0, 1))
    return any(CountOppStonesOnLine(field, position, d) >= num for d in directions)
def CountOppStonesOnLine(field, position, diff):
    """Count consecutive opponent stones ('X') through `position` along direction `diff`."""
    def _run(start_row, start_col, step_row, step_col):
        # Walk while inside the board and still on opponent stones.
        length = 0
        r, c = start_row, start_col
        while 0 <= r < N and 0 <= c < N and field[r][c] == 'X':
            r += step_row
            c += step_col
            length += 1
        return length
    forward = _run(position[0], position[1], diff[0], diff[1])
    backward = _run(position[0] - diff[0], position[1] - diff[1], -diff[0], -diff[1])
    return forward + backward
# Returns true if you have five stones from |position|. Returns false otherwise.
def CanHaveFiveStones(field, position):
    """Score the four lines of own stones ('O') through `position`.

    Returns a tuple:
      * the longest run through `position` (>= 5 means a winning move), and
      * the product of (run_length - 1) over all four directions, used by
        Think() as a tie-break score.

    Fix: the original called CountStonesOnLine twice per direction (once for
    the value, once inside the comparison); each direction is now counted once.
    The dead commented-out return expression was removed.
    """
    # Direction order matches the original: diag, vertical, horizontal, anti-diag.
    counts = [CountStonesOnLine(field, position, d)
              for d in ((1, 1), (1, 0), (0, 1), (1, -1))]
    longest = max(counts)
    product = 1
    for c in counts:
        product *= (c - 1)
    return longest, product
# Returns the number of stones you can put around |position| in the direction specified by |diff|.
def CountStonesOnLine(field, position, diff):
    """Count consecutive own stones ('O') through `position` along direction `diff`."""
    def _run(start_row, start_col, step_row, step_col):
        # Walk while inside the board and still on our own stones.
        length = 0
        r, c = start_row, start_col
        while 0 <= r < N and 0 <= c < N and field[r][c] == 'O':
            r += step_row
            c += step_col
            length += 1
        return length
    forward = _run(position[0], position[1], diff[0], diff[1])
    backward = _run(position[0] - diff[0], position[1] - diff[1], -diff[0], -diff[1])
    return forward + backward
# Returns the Manhattan distance from |a| to |b|.
def GetDistance(a, b):
    """Return the Manhattan distance between grid points `a` and `b`."""
    row_delta = abs(a[0] - b[0])
    col_delta = abs(a[1] - b[1])
    return row_delta + col_delta
# =============================================================================
# DO NOT EDIT FOLLOWING FUNCTIONS
# =============================================================================
def main():
    """Read the board from stdin, pick a move, write it to stdout."""
    field = Input()
    position = Think(field)
    Output(position)
def Input():
    """Read N lines from stdin; each becomes a list of single-character cells."""
    field = [list(input()) for i in range(N)]
    return field
def Output(position):
    """Print the chosen move as 'row col' on one line."""
    print(position[0], position[1])
# Outputs |msg| to stderr; This is actually a thin wrapper of print().
# Outputs |msg| to stderr; This is actually a thin wrapper of print().
def DebugPrint(*msg):
    # stderr keeps debug chatter out of the stdout protocol the judge reads.
    import sys
    print(*msg, file=sys.stderr)
if __name__ == '__main__':
main()
| [
"sknzw2608@gmail.com"
] | sknzw2608@gmail.com |
a7bd12e2ee0ac60ddb220bbd563cbe214e9ed450 | 3fa8d270c7d8f6e1fd608cbe7e5ddbf2b39768f3 | /external-calls/httpsGet/src/lambda_function.py | 1a3cecae92804d050293b2f412631fda258ecade | [
"Apache-2.0"
] | permissive | knowlsie/alexa-cookbook | f73980df1bcd8aef630d4924e88b8797444d7280 | 4026ff4ff54194f628736d4c38471546e9b60860 | refs/heads/master | 2021-01-19T22:41:09.817843 | 2017-04-18T13:11:47 | 2017-04-18T13:11:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | ## alexa-cookbook sample code
## There are three sections, Text Strings, Skill Code, and Helper Function(s).
## You can copy and paste the entire file contents as the code for a new Lambda function,
## or copy & paste section #3, the helper function, to the bottom of your existing Lambda code.
## 1. Text strings =====================================================================================================
## Modify these strings and messages to change the behavior of your Lambda function
myData = 'New York'
myUrl = 'https://cp6gckjt97.execute-api.us-east-1.amazonaws.com/prod/stateresource?usstate='
## 2. Skill Code =======================================================================================================
def speechResponse(say, endSession, sessionAttributes):
    """Build an Alexa response envelope wrapping `say` as SSML output speech."""
    print('say = ' + say)
    print  # bare `print` emits a blank line under Python 2 (this module targets py2)
    speech = {'type': 'SSML', 'ssml': say}
    response = {'outputSpeech': speech, 'shouldEndSession': endSession}
    return {'version': '1.0', 'sessionAttributes': sessionAttributes, 'response': response}
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch on the Alexa request type.

    LaunchRequest fetches the demo population figure; IntentRequest echoes
    the intent name; SessionEndedRequest says goodbye and closes the session.
    NOTE(review): requests with any other type fall through and return None,
    which Alexa will treat as an invalid response — confirm that is intended.
    """
    #print("Received event: " + json.dumps(event, indent=2))
    if event['request']['type'] == "LaunchRequest":
        pop = httpsGet(myData) ## see the helper function defined below
        say = "The population of " + myData + " is " + str(pop)
        return speechResponse(say, True, {})
    elif event['request']['type'] == "IntentRequest":
        intentName = event['request']['intent']['name']
        print('handling Intent', intentName)
        say = 'I heard your intent ' + intentName
        return speechResponse(say, False, {})
    elif event['request']['type'] == "SessionEndedRequest":
        say = 'goodbye'
        return speechResponse(say, True, {})
## 3. Helper Function =================================================================================================
import json
import urllib2
## Requests: a separate Python Library http://docs.python-requests.org/en/master/
## to install with Python PIP:
## open a command prompt in your /src folder and type
## pip install requests -t .
import requests
def httpsGet(myData):
    """GET the web service for `myData` and return its 'population' field (0 on bad payload).

    Fix: the original declared `global myUrl` and appended the encoded query
    to it, so every call after the first hit an ever-growing, invalid URL.
    The request URL is now built in a local variable; the module-level
    `myUrl` base is only read.
    """
    url = myUrl + urllib2.quote(myData)
    r = requests.get(url)
    myJs = r.json()
    if 'population' in myJs:
        return myJs['population']
    else:
        # Parenthesized print works under both Python 2 and 3.
        print('Error, web service return data not in expected format')
        return 0
| [
"mccaul@amazon.com"
] | mccaul@amazon.com |
a8db440dc6845514e7806efca43efbe2f6c0c9bd | c27e6ce3e77dd7ab1f66a2a653893fcc06fc3ad4 | /employee_register/migrations/0003_auto_20200812_1039.py | e503bb2bd350fcfa1ffff6c9f5f295392bfa0056 | [] | no_license | bokerwere/python-django-employee-registration | f162c01c70fda7979c3904277df3cad0f192abc8 | ac6433bfa378b37f7d84c36e0244f42e5b5383da | refs/heads/master | 2022-12-12T06:48:35.922474 | 2020-09-01T12:13:08 | 2020-09-01T12:13:08 | 291,987,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # Generated by Django 3.0.8 on 2020-08-12 07:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled Employee field 'mopile' to 'mobile'."""
    dependencies = [
        ('employee_register', '0002_auto_20200808_1309'),
    ]
    operations = [
        migrations.RenameField(
            model_name='employee',
            old_name='mopile',
            new_name='mobile',
        ),
    ]
| [
"josephboker93@gmail.com"
] | josephboker93@gmail.com |
e6d1540bc6d924824bde87140da6b4be13c83c4f | 1c3f011b5e29ce6aa2f65da8b5055454b2be3eab | /battleship_oo1.py | 9d048819f5c89625f70f7cba2fde3645aa776cbe | [] | no_license | pythonlectures/battleship | b26f86f6207a04bd8921d6547ee6837e4cf4db99 | e35fd6e9fe14f4c612ba5c89be4a9bcae3fdad0d | refs/heads/master | 2020-03-23T21:04:07.132029 | 2018-08-05T08:01:43 | 2018-08-05T08:01:43 | 142,079,474 | 0 | 0 | null | 2018-08-11T18:10:52 | 2018-07-23T23:22:16 | Python | UTF-8 | Python | false | false | 2,740 | py | class Game:
def __init__(self, player1, player2, board):
self.player1 = player1
self.player2 = player2
self.board = board
self.board.ships.extend(player1.ships)
self.board.ships.extend(player2.ships)
self.player_takes_turn = self.player1
self.winner = None
@classmethod
def default(cls):
player1 = Player("Jack", [Ship(1, 1), Ship(2, 2)])
player2 = Player("Jill", [Ship(8, 8), Ship(3, 3)])
board = Board(9)
return cls(player1, player2, board)
def play_game(self):
while self.winner is None:
shot = self.player_takes_turn.call_your_shot()
self.take_shot(shot)
self.winner = self.return_winner()
self.alternate_turns()
self.declare_winner()
def alternate_turns(self):
self.player_takes_turn = self.player2 if self.player_takes_turn == self.player1 else self.player1
def declare_winner(self):
print('{}, you won the game!'.format(self.winner.name))
def take_shot(self, shot):
ship_hit = self.board.take_shot(shot)
if ship_hit:
print('{}, your shot hit {}!'.format(self.player_takes_turn.name, ship_hit.name))
self.winner = self.player2
else:
print('{}, you missed your shot!'.format(self.player_takes_turn.name))
self.winner = None
def return_winner(self):
if self.player1.has_lost():
return self.player2
elif self.player2.has_lost():
return self.player1
else:
return None
class Board:
    """Square grid that holds every ship in play."""
    def __init__(self, board_len):
        # Same comprehension order as before: x-major, coordinates 1..board_len-1.
        self.coordinates = [(col, row)
                            for col in range(1, board_len)
                            for row in range(1, board_len)]
        self.ships = []
    def take_shot(self, shot):
        """Return the first ship hit by `shot` (marking it via is_hit), or None."""
        return next((ship for ship in self.ships if ship.is_hit(shot)), None)
class Player:
    """A named participant owning a fleet of ships."""
    def __init__(self, name, ships):
        self.name = name
        # Copy the caller's sequence so later mutations stay independent.
        self.ships = list(ships)
    def call_your_shot(self):
        """Prompt for 'x, y' on stdin and return the shot as a tuple of ints."""
        prompt = '{}, call your shot using comma separated coordinates x, y: '.format(self.name)
        return tuple(int(part.strip()) for part in input(prompt).split(','))
    def has_lost(self):
        """True when every ship in the fleet is sunk (vacuously true for an empty fleet)."""
        return all(ship.is_sunk for ship in self.ships)
class Ship:
    """Single-cell ship; one hit sinks it."""
    # Class-wide counter used only to generate unique display names.
    id_counter = 0
    def __init__(self, x, y):
        Ship.id_counter += 1
        self.name = "Ship{}".format(Ship.id_counter)
        self.coordinates = (x, y)
        self.is_sunk = False
    def is_hit(self, shot):
        """Return True and mark the ship sunk when `shot` matches our cell."""
        if shot == self.coordinates:
            self.is_sunk = True
            return True
        return False
if __name__ == "__main__":
Game.default().play_game()
| [
"marco.verduci@outlook.com"
] | marco.verduci@outlook.com |
e7a6a0fe0f7e05f27f6c11b927615a33570870ae | b560c65b2cdaa92333d114d79cf2a191c6da5852 | /client.py | 29faa95073402246f8263fbb591214aa58c647aa | [] | no_license | Ranger11Danger/python-server-client | 3823be1b3cc761c6f75343c0fc3c058742746f9c | b587d961184944853c7ca3636fd173b98964d5f3 | refs/heads/master | 2020-09-23T04:13:03.990788 | 2019-12-02T14:51:49 | 2019-12-02T14:51:49 | 225,398,857 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #!/usr/bin/python3
import socket
import os
import time
ip = '0.0.0.0'
port = 1337
s = socket.create_connection((ip,port))
while True:
message = input('To server: ')
if '#shell' in message:
os.system("gnome-terminal -e 'bash -c \"ncat -nlvp 8888 --ssl\" '")
time.sleep(3)
s.send(message.encode())
data = s.recv(1024)
print(data.decode('utf-8'))
| [
"noreply@github.com"
] | Ranger11Danger.noreply@github.com |
bf85a112cc933c60dd948b96d1bb63260bbb3d38 | 118a63e4b65052430edbc07b5d794d3bad95a75d | /Урок 7/Task7_2.py | 71b53f30b81915ba28649a964cc02c4e4c20c5d5 | [] | no_license | olgaBovyka/BasicLanguagePython | a47bff74b4908dc34d106defc81f5825d75dc88e | 312d32ca4a7f382eac7e25295c44846572c3b9f0 | refs/heads/master | 2022-11-05T22:34:29.302452 | 2020-06-24T20:35:55 | 2020-06-24T20:35:55 | 263,566,938 | 0 | 0 | null | 2020-06-24T20:35:57 | 2020-05-13T08:13:21 | Python | UTF-8 | Python | false | false | 3,141 | py | """
2. Реализовать проект расчета суммарного расхода ткани на производство одежды. Основная сущность (класс) этого
проекта — одежда, которая может иметь определенное название. К типам одежды в этом проекте относятся пальто и костюм.
У этих типов одежды существуют параметры: размер (для пальто) и рост (для костюма). Это могут быть обычные числа:
V и H, соответственно.
Для определения расхода ткани по каждому типу одежды использовать формулы: для пальто (V/6.5 + 0.5), для костюма
(2 * H + 0.3). Проверить работу этих методов на реальных данных.
Реализовать общий подсчет расхода ткани. Проверить на практике полученные на этом уроке знания: реализовать
абстрактные классы для основных классов проекта, проверить на практике работу декоратора @property.
"""
from abc import ABC, abstractmethod
class Wear(ABC):
    """Abstract garment: concrete types report fabric consumption and a size parameter."""

    @property
    @abstractmethod
    def consumption(self) -> float:
        """Fabric needed to produce one item."""

    @property
    @abstractmethod
    def params(self) -> float:
        """The garment's size/height parameter."""
class Suit(Wear):
    """Suit sized by the wearer's height H; fabric use is 2*H + 0.3."""
    def __init__(self, name: str, height: float):
        self.__name = name
        self.__height = height
    @property
    def consumption(self) -> float:
        return self.__height * 2 + 0.3
    @property
    def params(self) -> float:
        return self.__height
class Coat(Wear):
    """Coat of size V; fabric use is V/6.5 + 0.5."""
    def __init__(self, name: str, size: float):
        self.__name = name
        self.__size = size
    @property
    def consumption(self) -> float:
        return 0.5 + self.__size / 6.5
    @property
    def params(self) -> float:
        return self.__size
# Console driver: each loop re-prompts until the user types a non-negative
# integer, then the totals are computed from the entered sizes and counts.
while True:
    coat_h_var = input("Введите размер пальто ")
    if coat_h_var.isdigit():
        coat_h_int = int(coat_h_var)
        break
my_coat = Coat("burberry", coat_h_int)
print("Расход ткани на 1 пальто", my_coat.consumption)
while True:
    coat_c_var = input("Введите количество пальто ")
    if coat_c_var.isdigit():
        coat_c_int = int(coat_c_var)
        break
while True:
    suit_s_var = input("Введите размер костюма ")
    if suit_s_var.isdigit():
        suit_s_int = int(suit_s_var)
        break
# BUG FIX: the suit was previously constructed as a Coat, so the coat formula
# (V/6.5 + 0.5) was used for the suit's consumption; build a Suit instead.
my_suit = Suit("prada", suit_s_int)
print("Расход ткани на 1 костюм", my_suit.consumption)
while True:
    suit_c_var = input("Введите количество костюмов ")
    if suit_c_var.isdigit():
        suit_c_int = int(suit_c_var)
        break
print("Общий расход ткани", my_suit.consumption * suit_c_int + my_coat.consumption * coat_c_int)
| [
"olga@skat-vending.com"
] | olga@skat-vending.com |
8574e74809fd2344a0ec2897ad43bff7a1dd3a40 | 7f8483b792bd2dfa732a13c26590833f5902f083 | /Aula05/manipulacao_canais.py | a6f1a212109a13ac562163dda03b1d47a9abcc99 | [
"MIT"
] | permissive | thiagopollachini/introducao-opencv | c905c61a65a966d50df2838ba8463a7e0725a765 | c1b63bb2aca008821489f65479c957ce4f925c80 | refs/heads/master | 2021-09-13T23:50:20.365076 | 2018-05-06T03:19:51 | 2018-05-06T03:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | #Importando biblioteca do OpenCV
import cv2
#Importando biblioteca para computação científica
import numpy as np
#Lendo imagens
imagem_original = cv2.imread('../images/lena.png',1)
#Separa os canais da imagem
(B, G, R) = cv2.split(imagem_original)
cv2.imshow("Red", R)
cv2.imshow("Green", G)
cv2.imshow("Blue", B)
cv2.imshow("Original", imagem_original)
cv2.waitKey(0)
#Juntando os canais
fusao = cv2.merge([B,G,R])
cv2.imshow("Imagem fundida", fusao)
cv2.waitKey(0) | [
"viniciuscampos120@gmail.com"
] | viniciuscampos120@gmail.com |
4126a6bc42a32fee4d838aaaaec1268e2fac7a50 | d9efd71bcd858924c32761a24b37c285e3eb8f36 | /caption_generator_api/utils.py | c474a5e7a7cf2a7871fdcad24deb1e58874816f4 | [] | no_license | fg91/Neural-Image-Caption-Generation-Tutorial | 561c1bb33cf8c2827b9c0e5ab8c226512eeaacfc | 3416e64b47de7ac41249295e2de65d380421f4d7 | refs/heads/master | 2020-05-01T07:16:04.590640 | 2019-12-15T12:30:54 | 2019-12-15T12:30:54 | 177,348,369 | 70 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,802 | py | from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
from matplotlib.patheffects import Stroke, Normal
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# the functions fig2data and fig2img are taken from
# http://www.icare.univ-lille1.fr/tutorials/convert_a_matplotlib_figure
# Deprecation errors have been fixed
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # Force a draw so the renderer's pixel buffer is populated.
    fig.canvas.draw()
    w, h = fig.canvas.get_width_height()
    # np.fromstring is deprecated for binary input (removed in NumPy 2.0);
    # frombuffer is the supported, copy-free equivalent.
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    # NOTE(review): (h, w, 4) would be the natural row-major shape; (w, h, 4)
    # matches the original code and its consumer fig2img, and only matters
    # for non-square figures — confirm before "fixing".
    buf = buf.reshape(w, h, 4)
    # canvas.tostring_argb gives pixmap in ARGB mode. Roll the ALPHA channel
    # to have it in RGBA mode.
    buf = np.roll(buf, 3, axis=2)
    return buf
def fig2img(fig):
    """
    @brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
    @param fig a matplotlib figure
    @return a Python Imaging Library ( PIL ) image
    """
    # put the figure pixmap into a numpy array
    buf = fig2data(fig)
    w, h, d = buf.shape
    # ndarray.tostring() was deprecated (and removed in NumPy 2.0);
    # tobytes() returns the identical byte string.
    return Image.frombytes("RGBA", (w, h), buf.tobytes())
def draw_text(ax, xy, txt, sz=14):
    """Draw bold white text at axes position `xy` with a thin black outline."""
    text = ax.text(*xy, txt, verticalalignment='top', color='white', fontsize=sz, weight='bold')
    draw_outline(text, 1)
def draw_outline(matplt_plot_obj, lw):
    """Give a matplotlib artist a black stroke of width `lw` so it stays readable on any background."""
    matplt_plot_obj.set_path_effects([Stroke(linewidth=lw, foreground='black'), Normal()])
def show_img(im, figsize=None, ax=None, alpha=1, cmap=None):
    """Render image `im` on `ax` (creating a new figure when ax is None), hide the axes, return the Axes."""
    if not ax:
        fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, alpha=alpha, cmap=cmap)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    return ax
def visualize_attention(im, pred, alphas, denorm, vocab, att_size=7, thresh=0., sz=224, return_fig_as_PIL_image=False):
    """Plot the image once per predicted token with its attention map overlaid.

    The first panel shows the plain image; each following panel blends a
    blurred, upsampled attention mask over it and labels the panel with the
    corresponding token.  NOTE(review): `alphas` is assumed to be a torch
    tensor of per-token attention weights (it is .view()-ed and .cpu()-ed)
    and `pred` a sequence of vocab indices — confirm against the caller.
    """
    cap_len = len(pred)
    # Reshape to (tokens, 1, att, att) and normalize all maps jointly to [0, 1].
    alphas = alphas.view(-1,1, att_size, att_size).cpu().data.numpy()
    alphas = np.maximum(thresh, alphas)
    alphas -= alphas.min()
    alphas /= alphas.max()
    figure, axes = plt.subplots(cap_len//5 + 1,5, figsize=(12,8))
    for i, ax in enumerate(axes.flat):
        if i <= cap_len:
            ax = show_img(denorm(im), ax=ax)
            if i > 0:
                # Upsample the i-th token's att_size x att_size map to the
                # image resolution, then blur it for a smooth heat overlay.
                mask = np.array(Image.fromarray(alphas[i - 1,0]).resize((sz,sz)))
                blurred_mask = gaussian_filter(mask, sigma=8)
                show_img(blurred_mask, ax=ax, alpha=0.5, cmap='afmhot')
                draw_text(ax, (0,0), vocab.itos[pred[i - 1]])
        else:
            # Unused grid cells are hidden.
            ax.axis('off')
    plt.tight_layout()
    if return_fig_as_PIL_image:
        return fig2img(figure)
| [
"fabiograetz@googlemail.com"
] | fabiograetz@googlemail.com |
c04c9fa2bf4ebb0e001ac9c901b729acbfa268d1 | ae537f4732aa31c7238e2287609738da881cb89c | /resources/item.py | 33ecafc1761c929d614cd892a1a08eb8112a8514 | [] | no_license | vincentgoh82/SchoolCode | 568e96126f350bff5d665bb87a0136d805b0c780 | 1904362d2bf1b673d9fcd2b6344aca8d7fa4aaa1 | refs/heads/master | 2021-07-09T21:10:20.905018 | 2017-10-05T10:31:40 | 2017-10-05T10:31:40 | 105,864,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
from models.item import ItemModel
class Item(Resource):
    """Single-item REST resource exposed at /item/<name>."""
    # Shared request parser: POST and PUT both require a price and a store id.
    parser = reqparse.RequestParser()
    parser.add_argument('price',
        type=float,
        required=True,
        help="This field cannot be left blank!"
    )
    parser.add_argument('store_id',
        type=int,
        required=True,
        help="Every item needs a store id."
    )
    @jwt_required()
    def get(self, name):
        """Return the item as JSON, or 404 when it does not exist. Requires a JWT."""
        item = ItemModel.find_by_name(name)
        if item:
            return item.json()
        return {'message': 'Item not found'}, 404
    def post(self, name):
        """Create a new item; 400 when the name already exists, 500 on DB failure."""
        if ItemModel.find_by_name(name):
            return{'message': "An item with name '{}' already exists".format(name)}, 400
        data = Item.parser.parse_args()
        item = ItemModel(name, **data)
        try:
            item.save_to_db()
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
            return{"message": "An error occurred inserting the item."}, 500
        return item.json(), 201
    def delete(self, name):
        """Delete the item when present and report what actually happened."""
        item = ItemModel.find_by_name(name)
        if item:
            item.delete_from_db()
            return{"message": "Item deleted."}
        # BUG FIX: previously "Item deleted." was returned even when no such
        # item existed; report the miss explicitly instead.
        return{"message": "Item not found."}, 404
    def put(self, name):
        """Idempotent upsert: update the price or create a brand-new item."""
        data = Item.parser.parse_args()
        item = ItemModel.find_by_name(name)
        if item:
            item.price = data['price']
        else:
            # BUG FIX: the parser requires store_id, but the old code built
            # ItemModel(name, data['price']) without it; pass every parsed
            # field, matching post().
            item = ItemModel(name, **data)
        item.save_to_db()
        return item.json()
class ItemList(Resource):
    """Collection resource: GET returns every stored item."""
    def get(self):
        items = [item.json() for item in ItemModel.query.all()]
        return {'items': items}
| [
"vincentgoh1982@gmail.com"
] | vincentgoh1982@gmail.com |
715807a84591cf453e82f10c07cfed4904e75100 | 42acd95998670c6815d7995eda8f54025086697f | /backend/src/api.py | 6563db1ce0158a18847ab77702d303ba6c8df441 | [] | no_license | ahmostafa91/coffee-shop | 2933a7ec26f006151fea04eeeed4eda9b8d5202e | 2a12e470b6773682f86afe21cb3cb74f75f6b783 | refs/heads/main | 2023-03-17T08:45:02.811929 | 2021-03-06T06:03:21 | 2021-03-06T06:03:21 | 345,019,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,909 | py | import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS
from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth
app = Flask(__name__)
setup_db(app)
CORS(app)
'''
@TODO uncomment the following line to initialize the datbase
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
'''
db_drop_and_create_all()
## ROUTES
'''
@TODO implement endpoint
GET /drinks
it should be a public endpoint
it should contain only the drink.short() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
@app.route('/drinks')
def get_drinks():
    """Public endpoint: return every drink in its short representation."""
    drinks = Drink.query.all()
    # print('=========>', drinks)
    return jsonify({
        'success': True,
        'drinks': [drink.short() for drink in drinks]
    }), 200
'''
@TODO implement endpoint
GET /drinks-detail
it should require the 'get:drinks-detail' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
@app.route('/drinks-detail')
@requires_auth('get:drinks-detail')
def drinks_detail(payload):
    """Return every drink in its long representation; needs 'get:drinks-detail'.

    NOTE(review): the blanket `except Exception` turns any bug in this
    handler into a bare 500 — consider narrowing it.
    """
    try:
        all_drinks = Drink.query.all()
        return jsonify({
            'success': True,
            'drinks': [drink.long() for drink in all_drinks]
        }), 200
    except Exception:
        abort(500)
'''
@TODO implement endpoint
POST /drinks
it should create a new row in the drinks table
it should require the 'post:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the newly created drink
or appropriate status code indicating reason for failure
'''
@app.route('/drinks', methods=['POST'])
@requires_auth('post:drinks')
def create_drink(payload):
    """Create a drink from the JSON body; needs 'post:drinks'. 400 on bad input."""
    req = request.get_json()
    try:
        req_recipe = req['recipe']
        # A single ingredient may arrive as a bare object; normalize to a list.
        if isinstance(req_recipe, dict):
            req_recipe = [req_recipe]
        drink = Drink()
        drink.title = req['title']
        # The recipe column stores the JSON-serialized ingredient list.
        drink.recipe = json.dumps(req_recipe)
        drink.insert()
    except Exception:
        # was `except BaseException`, which also swallowed
        # SystemExit/KeyboardInterrupt; Exception is the correct net here
        abort(400)
    return jsonify({'success': True, 'drinks': [drink.long()]})
'''
@TODO implement endpoint
PATCH /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'patch:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the updated drink
or appropriate status code indicating reason for failure
'''
@app.route('/drinks/<int:id>', methods=['PATCH'])
@requires_auth('patch:drinks')
def update_drink(payload, id):
    """Patch a drink's title and/or recipe; needs 'patch:drinks'. 404 when missing."""
    req = request.get_json()
    drink = Drink.query.filter(Drink.id == id).one_or_none()
    if not drink:
        abort(404)
    try:
        req_title = req.get('title')
        req_recipe = req.get('recipe')
        if req_title:
            drink.title = req_title
        if req_recipe:
            # reuse the already-fetched value (the original re-indexed req['recipe'])
            drink.recipe = json.dumps(req_recipe)
        drink.update()
    except Exception:
        # was `except BaseException`, which also swallowed
        # SystemExit/KeyboardInterrupt
        abort(400)
    return jsonify({'success': True, 'drinks': [drink.long()]}), 200
'''
@TODO implement endpoint
DELETE /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:drinks' permission
returns status code 200 and json {"success": True, "delete": id} where id is the id of the deleted record
or appropriate status code indicating reason for failure
'''
@app.route('/drinks/<int:id>', methods=['DELETE'])
@requires_auth('delete:drinks')
def delete_drinks(payload, id):
    """Delete a drink by id; needs 'delete:drinks'. 404 when missing, 500 on DB failure.

    Fix: the lookup and abort(404) used to live inside the try block, so the
    HTTPException raised by abort was caught by `except Exception` and
    re-reported as a 500.  The not-found path now runs outside the try.
    """
    drink = Drink.query.filter(Drink.id == id).one_or_none()
    if drink is None:
        abort(404, f'No drink found with id: {id}')
    try:
        drink.delete()
    except Exception:
        abort(500)
    return jsonify({
        'success': True,
        'delete': id
    }), 200
## Error Handling
'''
Example error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
    """Render 422 (unprocessable entity) errors in the API's JSON envelope."""
    return jsonify({
        "success": False,
        "error": 422,
        "message": "unprocessable"
    }), 422
'''
@TODO implement error handlers using the @app.errorhandler(error) decorator
each error handler should return (with approprate messages):
jsonify({
"success": False,
"error": 404,
"message": "resource not found"
}), 404
'''
'''
@TODO implement error handler for 404
error handler should conform to general task above
'''
@app.errorhandler(404)
def not_found(error):
    """Render 404 (resource not found) errors in the API's JSON envelope."""
    return jsonify({
        "success": False,
        "error": 404,
        "message": "resource not found"
    }), 404
'''
@TODO implement error handler for AuthError
error handler should conform to general task above
'''
@app.errorhandler(AuthError)
def not_authenticated(auth_error):
    """Translate AuthError raised by @requires_auth into a JSON 401 response."""
    return jsonify({
        "success": False,
        "error": auth_error.status_code,
        "message": auth_error.error
    }), 401
| [
"a.moustafa@penta-b.com"
] | a.moustafa@penta-b.com |
c72ea0fdf63e7cab3cd12fac24e9a96fe75a01e2 | 50402cc4388dfee3a9dbe9e121ef217759ebdba8 | /etc/MOPSO-GP0/ZDT4.py | 1082e5005e8823de068729fbccebe4e6a539378f | [] | no_license | dqyi11/SVNBackup | bd46a69ec55e3a4f981a9bca4c8340944d8d5886 | 9ad38e38453ef8539011cf4d9a9c0a363e668759 | refs/heads/master | 2020-03-26T12:15:01.155873 | 2015-12-10T01:11:36 | 2015-12-10T01:11:36 | 144,883,382 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | '''
Created on 2014-1-25
@author: Walter
'''
from SwarmND import *;
import numpy as np;
import sys;
if __name__ == '__main__':
    # ZDT4 benchmark objectives for the multi-objective particle swarm.
    def func1(x):
        return x[0];
    def func2(x):
        # g accumulates the Rastrigin-style term over decision vars x[2..9].
        sum = 0.0;
        for i in range(2, 10):
            sum += x[i]**2 - 10 * np.cos(4 * np.pi * x[i]);
        g = 1 + 10 * 9 + sum;
        h = 1 - np.sqrt(x[0]/g);
        return g * h;
    # Output folder for the plots (the first assignment is immediately
    # overwritten; kept as in the original).
    figFolder = sys.path[0];
    figFolder = sys.path[0] + "\\zdt4";
    # Sample the analytic Pareto front (y = 1 - sqrt(x)) and a local front.
    paretoX = np.arange(0.0,1.0,0.005);
    paretoY = np.zeros(len(paretoX));
    localParetoY = np.zeros(len(paretoX));
    paretoPos = [];
    for i in range(len(paretoX)):
        paretoY[i] = 1 - np.sqrt(paretoX[i]);
        localParetoY[i] = 1 - np.sqrt(paretoX[i]/1.25);
        fitPos = np.matrix(np.zeros((1,2), np.float));
        fitPos[0,0] = paretoX[i];
        fitPos[0,1] = paretoY[i];
        paretoPos.append(fitPos);
    # 100 particles over a 10-dimensional decision space.
    swarm = SwarmND(100, 10);
    swarm.setDisplayParam(600, 600, 20, 0.1)
    swarm.setParam(2.0, 2.0, 0.8, [func1, func2]);
    # Decision-variable bounds: x0 in [0,1], x1..x9 in [-5,5].
    ws = [];
    ws.append([0.0, 1.0]);
    for i in range(1,10):
        ws.append([-5.0, 5.0])
    swarm.initParticles(ws);
    swarm.paretoX = paretoX;
    swarm.paretoY = paretoY;
    swarm.localParetoX = paretoX;
    swarm.localParetoY = localParetoY;
    swarm.paretoPos = paretoPos;
    # Run in stages, snapshotting a plot after each stage's iterations.
    runPlan = [30, 60, 80, 100];
    count = 0;
    for r in runPlan:
        for t in range(r):
            swarm.update();
            count += 1;
        swarm.plot(count, figFolder);
| [
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] | walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39 |
7e15d512ec3c87a9d4dc6de189623ab45646f041 | efb3194a583cd79cc03dc91b9a96dfc0bdd3a344 | /stm32f/json_pkt.py | 8fab02dbeb225a6406222a1a16911d147abec342 | [
"Apache-2.0"
] | permissive | andersy005/capstone | 9227b0c19b4e16ea5e67a529937652408d0a35f2 | b4301ebc7c1447f3ce2ff034add985c1f417f065 | refs/heads/master | 2021-09-13T07:42:52.359116 | 2018-04-26T17:58:05 | 2018-04-26T17:58:05 | 118,843,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | # This code should run fine on MicroPython or CPython.
#
# It allows objects which can be represented as JSON objects to be sent
# between two python programs (running on the same or different computers).
import json
from dump_mem import dump_mem
SOH = 0x01
STX = 0x02
ETX = 0x03
EOT = 0x04
# <SOH><LenLow><LenHigh><STX><PAYLOAD><ETX><LRC><EOT>
def lrc(data):
    """Return the one-byte two's-complement LRC checksum of an iterable of byte values.

    Equivalent to ((sum(data) & 0xff) ^ 0xff) + 1, masked to one byte.
    The parameter was renamed from `str`, which shadowed the builtin.
    """
    total = 0
    for b in data:
        total = (total + b) & 0xff
    return ((total ^ 0xff) + 1) & 0xff
class JSON_Packet:
    """Framed JSON transport over a byte stream.

    Wire format (see module comment):
        <SOH><LenLow><LenHigh><STX><PAYLOAD><ETX><LRC><EOT>
    `send` serializes and frames an object; `process_byte` is a state machine
    that is fed one received byte at a time and yields a decoded object when
    a complete, checksum-valid frame has been seen.
    """
    # Parser states, in on-the-wire order of the frame fields.
    STATE_SOH = 0
    STATE_LEN_0 = 1
    STATE_LEN_1 = 2
    STATE_STX = 3
    STATE_PAYLOAD = 4
    STATE_ETX = 5
    STATE_LRC = 6
    STATE_EOT = 7
    def __init__(self, serial_port, show_packets=False):
        # serial_port: any object with write(bytes); show_packets enables hex dumps.
        self.serial_port = serial_port
        self.show_packets = show_packets
        self.pkt_len = 0   # declared payload length of the frame being parsed
        self.pkt_idx = 0   # bytes of payload received so far
        self.pkt = None    # payload buffer (allocated when STX is seen)
        self.lrc = 0       # running checksum over the payload
        self.state = JSON_Packet.STATE_SOH
    def send(self, obj):
        """Converts a python object into its json representation and then sends
        it using the 'serial_port' passed in the constructor.
        """
        j_str = json.dumps(obj).encode('ascii')
        j_len = len(j_str)
        j_lrc = lrc(j_str)
        # Header carries the payload length little-endian in two bytes.
        hdr = bytearray((SOH, j_len & 0xff, j_len >> 8, STX))
        ftr = bytearray((ETX, j_lrc, EOT))
        if self.show_packets:
            data = hdr + j_str + ftr
            dump_mem(data, 'Send')
        self.serial_port.write(hdr)
        self.serial_port.write(j_str)
        self.serial_port.write(ftr)
    def process_byte(self, byte):
        """Processes a single byte. Returns a json object when one is
        successfully parsed, otherwise returns None.

        Any framing/checksum mismatch silently resets the parser to hunt
        for the next SOH.
        """
        if self.show_packets:
            if byte >= ord(' ') and byte <= ord('~'):
                print('Rcvd 0x%02x \'%c\'' % (byte, byte))
            else:
                print('Rcvd 0x%02x' % byte)
        if self.state == JSON_Packet.STATE_SOH:
            if byte == SOH:
                self.state = JSON_Packet.STATE_LEN_0
        elif self.state == JSON_Packet.STATE_LEN_0:
            # Low byte of the little-endian payload length.
            self.pkt_len = byte
            self.state = JSON_Packet.STATE_LEN_1
        elif self.state == JSON_Packet.STATE_LEN_1:
            # High byte of the payload length.
            self.pkt_len += (byte << 8)
            self.state = JSON_Packet.STATE_STX
        elif self.state == JSON_Packet.STATE_STX:
            if byte == STX:
                self.state = JSON_Packet.STATE_PAYLOAD
                self.pkt_idx = 0
                self.pkt = bytearray(self.pkt_len)
                self.lrc = 0
            else:
                self.state = JSON_Packet.STATE_SOH
        elif self.state == JSON_Packet.STATE_PAYLOAD:
            # Accumulate payload bytes and the running checksum.
            self.pkt[self.pkt_idx] = byte
            self.lrc = (self.lrc + byte) & 0xff
            self.pkt_idx += 1
            if self.pkt_idx >= self.pkt_len:
                self.state = JSON_Packet.STATE_ETX
        elif self.state == JSON_Packet.STATE_ETX:
            if byte == ETX:
                self.state = JSON_Packet.STATE_LRC
            else:
                self.state = JSON_Packet.STATE_SOH
        elif self.state == JSON_Packet.STATE_LRC:
            # Convert the running sum to its two's complement and compare
            # with the transmitted LRC byte.
            self.lrc = ((self.lrc ^ 0xff) + 1) & 0xff
            if self.lrc == byte:
                self.state = JSON_Packet.STATE_EOT
            else:
                self.state = JSON_Packet.STATE_SOH
        elif self.state == JSON_Packet.STATE_EOT:
            self.state = JSON_Packet.STATE_SOH
            if byte == EOT:
                return json.loads(str(self.pkt, 'ascii'))
| [
"axbanihirwe@ualr.edu"
] | axbanihirwe@ualr.edu |
bd28753be51fb382e2fc2bb1f18dd523d88d8db0 | b28ca1c9a669f1a538b17837d25e394a8fd4ad10 | /SimpleAuth/wsgi.py | f7182e8691ce978835f002e1be4f6912795ed46d | [] | no_license | andricDu/SimpleAuth | e01a1c2b95cc9e8485a2e0be4cb8c772a6d5968f | 2de255c1d550314113a9b6529e59a68f09b29dd6 | refs/heads/master | 2021-08-18T12:40:40.865700 | 2020-04-21T14:34:26 | 2020-04-21T14:34:26 | 90,769,179 | 0 | 1 | null | 2021-06-10T18:34:25 | 2017-05-09T16:43:10 | Python | UTF-8 | Python | false | false | 398 | py | """
WSGI config for SimpleAuth project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SimpleAuth.settings")
application = get_wsgi_application()
| [
"dusan.andric@oicr.on.ca"
] | dusan.andric@oicr.on.ca |
07b9a817a5d2523a16a0e03b7548ae96e1853340 | 192b040fb4487d4634c41cdf9c66042853749937 | /colat/utils/stylegan_helper.py | 796ba7ca92e7cfdc1374a0100a6f519676562d52 | [] | no_license | kkodoo/latentclr | f62dbdb50d3a9ad0cd3869618c973d88cf4406fb | f5e88ee90f5c5dc38a42972117acf419dfa39da9 | refs/heads/main | 2023-08-29T10:24:46.831681 | 2021-10-11T19:32:25 | 2021-10-11T19:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,962 | py | # Copyright 2020 Erik Härkönen. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pickle
import sys
from collections import OrderedDict
from pathlib import Path
import numpy as np
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
# Reimplementation of StyleGAN in PyTorch
# Source: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
class MyLinear(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
def __init__(
self,
input_size,
output_size,
gain=2 ** (0.5),
use_wscale=False,
lrmul=1,
bias=True,
):
super().__init__()
he_std = gain * input_size ** (-0.5) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(
torch.randn(output_size, input_size) * init_std
)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_size))
self.b_mul = lrmul
else:
self.bias = None
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
return F.linear(x, self.weight * self.w_mul, bias)
class MyConv2d(nn.Module):
"""Conv layer with equalized learning rate and custom learning rate multiplier."""
def __init__(
self,
input_channels,
output_channels,
kernel_size,
gain=2 ** (0.5),
use_wscale=False,
lrmul=1,
bias=True,
intermediate=None,
upscale=False,
):
super().__init__()
if upscale:
self.upscale = Upscale2d()
else:
self.upscale = None
he_std = gain * (input_channels * kernel_size ** 2) ** (-0.5) # He init
self.kernel_size = kernel_size
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(
torch.randn(output_channels, input_channels, kernel_size, kernel_size)
* init_std
)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_channels))
self.b_mul = lrmul
else:
self.bias = None
self.intermediate = intermediate
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
have_convolution = False
if self.upscale is not None and min(x.shape[2:]) * 2 >= 128:
# this is the fused upscale + conv from StyleGAN, sadly this seems incompatible with the non-fused way
# this really needs to be cleaned up and go into the conv...
w = self.weight * self.w_mul
w = w.permute(1, 0, 2, 3)
# probably applying a conv on w would be more efficient. also this quadruples the weight (average)?!
w = F.pad(w, (1, 1, 1, 1))
w = (
w[:, :, 1:, 1:]
+ w[:, :, :-1, 1:]
+ w[:, :, 1:, :-1]
+ w[:, :, :-1, :-1]
)
x = F.conv_transpose2d(x, w, stride=2, padding=(w.size(-1) - 1) // 2)
have_convolution = True
elif self.upscale is not None:
x = self.upscale(x)
if not have_convolution and self.intermediate is None:
return F.conv2d(
x, self.weight * self.w_mul, bias, padding=self.kernel_size // 2
)
elif not have_convolution:
x = F.conv2d(
x, self.weight * self.w_mul, None, padding=self.kernel_size // 2
)
if self.intermediate is not None:
x = self.intermediate(x)
if bias is not None:
x = x + bias.view(1, -1, 1, 1)
return x
class NoiseLayer(nn.Module):
"""adds noise. noise is per pixel (constant over channels) with per-channel weight"""
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(channels))
self.noise = None
def forward(self, x, noise=None):
if noise is None and self.noise is None:
noise = torch.randn(
x.size(0), 1, x.size(2), x.size(3), device=x.device, dtype=x.dtype
)
elif noise is None:
# here is a little trick: if you get all the noiselayers and set each
# modules .noise attribute, you can have pre-defined noise.
# Very useful for analysis
noise = self.noise
x = x + self.weight.view(1, -1, 1, 1) * noise
return x
class StyleMod(nn.Module):
def __init__(self, latent_size, channels, use_wscale):
super(StyleMod, self).__init__()
self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale=use_wscale)
def forward(self, x, latent):
style = self.lin(latent) # style => [batch_size, n_channels*2]
shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]
style = style.view(shape) # [batch_size, 2, n_channels, ...]
x = x * (style[:, 0] + 1.0) + style[:, 1]
return x
class PixelNormLayer(nn.Module):
def __init__(self, epsilon=1e-8):
super().__init__()
self.epsilon = epsilon
def forward(self, x):
return x * torch.rsqrt(torch.mean(x ** 2, dim=1, keepdim=True) + self.epsilon)
class BlurLayer(nn.Module):
def __init__(self, kernel=[1, 2, 1], normalize=True, flip=False, stride=1):
super(BlurLayer, self).__init__()
kernel = [1, 2, 1]
kernel = torch.tensor(kernel, dtype=torch.float32)
kernel = kernel[:, None] * kernel[None, :]
kernel = kernel[None, None]
if normalize:
kernel = kernel / kernel.sum()
if flip:
kernel = kernel[:, :, ::-1, ::-1]
self.register_buffer("kernel", kernel)
self.stride = stride
def forward(self, x):
# expand kernel channels
kernel = self.kernel.expand(x.size(1), -1, -1, -1)
x = F.conv2d(
x,
kernel,
stride=self.stride,
padding=int((self.kernel.size(2) - 1) / 2),
groups=x.size(1),
)
return x
def upscale2d(x, factor=2, gain=1):
assert x.dim() == 4
if gain != 1:
x = x * gain
if factor != 1:
shape = x.shape
x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(
-1, -1, -1, factor, -1, factor
)
x = x.contiguous().view(
shape[0], shape[1], factor * shape[2], factor * shape[3]
)
return x
class Upscale2d(nn.Module):
def __init__(self, factor=2, gain=1):
super().__init__()
assert isinstance(factor, int) and factor >= 1
self.gain = gain
self.factor = factor
def forward(self, x):
return upscale2d(x, factor=self.factor, gain=self.gain)
class G_mapping(nn.Sequential):
def __init__(self, nonlinearity="lrelu", use_wscale=True):
act, gain = {
"relu": (torch.relu, np.sqrt(2)),
"lrelu": (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2)),
}[nonlinearity]
layers = [
("pixel_norm", PixelNormLayer()),
(
"dense0",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense0_act", act),
(
"dense1",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense1_act", act),
(
"dense2",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense2_act", act),
(
"dense3",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense3_act", act),
(
"dense4",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense4_act", act),
(
"dense5",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense5_act", act),
(
"dense6",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense6_act", act),
(
"dense7",
MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale),
),
("dense7_act", act),
]
super().__init__(OrderedDict(layers))
def forward(self, x):
return super().forward(x)
class Truncation(nn.Module):
def __init__(self, avg_latent, max_layer=8, threshold=0.7):
super().__init__()
self.max_layer = max_layer
self.threshold = threshold
self.register_buffer("avg_latent", avg_latent)
def forward(self, x):
assert x.dim() == 3
interp = torch.lerp(self.avg_latent, x, self.threshold)
do_trunc = (torch.arange(x.size(1)) < self.max_layer).view(1, -1, 1)
return torch.where(do_trunc, interp, x)
class LayerEpilogue(nn.Module):
"""Things to do at the end of each layer."""
def __init__(
self,
channels,
dlatent_size,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
):
super().__init__()
layers = []
if use_noise:
layers.append(("noise", NoiseLayer(channels)))
layers.append(("activation", activation_layer))
if use_pixel_norm:
layers.append(("pixel_norm", PixelNorm()))
if use_instance_norm:
layers.append(("instance_norm", nn.InstanceNorm2d(channels)))
self.top_epi = nn.Sequential(OrderedDict(layers))
if use_styles:
self.style_mod = StyleMod(dlatent_size, channels, use_wscale=use_wscale)
else:
self.style_mod = None
def forward(self, x, dlatents_in_slice=None):
x = self.top_epi(x)
if self.style_mod is not None:
x = self.style_mod(x, dlatents_in_slice)
else:
assert dlatents_in_slice is None
return x
class InputBlock(nn.Module):
def __init__(
self,
nf,
dlatent_size,
const_input_layer,
gain,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
):
super().__init__()
self.const_input_layer = const_input_layer
self.nf = nf
if self.const_input_layer:
# called 'const' in tf
self.const = nn.Parameter(torch.ones(1, nf, 4, 4))
self.bias = nn.Parameter(torch.ones(nf))
else:
self.dense = MyLinear(
dlatent_size, nf * 16, gain=gain / 4, use_wscale=use_wscale
) # tweak gain to match the official implementation of Progressing GAN
self.epi1 = LayerEpilogue(
nf,
dlatent_size,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
)
self.conv = MyConv2d(nf, nf, 3, gain=gain, use_wscale=use_wscale)
self.epi2 = LayerEpilogue(
nf,
dlatent_size,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
)
def forward(self, dlatents_in_range):
batch_size = dlatents_in_range.size(0)
if self.const_input_layer:
x = self.const.expand(batch_size, -1, -1, -1)
x = x + self.bias.view(1, -1, 1, 1)
else:
x = self.dense(dlatents_in_range[:, 0]).view(batch_size, self.nf, 4, 4)
x = self.epi1(x, dlatents_in_range[:, 0])
x = self.conv(x)
x = self.epi2(x, dlatents_in_range[:, 1])
return x
class GSynthesisBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
blur_filter,
dlatent_size,
gain,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
):
# 2**res x 2**res # res = 3..resolution_log2
super().__init__()
if blur_filter:
blur = BlurLayer(blur_filter)
else:
blur = None
self.conv0_up = MyConv2d(
in_channels,
out_channels,
kernel_size=3,
gain=gain,
use_wscale=use_wscale,
intermediate=blur,
upscale=True,
)
self.epi1 = LayerEpilogue(
out_channels,
dlatent_size,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
)
self.conv1 = MyConv2d(
out_channels, out_channels, kernel_size=3, gain=gain, use_wscale=use_wscale
)
self.epi2 = LayerEpilogue(
out_channels,
dlatent_size,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
activation_layer,
)
def forward(self, x, dlatents_in_range):
x = self.conv0_up(x)
x = self.epi1(x, dlatents_in_range[:, 0])
x = self.conv1(x)
x = self.epi2(x, dlatents_in_range[:, 1])
return x
class G_synthesis(nn.Module):
def __init__(
self,
dlatent_size=512, # Disentangled latent (W) dimensionality.
num_channels=3, # Number of output color channels.
resolution=1024, # Output resolution.
fmap_base=8192, # Overall multiplier for the number of feature maps.
fmap_decay=1.0, # log2 feature map reduction when doubling the resolution.
fmap_max=512, # Maximum number of feature maps in any layer.
use_styles=True, # Enable style inputs?
const_input_layer=True, # First layer is a learned constant?
use_noise=True, # Enable noise inputs?
randomize_noise=True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity="lrelu", # Activation function: 'relu', 'lrelu'
use_wscale=True, # Enable equalized learning rate?
use_pixel_norm=False, # Enable pixelwise feature vector normalization?
use_instance_norm=True, # Enable instance normalization?
dtype=torch.float32, # Data type to use for activations and outputs.
blur_filter=[
1,
2,
1,
], # Low-pass filter to apply when resampling activations. None = no filtering.
):
super().__init__()
def nf(stage):
return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
self.dlatent_size = dlatent_size
resolution_log2 = int(np.log2(resolution))
assert resolution == 2 ** resolution_log2 and resolution >= 4
act, gain = {
"relu": (torch.relu, np.sqrt(2)),
"lrelu": (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2)),
}[nonlinearity]
num_layers = resolution_log2 * 2 - 2
num_styles = num_layers if use_styles else 1
torgbs = []
blocks = []
for res in range(2, resolution_log2 + 1):
channels = nf(res - 1)
name = "{s}x{s}".format(s=2 ** res)
if res == 2:
blocks.append(
(
name,
InputBlock(
channels,
dlatent_size,
const_input_layer,
gain,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
act,
),
)
)
else:
blocks.append(
(
name,
GSynthesisBlock(
last_channels,
channels,
blur_filter,
dlatent_size,
gain,
use_wscale,
use_noise,
use_pixel_norm,
use_instance_norm,
use_styles,
act,
),
)
)
last_channels = channels
self.torgb = MyConv2d(channels, num_channels, 1, gain=1, use_wscale=use_wscale)
self.blocks = nn.ModuleDict(OrderedDict(blocks))
def forward(self, dlatents_in):
# Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
# lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
batch_size = dlatents_in.size(0)
for i, m in enumerate(self.blocks.values()):
if i == 0:
x = m(dlatents_in[:, 2 * i : 2 * i + 2])
else:
x = m(x, dlatents_in[:, 2 * i : 2 * i + 2])
rgb = self.torgb(x)
return rgb
class StyleGAN_G(nn.Sequential):
def __init__(self, resolution, truncation=1.0):
self.resolution = resolution
self.layers = OrderedDict(
[
("g_mapping", G_mapping()),
# ('truncation', Truncation(avg_latent)),
("g_synthesis", G_synthesis(resolution=resolution)),
]
)
super().__init__(self.layers)
def forward(self, x, latent_is_w=False):
if isinstance(x, list):
assert len(x) == 18, "Must provide 1 or 18 latents"
if not latent_is_w:
x = [self.layers["g_mapping"].forward(l) for l in x]
x = torch.stack(x, dim=1)
else:
if not latent_is_w:
x = self.layers["g_mapping"].forward(x)
x = x.unsqueeze(1).expand(-1, 18, -1)
x = self.layers["g_synthesis"].forward(x)
return x
# From: https://github.com/lernapparat/lernapparat/releases/download/v2019-02-01/
def load_weights(self, checkpoint):
self.load_state_dict(torch.load(checkpoint))
def export_from_tf(self, pickle_path):
module_path = Path(__file__).parent / "stylegan_tf"
sys.path.append(str(module_path.resolve()))
import collections
import pickle
import dnnlib
import dnnlib.tflib
import torch
dnnlib.tflib.init_tf()
weights = pickle.load(open(pickle_path, "rb"))
weights_pt = [
collections.OrderedDict(
[
(k, torch.from_numpy(v.value().eval()))
for k, v in w.trainables.items()
]
)
for w in weights
]
# torch.save(weights_pt, pytorch_name)
# then on the PyTorch side run
(
state_G,
state_D,
state_Gs,
) = weights_pt # torch.load('./karras2019stylegan-ffhq-1024x1024.pt')
def key_translate(k):
k = k.lower().split("/")
if k[0] == "g_synthesis":
if not k[1].startswith("torgb"):
k.insert(1, "blocks")
k = ".".join(k)
k = (
k.replace("const.const", "const")
.replace("const.bias", "bias")
.replace("const.stylemod", "epi1.style_mod.lin")
.replace("const.noise.weight", "epi1.top_epi.noise.weight")
.replace("conv.noise.weight", "epi2.top_epi.noise.weight")
.replace("conv.stylemod", "epi2.style_mod.lin")
.replace("conv0_up.noise.weight", "epi1.top_epi.noise.weight")
.replace("conv0_up.stylemod", "epi1.style_mod.lin")
.replace("conv1.noise.weight", "epi2.top_epi.noise.weight")
.replace("conv1.stylemod", "epi2.style_mod.lin")
.replace("torgb_lod0", "torgb")
)
else:
k = ".".join(k)
return k
def weight_translate(k, w):
k = key_translate(k)
if k.endswith(".weight"):
if w.dim() == 2:
w = w.t()
elif w.dim() == 1:
pass
else:
assert w.dim() == 4
w = w.permute(3, 2, 0, 1)
return w
# we delete the useless torgb filters
param_dict = {
key_translate(k): weight_translate(k, v)
for k, v in state_Gs.items()
if "torgb_lod" not in key_translate(k)
}
if 1:
sd_shapes = {k: v.shape for k, v in self.state_dict().items()}
param_shapes = {k: v.shape for k, v in param_dict.items()}
for k in list(sd_shapes) + list(param_shapes):
pds = param_shapes.get(k)
sds = sd_shapes.get(k)
if pds is None:
print("sd only", k, sds)
elif sds is None:
print("pd only", k, pds)
elif sds != pds:
print("mismatch!", k, pds, sds)
self.load_state_dict(param_dict, strict=False) # needed for the blur kernels
torch.save(self.state_dict(), Path(pickle_path).with_suffix(".pt"))
| [
"okyksl@gmail.com"
] | okyksl@gmail.com |
545089b43d7ea9cf0b0315df7fda1767e7181dff | 628574233007517f0fde0b40317a68f6065f37ca | /Python/DjangoProjects/time_display_assignments/time_display_assignments/urls.py | d5f657802eb41ba481a1031393a28ae661e30133 | [] | no_license | carolynyen/DojoAssignments | 5a5d2df904bc2d650f945d09369a1d0ee5a316bc | a06ee21b968357e7bda77542d6a21b664a53136e | refs/heads/master | 2021-01-11T17:54:13.006990 | 2017-04-21T19:42:46 | 2017-04-21T19:42:46 | 79,866,508 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """time_display_assignments URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^', include('apps.timedisplay.urls')),
]
| [
"whitehawk6888@yahoo.com"
] | whitehawk6888@yahoo.com |
18b80378c40737c228cdd1f7cb7e35ce5933b2bc | abeb6ca5a9723d96c432e318aa8cdbd774804c29 | /last-ansible/tweets_harvester/config.py | 9135f0ee7d15d91934df9edd4439cb14011aa259 | [] | no_license | ylin18/CCC-ass2 | cec056fb9e1b1f9cf020d0ecaf2993fb70748dfd | 9d24f0937a43152e697f6c7bed41181ce9618a29 | refs/heads/master | 2020-08-28T15:35:12.454736 | 2019-06-24T05:13:59 | 2019-06-24T05:13:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | app_keys_tokens = [
{'consumer_key' : 'CP20F8yCMC85K26XY07w4XElp',
'consumer_secret' : '4t1r4cdlBPGVzkosnZ2gvBqXbet5MbuJIlkuN0JKYufWIdo4yM',
'access_token' : '1121041186305630208-hG4Jv9cfPOufx3vAgPpBUCODlWsHQH',
'access_token_secret' : 'OJSXpMxZDzY9XUo2gqoqZcLUyGY1C9duopI4032fywDPb'}
,
{'consumer_key' : '2BjmB9QN2UwT7BWGEYJc6mzyQ',
'consumer_secret' : 'dkP4itLYIM0rqhHef4BiRkEgp8n2STc5CZuddYzjpnRzN3QX0m',
'access_token' : '1121041186305630208-9pyRCJS3ltExpoKeTqKVrYcdSNnqHg',
'access_token_secret' : 'dWIS8xzpbuB1T77UZSQCHJGBOX2uT7A82UmiwpyuSfrkq'}
,
{'consumer_key' : 'W225IVMaLWc3Cio8Y2ZwHmwXT',
'consumer_secret' : 'D0Gebz3e1xqrSKKCNbQPCwLsjNdQVZxHguLekTU4zCavWysswy',
'access_token' : '1121041186305630208-vVcpClv576aYx9OJjVaWJkYA89m7eI',
'access_token_secret' : 'ZjUk3ppAaudL4KR3oDQo3K6lDMZRKrnGvj2wYRpzfx1uP'}
,
{'consumer_key' : 'ahKRXTnEizWqy4oHC4uBFxWuu',
'consumer_secret' : 'xF2Pc3JwGtSij9Ig0UhW5A5o4RVk1kxcbTk6jMGM7W7XfOub8w',
'access_token' : '1121041186305630208-85TVCtBvNc3RjW9RjmcBdwJn5FKxQm',
'access_token_secret' : 'l3qRsugZsCt1MApDSjtCwMFS19Jms2Y2QiGpUPfzeWVit'}
,
{'consumer_key' : 'IaRuxafuggm6eZvXdmurzA2MV',
'consumer_secret' : '9DwkZsuQHFHtnFD5JVfttZ6uNkqsm99yNxvpmPMgSlPxAtqjJ9',
'access_token' : '1121041186305630208-UJOUSKINRytWJY9OiCF9ANeqNacdMY',
'access_token_secret' : 'kQjU0Vc4x7oPnSeuOM4Jrs5waE21dtYOKHN9Wi0hcikxB'}
,
{'consumer_key' : 'W2pCfanNy4x8YiEEuC1pHbPU8',
'consumer_secret' : '3XfspXNraGydG3xKYfxZ6wLVaJsCZqYeSx0cOUFSv2ABNHNi8a',
'access_token' : '1121041186305630208-iHTAQCyxr0QcCPjEH8gpNYTZyomfjQ',
'access_token_secret' : 'nsNp6LLbb1SacGUbKlUPBFBEiV937Lct9LxHIgB5MYKnL'}
,
{'consumer_key' : 'kpC53YdFfY1Q4vXIEhLKM4lhT',
'consumer_secret' : '3qYR9E9oucDhQSwilPQxUVnmdr1tJwAOi0iMyKCYWy63coEQLZ',
'access_token' : '1121041186305630208-bFfc3Y4x0ueoHCyCvVUZxeMWRmhloR',
'access_token_secret' : 'O460teRRz2MJxQRto7ipMz3MyKA2fwZWwW4bMnMVKzKGZ'}
,
{'consumer_key' : 'hVmTeuwuAy44Ufr6q6SlIlSST',
'consumer_secret' : 'BsYDcvgtmbhdGDv57oI5Q3DwSsBEuPtjaIDjALelQsmnQaeQf1',
'access_token' : '1121041186305630208-9xcHlMmIVGemqa6jNPDYfrXOv05V8l',
'access_token_secret' : 'NULMzSH41CSFTzJSv8rUu0fPf2VmiqqKX80SqF06LkqT0'}
]
geocodes = {
'sydney':"-33.63,150.89,85km",
'melbourne':"-37.80,145.11,75km",
'brisbane':"-27.33,152.81,109km",
'perth':"-32.12,115.68,75km"
}
coordinates = {
'sydney':[150.00, -34.30, 151.62, -33.00],
'melbourne':[144.36,-38.50,145.88,-37.18],
'brisbane':[152.10,-28.34,153.55,-26.45],
'perth':[115.40,-32.80,116.41,-31.45]
}
search_appid = {
'sydney': 0,
'melbourne': 1,
'brisbane': 2,
'perth': 3
}
stream_appid = {
'sydney': 4,
'melbourne': 5,
'brisbane': 6,
'perth': 7
}
# city = 'Sydney'
# placeid = '0073b76548e5984f'
# centre = [-33.865143,151.209900]
# geocode = "-33.63,150.89,85km"
# coordinates = [150.00, -34.30, 151.62, -33.00]
# city = 'Melbourne'
# placeid = '01864a8a64df9dc4'
# centre = [-37.815338, 144.963226]
# geocode = "-37.80,145.11,75km" #all covery
# coordinates = [144.36,-38.50,145.88,-37.18] # greater
# city = 'Brisbane'
# placeid = '004ec16c62325149'
# centre = [-27.46207,153.01462]
# geocode = "-27.33,152.81,109km"
# coordinates = [152.10,-28.34,153.55,-26.45]
# city = 'Perth'
# placeid = '0118c71c0ed41109'
# centre = [-31.94475,115.86013]
# geocode = "-32.12,115.68,75km"
# coordinates = [115.40,-32.80,116.41,-31.45]
| [
"xudongma@ua-unistudent-ten-9-131-202.uniaccess.unimelb.edu.au"
] | xudongma@ua-unistudent-ten-9-131-202.uniaccess.unimelb.edu.au |
69eeb01074d12cecc75f2289a18fdde829495e7f | 03414057da375391470f28c8fa55bd8dfec751e3 | /app/api_1_0/__init__.py | b14eb42e0236fe410134b2c9de2fae0883b0d457 | [] | no_license | xingyuan1998/school | 6fe817046e052fd47d610ef7a492327f20637ea2 | 1780b9246621beb4aa56e84c6fb4571521482d2d | refs/heads/master | 2021-07-19T01:38:08.824341 | 2017-10-25T14:58:58 | 2017-10-25T14:58:58 | 107,509,754 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from flask import Blueprint
api = Blueprint('api', __name__, url_prefix='/api')
from . import (
appointment,
auth,
circles,
news,
profess,
roommate,
secondhand,
tasks,
user
)
@api.route('/')
def hello():
return 'dddd'
| [
"xingyuan1998@163.com"
] | xingyuan1998@163.com |
820447248ef9ca3978c5079ad021994e1141f098 | 841cc95591bd1d5317310d8d809e2a2c0284be0b | /queries/query2.py | ed085ae4cbbe5c3c0497e79237223bc0b3f10d3b | [] | no_license | annwhoorma/DMD2_assignment1 | 2864e4b7a6380aa557bdd18b08a20f47212ec544 | 6934d842c3b3bbce89ea60469a3390f1922cdcfa | refs/heads/master | 2022-06-02T22:39:43.364294 | 2020-04-29T08:44:52 | 2020-04-29T08:44:52 | 259,871,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | from py2neo import Database
from py2neo import Graph, Node
import numpy as np
""" Connection to Neo4j Database """
dvdrental = "bolt://localhost:7687"
db = Database(dvdrental)
graph = Graph(password='13579qwer')
db = graph.begin(autocommit=False)
actors_list = graph.run("match(a:Actor) with a.ID as ids return ids").to_ndarray()
number_of_ids = graph.run("match(a:Actor) with count(a.ID) as number_of_ids return number_of_ids").to_ndarray()[0][0]
maxID = graph.run('''match(a:Actor)
with max(a.ID) as max_actor_ID
return max_actor_ID''').to_ndarray()[0][0]
ret = graph.run('''match (a1:Actor)-[:ACTS_IN]->(f:Film)
match (a2:Actor)-[:ACTS_IN]->(f:Film)
with a1.ID as actor1_ID, a2.ID as actor2_ID, count(f) as together
where actor1_ID <> actor2_ID
return actor1_ID, actor2_ID, together''').to_ndarray()
# ret: 1st col - actor1_ID, 2nd col - actor2_ID, 3rd col - how many films actor1 and actor2 had together
print(ret[0][0], ret[0][1], ret[0][2], ret[20867][0], ret[20867][1], ret[20867][2])
table = np.zeros([maxID+1, maxID+1], dtype=int)
for i in range(0, len(ret)-1):
act1 = ret[i][0]
act2 = ret[i][1]
table[act1][act2] = ret[i][2]
print(table)
np.savetxt("queries_results/query2_result.csv", table, delimiter=",")
db.commit() | [
"a.boronina@innopolis.university"
] | a.boronina@innopolis.university |
e6bc5b628771881d5e791a8ae89ab73749fabf68 | 4cb0b3c4acf4e30dda0f814fab8232bf13617422 | /Python/Django/survey_form/apps/surveys/urls.py | 861117e318407c2eea5b49f8496698d1e86e5b31 | [] | no_license | h0oper/DojoAssignments | d60336b3e67021be0e6a43c1f3693193f83b22d9 | 28472e7907a18725d702fc9617f27619fcc4fcfc | refs/heads/master | 2020-05-09T23:38:27.916674 | 2018-10-06T20:46:02 | 2018-10-06T20:46:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^process$', views.process),
url(r'^result$', views.result),
url(r'^goback$', views.goback)
] | [
"ccasil@ucsc.edu"
] | ccasil@ucsc.edu |
2d2fd39c2843868ed674ffcd8ec03183ce56c7d3 | 128a4a9f7d96aa159619947bb3f66752d8e79b75 | /train_functions.py | 7f2a12147ace02a13767d900228d4a43ded5d394 | [] | no_license | r0mer0m/DL-Medical-Images | 262fdda89612abc5a119b2dd60d1ded78359eeae | c0f4b2b6727d1680f13fdd4f1c7b950fd1c31251 | refs/heads/master | 2020-05-18T03:09:32.132147 | 2019-11-20T14:01:54 | 2019-11-20T14:01:54 | 184,136,383 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,012 | py |
from core import *
from utils import lr_loss_plot, save_model, load_model
# loss_function = F.binary_cross_entropy_with_logits
################################
######## 1st approach #########
################################
def get_optimizer(model, lr:float = .01, wd:float = 0.):
parameters = filter(lambda p: p.requires_grad, model.parameters())
optim = torch.optim.Adam(parameters, lr=lr, weight_decay=wd)
return optim
######### FIND LEARNING RATE & STORE/LOAD history #####
# def lr_finder(model, train_dl, p:(Path,str)=None, lr_low:float=1e-5, lr_high:float=1, epochs:int=2):
# '''
# Lr finder with the first approach
#
#
# :param model:
# :param train_dl:
# :param p:
# :param lr_low:
# :param lr_high:
# :param epochs:
# :return:
# '''
# losses = []
# if p: save_model(model, str(p))
#
# iterations = epochs * len(train_dl)
# delta = (lr_high - lr_low) / iterations
# lrs = [lr_low + i * delta for i in range(iterations)]
# model.train()
# ind = 0
# for i in range(epochs):
# train_dl.set_random_choices()
# for x, y in train_dl:
# optim = get_optimizer(model, lr=lrs[ind])
# x = x.cuda().float()
# y = y.cuda()
# out = model(x)
# loss = F.binary_cross_entropy_with_logits(out.squeeze(), y)
# loss.backward()
# optim.step()
# optim.zero_grad()
# losses.append(loss.item())
# ind += 1
# if p: load_model(model, str(p))
#
# return lrs, losses
######### Store/Load lr finder output #####
def to_csv(lrs,losses, file='lrs_losses.csv'):
with open(str(file), 'w') as f:
for i in range(len(lrs)):
f.write(f'{lrs[i]},{losses[i]}\n')
def from_csv(path):
if not isinstance(path, str):
path = str(path)
df = pd.read_csv(path, header=None)
lr = df[0].tolist()
losses = df[1].tolist()
return lr, losses
######### Define LR policy and training #####
def train_regular_policy(model, path, train_dl, valid_dl,
loss_function=F.binary_cross_entropy_with_logits,
lr_low=1e-6, lr_high=0.001, epochs=50, epsilon=.01, compute_metric=True, data=None):
if data is None:
data = train_dl.data
lr = lr_high
prev_loss, min_loss = np.inf, np.inf
for i in range(epochs):
model.train()
train_dl.set_random_choices()
total = 0
sum_loss = 0
optim = get_optimizer(model, lr=lr, wd=0)
for x, y in train_dl:
batch = y.shape[0]
out = model(x)
loss = loss_function(out, y)
optim.zero_grad()
loss.backward()
optim.step()
total += batch
sum_loss += batch * (loss.item())
print("lr %.7f train loss %.5f" % (lr, sum_loss / total))
if data == 'chest':
val_loss, measure = val_metrics_chest(model, valid_dl, compute_metric)
elif data == 'chest-PvsNP' or data == 'binary_task':
val_loss, measure = val_metrics_chest_PvsNP(model, valid_dl, compute_metric)
elif data == 'hands':
val_loss, measure = val_metrics_hands(model, valid_dl,
loss_function, compute_metric)
elif data == 'MURA':
val_loss, measure = val_metrics_MURA(model, valid_dl, compute_metric)
print("lr %.7f train loss %.5f" % (lr, sum_loss / total, measure))
if val_loss - prev_loss > epsilon:
lr = lr / 10.0
if val_loss < min_loss:
save_model(model, path)
min_loss = val_loss
prev_loss = val_loss
if lr < lr_low:
break
return sum_loss / total
################################
######## 2nd approach #########
################################
### Annealings ###
def exp_annealing(start_lr, end_lr, n):
ptg = np.linspace(0, 1, n)
return start_lr * (end_lr / start_lr) ** ptg
def cos_annealing(start_lr, end_lr, n_iterations):
i = np.arange(n_iterations)
c_i = 1 + np.cos(i * np.pi / n_iterations)
return end_lr + (start_lr - end_lr) / 2 * c_i
### Diff lr ###
def diff_range(val, alpha=1./3):
return [val * alpha ** i for i in range(2, -1, -1)]
#### POLICIES (Finder and Training) ######
class FinderPolicy:
    """LR-finder policy: exponential lr sweep from min_lr to max_lr with a
    constant momentum of .9."""

    def __init__(self, n_epochs, dl, min_lr=1e-7, max_lr=10):
        """Build the per-iteration lr schedule for n_epochs passes over dl."""
        n_iterations = n_epochs * len(dl)
        self.lr_schedule = exp_annealing(min_lr, max_lr, n_iterations)
        self.mom = .9  # constant momentum policy with default value
        self.idx = -1

    def step(self):
        """Advance one iteration and return (lr, momentum)."""
        self.idx += 1
        return self.lr_schedule[self.idx], self.mom
# LR finder loop
def lr_finder(model, n_epochs, train_dl, min_lr=1e-4, max_lr=1e-1, save_path=None, early_stopping=200, plot_every=200):
    """Sweep the lr exponentially from min_lr to max_lr while training, recording
    per-batch losses; plots every `plot_every` batches and stops after
    `early_stopping` batches. Returns (lrs, losses). If save_path is given the
    model weights are checkpointed before and restored after the sweep."""
    if save_path:
        save_model(model, save_path)
    model.train()
    policy = FinderPolicy(n_epochs=n_epochs, dl=train_dl, min_lr=min_lr, max_lr=max_lr)
    optimizer = OptimizerWrapper(model, policy)
    lrs = optimizer.policy.lr_schedule
    losses = []
    cnt = 0
    for _ in tqdm_notebook(range(n_epochs)):
        train_dl.set_random_choices()
        for x, y in tqdm_notebook(train_dl, leave=False):
            optimizer.zero_grad()
            loss = F.binary_cross_entropy_with_logits(input=model(x).squeeze(), target=y)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            if cnt % plot_every == plot_every - 1:
                lr_loss_plot(lrs, losses)
            if cnt == early_stopping:
                return lrs[:cnt], losses
            cnt += 1
    if save_path:
        load_model(model, save_path)
    return lrs, losses
class TrainingPolicy:
    """One-cycle schedule: lr rises cosine-annealed from max_lr/div_factor to
    max_lr over the first `pctg` of iterations, then decays to a tiny final lr;
    momentum follows the opposite path between moms[0] and moms[1]."""

    def __init__(self, n_epochs, dl, max_lr, pctg=.3, moms=(.95, .85),
                 delta=1e-4, div_factor=25.):
        n_total = n_epochs * len(dl)
        n_up = int(n_total * pctg)
        n_down = n_total - int(n_total * pctg)
        phase_lengths = (n_up, n_down)
        lr_start = max_lr / div_factor
        lr_final = lr_start * delta
        lr_segments = ((lr_start, max_lr), (max_lr, lr_final))
        mom_segments = (moms, (moms[1], moms[0]))
        self.lr_schedule = self._create_schedule(lr_segments, phase_lengths)
        self.mom_schedule = self._create_schedule(mom_segments, phase_lengths)
        self.idx = -1

    def _create_schedule(self, segments, iterations):
        """Concatenate one cosine-annealed stage per (start, end) segment."""
        stages = [cos_annealing(a, b, n) for (a, b), n in zip(segments, iterations)]
        return np.concatenate(stages)

    def step(self):
        """Advance one iteration and return (lr, momentum)."""
        self.idx += 1
        return self.lr_schedule[self.idx], self.mom_schedule[self.idx]
#### OPTIMIZER WRAPPERS ######
class OptimizerWrapper:
    '''Wraps Adam with a per-iteration lr policy and discriminative per-group
    values (three parameter groups, scaled by `alpha`). The momentum policy is
    computed but not applied to Adam (see NOTE below).'''
    def __init__(self, model, policy, wd=0, alpha=1. / 3):
        self.policy = policy  # e.g. TrainingPolicy(n_epochs=n_epochs, dl=dl, max_lr=max_lr)
        self.model = model
        self.alpha = alpha
        self.wd = wd
        # This assumes the model is defined by groups: its first child must be a
        # container of exactly three sub-modules -- TODO confirm per model.
        param_groups = [group.parameters() for group in list(self.model.children())[0]]
        lr_0 = self.policy.lr_schedule[0]
        mom_0 = self.policy.mom_schedule[0] if hasattr(self.policy, 'mom_schedule') else .9
        groups = zip(param_groups, diff_range(lr_0, alpha=alpha), diff_range(mom_0, alpha=1))
        # NOTE(review): Adam reads its momentum from the 'betas' key; the 'mom'
        # key below is stored but ignored, so Adam always runs with its default
        # betas. The class docstring says this is intentional -- confirm, else
        # rename 'mom' to 'betas' here and in _update_optimizer.
        self.optimizer = optim.Adam(
            [{'params': p, 'lr': lr, 'mom': (mom, .999)} for p, lr, mom in groups]
        )

    def _update_optimizer(self):
        """Pull the next (lr, mom) from the policy and fan it out per group."""
        lr_i, mom_i = self.policy.step()
        groups = zip(self.optimizer.param_groups,
                     diff_range(lr_i, alpha=self.alpha),
                     diff_range(mom_i, alpha=1))
        for param_group, lr, mom in groups:
            param_group['lr'] = lr
            param_group['mom'] = (mom, .999)

    def _weight_decay(self):
        """Apply decoupled weight decay: w <- w * (1 - lr*wd)."""
        # BUG FIX: `param_group` -> `param_groups` (the old name raised
        # AttributeError whenever wd != 0), and the decay now multiplies by
        # (1 - lr*wd); multiplying by lr*wd itself zeroed the weights.
        for group in self.optimizer.param_groups:
            for p in group['params']: p.data.mul_(1 - group['lr'] * self.wd)

    def step(self):
        """Update schedules, optionally decay weights, then take an Adam step."""
        self._update_optimizer()
        if self.wd != 0: self._weight_decay()
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()
########## METRICS #################
def R2L1(y, out):
    """Pseudo R^2 on L1 deviations: 1 - sum|out - y| / sum|y - mean(y)|."""
    baseline = np.sum(np.absolute(y - np.mean(y)))
    residual = np.sum(np.absolute(out - y))
    return 1 - residual / baseline
def ave_auc(probs, ys):
    """Per-column ROC AUC for multilabel predictions plus their mean.

    Returns (mean_auc, list_of_per_class_aucs)."""
    n_classes = probs.shape[1]
    aucs = [roc_auc_score(ys[:, c], probs[:, c]) for c in range(n_classes)]
    return np.mean(aucs), aucs
########## VALIDATION #################
def cuda2cpu_classification(y):
    """Move a label tensor to host memory as an int64 numpy array."""
    labels = y.long().cpu()
    return labels.numpy()
def cuda2cpu_regression(y):
    """Move a target tensor to host memory as a numpy array (dtype preserved)."""
    host = y.cpu()
    return host.numpy()
def validate_loop(model, valid_dl, task):
    """Evaluate `model` over `valid_dl`.

    task: 'binary' | 'multilabel' (BCE-with-logits loss) or 'regression' (L1).
    Returns (mean loss, list of per-batch predictions, list of per-batch
    labels), predictions/labels as numpy arrays.
    """
    if task=='binary' or task=='multilabel':
        cuda2cpu = cuda2cpu_classification
        loss_fun = F.binary_cross_entropy_with_logits
    elif task=='regression':
        cuda2cpu = cuda2cpu_regression
        loss_fun = F.l1_loss
    else:
        # BUG FIX: an unknown task previously fell through and crashed later
        # with a confusing NameError on `cuda2cpu`; fail fast instead.
        raise ValueError("Unknown task: {!r}".format(task))
    model.eval()
    total = 0
    sum_loss = 0
    ys = []
    preds = []
    for x, y in valid_dl:
        out = model(x).squeeze()
        loss = loss_fun(out.squeeze(), y)
        batch = y.shape[0]
        # Weight each batch by its size so the mean is per-sample.
        sum_loss += batch * (loss.item())
        total += batch
        preds.append(out.squeeze().detach().cpu().numpy())
        ys.append(cuda2cpu(y))
    return sum_loss/total, preds, ys
def validate_multilabel(model, valid_dl):
    """Validation loss plus mean/per-class ROC AUC for a multilabel model."""
    loss, preds, ys = validate_loop(model, valid_dl, 'multilabel')
    stacked_preds = np.vstack(preds)
    stacked_ys = np.vstack(ys)
    mean_auc, aucs = ave_auc(stacked_preds, stacked_ys)
    return loss, mean_auc, aucs
def validate_binary(model, valid_dl):
    """Validation loss, ROC AUC and accuracy for a binary model.

    Predictions coming back from validate_loop are raw logits (the loss is
    BCE-with-logits), so the accuracy decision boundary is 0.
    """
    loss, preds, ys = validate_loop(model, valid_dl, 'binary')
    preds = np.concatenate(preds)
    ys = np.concatenate(ys)
    auc = roc_auc_score(ys, preds)
    # BUG FIX: the threshold was .5, but preds are logits -- logit 0 is the
    # sigmoid-0.5 boundary (this matches TTA_binary). Also `np.int` was removed
    # in NumPy 1.24; use the builtin int.
    accuracy = accuracy_score(ys, (preds>0).astype(int))
    return loss, auc, accuracy
def validate_regression(model, valid_dl):
    """Validation L1 loss and pseudo-R^2 (L1 based) for a regression model."""
    loss, preds, ys = validate_loop(model, valid_dl, 'regression')
    flat_preds = np.concatenate(preds)
    flat_ys = np.concatenate(ys)
    return loss, R2L1(y=flat_ys, out=flat_preds)
########## TTA #################
def TTA_loop(model, valid_dl, task, ndl=4):
    """Test-time augmentation: run `ndl - 1` passes with re-randomized dataloader
    augmentations plus one final pass (labels are collected only on the final
    pass). Returns (mean loss over all passes, per-pass prediction lists, labels).
    """
    if task=='binary' or task=='multilabel':
        cuda2cpu = cuda2cpu_classification
        loss_fun = F.binary_cross_entropy_with_logits
    elif task=='regression':
        cuda2cpu = cuda2cpu_regression
        loss_fun = F.l1_loss
    else:
        # BUG FIX: an unknown task previously fell through and crashed later
        # with a NameError on `loss_fun`; fail fast instead (matches
        # validate_loop).
        raise ValueError("Unknown task: {!r}".format(task))
    model.eval()
    total = 0
    sum_loss = 0
    ys = []
    preds = [[] for _ in range(ndl)]
    # Augmented passes: re-randomize the dataloader's transforms before each one.
    for i in range(ndl - 1):
        valid_dl.set_random_choices()
        for x, y in valid_dl:
            out = model(x).squeeze()
            loss = loss_fun(out.squeeze(), y)
            batch = y.shape[0]
            sum_loss += batch * (loss.item())
            total += batch
            preds[i].append(out.squeeze().detach().cpu().numpy())
    # Final pass without re-randomizing the augmentations; also collects labels.
    for x, y in valid_dl:
        out = model(x)
        loss = loss_fun(out.squeeze(), y)
        batch = y.shape[0]
        sum_loss += batch * (loss.item())
        total += batch
        preds[ndl - 1].append(out.squeeze().detach().cpu().numpy())
        ys.append(cuda2cpu(y))
    return sum_loss / total, preds, ys
def TTA_multilabel(model, valid_dl, ndl=4):
    """TTA evaluation for a multilabel model: average predictions over passes."""
    loss, per_pass_preds, ys = TTA_loop(model, valid_dl, 'multilabel', ndl)
    stacked = [np.vstack(p) for p in per_pass_preds]
    avg_preds = np.mean(stacked, axis=0)
    labels = np.vstack(ys)
    mean_auc, aucs = ave_auc(avg_preds, labels)
    print("TTA loss %.4f and auc %.4f" % (loss, mean_auc))
    return loss, mean_auc, aucs
def TTA_binary(model, valid_dl, ndl=4):
    """TTA evaluation for a binary model; predictions are logits (boundary 0)."""
    loss, per_pass_preds, ys = TTA_loop(model, valid_dl, 'binary', ndl)
    flat = [np.concatenate(p) for p in per_pass_preds]
    avg_preds = np.mean(flat, axis=0)
    labels = np.concatenate(ys)
    auc = roc_auc_score(labels, avg_preds)
    accuracy = accuracy_score(labels, (avg_preds>0).astype(int))
    print("TTA loss %.4f auc %.4f accuracy %.4f" % (loss, auc, accuracy))
    return loss, auc, accuracy
def TTA_regression(model, valid_dl, ndl=4):
    """TTA evaluation for a regression model: average predictions, report R2L1."""
    loss, per_pass_preds, ys = TTA_loop(model, valid_dl, 'regression', ndl)
    flat = [np.concatenate(p) for p in per_pass_preds]
    avg_preds = np.mean(flat, axis=0)
    labels = np.concatenate(ys)
    R2 = R2L1(y=labels, out=avg_preds)
    print("TTA loss %.4f pseudo R2 (L1) %.4f " % (loss, R2))
    return loss, R2
#### LR FINDER AND TRAINING WITH POLICY ####
# loss_functions = {'binary': F.binary_cross_entropy_with_logits,
# 'multilabel': F.binary_cross_entropy_with_logits,
# 'multiclass': F.cross_entropy,
# 'regression': F.l1_loss
# }
#
# def lr_finder(model, n_epochs, train_dl, min_lr=1e-7, max_lr=10, save_path=None,
# mode='exponential', bar=tqdm_notebook, early_stopping=200):
#
# if save_path: save_model(model, save_path)
#
# optimizer = FinderOptimizerWrapper(model, n_epochs, train_dl, min_lr=min_lr, max_lr=max_lr, wd=0, mode=mode)
#
# lrs = optimizer.policy.lr_schedule
# losses = []
# cnt = 0
#
# for _ in bar(range(n_epochs)):
# model.train()
# train_dl.set_random_choices()
# for it, (x, y) in enumerate(bar(train_dl)):
#
# optimizer.zero_grad()
#
# out = model(x)
# loss = F.binary_cross_entropy_with_logits(input=out, target=y)
#
# loss.backward()
# optimizer.step()
#
# losses.append(loss.item())
#
# if it%200 == 199:
# plt.plot(lrs[:len(losses)], losses)
# plt.xticks(rotation=45)
# plt.show()
#
# if cnt==early_stopping: return lrs[:cnt], losses
# cnt +=1
#
# if save_path: load_model(model, p)
#
# return lrs, losses
#
#
# def train(n_epochs, train_dl, valid_dl, model, div_factor=25., max_lr=.01, wd=0, alpha=1./ 3, classification_type='binary',
# save_path=None, bar=tqdm_notebook, val_function=None, unfreeze_during_loop:tuple=None):
#
# model.train()
#
# best_loss = np.inf
#
# loss_f = loss_functions[classification_type]
#
# validate = val_function if val_function else get_val_metric(train_dl)
#
# optimizer = OptimizerWrapper(model, n_epochs, train_dl, div_factor=div_factor, max_lr=max_lr, wd=wd, alpha=alpha)
#
# if unfreeze_during_loop:
# if not isinstance(unfreeze_during_loop, (list, tuple)): raise ValueError('unfreeze_during_loop requires to be None, list or a tuple')
# total_iter = n_epochs*len(train_dl)
# first_unfreeze = int(total_iter*unfreeze_during_loop[0])
# second_unfreeze = int(total_iter*unfreeze_during_loop[1])
#
# for epoch in bar(range(n_epochs)):
# div = 0
# agg_loss = 0
# if hasattr(train_dl, 'set_random_choices'): train_dl.set_random_choices()
# for i, (x, y) in enumerate(train_dl):
#
# if unfreeze_during_loop:
# if i == first_unfreeze: model.unfreeze(1)
# if i == second_unfreeze: model.unfreeze(0)
#
# out = model(x)
# optimizer.zero_grad()
# loss = loss_f(input=out, target=y)
# loss.backward()
# optimizer.step()
#
# agg_loss += loss.item()
# div += 1
#
#
# val_loss, measure = validate(model, valid_dl, True)
# print(f'Ep. {epoch+1} - train loss {agg_loss/div:.4f} - val loss {val_loss:.4f} AUC {measure:.4f}')
#
#
#
# if save_path and val_loss < best_loss:
# save_model(model, save_path)
# best_loss = val_loss
#
#
| [
"mromerocalvo@dons.usfca.edu"
] | mromerocalvo@dons.usfca.edu |
367dc9761443c981fdda7dc72a838157e0f2e0a8 | 6e87da516ab1af2646c45f6f7c6626081f45f00b | /xarm_moveit_servo/launch/_xarm_moveit_servo.launch.py | ea87c5ea477110a88acab45300c174c6d56bdbb1 | [
"BSD-3-Clause"
] | permissive | 0000duck/xarm_ros2 | 5598f6812b4f2da8b0d38c932282249033e21048 | 57892f0bd79ffdc9936dd3b340ae141c186a370d | refs/heads/master | 2023-08-24T02:49:43.560843 | 2021-10-21T07:40:10 | 2021-10-21T07:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,030 | py | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import os
from ament_index_python import get_package_share_directory
from launch.launch_description_sources import load_python_launch_file_as_module
from launch import LaunchDescription
from launch.actions import OpaqueFunction, IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
from launch_ros.actions import Node
def launch_setup(context, *args, **kwargs):
    """Build the launch actions for xArm MoveIt Servo joystick teleoperation.

    Reads the launch arguments, generates the robot description (URDF/SRDF)
    parameters via the xarm_moveit_config helper library, configures
    moveit_servo, and returns: the RViz node, the ros2_control launch include,
    a composable-node container (state publisher, static TF, servo server,
    joystick bridge, joy driver), and one controller spawner per controller.
    """
    dof = LaunchConfiguration('dof', default=7)
    prefix = LaunchConfiguration('prefix', default='')
    hw_ns = LaunchConfiguration('hw_ns', default='xarm')
    limited = LaunchConfiguration('limited', default=False)
    effort_control = LaunchConfiguration('effort_control', default=False)
    velocity_control = LaunchConfiguration('velocity_control', default=False)
    add_gripper = LaunchConfiguration('add_gripper', default=False)
    add_vacuum_gripper = LaunchConfiguration('add_vacuum_gripper', default=False)
    ros2_control_plugin = LaunchConfiguration('ros2_control_plugin', default='xarm_control/FakeXArmHW')
    # 1: xbox360 wired
    # 2: xbox360 wireless
    # 3: spacemouse wireless
    joystick_type = LaunchConfiguration('joystick_type', default=1)
    # Optional extra geometry attached to the arm (collision/visual payload).
    add_other_geometry = LaunchConfiguration('add_other_geometry', default=False)
    geometry_type = LaunchConfiguration('geometry_type', default='box')
    geometry_mass = LaunchConfiguration('geometry_mass', default=0.1)
    geometry_height = LaunchConfiguration('geometry_height', default=0.1)
    geometry_radius = LaunchConfiguration('geometry_radius', default=0.1)
    geometry_length = LaunchConfiguration('geometry_length', default=0.1)
    geometry_width = LaunchConfiguration('geometry_width', default=0.1)
    geometry_mesh_filename = LaunchConfiguration('geometry_mesh_filename', default='')
    geometry_mesh_origin_xyz = LaunchConfiguration('geometry_mesh_origin_xyz', default='"0 0 0"')
    geometry_mesh_origin_rpy = LaunchConfiguration('geometry_mesh_origin_rpy', default='"0 0 0"')
    geometry_mesh_tcp_xyz = LaunchConfiguration('geometry_mesh_tcp_xyz', default='"0 0 0"')
    geometry_mesh_tcp_rpy = LaunchConfiguration('geometry_mesh_tcp_rpy', default='"0 0 0"')
    moveit_config_package_name = 'xarm_moveit_config'
    xarm_type = 'xarm{}'.format(dof.perform(context))
    ros_namespace = LaunchConfiguration('ros_namespace', default='').perform(context)
    # robot_description_parameters
    # xarm_moveit_config/launch/lib/xarm_moveit_config_lib.py
    mod = load_python_launch_file_as_module(os.path.join(get_package_share_directory(moveit_config_package_name), 'launch', 'lib', 'xarm_moveit_config_lib.py'))
    get_xarm_robot_description_parameters = getattr(mod, 'get_xarm_robot_description_parameters')
    robot_description_parameters = get_xarm_robot_description_parameters(
        xacro_urdf_file=PathJoinSubstitution([FindPackageShare('xarm_description'), 'urdf', 'xarm_device.urdf.xacro']),
        xacro_srdf_file=PathJoinSubstitution([FindPackageShare('xarm_moveit_config'), 'srdf', 'xarm.srdf.xacro']),
        urdf_arguments={
            'prefix': prefix,
            'hw_ns': hw_ns.perform(context).strip('/'),
            'limited': limited,
            'effort_control': effort_control,
            'velocity_control': velocity_control,
            'add_gripper': add_gripper,
            'add_vacuum_gripper': add_vacuum_gripper,
            'dof': dof,
            'ros2_control_plugin': ros2_control_plugin,
            'add_other_geometry': add_other_geometry,
            'geometry_type': geometry_type,
            'geometry_mass': geometry_mass,
            'geometry_height': geometry_height,
            'geometry_radius': geometry_radius,
            'geometry_length': geometry_length,
            'geometry_width': geometry_width,
            'geometry_mesh_filename': geometry_mesh_filename,
            'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
            'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
            'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
            'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
        },
        srdf_arguments={
            'prefix': prefix,
            'dof': dof,
            'add_gripper': add_gripper,
            'add_other_geometry': add_other_geometry,
        },
        arguments={
            'context': context,
            'xarm_type': xarm_type,
        }
    )
    # Load the moveit_servo config and point its output topic at the arm's
    # trajectory controller.
    load_yaml = getattr(mod, 'load_yaml')
    servo_yaml = load_yaml('xarm_moveit_servo', "config/xarm_moveit_servo_config.yaml")
    servo_yaml['move_group_name'] = xarm_type
    xarm_traj_controller = '{}{}_traj_controller'.format(prefix.perform(context), xarm_type)
    servo_yaml['command_out_topic'] = '/{}/joint_trajectory'.format(xarm_traj_controller)
    servo_params = {"moveit_servo": servo_yaml}
    controllers = ['joint_state_controller', xarm_traj_controller]
    if add_gripper.perform(context) in ('True', 'true'):
        controllers.append('{}xarm_gripper_traj_controller'.format(prefix.perform(context)))
    # rviz_config_file = PathJoinSubstitution([FindPackageShare(moveit_config_package_name), 'rviz', 'moveit.rviz'])
    rviz_config_file = PathJoinSubstitution([FindPackageShare('xarm_moveit_servo'), 'rviz', 'servo.rviz'])
    rviz_node = Node(
        package='rviz2',
        executable='rviz2',
        name='rviz2',
        output='screen',
        arguments=['-d', rviz_config_file],
        parameters=[
            robot_description_parameters,
        ],
        remappings=[
            ('/tf', 'tf'),
            ('/tf_static', 'tf_static'),
        ]
    )
    # ros2 control launch
    # xarm_controller/launch/_ros2_control.launch.py
    ros2_control_launch = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_controller'), 'launch', '_ros2_control.launch.py'])),
        launch_arguments={
            'prefix': prefix,
            'hw_ns': hw_ns,
            'limited': limited,
            'effort_control': effort_control,
            'velocity_control': velocity_control,
            'add_gripper': add_gripper,
            'add_vacuum_gripper': add_vacuum_gripper,
            'dof': dof,
            'ros2_control_plugin': ros2_control_plugin,
            'add_other_geometry': add_other_geometry,
            'geometry_type': geometry_type,
            'geometry_mass': geometry_mass,
            'geometry_height': geometry_height,
            'geometry_radius': geometry_radius,
            'geometry_length': geometry_length,
            'geometry_width': geometry_width,
            'geometry_mesh_filename': geometry_mesh_filename,
            'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
            'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
            'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
            'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
        }.items(),
    )
    # Load controllers
    load_controllers = []
    for controller in controllers:
        load_controllers.append(Node(
            package='controller_manager',
            executable='spawner.py',
            output='screen',
            arguments=[
                controller,
                '--controller-manager', '{}/controller_manager'.format(ros_namespace)
            ],
        ))
    # Launch as much as possible in components
    container = ComposableNodeContainer(
        name='xarm_moveit_servo_container',
        namespace='/',
        package='rclcpp_components',
        executable='component_container',
        composable_node_descriptions=[
            ComposableNode(
                package='robot_state_publisher',
                plugin='robot_state_publisher::RobotStatePublisher',
                name='robot_state_publisher',
                parameters=[robot_description_parameters],
            ),
            ComposableNode(
                package='tf2_ros',
                plugin='tf2_ros::StaticTransformBroadcasterNode',
                name='static_tf2_broadcaster',
                parameters=[{'child_frame_id': 'link_base', 'frame_id': 'world'}],
            ),
            ComposableNode(
                package='moveit_servo',
                plugin='moveit_servo::ServoServer',
                name='servo_server',
                parameters=[
                    servo_params,
                    robot_description_parameters,
                ],
                extra_arguments=[{'use_intra_process_comms': True}],
            ),
            # Translates joystick messages into servo commands.
            ComposableNode(
                package='xarm_moveit_servo',
                plugin='xarm_moveit_servo::JoyToServoPub',
                name='joy_to_servo_node',
                parameters=[
                    servo_params,
                    {
                        'dof': dof,
                        'ros_queue_size': 10,
                        'joystick_type': joystick_type,
                    },
                ],
                extra_arguments=[{'use_intra_process_comms': True}],
            ),
            ComposableNode(
                package='joy',
                plugin='joy::Joy',
                name='joy_node',
                parameters=[
                    # {'autorepeat_rate': 50.0},
                ],
                extra_arguments=[{'use_intra_process_comms': True}],
            ),
        ],
        output='screen',
    )
    return [
        rviz_node,
        ros2_control_launch,
        container,
    ] + load_controllers
def generate_launch_description():
    """Entry point for `ros2 launch`: defer all setup to launch_setup at runtime."""
    setup_action = OpaqueFunction(function=launch_setup)
    return LaunchDescription([setup_action])
| [
"vinman.cub@gmail.com"
] | vinman.cub@gmail.com |
fefa4008d3c6a8622e01e84a315130f060863036 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/2-Python-Basics-part2/6-Logical-operators_20200414002000.py | 7a4ee8fd3c96f8e57b7e41dc522b12fb81613bec | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | # Short Circuting
# Demonstration of Python's boolean operators (`or`, `and`) and their
# short-circuit evaluation.
is_Friend = True
is_User = True
# NOTE(review): the message below is misleading for the `or` branch -- `or`
# only guarantees that at least one operand is true, not both.
if is_Friend or is_User:
    print("both are true")
if is_Friend and is_User:
    print("both are true")
# Unrelated practice variables.
age = 15
year = 2019
boy = "Vlad" | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
b34f8b9f7f3ba7a6eedc23ea2331a5070bc75d4b | e21f51bc47bf7a5e2cf1bc38a745d3685144511a | /Flask/flask_parser.py | b88c750851cd6c041834aebfe7c8f240925f7f22 | [] | no_license | razmanika/DjangoWeb | 3b1a763a9a2bc888e316ddde9010ebfdd4a0a28e | e5a660f7fabe4e1912cdcf72ba938fc355b614c6 | refs/heads/master | 2020-06-14T16:21:06.690176 | 2019-07-03T13:14:24 | 2019-07-03T13:14:24 | 195,054,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | import requests
from bs4 import BeautifulSoup
# Hit the local /echo endpoint with both GET and POST and parse each response.
# NOTE(review): `url`/`url1` actually hold requests.Response objects (the names
# are misleading), and the service address is hard-coded to localhost:8080.
url = requests.get('http://localhost:8080/echo')
soup = BeautifulSoup(url.content, 'html.parser')
url1 = requests.post('http://localhost:8080/echo')
soup1 = BeautifulSoup(url1.content, 'html.parser')
# Print the parsed GET and POST responses side by side.
print(soup,soup1)
"nikoloz.razmadze.1@btu.edu.ge"
] | nikoloz.razmadze.1@btu.edu.ge |
a71d409d7b326fb5a4bd4507fcc6b07b84090dce | 59cdbbd0ed4e68e8b3b2ecf1b0a724ddb13fa5f2 | /config.py | 5354798ce4d7f2805275e21e966410dd0fd26abd | [] | no_license | doyleSM/flask-api-rest-example | a7bb2c4ab43b8092a18e4e0474fdbfb951931c77 | ef8760cfd5ce4a0090dd928b5bee72ab9d6c02ab | refs/heads/master | 2022-07-05T05:50:25.485950 | 2020-05-04T18:07:59 | 2020-05-04T18:07:59 | 254,703,415 | 0 | 0 | null | 2020-05-04T18:08:00 | 2020-04-10T18:10:03 | Python | UTF-8 | Python | false | false | 77 | py | import os
# Flask/SQLAlchemy configuration. Debug mode is disabled (production default).
DEBUG = False
# Connection string comes from the environment; this raises KeyError at import
# time if DATABASE_URL is unset (fail fast on misconfiguration).
SQLALCHEMY_DATABASE_URI = os.environ["DATABASE_URL"]
| [
"gbalk@inf.ufsm.br"
] | gbalk@inf.ufsm.br |
feb9eaf05f8f618e83419e8790d01f074d6592b0 | 6f1ae83ae56134f0ecd190e3c0d3d0947a08886f | /query_stanford_parser.py | 6f97a39b2353fd6465ec478efa41209c135d659c | [
"BSD-3-Clause"
] | permissive | yukiyakiZ/questplusplus | 60b2ea03bb32ba6cee99c222630cdae25e53ce8b | e30775f59ef827c318330febb84f8065fbcc3397 | refs/heads/master | 2020-05-07T14:29:21.651749 | 2019-05-05T11:55:04 | 2019-05-05T11:55:04 | 180,595,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,076 | py | # -*- coding:utf8 -*-
# Query Stanford Parser online to obtain the mean dependency distance calculation
import functools
import json
import os
import re
import time

import requests
from bs4 import BeautifulSoup
# Languages supported by the online parser endpoint below.
languageSet = ["English", "Chinese"]
# Stanford Parser web demo endpoint queried for every sentence.
url = 'http://nlp.stanford.edu:8080/parser/index.jsp'
def TimeIt(method):
    """Decorator that prints the wall-clock time each call to `method` takes."""
    # BUG FIX: without functools.wraps the decorated function lost its
    # __name__/__doc__, which breaks introspection and debugging.
    @functools.wraps(method)
    def Timed(*args, **kw):
        t1 = time.time()
        result = method(*args, **kw)
        elapsed = time.time() - t1
        print(
            "{} spent {} seconds to run!".format(
                method.__name__, elapsed))
        return result
    return Timed
def ParseHtml(text):
    """Extract (line count, summed dependency distance) from the parser's HTML.

    The last <pre> element holds the dependency triples; for every non-ROOT,
    non-empty line the distance |head_index - dependent_index| is accumulated.
    Lines that fail to parse are skipped silently.
    """
    soup = BeautifulSoup(text, 'html.parser')
    dep_block = soup.find_all('pre')[-1]  # last <pre> holds the dependencies
    lines = dep_block.text.splitlines()
    numTokens = len(lines)
    depDistanceSum = 0
    for line in lines:
        # Excluding ROOT
        if not line or re.search("ROOT", line):
            continue
        try:
            numStrs = re.findall("\d+", line)
            print(line)
            print(numStrs)
            index2 = int(numStrs[-1])
            index1 = int(re.findall(r"-(.+?),", line)[0])
            depDistanceSum += abs(index1-index2)
        except:
            pass
    print("Token number: {0}, dependency distance sum: {1}".format(numTokens, depDistanceSum))
    return numTokens, depDistanceSum
@TimeIt
def FetchSentenceQueryRes(sentence, language="English"):
    """POST `sentence` to the online Stanford Parser and return the parsed
    (numTokens, depDistanceSum) pair. Raises ValueError for unsupported
    languages."""
    if language not in languageSet:
        raise ValueError("Unsupported language!")
    payLoad = {'query': sentence, 'parserSelect': language}
    r = requests.post(url, data=payLoad)
    return ParseHtml(r.text)
# print(r.encoding)
# Smoke-test the remote parser before the batch run.
print("=== Test samples ===")
# FetchSentenceQueryRes("I love writing thesis very much .", "English")
# FetchSentenceQueryRes("我 非常 爱 写 论文 .", "Chinese")
FetchSentenceQueryRes("截至 2015 年 六月 底 , 全国 机动车 保有量 达 2.7 亿 余辆 , 其中 汽车 163 亿辆 . ", "Chinese")
# NOTE(review): absolute, machine-specific paths -- parameterize before reuse.
sourceDir = "/Users/liangchengyu/questplusplus/input/ch_text_ordered/"
targetDir = "/Users/liangchengyu/questplusplus/input/en_text_ordered/"
# print("=== Process source texts ===")
# filenames = os.listdir(sourceDir) # unordered
# filenames.sort()
# with open('sourceDepDistance.txt', 'w', encoding="utf-8") as outfile:
#     for filename in filenames:
#         docDepDistanceSum = 0.0
#         docNumTokens = 0.0
#         numSentences = 0
#         with open(sourceDir+filename, 'r', encoding='utf-8') as infile:
#             print(sourceDir+filename)
#             lines = infile.readlines()
#             for line in lines:
#                 numSentences += 1
#                 numTokens, depDistanceSum = FetchSentenceQueryRes(line, "Chinese")
#                 docDepDistanceSum += depDistanceSum
#                 docNumTokens += numTokens
#         print("Doc {0} with {1} sentences, mean dependency distance: {2}".format(filename, numSentences, docDepDistanceSum/(docNumTokens-numSentences)))
#         outfile.write(str(docDepDistanceSum/(docNumTokens-numSentences))+"\n")
# Batch-process the target (English) documents: one mean dependency distance
# per document, one line per document in targetDepDistance.txt.
print("=== Process target texts ===")
filenames = os.listdir(targetDir) # unordered
filenames.sort()
with open('targetDepDistance.txt', 'w', encoding="utf-8") as outfile:
    for filename in filenames:
        docDepDistanceSum = 0.0
        docNumTokens = 0.0
        numSentences = 0
        with open(targetDir+filename, 'r', encoding='utf-8') as infile:
            print(targetDir+filename)
            lines = infile.readlines()
            for line in lines:
                numSentences += 1
                numTokens, depDistanceSum = FetchSentenceQueryRes(line, "English")
                docDepDistanceSum += depDistanceSum
                docNumTokens += numTokens
        # Mean distance normalizes by (tokens - sentences), i.e. excluding one
        # ROOT line per sentence -- TODO confirm this matches ParseHtml's counts.
        print("Doc {0} with {1} sentences, mean dependency distance: {2}".format(filename, numSentences, docDepDistanceSum/(docNumTokens-numSentences)))
        outfile.write(str(docDepDistanceSum/(docNumTokens-numSentences))+"\n")
"liangcheng.yu46@gmail.com"
] | liangcheng.yu46@gmail.com |
c918caa29cf672dc3e1c12dfb518316fb26e5be4 | 9d140aab8f296235f5e23befaef1b4be72a97a2b | /MazeRightDown.py | 3a63d3f9b922c7d15f309b9025b0afd0b535a05e | [] | no_license | shreyal18ss/pyCodes | 38224d6a1a6a14510ccb27100b12f661c6c42bee | 82bd602d84eaaf253637206b7aadd90577abc5ba | refs/heads/master | 2020-07-04T12:54:55.801195 | 2020-06-02T14:54:55 | 2020-06-02T14:54:55 | 202,291,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py |
M=[[1,8,21,7],
[19,17,10,20],
[2,18,23,22],
[14,25,4,13]]
t=M[0][0]
x,y=0,0
w=t
print(t)
while(x<=3):
try:
if(M[x+1][y]<M[x][y+1]):
x+=1
elif(M[x+1][y]>M[x][y+1]):
y+=1
except(IndexError):
if(x<3):
x+=1
else:
break
t=M[x][y]
print(t)
w+=t
print(w)
| [
"noreply@github.com"
] | shreyal18ss.noreply@github.com |
b48ce47ae18e26b378819f5c9ecf150415fccd5a | 25cf15f81982348cdee729baa5c6c8ca19ab4506 | /Ziza/static/media/settings.py | 48970cf31f12b9a59aebed0ae3dce7f5eb92fa0c | [] | no_license | HarunColic/ZizaRepo | ca962f42cbb3a521e3121174d6bf615187dfb67c | 79cd051b88a39d678abd8aa329fd7cfdca40cb42 | refs/heads/master | 2020-03-26T15:17:32.182469 | 2020-03-03T12:00:46 | 2020-03-03T12:00:46 | 145,034,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,221 | py | u"""
Django settings for Ziza project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8yd0w1$#g6zzpigdqd7!3*80k5q7d=v!(rhv6#s_3fsw5=@t*v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['ziza.ba','www.ziza.ba','mail.ziza.ba', '127.0.0.1', '185.99.2.141']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'account',
'location',
'post',
'sweetify',
]
BROKET_HOST = "localhost"
BROKER_PORT = "5672"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
BROKER_VHOST = "/"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Ziza.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Ziza.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
#ONLINE DB
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dpswllhm',
'USER': 'dpswllhm',
'PASSWORD': 'INNeGggbTkmqk_zValpRFXdINOMS2HVW',
'HOST': 'horton.elephantsql.com',
'PORT': '',
}
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'zizadb',
'USER': 'cola',
'PASSWORD': 'cola',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Sarajevo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# Static and media file locations.
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): ADMIN_MEDIA_PREFIX was removed from Django well before 2.1;
# this setting is ignored by the framework.
ADMIN_MEDIA_PREFIX = 'static/admin'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# BUG FIX: removed a second, redundant `STATIC_URL = '/static/'` assignment
# that duplicated the identical value set above.
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
# Outgoing mail via Gmail SMTP with STARTTLS.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'testingziza@gmail.com'
# SECURITY NOTE(review): the SMTP password is committed in plain text -- load
# it from the environment and rotate the credential.
EMAIL_HOST_PASSWORD = 'Verification1'
EMAIL_PORT = 587
| [
"haruncolic@hotmail.com"
] | haruncolic@hotmail.com |
83b3c2d69dac365eecb94e332cdd2162199e5997 | 4e6ebd9c5cbd5cb7458e12bfce7e2ce930ad2315 | /src/datasets/tools/harmonization_mapping.py | 8e954c1bd3416a162521f2ab38fd296936f35e29 | [
"CC0-1.0"
] | permissive | qifang-robotics/lidar-harmonization | a9473a991c896ffd293898c6abfd34b9dd245130 | c99087fe5f807f3343ec07acd048b21aab57bbf7 | refs/heads/master | 2023-06-13T05:32:59.682066 | 2021-07-03T23:52:13 | 2021-07-03T23:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,468 | py | import pandas as pd
import numpy as np
from pathlib import Path
from shutil import copyfile
from src.datasets.tools.transforms import GlobalShift
import code
class HarmonizationMapping:
def __init__(self, config):
scans_path = config['dataset']['scans_path']
target_scan_num = config['dataset']['target_scan']
harmonization_path = config['dataset']['harmonized_path']
self.harmonization_path = Path(harmonization_path)
self.harmonization_path.mkdir(exist_ok=True, parents=True)
# 1. collect all scans
scans = [str(f) for f in Path(scans_path).glob("*.npy")]
# 2. select target scan(s)
target_scan_path = Path(scans_path) / (target_scan_num+".npy")
# copy to the harmonized path.
# - one time this just didn't work. Deleting the copy and restarting the
# program seemed to work.
if not (self.harmonization_path / (target_scan_num+".npy")).exists():
if not config['dataset']['shift']:
copyfile(
str(target_scan_path),
str(self.harmonization_path / (target_scan_num+".npy")))
else:
# move this later?
target = np.load(str(target_scan_path))
G = GlobalShift(**config["dataset"])
target = G(target)
np.save(str(self.harmonization_path / (target_scan_num+".npy")), target)
if not config['dataset']['create_new']:
if (self.harmonization_path / "df.csv").exists():
self.df = pd.read_csv((self.harmonization_path / "df.csv"), index_col=0)
else:
exit(f"Couldn't find HM csv file at {self.harmonization_path / 'df.csv'}")
else:
if (self.harmonization_path / "df.csv").exists():
# store a backup just in case
copyfile(str(self.harmonization_path / "df.csv"),
str(self.harmonization_path / "df_old.csv")
)
# initialize the df
self.df = pd.DataFrame(
columns=["source_scan",
"harmonization_target",
"source_scan_path",
"harmonization_scan_path",
"processing_stage"])
self.df.source_scan_path = scans
self.df.harmonization_target = [None]*len(scans)
self.df.harmonization_scan_path = [None]*len(scans)
self.df.source_scan = [int(Path(f).stem) for f in scans]
self.df.processing_stage = [0]*len(scans)
# setup target scan
target_scan_num = int(target_scan_num)
self.df.loc[self.df.source_scan == target_scan_num, "harmonization_target"] = int(target_scan_num)
self.df.loc[self.df.source_scan == target_scan_num, "harmonization_scan_path"] = str(self.harmonization_path / (str(target_scan_num)+".npy"))
self.df.loc[self.df.source_scan == target_scan_num, "processing_stage"] = 2
# need processing stages for each source. Sources start at stage 0.
# Stage 0 means that the sources haven't been identified as having
# any overlap with a target scan. By extension, they don't have
# examples in the dataset, nor do they have the harmonized
# version. A source scan enters stage one after overlap in the
# scan has been detected and examples have been added to the
# dataset. After a model is trained with the new dataset, this
# source scan can then be harmonized with the target. The source
# scan enters stage 2 after it has been harmonized. This source
# scan can now be used as a target scan to search for overlap
# regions with other soure scans. After all sources have been
# checked for overlap, the stage 2 source scan can then be moved
# to stage 3 (done). Stage 3 scans do not have to be used again.
# The harmonization is process is finished when all scans are stage
# 2 or higher OR all scans are stage 3 or stage 0.
self.save()
def __getitem__(self, source_scan_num):
# return the entire row for a source scan num (float or int or str)
return self.df.loc[self.df.source_scan == int(source_scan_num)]
def __len__(self):
return len(self.df)
def save(self):
self.df.to_csv(self.harmonization_path / "df.csv")
def done(self):
# there are two conditions for being done. If either are not satisified,
# then the whole process is not finished. The first condition is that
# all sources must be harmonized (all scans are stage 2 and above). In
# the event that a scan does not contain enough overlap to reach stage
# 1, all stage 2 and above scans will be harmonized to stage 3 while
# searching for overlap, so there will be no stage 1 or stage 2 sources
# remaining.
# All scans are harmonized
cond1 = ((1 not in self.df.processing_stage.values) and
(0 not in self.df.processing_stage.values))
# All scans are harmonized except for stage 0 scans which don't have
# any reasonable overlap
cond2 = ((2 not in self.df.processing_stage.values) and
(1 not in self.df.processing_stage.values))
return cond1 or cond2
def add_target(self, source_scan_num, harmonization_target_num):
self.df.loc[self.df.source_scan == int(source_scan_num), "harmonization_target"] = harmonization_target_num
self.save()
def incr_stage(self, source_scan_num):
self.df.loc[self.df.source_scan == int(source_scan_num), "processing_stage"] += 1
self.save()
def get_stage(self, stage_num):
return self.df.loc[self.df.processing_stage == int(stage_num)].source_scan.values.tolist()
def add_harmonized_scan_path(self, source_scan_num):
self.df.loc[self.df.source_scan == int(source_scan_num), "harmonization_scan_path"] = str(self.harmonization_path / (str(source_scan_num)+".npy"))
self.save()
def print_mapping(self):
print("Final Mapping:")
for idx, row in self.df.iterrows():
print(f"{row.source_scan}: {row.harmonization_target}")
| [
"d.t.jones@outlook.com"
] | d.t.jones@outlook.com |
34e5301e4eb916f43547d87a83b4bf95c03b3885 | 5982e164eb9e4622a2adda8c166ef39af22a09ad | /model_conf.py | 4c142f04ce06fe644cb36bff1be97eeb078b4aaa | [] | no_license | guy-amir/core | 0f82cbef4c4c8ffe5be76118428a8353090f087b | d9f920a16e503e4db4efff53750143a89ee15bdd | refs/heads/master | 2022-11-27T18:32:36.337902 | 2020-07-20T10:49:37 | 2020-07-20T10:49:37 | 266,842,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,646 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class cifar_net(nn.Module):
    """CNN feature extractor for 32x32 RGB (CIFAR-style) inputs.

    ``forward`` returns the input plus every intermediate activation so a
    caller can inspect per-layer representations (e.g. smoothness tests).
    """
    def __init__(self):
        super(cifar_net, self).__init__()
        # Conv block 1: 3 -> 64 channels, spatial 32 -> 16.
        self.conv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Conv block 2: 64 -> 128 channels, spatial 16 -> 8, light dropout.
        self.conv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
        )
        # Conv block 3: 128 -> 256 channels, spatial 8 -> 4.
        self.conv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # FC head, stage 1: flattened 4x4x256 = 4096 -> 1024.
        self.fc_layer1 = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True),
        )
        # FC head, stage 2: 1024 -> 256 embedding (consumed by the forest).
        self.fc_layer2 = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 256)
        )
    def forward(self, x):
        """Run the network.

        Returns the tuple (input, conv1, conv2, conv3-flattened, fc1, fc2)
        so every stage's activation is available to the caller.
        """
        feat1 = self.conv_layer1(x)
        feat2 = self.conv_layer2(feat1)
        feat3 = self.conv_layer3(feat2)
        # Flatten the final conv feature map for the fully-connected head.
        flat = feat3.view(feat3.size(0), -1)
        hidden = self.fc_layer1(flat)
        embedding = self.fc_layer2(hidden)
        return x, feat1, feat2, flat, hidden, embedding
class Forest(nn.Module):
    """Ensemble of soft decision trees fed by a shared CNN ("prenet").

    During training the per-leaf class statistics (``y_hat``) are computed
    from the batch labels; at evaluation time the statistics averaged over
    training batches (``y_hat_avg``) are used instead.
    """
    def __init__(self, prms):
        super(Forest, self).__init__()
        self.trees = nn.ModuleList()
        self.prms = prms
        # Accumulators for leaf label-distribution statistics.
        self.y_hat_avg = []
        # Bug fix: this was appended to in forward() but never initialized,
        # raising AttributeError on the first training pass.
        self.y_hat_batch_avg = []
        self.mu_list = []
        # The neural network that feeds into the trees:
        self.prenet = cifar_net()
        for _ in range(self.prms.n_trees):
            tree = Tree(prms)
            self.trees.append(tree)
    def forward(self, xb, yb=None, layer=None, save_flag=False):
        """Average tree predictions for a batch.

        Args:
            xb: input batch (raw images when ``prms.use_prenet`` is set).
            yb: integer class labels; required when training.
            layer: unused, kept for interface compatibility.
            save_flag: if set, cache routing probabilities in ``mu_list``.
        """
        self.predictions = []
        if self.training:
            # Fresh accumulator for this batch's per-tree leaf statistics.
            self.y_hat_batch_avg = []
            # convert yb from tensor to one-hot
            yb_onehot = torch.zeros(yb.size(0), int(yb.max() + 1))
            yb = yb.view(-1, 1)
            if yb.is_cuda:
                yb_onehot = yb_onehot.cuda()
            yb_onehot.scatter_(1, yb, 1)
        if self.prms.use_prenet:
            self.pred_list = self.prenet(xb)
            # Last element of the prenet output tuple is the embedding.
            xb = self.pred_list[-1]
            if not self.prms.use_tree:
                return xb
        for tree in self.trees:
            # construct routing probability tree:
            mu = tree(xb)
            # nodes in the second half of the flattened tree are the leaves:
            mu_midpoint = int(mu.size(1) / 2)
            mu_leaves = mu[:, mu_midpoint:]
            # normalizing factor per node, summed over the batch:
            N = mu.sum(0)
            if self.training:
                if self.prms.classification:
                    # empirical class distribution routed to each node
                    # NOTE(review): if prms.classification is False during
                    # training, y_hat_leaves is undefined below — confirm
                    # regression mode is never used with this path.
                    self.y_hat = yb_onehot.t() @ mu / N
                    y_hat_leaves = self.y_hat[:, mu_midpoint:]
                    self.y_hat_batch_avg.append(self.y_hat.unsqueeze(2))
            else:
                # evaluation: use leaf stats averaged over training batches
                # (assumes y_hat_avg was populated by the caller — confirm).
                y_hat_val_avg = torch.cat(self.y_hat_avg, dim=2)
                y_hat_val_avg = torch.sum(y_hat_val_avg, dim=2) / y_hat_val_avg.size(2)
                y_hat_leaves = y_hat_val_avg[:, mu_midpoint:]
            pred = (mu_leaves @ y_hat_leaves.t())
            if save_flag:
                self.mu_list.append(mu)
                if not self.training:
                    # Bug fix: y_hat_val_avg only exists on the eval path;
                    # previously this raised NameError when save_flag was
                    # set during training.
                    self.y_hat_val_avg = y_hat_val_avg
            self.predictions.append(pred.unsqueeze(1))
        self.prediction = torch.cat(self.predictions, dim=1)
        self.prediction = torch.sum(self.prediction, dim=1) / self.prms.n_trees
        return self.prediction
    def forward_wavelets(self, xb, cutoff_nodes, yb=None, layer=None, save_flag=False):
        """Forward pass restricted to the pruned tree given by ``cutoff_nodes``.

        A kept node acts as a leaf of the pruned tree when neither of its
        children was kept; leaf probabilities are renormalized per sample.
        """
        # convert yb from tensor to one-hot
        yb_onehot = torch.zeros(yb.size(0), int(yb.max() + 1))
        yb = yb.view(-1, 1)
        if yb.is_cuda:
            yb_onehot = yb_onehot.cuda()
        yb_onehot.scatter_(1, yb, 1)
        self.predictions = []
        if self.prms.use_prenet:
            self.pred_list = self.prenet(xb)
            xb = self.pred_list[-1]
            if not self.prms.use_tree:
                return xb
        for tree in self.trees:
            # construct routing probability tree:
            mu = tree(xb)
            nu = torch.zeros(mu.size())
            # mark which of the kept nodes are leaves of the pruned tree:
            leaves = torch.zeros(mu.size(1))
            for j in cutoff_nodes:
                nu[:, j] = mu[:, j]
                if 2 * j >= nu.size(1):
                    # bottom level of the full tree: always a leaf
                    leaves[j] = 1
                else:
                    # leaf iff neither child index was kept
                    if not (cutoff_nodes == 2 * j).sum() and not (cutoff_nodes == (2 * j + 1)).sum():
                        leaves[j] = 1
            # normalize leaf probabilities of the pruned tree per sample:
            # NOTE(review): the .cuda() calls assume a GPU is present.
            nu_leaves = nu * leaves
            nu_normalize_factor = nu_leaves.sum(1)
            nu_normalized = (nu_leaves.t() / nu_normalize_factor).cuda()
            # Bug fix: was ``10^-20`` which is bitwise XOR (== -26) in
            # Python, not a small epsilon.
            eps = 1e-20
            self.y_hat = nu_normalized.cuda() @ yb_onehot
            self.y_hat = self.y_hat.t() / (self.y_hat.sum(1) + eps)
            pred = (self.y_hat @ nu_normalized.cuda()).t()
            if save_flag:
                # Bug fix: previously also assigned an undefined
                # ``y_hat_val_avg`` here, raising NameError.
                self.mu_list.append(mu)
            self.predictions.append(pred.unsqueeze(2))
        self.prediction = torch.cat(self.predictions, dim=2)
        self.prediction = torch.sum(self.prediction, dim=2) / self.prms.n_trees
        if self.prms.check_smoothness:
            self.pred_list = list(self.pred_list)
            self.pred_list.append(self.prediction)
            return self.pred_list
        else:
            return self.prediction
class Tree(nn.Module):
    """A single soft (probabilistic-routing) decision tree.

    Each of the ``2**depth`` decision units looks at one randomly chosen
    neuron of the feature layer and emits a sigmoid routing probability.
    ``forward`` returns the probability of reaching every node (internal
    nodes and leaves) for each sample.
    """
    def __init__(self, prms):
        super(Tree, self).__init__()
        self.depth = prms.tree_depth
        self.n_leaf = 2 ** prms.tree_depth
        self.n_nodes = self.n_leaf  # -1
        self.n_features = prms.features4tree
        self.mu_cache = []
        self.prms = prms
        self.decision = nn.Sigmoid()
        #################################################################################################################
        onehot = np.eye(prms.feature_length)
        # randomly use some neurons in the feature layer to compute decision function
        self.using_idx = np.random.choice(prms.feature_length, self.n_leaf, replace=True)
        # Fixed (non-trainable) 0/1 mask selecting one feature per decision unit.
        self.feature_mask = onehot[self.using_idx].T
        self.feature_mask = nn.parameter.Parameter(torch.from_numpy(self.feature_mask).type(torch.FloatTensor), requires_grad=False)
        #################################################################################################################
    def forward(self, x, save_flag=False):
        """Return per-node routing probabilities, shape [batch, 2 * n_leaf]."""
        if x.is_cuda and not self.feature_mask.is_cuda:
            self.feature_mask = self.feature_mask.cuda()
        feats = torch.mm(x.view(-1, self.feature_mask.size(0)), self.feature_mask)
        # Bug fix: the sigmoid was applied on two consecutive duplicate
        # lines; the duplicate was redundant (idempotent but wasteful).
        decision = self.decision(feats)  # passed sigmoid -> [batch_size, n_leaf]
        decision = torch.unsqueeze(decision, dim=2)  # -> [batch_size, n_leaf, 1]
        decision_comp = 1 - decision
        decision = torch.cat((decision, decision_comp), dim=2)  # -> [batch_size, n_leaf, 2]
        mu = x.data.new(x.size(0), 1, 1).fill_(1.)
        # big_mu starts with two columns of ones (root-level placeholders).
        big_mu = x.data.new(x.size(0), 2, 1).fill_(1.)
        begin_idx = 1
        end_idx = 2
        for n_layer in range(0, self.depth):
            # mu stores the probability a sample is routed to each node of
            # the current layer; repeat it for the left and right children.
            mu = mu.repeat(1, 1, 2)
            # the routing probability at n_layer
            _decision = decision[:, begin_idx:end_idx, :]  # -> [batch_size, 2**n_layer, 2]
            mu = mu * _decision  # -> [batch_size, 2**n_layer, 2]
            begin_idx = end_idx
            end_idx = begin_idx + 2 ** (n_layer + 1)
            # merge left and right nodes into one flat layer
            mu = mu.view(x.size(0), -1, 1)
            big_mu = torch.cat((big_mu, mu), 1)
        big_mu = big_mu.view(x.size(0), -1)
        return big_mu  # -> [batch_size, 2 * n_leaf]
def level2nodes(tree_level):
    """Total node count of a perfect binary tree up to ``tree_level``."""
    return 2 ** (tree_level + 1)
def level2node_delta(tree_level):
    """Return ``[start, end]`` node counts bracketing ``tree_level``."""
    bounds = [level2nodes(tree_level - 1), level2nodes(tree_level)]
    return bounds
"guy.amir.tech@gmail.com"
] | guy.amir.tech@gmail.com |
9afc659a83985ca5e7a34f87ceb3a5de075cc25b | 5a3b070f39715f604a8bfc38888b6ee4382e54ac | /TalkTalk-Server/app.py | aa21f179f70f37f987a80665e81a7a672d8cc074 | [] | no_license | aupaprunia/talktalk | 717245ec0378559abf2dba0793822d19613faf57 | 895418aa25ad154449f4036362a77b615092b00b | refs/heads/main | 2023-04-13T03:53:37.361677 | 2021-04-11T19:08:54 | 2021-04-11T19:08:54 | 356,480,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | from flask import Flask, request
import pyrebase
import requests
# Maps the client's numeric mood choice to its Firebase node name.
choice_dict = {1:"Sad", 2: "Happy", 3: "Angry", 4: "Excited"}
# Firebase project configuration.
# NOTE(review): the API key is hard-coded in source; consider moving it to
# an environment variable / config file before publishing this code.
config = {"apiKey": "AIzaSyBrey3ZZ5X74WrAQuj7HISWLl70PqP8dnA",
          "authDomain": "trialproject-55deb.firebaseapp.com",
          "databaseURL": "https://trialproject-55deb-default-rtdb.firebaseio.com",
          "projectId": "trialproject-55deb",
          "storageBucket": "trialproject-55deb.appspot.com",
          "messagingSenderId": "930590452475",
          "appId": "1:930590452475:web:d8857d9906874468fd5e5e"
          }
# Module-level Firebase handles shared by all routes.
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
db = firebase.database()
app = Flask(__name__)
# @app.route('/signup', methods =['GET'])
# def signup():
#     register = request.get_json()
#     email = register['email']
#     password = register['password']
#     auth.create_user_with_email_and_password(email, password)
#     return {"status": " success", "email": email, "password": password}
@app.route('/signin/<string:email>/<string:password>', methods = ['GET'])
def signin(email, password):
    # Authenticate against Firebase. On success, cache the user's id and
    # token balance in module-level globals (used later by /speaker and
    # /listner) and return {token, status, name}; on any failure return
    # {"status": 0}.
    # NOTE(review): credentials travel in the URL path — they will appear
    # in server logs; consider POST with a body instead.
    try:
        result = auth.sign_in_with_email_and_password(email, password)
        global userId
        userId = result['localId']
        get_token = db.child("Users").child(userId).get()
        global token
        token = get_token.val()['token']
        name = get_token.val()['name']
        return{"token": token, "status": 1, "name": name}
    except:
        # Bare except: any error (bad credentials, network, missing user
        # record) is reported to the client as a generic sign-in failure.
        return {"status": 0}
@app.route('/speaker/<int:choice>', methods=["GET"])
def speaker(choice):
    """Pair the signed-in speaker with the first available listener.

    Marks the chosen listener busy, charges the speaker one token and
    sends an FCM push notification; returns the listener uid as the
    call channel name, or status 0 when no listener is available.
    """
    try:
        users = db.child("Online").child("Listener").child(choice_dict[choice]).get()
        # Take the first listener key returned for this mood.
        uid = ""
        flag = True
        for key in users.val():
            if flag == True:
                uid = key
                flag = False
        db.child("Online").child("Listener").child(choice_dict[choice]).child(uid).child("status").set("1")
        db.child("Users").child(userId).child("token").set(token - 1)
        url = "https://fcm.googleapis.com/fcm/send"
        # Bug fix: the hand-built JSON was malformed — the "to" string value
        # was never closed, so FCM would reject the request body.
        # NOTE(review): this publishes to the topic named after the signed-in
        # user's uid — confirm the listener subscribes to that topic.
        payload = ('{"to": "/topics/' + userId + '", '
                   '"data": {"title": "Alert", "body": "You have an incoming call..."}}')
        headers = {'Authorization': 'key=AAAA2KuDavs:APA91bGCwqzJYQntRNVZU4WfjDh71D2kLvI4ei3iXr9BIlrz-lzp3HdzZWKAWghUwZK0i1rvC0RKFl2rdk1uyAf3RozvlPO1snRvwYpxJVz5qAH5keFgzygj8h16D0g-YDHrz6SoqJfh',
                   'Content-Type': 'application/json'}
        response = requests.request("POST", url, headers=headers, data=payload)
        print(response)
        return {"channel_name": uid, "status": 1}
    except Exception:
        # Narrowed from a bare except (which would also swallow
        # KeyboardInterrupt/SystemExit).
        return {"message": "No Listner available. Try reconnecting later.", "status": 0}
@app.route('/listner/<int:choice>', methods = ["GET"])
def push_listner(choice):
    # Register the signed-in user as an available listener for the given
    # mood: status "0" means idle/available, and the user is credited one
    # token for volunteering. Relies on the module globals set by /signin.
    db.child("Online").child("Listener").child(choice_dict[choice]).child(userId).child("status").set("0")
    db.child("Online").child("Listener").child(choice_dict[choice]).child(userId).child("uid").set(userId)
    db.child("Users").child(userId).child("token").set(token+1)
    return {"status" : 1, "message": "You will be connected to a speaker shortly."}
if __name__ == '__main__':
    # Run the Flask development server (debug mode: not for production).
    app.run(debug = True)
"="
] | = |
08dfeef07dc2184dd58ed15584e4a9d792be3383 | 3a8c2bd3b8df9054ed0c26f48616209859faa719 | /Challenges/Hackerrank-DynamicArray.py | c63264cadb5c93066503209dd51764b1eaa68ce0 | [] | no_license | AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges | 684f1ca2f9ee3c49d0b17ecb1e80707efe305c82 | 98fb752c574a6ec5961a274e41a44275b56da194 | refs/heads/master | 2023-09-01T23:58:15.514231 | 2021-09-10T12:42:03 | 2021-09-10T12:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | #!/bin/python3
import os
import sys
#
# Complete the dynamicArray function below.
#
def dynamicArray(n, queries):
    """Solve HackerRank's "Dynamic Array" problem.

    Maintains ``n`` sequences and a running ``last_answer``. For each
    query [type, x, y]:
      * type 1: append y to sequence ((x XOR last_answer) % n)
      * type 2: set last_answer to element (y % len(seq)) of that
        sequence and record it in the returned answers list.
    The original function was an empty stub although __main__ expects a
    list of answers.
    """
    sequences = [[] for _ in range(n)]
    last_answer = 0
    answers = []
    for q_type, x, y in queries:
        idx = (x ^ last_answer) % n
        if q_type == 1:
            sequences[idx].append(y)
        else:
            last_answer = sequences[idx][y % len(sequences[idx])]
            answers.append(last_answer)
    return answers
if __name__ == '__main__':
    # HackerRank harness: read n and q, then q queries of three ints each,
    # and write one answer per line to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nq = input().split()
    n = int(nq[0])
    q = int(nq[1])
    queries = []
    for _ in range(q):
        queries.append(list(map(int, input().rstrip().split())))
    result = dynamicArray(n, queries)
    fptr.write('\n'.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
| [
"bennyhwangg@gmail.com"
] | bennyhwangg@gmail.com |
ef3c9320250f5176230f9473e05712b816609be3 | 1a9e6140d633c660360dbc543b4d7bcb7ad1c0d9 | /tests/QueueTest/run.py | 239b7b4710c2ceb58e626c4d8ade1bb87bcd117e | [
"Apache-2.0"
] | permissive | mjj29/apama-epl-containers | aa80c2e3806e196c8444b4ae97ae6e41ca662e82 | d3f4d5f9ebd21481ab90a9386a733d394cd01f2a | refs/heads/master | 2022-11-11T06:43:57.481269 | 2020-06-29T15:05:35 | 2020-06-29T15:05:35 | 275,584,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | from pysys.constants import *
from pysys.basetest import BaseTest
from apama.correlator import CorrelatorHelper
import os
class PySysTest(BaseTest):
	"""Injects Queue.mon plus every .mon monitor in the input directory
	into an Apama correlator and passes iff the log has no ERROR lines."""
	def execute(self):
		corr = CorrelatorHelper(self, name='correlator')
		corr.start(logfile='correlator.log')
		corr.injectEPL('../../../Queue.mon')
		tests = os.listdir(self.input);
		# sort for a deterministic injection order
		tests.sort()
		for test in tests:
			if test.endswith('.mon'):
				corr.injectEPL(test)
		# wait until all injected events have been fully processed
		corr.flush()
		corr.shutdown()
	def validate(self):
		# Any " ERROR " line in the correlator log fails the test.
		self.assertGrep('correlator.log', expr=' ERROR ', contains=False)
| [
"github@matthew.ath.cx"
] | github@matthew.ath.cx |
1bc7f18b6e720d8b738fcb1a45352c500021c7e8 | 180c58986bfe06ff04a0d36bd6107aa9d2b27373 | /tools/harness/harness.py | 53c67266194582960bb561df6c2b0b09d77d7bed | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-dco-1.1",
"MIT"
] | permissive | mattweingarten/advanced-operating-systems | 086d88ab25a03a32f3ade11ae34edc638428a369 | 603f9fd81e8246521f05a7aeefcbd3ad12f71657 | refs/heads/master | 2023-09-01T10:26:02.100704 | 2021-10-24T11:25:07 | 2021-10-24T11:25:07 | 420,660,000 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,025 | py | #
# Copyright (c) 2009-2011, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
#
import os
import types
import string
import datetime
import debug
import re
class Harness:
    """Runs a single test on a target machine, captures the raw console
    output, and post-processes it into logs, error lists and result files.

    NOTE: this is Python 2 code (`unicode`, `types.ListType`, 2.x
    `filter` returning a str)."""
    RAW_FILE_NAME = 'raw.txt'
    MENU_LST_FILE_NAME = 'menu.lst'
    BOOT_FILE_NAME = 'bootlog.txt'
    # Matches the payload of ANSI color escapes such as "[31m" so they can
    # be stripped from output lines.
    TERM_FILTER = re.compile("\[\d\d?m")

    def _clean_line(self, line):
        """Return `line` stripped of non-printable chars and color codes."""
        # filter output line of control characters
        filtered_out = filter(lambda c: c in string.printable, line.rstrip())
        # Delete terminal color codes from output
        filtered_out = self.TERM_FILTER.sub('', filtered_out)
        return filtered_out

    def _write_menu_lst_debug(self, test, build, machine, path):
        """Write the test's boot menu.lst into `path` for debugging,
        if the test exposes one."""
        # Ignore for tests that do not implement get_modules
        if hasattr(test, "get_modules"):
            menu_lst_file_name = os.path.join(path, self.MENU_LST_FILE_NAME)
            debug.verbose("harness: writing menu.lst to %s" % menu_lst_file_name)
            with open(menu_lst_file_name, "w") as menu:
                menu.write( test.get_modules(build, machine).get_menu_data("/") )

    def run_test(self, build, machine, test, path):
        """Execute `test` on `machine`, streaming its output to raw.txt.

        Each line is echoed to the debug log with a relative timestamp;
        test.cleanup() always runs, even on interrupt or failure."""
        # Open files for raw output from the victim and log data from the test
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        debug.verbose('open %s for raw output' % raw_file_name)
        raw_file = open(raw_file_name, 'w')

        # run the test, dumping the output to the raw file as we go
        try:
            debug.verbose('harness: setup test')
            test.setup(build, machine, path)
            self._write_menu_lst_debug(test, build, machine, path)
            debug.verbose('harness: run test')
            starttime = datetime.datetime.now()
            for out in test.run(build, machine, path):
                # timedelta for the time this line was emitted from the start of the run
                timestamp = datetime.datetime.now() - starttime
                # format as string, discarding sub-second precision
                timestr = str(timestamp).split('.', 1)[0]
                debug.debug('[%s] %s' % (timestr, self._clean_line(out)))
                # log full raw line (without timestamp) to output file
                raw_file.write(out)
            debug.verbose('harness: output complete')
        except KeyboardInterrupt:
            # let the user know that we are on our way out
            debug.error('Interrupted! Performing cleanup...')
            raise
        finally:
            raw_file.close()
            debug.verbose('harness: cleanup test')
            test.cleanup(machine)

    def process_output(self, test, path):
        """Process raw.txt and return array of output lines that begins with grubs
        output, avoids having encoding issues when generating other report files"""
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        if os.path.exists(raw_file_name):
            idx = 0
            with open(raw_file_name, 'r') as rf:
                lines = rf.readlines()
                # skip everything before the boot marker printed by GRUB or
                # the CPU driver, if such a marker exists
                for idx, line in enumerate(lines):
                    if line.strip() == "root (nd)" or \
                       line.strip().startswith("Kernel starting at address") or \
                       "Barrelfish CPU driver starting on ARMv8" in line:
                        break
                if idx == len(lines)-1:
                    debug.verbose('magic string "root (nd)" or "Kernel starting at address" not found, assuming no garbage in output')
                    idx=0
                # Python 2 `unicode`; undecodable bytes are replaced
                return [ unicode(self._clean_line(l), errors='replace') for l in lines[idx:] ]
        # file did not exist
        return ["could not open %s to process test output" % raw_file_name]

    def extract_errors(self, test, path):
        """Return [failure reason] plus the test's error list, if any."""
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        debug.verbose('open %s for raw input' % raw_file_name)
        raw_file = open(raw_file_name, 'r')
        try:
            results = test.process_data(path, raw_file)
        finally:
            raw_file.close()
        errors = [results.reason()]
        try:
            errors += results.errors
        except:
            # result objects are not required to define .errors
            pass
        return errors

    def process_results(self, test, path):
        """Parse raw.txt into result objects; write bootlog.txt and one
        .dat file per named result. Returns False iff any result failed."""
        # open raw file for input processing
        raw_file_name = os.path.join(path, self.RAW_FILE_NAME)
        debug.verbose('open %s for raw input' % raw_file_name)
        raw_file = open(raw_file_name, 'r')
        try:
            results = test.process_data(path, raw_file)
        finally:
            raw_file.close()

        if not results:
            debug.verbose('no results')
            return True # no results, assume success

        retval = True # everything OK

        # Process raw.txt and make a bootlog.txt that begins with grubs or
        # Barrelfish's output, avoids having encoding issues when viewing logfiles
        boot_file_name = os.path.join(path, self.BOOT_FILE_NAME)
        if os.path.exists(raw_file_name):
            idx = 0
            with open(raw_file_name, 'r') as rf:
                lines = rf.readlines()
                for idx, line in enumerate(lines):
                    if line.strip() == "root (nd)" or \
                       "Barrelfish CPU driver starting" in line.strip():
                        break
            if idx > 0:
                with open(boot_file_name, 'w') as wf:
                    wf.writelines(lines[idx:])
            else:
                debug.verbose('Magic string root (nd) not found, do not write bootlog.txt')
        else:
            debug.verbose('No file named %s exists. Do not create bootlog.txt.' % raw_file_name)

        # if a single result, turn it into a list
        if not isinstance(results, types.ListType):
            results = [results]

        for result in results:
            # see if it passed
            try:
                passed = result.passed()
            except NotImplementedError:
                passed = None
            if passed is False:
                debug.log('Test %s FAILED %s' % (test.name, '(' + result.reason() + ')') )
                retval = False
            elif passed:
                debug.verbose('Test %s PASSED' % test.name)

            # write it to a file
            name = result.name if result.name else 'results'
            data_file_name = os.path.join(path, name + '.dat')
            debug.verbose('create %s for processed output' % data_file_name)
            data_file = open(data_file_name, 'w')
            try:
                result.to_file(data_file)
                data_file.close()
            except NotImplementedError:
                # result has no file representation: remove the empty file
                debug.verbose('no processed output, remove %s' % data_file_name)
                data_file.close()
                os.remove(data_file_name)
        return retval
| [
"daniel.schwyn@inf.ethz.ch"
] | daniel.schwyn@inf.ethz.ch |
36fdbfe12bf2e030e562d801fd497d4834a91dc5 | 562504cf85d80c5e9a0a851a0a9d4f0db07b2b1f | /kt_web/schedule.py | cdf7b0f269ed256bc03d86b7bd247e8748507aa7 | [
"MIT"
] | permissive | MrKiven/KT-Web | 07cfe3c400b9059cf2cfd833f1caddf7e4ac7443 | 9256a4943ac5bb3c14fbec5faf17ef84ff0feaf4 | refs/heads/master | 2021-01-18T22:00:53.558305 | 2016-09-27T10:32:40 | 2016-09-27T10:32:40 | 51,062,361 | 17 | 6 | null | 2016-09-14T08:19:37 | 2016-02-04T08:28:33 | Python | UTF-8 | Python | false | false | 2,555 | py | # -*- coding: utf-8 -*-
import gevent
import gevent.event
class Schedule(object):
    """Wraps a task function so it can be spawned and killed as a greenlet."""
    def __init__(self, func, stop_event):
        self.func = func
        self.stop_event = stop_event
        # The running greenlet; None (or a finished greenlet) means "not running".
        self.g = None
    @property
    def name(self):
        """Name of the wrapped task function."""
        return self.func.__name__
    def trigger(self):
        """Spawn the task as a greenlet, passing it the shared stop event.

        Does nothing if a greenlet is already active or func is not callable.
        """
        if self.g or not callable(self.func):
            return
        self.g = gevent.spawn(self.func, self.stop_event)
    def stop(self):
        """Kill the active greenlet, if there is one."""
        if not self.g:
            return
        gevent.kill(self.g)
class ScheduleManager(object):
    """Registry of Schedule objects sharing one gevent stop event.

    Typical usage:

        schedule_manager.clear_schedules()   # reset the stop event
        schedule_manager.add(task)
        schedule_manager.trigger(task)       # or trigger_all()
        ...
        schedule_manager.stop(task)          # or set_events() to stop all

    Each task function must take the stop event as its only argument and
    loop while the event is not set, yielding via IO or gevent.sleep().
    """
    def __init__(self):
        # One shared gevent.event.Event controls every registered schedule;
        # schedules maps each task function to its Schedule wrapper.
        self.stop_event = gevent.event.Event()
        self.schedules = {}
    def add(self, func):
        """Register a task function; adding the same function twice is a no-op."""
        if func in self.schedules:
            return
        self.schedules[func] = Schedule(func, self.stop_event)
    def trigger(self, func):
        """Start the schedule registered for ``func`` as a greenlet."""
        self.schedules[func].trigger()
    def trigger_all(self):
        """Start every registered schedule."""
        for schedule in self.schedules.values():
            schedule.trigger()
    def clear(self):
        """Forget all registered schedules."""
        self.schedules.clear()
    def clear_schedules(self):
        """Reset the shared stop event to False so schedules may run."""
        self.stop_event.clear()
    def stop(self, func):
        """Kill the greenlet behind ``func``'s schedule."""
        self.schedules[func].stop()
    def set_events(self):
        """Signal every schedule to stop by setting the shared event."""
        if self.stop_event.is_set():
            return
        self.stop_event.set()  # set event True
# Module-level singleton shared by all importers of this module.
schedule_manager = ScheduleManager()
| [
"kiven.mr@gmail.com"
] | kiven.mr@gmail.com |
91a07b21c96e78c49fbbef9d59d11dcb8579ac4f | 4a77af523c9e319ba44c6ceaf821208f95c97d5c | /random_name.py | f4926d0b278f3403fe852324fc1f85b2655aeb45 | [] | no_license | marteczkah/BAM_coding_resources | 2de2a9585a505094431b047f718f1126fa0bb81d | b5cd143bfcafa8b0374b7a04b90db517b79c048a | refs/heads/main | 2023-07-13T16:02:12.792447 | 2021-08-18T16:02:08 | 2021-08-18T16:02:08 | 387,182,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from random import randint
names = ['Ariya', 'Chandini', 'Kaitlyn', 'Anjali', 'Josephine', 'Zachias',
'Joel', 'Jayden', 'Larriyah', 'Trevor', 'Kimiwa', 'Suvil', 'Walddy',
'Eladio', 'Drew', 'Sasha', 'Lauren', 'Neesh', 'Raisha', 'Faye']
random_index = randint(0, len(names) - 1)
print(names[random_index])
| [
"48037981+marteczkah@users.noreply.github.com"
] | 48037981+marteczkah@users.noreply.github.com |
f3c3f92fb93dfc923bdd1d17ba4bdb315bab5d81 | 74cd45191c0f735acc80f13599fe72044da53c56 | /Greedy_Algorithms_Minimum_Spanning_Trees_and_Dynamic_Programming/Assignment2/unionfind.py | 75d3fca9b2c361e9c60bdce4e2136a2f6a1cf367 | [] | no_license | shenghao001/algorithms | 6c41041f5132ce775acc20d0f9d4487cd3e093b6 | 09410e0214be715949057b3edc0a037cb40680c9 | refs/heads/master | 2022-03-01T07:00:56.510327 | 2019-08-31T17:25:43 | 2019-08-31T17:25:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | """ Union Find basic structure """
class UnionFind():
def __init__(self, nodes):
self.root = dict(zip(nodes, nodes))
self.subtree = dict(zip(nodes, [[node] for node in nodes]))
def find(self, node):
""" find the root of a node """
return self.root[node]
def union(self, i, j):
""" union two nodes i and j by merging a smaller tree to the larger one """
pi, pj = self.root[i], self.root[j]
if pi != pj:
if len(self.subtree[pj]) > len(self.subtree[pi]):
pi, pj = pj, pi
for node in self.subtree[pj]:
self.root[node] = pi
self.subtree[pi] += self.subtree[pj]
del self.subtree[pj]
else:
return
| [
"anmourchen@gmail.com"
] | anmourchen@gmail.com |
af9e8bb66e0e059bd3abada0e92af12d59469d4d | 6e70b35111371cf082a3a9294f96f946a28128f7 | /nets/densenet.py | 70f28e644f743e844a3bf084f1996aae19f1f260 | [] | no_license | SDMrFeng/quiz-w8-densenet | 46a4dc5c398591d5dc52fa1bd9a478735aec6f63 | 6456a484e57a652cbf9dfbdb393cf545876ebec9 | refs/heads/master | 2020-03-28T15:49:38.090040 | 2018-09-16T07:00:59 | 2018-09-16T07:00:59 | 148,629,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,130 | py | """Contains a variant of the densenet model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def trunc_normal(stddev):
    """Return a truncated-normal weight initializer with the given stddev."""
    return tf.truncated_normal_initializer(stddev=stddev)
def bn_act_conv_drp(current, num_outputs, kernel_size, scope='block'):
    """Pre-activation composite layer: BN -> ReLU -> conv -> dropout."""
    net = slim.batch_norm(current, scope=scope + '_bn')
    net = tf.nn.relu(net)
    net = slim.conv2d(net, num_outputs, kernel_size, scope=scope + '_conv')
    return slim.dropout(net, scope=scope + '_dropout')
def block(net, layers, growth, scope='block'):
    """Dense block: each layer's output is concatenated onto its input.

    Every layer is a 1x1 bottleneck (4 * growth channels) followed by a
    3x3 convolution producing ``growth`` new feature maps.
    """
    for idx in range(layers):
        suffix = str(idx)
        bottleneck = bn_act_conv_drp(net, 4 * growth, [1, 1],
                                     scope=scope + '_conv1x1' + suffix)
        grown = bn_act_conv_drp(bottleneck, growth, [3, 3],
                                scope=scope + '_conv3x3' + suffix)
        # Concatenate along the channel axis (NHWC layout).
        net = tf.concat(axis=3, values=[net, grown])
    return net
def transition(net, num_outputs, scope='transition'):
    """Transition layer: 1x1 conv compression then 2x2 average pooling."""
    compressed = bn_act_conv_drp(net, num_outputs, [1, 1],
                                 scope=scope + '_conv1x1')
    return slim.avg_pool2d(compressed, [2, 2], stride=2,
                           scope=scope + '_avgpool2x2')
def densenet(images, num_classes=1001, is_training=False,
             dropout_keep_prob=0.8,
             scope='densenet'):
    """Creates a variant of the densenet model.

    images: A batch of `Tensors` of size [batch_size, height, width, channels].
      num_classes: the number of classes in the dataset.
      is_training: specifies whether or not we're currently training the model.
        This variable will determine the behaviour of the dropout layer.
      dropout_keep_prob: the percentage of activation values that are retained.
      prediction_fn: a function to get predictions out of logits.
      scope: Optional variable_scope.

    Returns:
      logits: the pre-softmax activations, a tensor of size
        [batch_size, `num_classes`]
      end_points: a dictionary from components of the network to the corresponding
        activation.
    """
    # growth: channels added by each dense-block layer; compression_rate:
    # channel reduction applied at every transition (DenseNet-BC style).
    growth = 24
    compression_rate = 0.5

    def reduce_dim(input_feature):
        # number of channels to keep after compressing in a transition layer
        return int(int(input_feature.shape[-1]) * compression_rate)

    end_points = {}

    with tf.variable_scope(scope, 'DenseNet', [images, num_classes]):
        with slim.arg_scope(bn_drp_scope(is_training=is_training,
                                         keep_prob=dropout_keep_prob)) as ssc:
            ############# My code start ##############
            #224 x 224 x 3
            end_point = 'Conv2d_0'
            net = slim.conv2d(images, 2 * growth, [7, 7], stride=2, scope=end_point)
            end_points[end_point] = net

            #112 x 112 x 2g (g:growth)
            end_point = 'MaxPool_0'
            net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
            end_points[end_point] = net

            # Four dense blocks of 6/12/24/16 layers, with a compressing
            # transition (conv + 2x2 avg-pool) between consecutive blocks.
            #56 x 56 x 2g
            end_point = 'DenseBlock_1'
            net = block(net, 6, growth, scope=end_point)
            end_points[end_point] = net

            #56 x 56
            end_point = 'Transition_1'
            net = transition(net, reduce_dim(net), scope=end_point)
            end_points[end_point] = net

            #28 x 28
            end_point = 'DenseBlock_2'
            net = block(net, 12, growth, scope=end_point)
            end_points[end_point] = net

            #28 x 28
            end_point = 'Transition_2'
            net = transition(net, reduce_dim(net), scope=end_point)
            end_points[end_point] = net

            #14 x 14
            end_point = 'DenseBlock_3'
            net = block(net, 24, growth, scope=end_point)
            end_points[end_point] = net

            #14 x 14
            end_point = 'Transition_3'
            net = transition(net, reduce_dim(net), scope=end_point)
            end_points[end_point] = net

            #7 x 7
            end_point = 'DenseBlock_4'
            net = block(net, 16, growth, scope=end_point)
            end_points[end_point] = net

            #7 x 7
            end_point = 'last_bn_relu'
            net = slim.batch_norm(net, scope=end_point)
            net = tf.nn.relu(net)
            end_points[end_point] = net

            #7 x 7
            # Global average pooling.
            end_point = 'global_avg_pool'
            net = slim.avg_pool2d(net, net.shape[1:3], scope=end_point)
            end_points[end_point] = net

            #1 x 1
            # Fully-connected, implemented as a 1x1 convolution over the
            # pooled feature map; the spatial dims are squeezed away below.
            end_point = 'logits'
            biases_initializer = tf.constant_initializer(0.1)
            pre_logits = slim.conv2d(net, num_classes, [1, 1],
                                     biases_initializer=biases_initializer,
                                     scope=end_point)
            logits = tf.squeeze(pre_logits, [1, 2], name='SpatialSqueeze')
            end_points[end_point] = logits

            # Softmax prediction
            end_points['predictions'] = slim.softmax(logits, scope='predictions')
            ############### My code end #############
    return logits, end_points
def bn_drp_scope(is_training=True, keep_prob=0.8):
    """Arg scope configuring batch-norm and dropout for train/eval mode.

    Outside of training, dropout is effectively disabled (keep_prob=1)
    and batch norm uses its stored statistics.
    """
    keep_prob = keep_prob if is_training else 1
    with slim.arg_scope(
        [slim.batch_norm],
            scale=True, is_training=is_training, updates_collections=None):
        with slim.arg_scope(
            [slim.dropout],
                is_training=is_training, keep_prob=keep_prob) as bsc:
            return bsc
def densenet_arg_scope(weight_decay=0.004):
    """Defines the default densenet argument scope.

    Args:
      weight_decay: The weight decay to use for regularizing the model.

    Returns:
      An `arg_scope` to use for the inception v3 model.
    """
    # He (variance-scaling, fan-in) initialization; no per-conv activation
    # or bias because the bn_act_conv_drp composite applies BN+ReLU itself.
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=tf.contrib.layers.variance_scaling_initializer(
            factor=2.0, mode='FAN_IN', uniform=False),
        activation_fn=None, biases_initializer=None, padding='same',
            stride=1) as sc:
        return sc
densenet.default_image_size = 224
| [
"fengxuezhi@outlook.com"
] | fengxuezhi@outlook.com |
485514f8b755653e4f00af632730eb4f46e3e4bc | ed65f5edb7abbb4664f619626ea633deeeb9e571 | /E.py | b0f7f9d99454c014cf36ba403fe3dced274d8eb5 | [] | no_license | JorgeAndre12/Intelligent-drowsiness-monitor-for-safer-driving-through-vision. | 2cab29248da799fec98a07affe41d7adcf761961 | 2a1e03bcb9d6243246fc180d9c15adb74f34752d | refs/heads/master | 2020-05-09T20:30:57.442899 | 2019-06-01T18:10:05 | 2019-06-01T18:10:05 | 181,410,160 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,323 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 01:48:57 2019
@author: VAI
"""
import smbus
import numpy as np
import urllib.request
import cv2
import pygame
import time
import os
import math
import requests
class MMA7455():
    """Driver for the MMA7455 accelerometer on I2C address 0x1D."""

    # One shared I2C bus handle (bus 1) for all instances.
    bus = smbus.SMBus(1)

    def __init__(self):
        """Put the chip into measurement mode and zero the offset registers."""
        self.bus.write_byte_data(0x1D, 0x16, 0x55)  # mode-control register
        # Calibration: clear the six offset-drift registers 0x10..0x15.
        for register in range(0x10, 0x16):
            self.bus.write_byte_data(0x1D, register, 0)

    def getValueX(self):
        """Raw 8-bit X-axis reading."""
        return self.bus.read_byte_data(0x1D, 0x06)

    def getValueY(self):
        """Raw 8-bit Y-axis reading."""
        return self.bus.read_byte_data(0x1D, 0x07)

    def getValueZ(self):
        """Raw 8-bit Z-axis reading."""
        return self.bus.read_byte_data(0x1D, 0x08)
file = 'b.mp3'  # alarm sound played when drowsiness is detected
pygame.init()
pygame.mixer.init()
# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascade
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml')
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade/haarcascade_eye.xml')
cap = cv2.VideoCapture(0)  # default webcam
# Crash-notification endpoint and payload (Soracom Funnel).
url = 'http://funnel.soracom.io'
payload = '{"deviceid" : "Car 0001", "lat" : 19.635, "lon" : -99.276}'
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
nf=1  # number of faces in the current frame
ne=1  # number of eyes in the current frame
count=0
mma = MMA7455()
# Baseline acceleration on each axis; readings are 8-bit two's complement,
# so values above 127 are sign-extended by subtracting 255.
xmem=mma.getValueX()
ymem=mma.getValueY()
zmem=mma.getValueZ()
if(xmem > 127):
    xmem=xmem-255
if(ymem > 127):
    ymem=ymem-255
if(zmem > 127):
    zmem=zmem-255
# time1/time2 track how long eyes have been closed.
time1=time.time()
time2=time.time()
while 1:
    # Read current acceleration (sign-extended like the baseline).
    x = mma.getValueX()
    y = mma.getValueY()
    z = mma.getValueZ()
    if(x > 127):
        x=x-255
    if(y > 127):
        y=y-255
    if(z > 127):
        z=z-255
    # A jump of more than 10 counts on any axis is treated as a crash:
    # report the event and terminate.
    if(abs(xmem-x)>10):
        print('crash')
        r = requests.post(url, data=payload, headers=headers)
        exit()
    if(abs(ymem-y)>10):
        print('crash')
        r = requests.post(url, data=payload, headers=headers)
        exit()
    if(abs(zmem-z)>10):
        print('crash')
        r = requests.post(url, data=payload, headers=headers)
        exit()
    # Grab a frame and run face detection on the grayscale image.
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        # Look for eyes only inside the detected face region.
        eyes = eye_cascade.detectMultiScale(roi_gray,1.3, 40)
        ne=len(eyes)
        for (ex,ey,ew,eh) in eyes:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
    nf=len(faces)
    # Face present but no eyes detected -> eyes are closed; if that lasts
    # 3 seconds or more, sound the alarm.
    if(nf>0 and ne<1):
        time1=time.time()
        print(time1-time2)
        if((time1-time2)>=3):
            pygame.mixer.music.load(file)
            pygame.mixer.music.play()
    else:
        # Eyes visible (or no face): stop the alarm and reset the timer.
        pygame.mixer.music.stop()
        time1=time.time()
        time2=time1
    cv2.imshow('img',img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break
cv2.destroyAllWindows()
"noreply@github.com"
] | JorgeAndre12.noreply@github.com |
dd1e606bd18714221455e014139774e53a1e0be2 | 51c099284113773d0d46529443b171d3b871102a | /venv/lib/python3.6/site-packages/twilio/rest/preview/hosted_numbers/__init__.py | 316f6a948e493ba2b10e134f95cb5ec7c11587a8 | [] | no_license | ameerbadri/twilio-taskrouter-realtime-dashboard | a9b1582858ef1bb7bf6e2a1fae47349134cc6072 | 984ca897e53bb04cebba20d909b4c6977a7f306e | refs/heads/master | 2023-02-24T08:05:36.340105 | 2022-09-28T21:10:44 | 2022-09-28T21:10:44 | 101,177,223 | 58 | 45 | null | 2023-02-15T21:33:47 | 2017-08-23T12:25:12 | Python | UTF-8 | Python | false | false | 1,194 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.preview.hosted_numbers.hosted_number_order import HostedNumberOrderList
class HostedNumbers(Version):
    """'HostedNumbers' version of the Preview API.

    Exposes the hosted-number-orders resource list, which is created
    lazily on first access and cached afterwards.
    """

    def __init__(self, domain):
        """
        Initialize the HostedNumbers version of Preview

        :returns: HostedNumbers version of Preview
        :rtype: twilio.rest.preview.hosted_numbers.HostedNumbers.HostedNumbers
        """
        super(HostedNumbers, self).__init__(domain)
        self.version = 'HostedNumbers'
        self._hosted_number_orders = None  # lazy cache for the resource list

    @property
    def hosted_number_orders(self):
        """
        :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderList
        """
        # Build the list on first access; reuse it on subsequent accesses.
        if self._hosted_number_orders is None:
            self._hosted_number_orders = HostedNumberOrderList(self)
        return self._hosted_number_orders

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.HostedNumbers>'
| [
"ameerbadri@gmail.com"
] | ameerbadri@gmail.com |
354649abe3c11f6cdb1d9b5428f93f1147c33e86 | f593d8878681406d5f8a9c76d124f7dc4e056342 | /ml/xcc.py | 8bbf0ef512822b005329f4d6c8754e70446755f9 | [
"Apache-2.0"
] | permissive | paxtonhare/MarkLogic-Sublime | 3a6adc692825b8325bcbaeb68034bb0a1c1c13c1 | 75a9c8e8d1e2f53f2939644b46dff682df0b2703 | refs/heads/master | 2021-08-07T21:56:55.784555 | 2021-01-26T17:20:34 | 2021-01-26T17:20:34 | 16,205,265 | 15 | 5 | null | 2016-05-22T21:05:24 | 2014-01-24T13:30:48 | Python | UTF-8 | Python | false | false | 6,381 | py | import sys
import re
if sys.version_info >= (3,):
import http.client
import urllib.parse
import urllib.request
from urllib.error import HTTPError
else:
import httplib
import urllib
import urllib2
from urllib2 import HTTPError
import socket
from .ml_utils import MlUtils
from .ml_settings import MlSettings
class Xcc():
    """Client for MarkLogic's XCC-over-HTTP eval/insert endpoints.

    Settings are seeded with defaults and overridden from the Sublime
    plugin preferences (MlSettings). All requests go through the
    digest/basic-auth helper in :meth:`http`.
    """

    def __init__(self):
        # Defaults; any key present in the user's xcc preferences wins.
        self.settings = {
            "ml_host": "localhost",
            "xcc_port": "8000",
            "content_database": "Documents",
            "modules_database": "Modules",
            "user": "admin",
            "password": "admin",
            "timeout": "1",
            "use_https": False
        }
        mlSettings = MlSettings()
        for setting in ["ml_host", "xcc_port", "use_https", "content_database", "modules_database", "user", "password", "timeout", "output_options"]:
            value = mlSettings.get_xcc_pref(setting)
            if value == None:
                continue
            self.settings[setting] = value
        if MlSettings.debug():
            for k in self.settings:
                MlUtils.log("%s => %s" % (k, self.settings[k]))
        # Base URL: http(s)://host:port/
        self.base_url = "http"
        if (self.settings["use_https"] == True):
            self.base_url = self.base_url + "s"
        self.base_url = self.base_url + "://" + self.settings["ml_host"] + ":" + self.settings["xcc_port"] + "/"
        MlUtils.log("base_url: " + self.base_url)

    def encode_params(self, params):
        """URL-encode a dict of query parameters (Python 2/3 compatible)."""
        if sys.version_info >= (3,):
            parse = urllib.parse
        else:
            parse = urllib
        return parse.urlencode(params)

    def http(self, url, user, password, params, verb, headers, realm="public"):
        """Issue an authenticated HTTP request and return the response.

        Installs basic+digest auth handlers for *user*/*password*, encodes
        string PUT bodies as UTF-8, and honors the configured timeout.
        Raises urllib's HTTPError on non-2xx responses.
        """
        # Configure the global socket timeout for this request.
        timeout = float(self.settings['timeout'])
        socket.setdefaulttimeout(timeout)
        if sys.version_info >= (3,):
            client = urllib.request
        else:
            client = urllib2
        passwdmngr = client.HTTPPasswordMgrWithDefaultRealm()
        passwdmngr.add_password(realm, url, user, password)
        digest_authhandler = client.HTTPDigestAuthHandler(passwdmngr)
        basic_authhandler = client.HTTPBasicAuthHandler(passwdmngr)
        opener = client.build_opener(basic_authhandler, digest_authhandler)
        client.install_opener(opener)
        if (verb == "PUT" and self.is_string(params)):
            params = params.encode('utf-8')
        if sys.version_info >= (3,):
            req = client.Request(url=url, headers=headers, method=verb, data=params)
        else:
            # Python 2's Request has no `method` argument; override get_method.
            req = client.Request(url=url, headers=headers, data=params)
            req.get_method = lambda: verb
        return client.urlopen(req)

    def is_string(self, input):
        """True if *input* is a (unicode) string on either Python version."""
        if sys.version_info >= (3,):
            return isinstance(input, str)
        else:
            return isinstance(input, basestring)

    def get_header(self, response, header):
        """Read a response header in a Python 2/3 compatible way."""
        if sys.version_info >= (3,):
            return response.getheader(header)
        else:
            return response.info().getheader(header)

    def fix_entity_refs(self, query):
        # XQuery string literals treat '&' as the start of an entity
        # reference, so bare ampersands must be escaped as '&amp;' before
        # the query text is embedded in the eval wrapper below.
        # Bug fix: this previously joined with a plain '&', which rebuilt
        # the input unchanged (an identity no-op).
        return '&amp;'.join(query.split('&'))

    def run_query(self, query, query_type="xquery", check=False, skip_dbs=False):
        """Evaluate *query* on the server via xdmp:eval and return the body.

        Args:
            query: XQuery or JavaScript source to evaluate.
            query_type: "xquery" (default) or "javascript".
            check: When True, run a static check only and filter out
                module-not-found errors.
            skip_dbs: When True, do not pin content/modules databases.

        Returns:
            The concatenated multipart response parts (or "" for an empty
            response). Raises Exception with the server's error body on
            HTTP failure.
        """
        if (skip_dbs == False and "content_database" in self.settings):
            content_db = self.settings["content_database"]
        else:
            content_db = None
        if (skip_dbs == False and "modules_database" in self.settings):
            modules_db = self.settings["modules_database"]
        else:
            modules_db = None
        # Escape for embedding inside an XQuery string literal:
        # entity-escape '&', then double embedded quotes.
        query = self.fix_entity_refs(query)
        query = query.replace('"', '""')
        eval_func = "xdmp:eval"
        if query_type == "javascript":
            eval_func = "xdmp:javascript-eval"
        # Wrap the user query in an eval call with per-request options.
        new_query = """
%s(
"%s",
(),
<options xmlns="xdmp:eval">
<isolation>different-transaction</isolation>
""" % (eval_func, query)
        if (content_db != None):
            new_query = new_query + '<database>{{xdmp:database("{0}")}}</database>'.format(content_db)
        if (modules_db != None):
            new_query = new_query + '<modules>{{xdmp:database("{0}")}}</modules>'.format(modules_db)
        if (check == True):
            new_query = new_query + '<static-check>true</static-check>'
        new_query = new_query + """
</options>)
"""
        if (check == True):
            # Surface all static errors except missing-module ones.
            new_query = "try {" + new_query + "} catch($ex) { $ex[error:code != ('XDMP-MODNOTFOUND')] }"
        # Prepend any user-configured xdmp:output option declarations.
        output_options = ""
        if "output_options" in self.settings:
            for option in self.settings["output_options"]:
                output_options = """%sdeclare option xdmp:output "%s";\n""" % (output_options, option)
        new_query = output_options + new_query
        p = {"xquery": new_query}
        params = self.encode_params(p)
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept-Encoding": "gzip,deflate,sdch",
            "Accept": "*/*"
        }
        url = self.base_url + "eval"
        MlUtils.log("url: " + url)
        try:
            response = self.http(url, self.settings["user"], self.settings["password"], str.encode(params), "POST", headers)
            MlUtils.log(response)
            content_length = self.get_header(response, "Content-Length")
            if content_length != "0":
                content_type = self.get_header(response, "Content-Type")
                if content_type:
                    # The eval endpoint answers with a MIME multipart body;
                    # pull the boundary out of the Content-Type header.
                    boundary = re.sub("^.*boundary=(.*)$", "\\1", content_type)
                    body = response.read()
                    if boundary:
                        # Strip the closing boundary marker...
                        content = re.sub(r"[\r\n]+--%s--[\r\n]+$" % boundary, "", body.decode())
                        # ...and the opening one.
                        content = re.compile(r"^[\r\n]+--%s.+?[\r\n]+" % boundary, re.M | re.DOTALL).sub("", content)
                        # Split the remaining text on interior boundaries and
                        # keep only each part's payload (text after its headers).
                        regex_str = r"[\r\n]+--%s.+?[\r\n]+" % boundary
                        prog = re.compile(regex_str, re.M | re.DOTALL)
                        parts = []
                        partSplitter = re.compile(r"[\r\n][\r\n]", re.M | re.DOTALL)
                        for part in prog.split(content):
                            splits = partSplitter.split(part)
                            parts.append(splits[len(splits) - 1])
                        result = "\n\n".join(parts)
                    else:
                        result = body.decode()
                    MlUtils.log(result)
                    return result
            else:
                return ""
        except HTTPError as e:
            raise Exception(e.read().decode("utf-8"))

    def insert_file(self, uri, file_contents):
        """PUT *file_contents* at *uri* into the configured modules database.

        Returns the server's error body on HTTP failure, None on success.
        """
        if ("modules_database" in self.settings):
            modules_db = self.settings["modules_database"]
        else:
            raise Exception('No modules database configured')
        params = {}
        params["uri"] = uri
        params["format"] = "text"
        params["dbname"] = modules_db
        headers = {
            'Content-Type': "text/xml",
            'Accept': "text/html, text/xml, image/gif, image/jpeg, application/vnd.marklogic.sequence, application/vnd.marklogic.document, */*"
        }
        url = self.base_url + "insert?" + self.encode_params(params)
        try:
            response = self.http(url, self.settings["user"], self.settings["password"], file_contents, "PUT", headers)
        except HTTPError as e:
            return e.read().decode("utf-8")
| [
"paxton@greenllama.com"
] | paxton@greenllama.com |
bd080db414250c7460293da72e2625c463127dcf | 55a4d7ed3ad3bdf89e995eef2705719ecd989f25 | /main/tensorflow_test/hmm_天气_活动理解.py | 1318a13a359255ef5e47ef393f656642d7456de5 | [] | no_license | ichoukou/Bigdata | 31c1169ca742de5ab8c5671d88198338b79ab901 | 537d90ad24eff4742689eeaeabe48c6ffd9fae16 | refs/heads/master | 2020-04-17T04:58:15.532811 | 2018-12-11T08:56:42 | 2018-12-11T08:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,842 | py | # coding:utf-8
# Hidden weather states and the observable activities of the HMM demo.
states = ('Rainy', 'Sunny')
observations = ('walk', 'shop', 'clean')
# P(initial hidden state)
start_probability = {'Rainy': 0.6, 'Sunny': 0.4}
# P(next hidden state | current hidden state)
transition_probability = {
    'Rainy': {'Rainy': 0.7, 'Sunny': 0.3},
    'Sunny': {'Rainy': 0.4, 'Sunny': 0.6},
}
# P(observed activity | hidden state)
emission_probability = {
    'Rainy': {'walk': 0.1, 'shop': 0.4, 'clean': 0.5},
    'Sunny': {'walk': 0.6, 'shop': 0.3, 'clean': 0.1},
}
# 打印路径概率表
# Print the Viterbi path-probability table (Python 2 print syntax:
# trailing commas suppress the newline so each row stays on one line).
def print_dptable(V):
    print " ",
    # Header row: one column per time step.
    for i in range(len(V)): print "%7d" % i,
    print
    # One row per hidden state, with the probability at each time step.
    for y in V[0].keys():
        print "%.10s: " % y,
        for t in range(len(V)):
            print "%.12s" % ("%f" % V[t][y]),
        print
def viterbi(obs, states, start_p, trans_p, emit_p):
    """
    :param obs: observation sequence
    :param states: hidden states
    :param trans_p: transition probabilities (between hidden states)
    :param start_p: initial probabilities (of the hidden states)
    :param emit_p: emission probabilities (probability that a hidden state
        produces each observation)
    :return: (best path probability, best hidden-state path)
    """
    # Path-probability table: V[time][hidden state] = probability
    V = [{}]
    # Tracks, for each hidden state, the best path ending in that state.
    path = {}
    # Initialize the t == 0 column from the start and emission probabilities.
    for y in states:
        V[0][y] = start_p[y] * emit_p[y][obs[0]]
        path[y] = [y]
    # Run Viterbi for t > 0.
    for t in range(1, len(obs)):
        V.append({})
        newpath = {}
        for y in states:
            # probability(state y at t) = P(best predecessor y0) *
            # P(y0 -> y transition) * P(y emits obs[t]); keep the best y0.
            # print [(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states]
            (prob, state) = max([(V[t - 1][y0] * trans_p[y0][y] * emit_p[y][obs[t]], y0) for y0 in states])
            # Record the maximum probability for state y at time t.
            V[t][y] = prob
            print V
            # Extend the best predecessor's path with the current state.
            newpath[y] = path[state] + [y]
            # print newpath
        # Old paths are no longer needed; keep only the best path per state.
        path = newpath
    # Print the table: each time step keeps exactly one best probability
    # per hidden state, which feeds the next step's computation.
    print_dptable(V)
    # Answer: the most probable final state and its full path.
    (prob, state) = max([(V[len(obs) - 1][y], y) for y in states])
    return (prob, path[state])
def example():
    """Run Viterbi on the demo weather/activity HMM defined above."""
    return viterbi(observations,
                   states,
                   start_probability,
                   transition_probability,
                   emission_probability)
#注意: max([(4,'hello'),(3,'hello'),(10,'hello')]) 比较的是[]中的()中第一个数值!!!!!!!
print example()
| [
"985819225@qq.com"
] | 985819225@qq.com |
fa96549438a9bdc21019bf9d66d468f439d20122 | 7517ac3ca1a77a23f75bb774a39cfe826c77f687 | /run_carver_on_2018_07_02_using_simple_for_loop.py | d0d2731f001743b2b78a15607075f801acc68db3 | [] | no_license | carshadi/carver | 7f24859a3becaeaf41046912490c54d31f805163 | 6f568e1254d024139769950142e139909c2c1c9d | refs/heads/master | 2022-08-03T06:10:43.880192 | 2019-11-15T22:46:53 | 2019-11-15T22:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | #! /usr/bin/env python
import navigator
navigator.main(['-i', '/nrs/mouselight/SAMPLES/2018-07-02',
'-s', '/groups/mousebrainmicro/mousebrainmicro/scripts/gt/2018-07-02/consensus-neurons-with-machine-centerpoints-labelled-as-swcs',
'-o', '/nrs/funke/mouselight-v2/2018-07-02',
'-f'])
| [
"taylora@janelia.hhmi.org"
] | taylora@janelia.hhmi.org |
a25a9a45abf6afeb485d96f23c00c3d70ff087dc | b8f9d2cafb8958cdb417f05156acb6aadf90f4dd | /MachineLearning/NetworkAnalysis/PageRank.py | 5d5647d240c30f7abe41a25e7aa9ec6bbe87407e | [] | no_license | Anova07/Data-Science | 8d14f78236de0053e2d31cc8cd85b9c70dfa2c8a | 86dd24fb04a199536ae8f3f5f843aae3fc69c086 | refs/heads/master | 2021-12-08T10:35:35.512188 | 2016-03-06T19:08:58 | 2016-03-06T19:08:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import math, random, re
from collections import defaultdict, Counter, deque
from LinearUtils.Vectors import dotProduct, magnitude, scalarMultiply, shape, distance
from LinearUtils.Matrices import getRow, getCol, generateMatrix
from functools import partial
# Code from Data Science from Scratch - github
# Demo social network: users, symmetric friendships, directed endorsements.
users = [
    { "id": 0, "name": "Hero" },
    { "id": 1, "name": "Dunn" },
    { "id": 2, "name": "Sue" },
    { "id": 3, "name": "Chi" },
    { "id": 4, "name": "Thor" },
    { "id": 5, "name": "Clive" },
    { "id": 6, "name": "Hicks" },
    { "id": 7, "name": "Devin" },
    { "id": 8, "name": "Kate" },
    { "id": 9, "name": "Klein" }
]

friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
               (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]

# give each user a friends list
for user in users:
    user["friends"] = []

# and fill it (friendship is symmetric, so add each user to the other's list)
for i, j in friendships:
    users[i]["friends"].append(users[j])  # add j as a friend of i
    users[j]["friends"].append(users[i])  # add i as a friend of j

# Directed endorsement pairs: (source endorses target).
endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3),
                (2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]
def PageRank(users, damping = 0.85, num_iters = 100):
    """Iteratively compute PageRank over the users' endorsement graph.

    The total rank in the network is 1.0, split evenly at the start. On
    every iteration each node keeps back (1 - damping) / N as a uniform
    "teleport" share and distributes damping * rank evenly across its
    outgoing endorsements.

    Args:
        users: list of dicts with "id" and an "endorses" list of user dicts.
        damping: fraction of rank passed along outgoing links.
        num_iters: number of power-iteration steps.

    Returns:
        dict mapping user id -> PageRank score.
    """
    n = len(users)
    ranks = {u["id"]: 1 / n for u in users}
    teleport_share = (1 - damping) / n  # uniform share every node receives

    for _ in range(num_iters):
        updated = {u["id"]: teleport_share for u in users}
        for u in users:
            outgoing = ranks[u["id"]] * damping
            for endorsee in u["endorses"]:
                updated[endorsee["id"]] += outgoing / len(u["endorses"])
        ranks = updated

    return ranks
if __name__ == "__main__":
    # Build the endorsement adjacency lists on each user dict.
    for user in users:
        user["endorses"] = []  # add one list to track outgoing endorsements
        user["endorsed_by"] = []  # and another to track endorsements
    for source_id, target_id in endorsements:
        users[source_id]["endorses"].append(users[target_id])
        users[target_id]["endorsed_by"].append(users[source_id])
    endorsements_by_id = [(user["id"], len(user["endorsed_by"]))
                          for user in users]
    # NOTE(review): sorted() returns a new list which is discarded here;
    # the result was probably meant to be assigned or printed.
    sorted(endorsements_by_id, key=lambda pair: pair[1], reverse=True)
    print("PageRank")
    for user_id, pr in PageRank(users).items():
        print(user_id, pr)
| [
"titu1994@gmail.com"
] | titu1994@gmail.com |
6a3f7a9e2c64be5b0255a25b57bce8436509b3bb | c89579b97327167bdf5322d9eb12e37ed1563189 | /api/__init__.py | 33c708794f8488e56c30b5caea6d30bf0b7164c4 | [] | no_license | Alweezy/movies-read | bff9f4c592f6187b175c64faed62b28980069027 | a5bde0df99ea648e9368ceba22b7ad0290bfa6f8 | refs/heads/master | 2020-05-21T21:50:01.604487 | 2019-05-12T11:58:07 | 2019-05-12T11:58:07 | 186,162,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | import os
from flask import Flask
app = Flask(__name__)
from . import movies | [
"alvin@Alvins-MacBook-Pro.local"
] | alvin@Alvins-MacBook-Pro.local |
c7ef812fb6b1c0a1bcbf2e8e463e19da84748944 | 6b265b404d74b09e1b1e3710e8ea872cd50f4263 | /Python/Exercises/TreeChecker/check_tree_2.0.py | 857bec02ba2b491a4a9f7d5ad9e1b2461082a30e | [
"CC-BY-4.0"
] | permissive | gjbex/training-material | cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae | e748466a2af9f3388a8b0ed091aa061dbfc752d6 | refs/heads/master | 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 | CC-BY-4.0 | 2023-08-03T07:07:25 | 2014-04-09T06:35:58 | Jupyter Notebook | UTF-8 | Python | false | false | 1,828 | py | #!/usr/bin/env python
import sys
class BaseError(Exception):
    """Root of the tree-checker exception hierarchy.

    Records the 1-based character position where the problem was found.
    Subclasses are expected to set ``self.message``.
    """

    def __init__(self, position):
        Exception.__init__(self)
        self._position = position

    @property
    def position(self):
        """1-based index of the offending character."""
        return self._position

    def __str__(self):
        return self.message
class MissingRBError(BaseError):
    """Raised when an opening bracket never gets a matching ')'."""

    def __init__(self, position):
        super().__init__(position)
        self.message = 'missing right bracket for bracket at {0}'.format(position)
class MissingLBError(BaseError):
    """Raised when a ')' appears without a preceding unmatched '('."""

    def __init__(self, position):
        super().__init__(position)
        self.message = 'missing left bracket for bracket at {0}'.format(position)
class TrailingCharsError(BaseError):
    """Raised when characters follow the tree's closing bracket."""

    def __init__(self, position):
        super().__init__(position)
        self.message = 'trailing characters at position {0}'.format(position)
def check_tree(tree):
    """Validate the bracket structure of a serialized tree string.

    Scans *tree* left to right tracking unmatched '(' positions
    (1-based). Returns None on success.

    Raises:
        MissingLBError: a ')' appears with no open '(' (position of ')').
        MissingRBError: a '(' is never closed (position of that '(').
        TrailingCharsError: characters follow the closing bracket
            (position of the first trailing character).
    """
    bracket_positions = []  # stack of positions of currently-open '('
    position = 1
    for character in tree:
        if character == '(':
            bracket_positions.append(position)
        elif character == ')':
            if bracket_positions:
                bracket_positions.pop()
            else:
                raise MissingLBError(position)
            if len(bracket_positions) == 0:
                # Outermost bracket just closed; stop scanning so any
                # remaining text is reported as trailing characters.
                break
        position += 1
    # Bug fix: the original condition was `position < len(tree) - 1`,
    # which silently accepted exactly one trailing character (e.g. "(a)b").
    if len(bracket_positions) == 0 and position < len(tree):
        raise TrailingCharsError(position + 1)
    elif len(bracket_positions) > 0:
        raise MissingRBError(bracket_positions.pop())
def main():
    """Read a tree from stdin, validate it, and return an exit status."""
    # Strip per-line whitespace so a tree may span multiple lines.
    tree = ''.join([line.strip() for line in sys.stdin.readlines()])
    try:
        check_tree(tree)
    except BaseError as error:
        # Report any structural problem on stderr and fail.
        sys.stderr.write('### error: {0}\n'.format(str(error)))
        return 1
    else:
        return 0


if __name__ == '__main__':
    status = main()
    sys.exit(status)
| [
"geertjan.bex@uhasselt.be"
] | geertjan.bex@uhasselt.be |
da51eb2e09b9061ead9e3fc0c508dcf164e120ad | 6fb3449a8c38e37a279e01268358dd062445f458 | /mysite/urls.py | 2d49f38d3201317df024e7ec1a900383551ca778 | [] | no_license | mikalail/SportsStats | b756563e6291ffc9d2d7104329dd0a8277048464 | 44c0a4e012273c27143683e4774cf76964145936 | refs/heads/master | 2020-12-24T10:57:25.636835 | 2016-11-08T23:28:53 | 2016-11-08T23:28:53 | 73,224,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'', include('sports.urls')),  # everything else -> sports app
]
| [
"mikalail@yahoo.com"
] | mikalail@yahoo.com |
fb88ca66b76486a545fb6f24294c2de0f3b48ebc | 8dde2278d17fa0aed506f99cc77abc0543bd6607 | /client/bouquets.py | ce5133bf8d96fb0f274c4aebbee9e7b19253fe0a | [
"MIT"
] | permissive | thred/openwebif-client | 0a672ca2894f16d4907671c0f905e591df9ecadc | 0eb7711b47be95935ae69ec4acbec95357592205 | refs/heads/master | 2020-04-03T07:40:38.293985 | 2018-10-30T20:30:48 | 2018-10-30T20:30:48 | 155,109,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import commands
import utils
def getBRef(name):
    """Return the bouquet reference matching *name*.

    Raises ValueError when no bouquet with that name exists.
    """
    data = utils.requestJson("bouquets")
    for entry in data["bouquets"]:
        if entry[1] == name:  # entry is [reference, name]
            return entry[0]
    raise ValueError("Bouquet not found: " + name)
def getDefaultBRef():
    """Return the reference of the first (default) bouquet."""
    data = utils.requestJson("bouquets")
    first_bouquet = data["bouquets"][0]
    return first_bouquet[0]
def consume():
    """Print the name of every known bouquet, one per line."""
    data = utils.requestJson("bouquets")
    for entry in data["bouquets"]:
        print(entry[1])  # entry is [reference, name]
def help():
    """Print usage information for the bouquets command."""
    usage = """\
Usage: owifc bouquets
Lists all known bouquets."""
    print(usage)
commands.register("bouquets", "Lists all known bouquets.",
lambda: consume(), lambda: help())
| [
"thred@users.noreply.github.com"
] | thred@users.noreply.github.com |
3a88ba0146dd6f13de5b231f22b57f1192ef083e | 85b218e0e4456404e2bc6be6b13e3fec11c5e86b | /appointment/urls.py | 69d25c665e5f1c461bbff5bcdf960904cafe5695 | [] | no_license | Leviona/barbershop-site | 6d6fddb0b0c4c7c011cf676b5ccdb6b5e713bbee | f932fa53a765da219bcff9fbac9904d6863ef1ce | refs/heads/master | 2020-03-17T04:56:53.382824 | 2018-06-30T03:51:02 | 2018-06-30T03:51:02 | 133,295,856 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.urls import path
from . import views
app_name = "appointment"
urlpatterns = [
path('', views.home_page, name="home_page"),
]
| [
"svzef@outlook.com"
] | svzef@outlook.com |
35d3072fb03715d46ecb63d4005ca431e4838776 | b42850bc3e36bbd1683070393582617f2b3cd8e6 | /Inheritance/players_and_monsters/muse_elf.py | a9582d2cc187778ca11c8be953479c42fb935ab3 | [] | no_license | marianidchenko/Python_OOP | aecca18be6df3850c0efbf2fa6d25bf3ff53ae96 | 547c12cbdad5b8c16fa55bba6c03b71db181ad2b | refs/heads/main | 2023-07-09T05:42:43.863681 | 2021-08-14T14:55:51 | 2021-08-14T14:55:51 | 381,572,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from Inheritance.players_and_monsters.elf import Elf
class MuseElf(Elf):
    """Subclass of Elf that adds no behavior of its own."""
    pass
| [
"marianidchenko@gmail.com"
] | marianidchenko@gmail.com |
6f4a8621cbc7c24e77d06254fbc9e3d8cf13db78 | 843ed91ada6131fceee495e3e398d426db182457 | /core/views.py | d42b2c5459c63c5fe8d3d8358de24af2bfad3970 | [] | no_license | meydson/Aula_Django_DIO | 67ae1d889e4927dcb44b25ddbb220b79c518b6a5 | 12d4a646e26fd0dd879255754bba76acc1e608fb | refs/heads/master | 2022-11-07T20:25:57.276238 | 2020-06-30T00:51:25 | 2020-06-30T00:51:25 | 275,959,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from django.shortcuts import render, HttpResponse
# Create your views here.
def hello(requests, nome):
    """Return a simple HTML greeting page for *nome*."""
    greeting = '<h1>Hello {}</h1>'.format(nome)
    return HttpResponse(greeting)
"meydsonbaracho@gmail.com"
] | meydsonbaracho@gmail.com |
e4b8698761a7e70c874c568c855a7858b383725f | 815575ebe1fae77118dae5d9eabb4e749f657e70 | /UserInteface/game_engine/RelativeSprite.py | 3c0f0580b6f64606b181e4c169c71958ac7397a5 | [] | no_license | BlackAndGoldAutonomousRacing/UserInterface | 66171a31e9a722a92fc1cdcd94155dadd40d20c7 | 38535b18b6cb42afe882cc30b4018040ed267768 | refs/heads/main | 2023-09-02T19:01:25.769073 | 2021-11-05T00:01:56 | 2021-11-05T00:01:56 | 416,441,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,659 | py |
'''
===============================================================================
ENGR 133 Fa 2020
Assignment Information
Assignment: Final Project
Author: Alec Pannunzio, afpannun@purdue.edu
Team ID: LC4-5
===============================================================================
'''
from game_engine.Object2D import Object2D
from game_engine.ObjectDraw import ObjectDraw
from game_engine.GameEngineToolbox import checkType
from game_engine.Sprite import Sprite
from math import atan2, sqrt, sin, cos, pi
# a more advanced sprite that can be painted in the perspective of another object. Basically a simple way of doing cameras
# NOTE: this object will act as a normal Sprite unless you set the camera with setCamera().
# NOTE: if it is a first person game it is customary to set all of the gameobjects' cameras to the RelativeSprite representing the character, INCLUDING setting the camera of the character to itself.
'''
class members:
hasCamera - bool - whether the RelativeSprite has a camera assigned to it. If this is false this object will be painted the same as a normal Sprite.
camera - Object2D - the object that we will paint relative to
zeroXPosition - float - offSets the sprites x position by this amount
zeroYPosition - float - offSets the sprites y position by this amount
zeroRotation - float - offsets the sprites rotation by this amount
displayXPosition - float - the x position that we will display (relative to the camera)
displayYPosition - float - the y position that we will display (relative to the camera)
displayRotation - float - the rotation that we will display (relative to the camera)
'''
class RelativeSprite(Sprite):
    """Sprite that can be painted from the perspective of a 'camera' object.

    Without a camera (setCamera never called) it behaves like a normal
    Sprite, apart from the zero offsets. With a camera set, the display
    position/rotation are recomputed each update relative to the camera's
    position and rotation; the zero offsets place the camera's origin on
    screen (screen center by default).
    """

    def __init__(self, name, xPosition, yPosition, scaling, imgSource, objectDraw):
        super(RelativeSprite, self).__init__(name, xPosition, yPosition, scaling, imgSource)
        checkType(objectDraw, ObjectDraw, "objectDraw must be an ObjectDraw")
        self.hasCamera = False
        self.objectDraw = objectDraw
        self.camera = None
        # Zero offsets: by default the camera's origin maps to screen center.
        self.zeroXPosition = self.objectDraw.screenSizeX / 2
        self.zeroYPosition = self.objectDraw.screenSizeY / 2
        self.zeroRotation = 0
        # Display values: the camera-relative coordinates actually drawn.
        self.displayXPosition = xPosition
        self.displayYPosition = yPosition
        self.displayRotation = 0

    # sets the camera of this object to the passed Object2D
    def setCamera(self, camera):
        """Assign the Object2D this sprite will be painted relative to."""
        assert issubclass(camera.__class__, Object2D)  # must be an Object2D
        self.camera = camera
        self.hasCamera = True

    # removes the camera from the object so it paints like a normal Sprite
    def removeCamera(self):
        self.camera = None
        self.hasCamera = False

    def update(self):
        """Update state and recompute the display image for this frame."""
        if not self.hasCamera:
            super(RelativeSprite, self).update()  # update like a normal Sprite
            if (self.zeroRotation != 0):
                # Temporarily apply the zero rotation while re-rendering,
                # then restore the true rotation.
                self.rotation += self.zeroRotation
                super(RelativeSprite, self).updateDisplayImage()
                self.rotation -= self.zeroRotation
        else:
            super(Sprite, self).update()  # call Object2D.update, NOT Sprite's
            # Save the true rotation so it can be restored afterwards.
            prevRotation = self.rotation
            self.displayXPosition = self.xPosition
            self.displayYPosition = self.yPosition
            cameraPosition = self.camera.getPosition()
            # Translate into the camera's frame of reference.
            self.displayXPosition -= cameraPosition[0]
            self.displayYPosition -= cameraPosition[1]
            # Rotate around the camera: convert to polar coordinates...
            angle = atan2(self.displayYPosition, self.displayXPosition)
            radius = sqrt(self.displayXPosition ** 2 + self.displayYPosition ** 2)
            # ...rotate by the camera's rotation (degrees -> radians)...
            angle -= pi * self.camera.getRotation() / 180
            # ...and convert back to rectangular coordinates.
            self.displayXPosition = radius * cos(angle)
            self.displayYPosition = radius * sin(angle)
            # Counter-rotate the sprite itself to match the camera.
            self.rotation -= self.camera.getRotation()
            # Apply the zero offsets.
            self.rotation += self.zeroRotation
            self.displayXPosition += self.zeroXPosition
            self.displayYPosition += self.zeroYPosition
            self.displayRotation = self.rotation
            # Re-render the display image using the relative values.
            super(RelativeSprite, self).updateDisplayImage()
            # Restore the sprite's true rotation (position was not mutated).
            self.rotation = prevRotation

    # paint relative to the camera
    def paint(self, screen):
        if self.hasCamera:
            # With a camera, blit at the camera-relative display position.
            screen.blit(self.displayImg, [self.displayXPosition - self.showSizeX / 2,
                                          self.displayYPosition - self.showSizeY / 2])
        else:
            # Otherwise just use Sprite's paint method.
            super(RelativeSprite, self).paint(screen)

    # set the zero position of the relativeSprite
    def setZeroPosition(self, zeroX, zeroY):
        checkType(zeroX, (int, float), "zero position must be a number")
        checkType(zeroY, (int, float), "zero position must be a number")
        self.zeroXPosition = zeroX
        # Bug fix: this previously assigned to a misspelled attribute
        # ('zeroYPosiiton'), so the Y offset set here was silently ignored
        # by update(), which reads self.zeroYPosition.
        self.zeroYPosition = zeroY

    # set the zero rotation of the relativeSprite
    def setZeroRotation(self, zeroRot):
        checkType(zeroRot, (int, float), "zeroRotation must be a number")
        self.zeroRotation = zeroRot
'''
===============================================================================
ACADEMIC INTEGRITY STATEMENT
I have not used source code obtained from any other unauthorized
source, either modified or unmodified. Neither have I provided
access to my code to another. The project I am submitting
is my own original work.
===============================================================================
'''
| [
"35432488+samiam567@users.noreply.github.com"
] | 35432488+samiam567@users.noreply.github.com |
658b34c8593e518f6e856b6afb5c1d107b89f6bc | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | /examples/pytorch/stgcn_wave/model.py | 2463721f1b38ea34e09db1c8e3b064a7db69e439 | [
"Apache-2.0"
] | permissive | dmlc/dgl | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | refs/heads/master | 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 | Apache-2.0 | 2023-09-14T15:48:24 | 2018-04-20T14:49:09 | Python | UTF-8 | Python | false | false | 3,480 | py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from dgl.nn.pytorch import GraphConv
from dgl.nn.pytorch.conv import ChebConv
class TemporalConvLayer(nn.Module):
    """Temporal convolution layer.

    Applies a (2, 1) convolution along the time axis with the given
    dilation, followed by ReLU. With no padding, each application shrinks
    the temporal dimension by the dilation size.

    Args:
        c_in: number of input channels (features).
        c_out: number of output channels (features).
        dia: dilation size along the time axis.
    """

    def __init__(self, c_in, c_out, dia=1):
        super(TemporalConvLayer, self).__init__()
        self.c_in = c_in
        self.c_out = c_out
        self.conv = nn.Conv2d(c_in, c_out, kernel_size=(2, 1), stride=1,
                              dilation=dia, padding=(0, 0))

    def forward(self, x):
        convolved = self.conv(x)
        return torch.relu(convolved)
class SpatioConvLayer(nn.Module):
    """Spatial (graph) convolution over the sensor graph.

    Wraps a single DGL GraphConv with ReLU activation; input and output
    keep the same hidden width ``c``.
    """

    def __init__(self, c, Lk):  # c : hidden dimension Lk: graph matrix
        super(SpatioConvLayer, self).__init__()
        self.g = Lk  # DGL graph the convolution runs on
        self.gc = GraphConv(c, c, activation=F.relu)
        # self.gc = ChebConv(c, c, 3)

    def init(self):
        # NOTE(review): self.W is never assigned anywhere in this class, so
        # calling init() would raise AttributeError. This looks like leftover
        # code from a manual-weight implementation — confirm before using.
        stdv = 1.0 / math.sqrt(self.W.weight.size(1))
        self.W.weight.data.uniform_(-stdv, stdv)

    def forward(self, x):
        # The two transposes move the feature axis into the position
        # GraphConv expects, then restore the original layout afterwards.
        # (Exact input layout is not established here — see STGCN_WAVE.)
        x = x.transpose(0, 3)
        x = x.transpose(1, 3)
        output = self.gc(self.g, x)
        output = output.transpose(1, 3)
        output = output.transpose(0, 3)
        return torch.relu(output)
class FullyConvLayer(nn.Module):
    """1x1 convolution collapsing c channels to a single output channel."""

    def __init__(self, c):
        super(FullyConvLayer, self).__init__()
        self.conv = nn.Conv2d(in_channels=c, out_channels=1, kernel_size=1)

    def forward(self, x):
        """Project the channel dimension down to 1."""
        projected = self.conv(x)
        return projected
class OutputLayer(nn.Module):
    """Prediction head.

    A temporal conv with kernel (T, 1) collapses the remaining T steps to
    one, a LayerNorm over the trailing [n, c] dims re-centers features, a
    (1, 1) conv mixes channels, and FullyConvLayer maps down to a single
    output channel.
    """

    def __init__(self, c, T, n):
        super(OutputLayer, self).__init__()
        self.tconv1 = nn.Conv2d(c, c, (T, 1), 1, dilation=1, padding=(0, 0))
        self.ln = nn.LayerNorm([n, c])
        self.tconv2 = nn.Conv2d(c, c, (1, 1), 1, dilation=1, padding=(0, 0))
        self.fc = FullyConvLayer(c)

    def forward(self, x):
        feat = self.tconv1(x)
        # LayerNorm([n, c]) normalizes the last two dims, so move channels
        # last, normalize, then move them back.
        feat = self.ln(feat.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        feat = self.tconv2(feat)
        return self.fc(feat)
class STGCN_WAVE(nn.Module):
    """Spatio-temporal GCN assembled from a layer-type control string.

    Each character of control_str appends one layer:
        'T' -> TemporalConvLayer with dilation 2**k (k increments per 'T');
               also advances one step through the channel list c.
        'S' -> SpatioConvLayer over the graph Lk.
        'N' -> LayerNorm over the trailing [n, c] dims.
    c is the per-stage channel list, T the input sequence length and n the
    number of graph nodes.

    NOTE(review): the `p` and `num_layers` arguments are accepted but never
    used — the layer count is taken from len(control_str). Confirm before
    relying on them.
    """
    def __init__(
        self, c, T, n, Lk, p, num_layers, device, control_str="TNTSTNTST"
    ):
        super(STGCN_WAVE, self).__init__()
        self.control_str = control_str  # model structure controller
        self.num_layers = len(control_str)
        self.layers = nn.ModuleList([])
        cnt = 0  # index into the channel list c
        diapower = 0  # exponent of the temporal dilation (2**diapower)
        for i in range(self.num_layers):
            i_layer = control_str[i]
            if i_layer == "T":  # Temporal Layer
                self.layers.append(
                    TemporalConvLayer(c[cnt], c[cnt + 1], dia=2**diapower)
                )
                diapower += 1
                cnt += 1
            if i_layer == "S":  # Spatio Layer
                self.layers.append(SpatioConvLayer(c[cnt], Lk))
            if i_layer == "N":  # Norm Layer
                self.layers.append(nn.LayerNorm([n, c[cnt]]))
        # Each dilated (2,1) temporal conv shortens the sequence by its
        # dilation; the dilations sum to 2**diapower - 1, leaving
        # T + 1 - 2**diapower steps for the output head.
        self.output = OutputLayer(c[cnt], T + 1 - 2 ** (diapower), n)
        for layer in self.layers:
            # nn.Module.to() moves in place, so the rebinding of `layer`
            # is redundant but harmless.
            layer = layer.to(device)
    def forward(self, x):
        for i in range(self.num_layers):
            i_layer = self.control_str[i]
            if i_layer == "N":
                # LayerNorm expects (..., n, c): channels last, then back.
                x = self.layers[i](x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
            else:
                x = self.layers[i](x)
        return self.output(x)
| [
"noreply@github.com"
] | dmlc.noreply@github.com |
4ad2bde6bdca921a681ab2a19739f7ec109f0855 | 72f55f2b9899af5cf60789b935a69f2e5ddfc814 | /src/analytics/migrations/0007_auto_20161212_0735.py | 030a645dc7873b36a13eae12fd481f3aeaae826b | [] | no_license | apapatp/svrup-learning-no-rest | cf4ab52de0a060207844a8fb3c58455b7142ff35 | 554d26245a9db2e22f4ed22928f4815b186e486b | refs/heads/master | 2021-01-12T09:32:15.866093 | 2016-12-30T09:47:43 | 2016-12-30T09:47:43 | 76,186,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: alter the default of pageview.timestamp.

    NOTE(review): the default is the literal datetime captured when
    makemigrations ran (2016-12-12 07:35 UTC), not a callable — typical
    of autogenerated interim migrations; it will not track "now" at
    runtime.
    """
    dependencies = [
        ('analytics', '0006_auto_20161212_0624'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pageview',
            name='timestamp',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 12, 7, 35, 14, 118666, tzinfo=utc)),
            preserve_default=True,
        ),
    ]
| [
"tolu@Tolus-MacBook-Pro.local"
] | tolu@Tolus-MacBook-Pro.local |
65199f5c83020b074cf08c024357136753dc811f | cbde70bf9eb6ee3d8b26b23a509298f3f199b29b | /tensorflow_binding/transducer_tensorflow/__init__.py | 8b21e359f752eb88a4464c39fd2fdb07a2183fcb | [
"MIT"
] | permissive | pkuVanilla1207/warp-rnnt | 64aeab173fa2dfec13355b564aba6e792df62e80 | 6de0527c5e23021a6a914c0826dff5c5ec8fe3d5 | refs/heads/master | 2023-03-18T07:10:41.867541 | 2021-03-09T20:06:57 | 2021-03-09T20:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | import imp
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops.nn_grad import _BroadcastMul
from typing import Optional, AnyStr
lib_file = imp.find_module('kernels', __path__)[1]
_warp_transducer = tf.load_op_library(lib_file)
def transducer_loss(
        log_probs, labels, frames_lengths, labels_lengths,
        average_frames: bool = False,
        reduction: Optional[AnyStr] = None,
        blank: int = 0):
    """Compute the CUDA-Warp RNN-Transducer loss.

    Args:
        log_probs: float tensor [N, T, U, V] of log-probabilities, where
            N is the batch size, T the maximum number of input frames,
            U the maximum number of output labels and V the vocabulary
            size (blank included).
        labels: int tensor [N, U-1] of reference label sequences.
        labels_lengths: int tensor [N] of transcription lengths.
        frames_lengths: int tensor [N] of valid frame counts.
        average_frames: divide each sample's loss by its frame count.
        reduction: None/"none" for per-sample costs, or "mean"/"sum" to
            reduce over the batch.
        blank: label index representing the blank symbol.
    """
    assert average_frames is None or isinstance(average_frames, bool)
    assert reduction is None or reduction in ("none", "mean", "sum")
    assert isinstance(blank, int)

    # The custom op returns (costs, grads); the gradient tensor is consumed
    # by the registered gradient function, so only costs are used here.
    costs, _ = _warp_transducer.transducer_loss(
        log_probs, labels, frames_lengths, labels_lengths, blank)

    if average_frames:
        costs = costs / frames_lengths  # (N,)

    if reduction == "sum":
        return tf.reduce_sum(costs)
    if reduction == "mean":
        return tf.reduce_mean(costs)
    return costs
@ops.RegisterGradient("TransducerLoss")
def _TransducerLossGrad(op, grad_loss, _):
    """Gradient registration for the custom TransducerLoss op.
    Args:
        op: the TransducerLoss op.
        grad_loss: The backprop for cost.
    Returns:
        The Transducer Loss gradient.
    """
    # The op's second output already holds d(loss)/d(log_probs).
    grad = op.outputs[1]
    # NOTE since here we are batch first, cannot use _BroadcastMul
    # Reshape the per-sample loss gradient so it broadcasts over (T, U, V).
    grad_loss = tf.reshape(grad_loss, (-1, 1, 1, 1))
    # Only the first input (log_probs) is differentiable; the label and
    # length inputs get no gradient.
    return [grad_loss * grad, None, None, None]
@ops.RegisterShape("TransducerLoss")
def _TransducerLossShape(op):
    """Shape function: costs are [batch]; the gradient output matches the
    rank-4 [N, T, U, V] input.

    NOTE(review): ops.RegisterShape is a legacy TF shape-inference hook —
    confirm it still exists in the TensorFlow version being targeted.
    """
    inputs_shape = op.inputs[0].get_shape().with_rank(4)
    batch_size = inputs_shape[0]
    return [batch_size, inputs_shape]
| [
"lekai.huang@gmail.com"
] | lekai.huang@gmail.com |
de171643d720ac13e8a745fb6fe61a49ef535492 | 431f9d1f7a84ee40520fd88fa6aa4e7b0d235047 | /geometric_controller/src/ss/trajectory_simulation.py | 1b5ab398a461b4320bbd39442869d76e517a8ecc | [] | no_license | indsy123/Quadrotor-Navigation-using-Receding-Horizon-planning | aa0571457292ea2b1eefcd2119332430083c7c42 | 255e573a42660420fa0d3ce6dac252df8d737c8c | refs/heads/master | 2020-07-26T13:14:34.814707 | 2020-03-24T21:38:16 | 2020-03-24T21:38:16 | 208,655,725 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 17:28:19 2017
This initial script generates a dummy trajectory. This should be replaced
eventually with what you want the trajectory to be or your own method to
generate the trajectory.
Basically I made my own message file called "Desired_trajectory"
that is a message type have position, velocity, acceleration and direction.
Velocity and acceleration need not be here as Lee's paper says the
trajectory is an (x,y,z) position of CoG and a direction.
The current trajectory is the one used in example (1) in the paper
"Geomentric tracking control of a quadrotor in SE(3)" by T Lee.
You can change it the way you want. just get a curve [x(t),y(t),z(t)] and a
direction [cos(pi*t), sin(pi*t),0] or as you fancy. Differentiate the x, y and
z to get velocities and accelerations.
While it is possible to get rid of velocities and accelerations here and
calculate them in the controller script,I found it was not resulting in much
saving in terms of time.
It will also be needed to change queue_size and publishing frequency in
"r = rospy.Rate(n). With this function my laptop can generate at the most
155 hz.
"""
__author__ = 'Indrajeet yadav'
__version__ = '0.1'
__license__ = 'Nil'
import numpy as np
import rospy
from isy_geometric_controller.msg import Desired_Trajectory
from isy_geometric_controller.msg import modifiedodometry
from nav_msgs.msg import Odometry
import time
import scipy
from scipy import special
class trajectory(object):
    "calculates desired position, linear velocity, linear acceleration and direction"
    def __init__(self, name_of_uav, time_instance):
        # NOTE(review): time_instance is accepted but ignored; the start
        # time is re-read from the wall clock here — confirm intended.
        self.time = time.time()
        self.counter = 0
        self.uav = name_of_uav
        self.pub = rospy.Publisher('/desired_trajectory', Desired_Trajectory, queue_size = 10, tcp_nodelay = True)
        self.T = 12  # period (s) of the periodic trajectory terms
        self.w = 2*np.pi/self.T  # corresponding angular frequency
        try:
            #rospy.Subscriber('/'+self.uav+'/odom', Odometry, self.callback, queue_size = 10, tcp_nodelay = True)
            rospy.Subscriber('/'+self.uav+'/odometry_sensor1/odometry', Odometry, self.callback, queue_size = 100, tcp_nodelay = True)
            #rospy.Subscriber('/'+self.uav+'/odom', Odometry, self.callback, queue_size = 100, tcp_nodelay = True)
        except:
            # NOTE(review): bare except hides unrelated errors — consider
            # narrowing to the specific exception expected here.
            print('problem subscribing to odometry topic')
    def callback(self, data):
        # Publish one desired-trajectory message per incoming odometry
        # sample, stamped with the odometry's timestamp.
        #print self.time
        msg = Desired_Trajectory()
        msg.header.stamp = data.header.stamp
        #msg.header.stamp = rospy.Time.now()
        t = time.time()
        tt = t-self.time  # seconds since this object was created
        if tt<=3:
            # First 3 s: climb at 0.5 m/s. The commented expressions are an
            # alternative periodic (figure-eight-style) trajectory.
            #msg.desired_position.x = 1.0 * np.cos(self.w*tt)
            #msg.desired_position.y = 1.0 * 0.5* np.sin(2*self.w*tt)
            #msg.desired_position.z = 0.75 + 0.25*np.sin(self.w*tt)
            msg.desired_velocity.x = 0#-1.0 * (self.w) * np.sin(self.w*tt)
            msg.desired_velocity.y = 0#1.0*0.5 * (2*self.w) * np.cos(2*self.w*tt)
            msg.desired_velocity.z = 0.5#0.25*self.w*np.cos(self.w*tt)
            msg.desired_acceleration.x = 0#-1.0 * (self.w)**2 * np.cos(self.w*tt)
            msg.desired_acceleration.y = 0#-1.0*0.5 * (2*self.w)**2 * np.sin(2*self.w*tt)
            msg.desired_acceleration.z = 0#-0.25*self.w**2*np.sin(self.w*tt)
            msg.desired_direction.x = 1#np.cos(2*self.w*tt)
            msg.desired_direction.y = 0#np.sin(2*self.w*tt)
            msg.desired_direction.z = 0
        else:
            # After 3 s: hold (zero velocity/acceleration, fixed heading).
            #msg.desired_position.x = 1.0
            #msg.desired_position.y = 0.0
            #msg.desired_position.z = 0.15
            msg.desired_velocity.x = 0.0
            msg.desired_velocity.y = 0.0
            msg.desired_velocity.z = 0
            msg.desired_acceleration.x = 0.0
            msg.desired_acceleration.y = 0.0
            msg.desired_acceleration.z = 0
            msg.desired_direction.x = 1
            msg.desired_direction.y = 0
            msg.desired_direction.z = 0
        msg.controller = 1 # position controller
        self.pub.publish(msg)
# may get rid of the code below evntually when the trajectory topic will be
# subscribed in the main controller script. Remember to initilize the
# "Trajectory" node in controller script eventually.
# Entry point: start the Trajectory node and publish until shutdown.
if __name__ == '__main__':
    name = 'firefly'
    #name = rospy.get_param('~vehicle_name')
    rospy.init_node('Trajectory', anonymous=False, log_level=rospy.DEBUG)
    r = rospy.Rate(200)
    start_time = time.time()
    try:
        while not rospy.is_shutdown():
            current_time = time.time()
            t = current_time-start_time
            #print t
            traj = trajectory(name, current_time)
            # NOTE(review): rospy.spin() blocks until shutdown, so the
            # statements after it effectively run at most once — confirm
            # whether the Rate/sleep loop is still intended.
            rospy.spin()
            #print 'a' , time.time()-a
            r.sleep()
    # Fix: the except clause must name the exception class. The original
    # `except rospy.ROSInterruptException():` instantiated the exception,
    # which cannot correctly match a raised ROSInterruptException.
    except rospy.ROSInterruptException:
        pass
| [
"indragt@udel.edu"
] | indragt@udel.edu |
290b82503d5a09f87feff4d7c52eaa5bb272622d | 848fad01ed3f55e4c9a47d227f7cbabfe4f4df73 | /utils/oss.py | 86ba32fb2bafa32bb59bdb18e0650710b4b7c800 | [] | no_license | cx2c/ali_sdk | 51934a7b941fdb710b849ee6d61ce0ea9a88474f | 9edb3ff58a71feb5917982d68a57854c10c26e15 | refs/heads/master | 2020-03-21T06:49:26.720386 | 2018-06-22T02:34:32 | 2018-06-22T02:34:32 | 138,244,060 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "w.z"
# Date: 2018/3/21
class Oss(object):
    """Empty stub — presumably a wrapper for Aliyun OSS (object storage),
    judging by the module name; no behavior implemented yet. Confirm the
    intended API before extending.
    """
    pass
"w.z@zhangweideMBPX.lan"
] | w.z@zhangweideMBPX.lan |
f661b97983d5da36c5d8f23356b77bb41fdbff71 | dd05972a3bf9d15f332fbff420f10afe1977c0d8 | /competition/base_example/aliceTest.py | 76fec14b823615e7488647e1a92bf8e51c2b7006 | [
"BSD-2-Clause"
] | permissive | StephanieWehner/QI-Competition2018 | b70df8c5bb343c534c2c0bd8fc0e7d6bb6183f25 | cc1139c81e39f66b77c046414bcac8de45807557 | refs/heads/master | 2020-03-23T05:45:09.885955 | 2018-08-08T20:03:29 | 2018-08-08T20:03:29 | 141,164,280 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | #
# Copyright (c) 2017, Stephanie Wehner and Axel Dahlberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Stephanie Wehner, QuTech.
# 4. Neither the name of the QuTech organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SimulaQron.general.hostConfig import *
from SimulaQron.cqc.backend.cqcHeader import *
from SimulaQron.cqc.pythonLib.cqc import *
from SimulaQron.toolbox.measurements import parity_meas
import random
#####################################################################################################
#
# main
#
def main():
    """Alice's side of the test: create two EPR pairs with Bob, order them
    consistently via entanglement IDs, measure three two-qubit parity
    observables for (hard-coded) row 0, print the row, then clean up.
    """
    # Initialize the connection
    Alice = CQCConnection("Alice")
    # Create EPR pairs
    q1 = Alice.createEPR("Bob")
    q2 = Alice.createEPR("Bob")
    # Make sure we order the qubits consistently with Bob
    # Get entanglement IDs
    q1_ID = q1.get_entInfo().id_AB
    q2_ID = q2.get_entInfo().id_AB
    if q1_ID < q2_ID:
        qa = q1
        qc = q2
    else:
        qa = q2
        qc = q1
    # Get row
    # NOTE(review): the row is hard-coded to 0, so only the first branch
    # below ever measures; other rows print zeros.
    row = 0
    # Perform the three measurements
    if row == 0:
        m0 = parity_meas([qa, qc], "XI", Alice)
        m1 = parity_meas([qa, qc], "XX", Alice)
        m2 = parity_meas([qa, qc], "IX", Alice)
    else:
        m0 = 0
        m1 = 0
        m2 = 0
    # Print the measured row, with placeholder rows above/below it.
    print("\n")
    print("==========================")
    print("App {}: row is:".format(Alice.name))
    for _ in range(row):
        print("(___)")
    print("({}{}{})".format(m0, m1, m2))
    for _ in range(2-row):
        print("(___)")
    print("==========================")
    print("\n")
    # Clear qubits
    qa.measure()
    qc.measure()
    # Stop the connections
    Alice.close()
##################################################################################################
main()
| [
"axel.dahlberg12@gmail.com"
] | axel.dahlberg12@gmail.com |
986bf659063dbb4023eaaf094cd1d3cccd06ebdb | 44dbb043e52f00c9a797b1bea8f1df50dd621842 | /os-example-4.py | 69064074cfa33ba2ae8384a237bc9351ebad664a | [] | no_license | peterdocter/standardmodels | 140c238d3bef31db59641087e3f3d5413d4baba1 | 7addc313c16b416d0970461998885833614570ad | refs/heads/master | 2020-12-30T16:59:30.489486 | 2016-12-13T06:32:03 | 2016-12-13T06:32:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import os
# NOTE: Python 2 print-statement syntax. Demonstrates navigating the
# process working directory with os.getcwd()/os.chdir().
# where are we?
cwd = os.getcwd()
print "1", cwd
# go down
os.chdir("samples")
print "2", os.getcwd()
# go back up (os.pardir is the parent-directory string, "..")
os.chdir(os.pardir)
print "3", os.getcwd()
"415074476@qq.com"
] | 415074476@qq.com |
7352b0e05bca2fbe6125d96a47f9b75c32c44715 | 542b256178e8f0d9a30423fc6eed23b021cf4a64 | /Mask_RCNN-master/model.py | 8dc408116a11f74ca04d412646ebcdb46547ce55 | [
"MIT"
] | permissive | gtagency/Project_Nucleus | caed1b9cec3e49a93f43b501e4e6de7e3cbe3ad5 | a14632a682915f3f389af53817f692cf6e57357d | refs/heads/master | 2021-04-28T01:11:22.146707 | 2018-05-11T01:00:47 | 2018-05-11T01:00:47 | 122,269,451 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 111,323 | py | """
Mask R-CNN
The main Mask R-CNN model implemenetation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Print a text message, optionally followed by a Numpy array summary.

    text: message to print.
    array: optional np.ndarray. When given, its shape, min and max are
        appended. For an empty (size-0) array the min/max fields are left
        blank — the previous code passed "" to a {:10.5f} float spec,
        which raised ValueError for empty arrays.
    """
    if array is not None:
        text = text.ljust(25)
        if array.size:
            text += ("shape: {:20}  min: {:10.5f}  max: {:10.5f}".format(
                str(array.shape), array.min(), array.max()))
        else:
            # Blank stats for empty arrays; keep the column widths so the
            # output stays aligned with non-empty rows.
            text += ("shape: {:20}  min: {:10}  max: {:10}".format(
                str(array.shape), "", ""))
    print(text)
class BatchNorm(KL.BatchNormalization):
    """Batch Normalization class. Subclasses the Keras BN class and
    hardcodes training=False so the BN layer doesn't update
    during training.

    Batch normalization has a negative effect on training if batches are
    small so we disable it here.

    Fix: use an explicit class in super() — `super(self.__class__, self)`
    resolves to the *runtime* class, so any subclass of BatchNorm would
    recurse infinitely through this call().
    """
    def call(self, inputs, training=None):
        # training=False keeps the layer in inference mode (frozen moving
        # statistics) regardless of the training flag passed in.
        return super(BatchNorm, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True):
    """ResNet identity block: a 3-conv main path plus a passthrough shortcut.

    input_tensor: input feature map tensor
    kernel_size: kernel size of the middle conv layer
    filters: (f1, f2, f3) channel counts of the three convs
    stage, block: labels used to build unique layer names
    use_bias: whether the conv layers carry a bias term
    """
    f1, f2, f3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # 1x1 reduce
    y = KL.Conv2D(f1, (1, 1), name=conv_prefix + '2a',
                  use_bias=use_bias)(input_tensor)
    y = BatchNorm(axis=3, name=bn_prefix + '2a')(y)
    y = KL.Activation('relu')(y)

    # kxk conv, 'same' padding so spatial size is preserved
    y = KL.Conv2D(f2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(y)
    y = BatchNorm(axis=3, name=bn_prefix + '2b')(y)
    y = KL.Activation('relu')(y)

    # 1x1 expand
    y = KL.Conv2D(f3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(y)
    y = BatchNorm(axis=3, name=bn_prefix + '2c')(y)

    # Shortcut is the unmodified input — hence "identity" block.
    y = KL.Add()([y, input_tensor])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True):
    """ResNet conv block: a 3-conv main path plus a projection shortcut.

    Unlike identity_block, the shortcut goes through a strided 1x1 conv,
    so this block can change both spatial size and channel count. From
    stage 3 onward callers use the default stride (2, 2) on both paths.

    input_tensor: input feature map tensor
    kernel_size: kernel size of the middle conv layer
    filters: (f1, f2, f3) channel counts of the three convs
    stage, block: labels used to build unique layer names
    strides: stride of the first conv and of the shortcut conv
    use_bias: whether the conv layers carry a bias term
    """
    f1, f2, f3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # Main path: strided 1x1 -> kxk -> 1x1.
    y = KL.Conv2D(f1, (1, 1), strides=strides,
                  name=conv_prefix + '2a', use_bias=use_bias)(input_tensor)
    y = BatchNorm(axis=3, name=bn_prefix + '2a')(y)
    y = KL.Activation('relu')(y)

    y = KL.Conv2D(f2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(y)
    y = BatchNorm(axis=3, name=bn_prefix + '2b')(y)
    y = KL.Activation('relu')(y)

    y = KL.Conv2D(f3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(y)
    y = BatchNorm(axis=3, name=bn_prefix + '2c')(y)

    # Projection shortcut so shapes match the main path before the add.
    shortcut = KL.Conv2D(f3, (1, 1), strides=strides,
                         name=conv_prefix + '1',
                         use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(axis=3, name=bn_prefix + '1')(shortcut)

    y = KL.Add()([y, shortcut])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def resnet_graph(input_image, architecture, stage5=False):
    """Build the ResNet backbone graph.

    input_image: input image tensor.
    architecture: "resnet50" or "resnet101" — they differ only in the
        number of stage-4 identity blocks (5 vs 22).
    stage5: when False, stage 5 is skipped and C5 is returned as None.

    Returns [C1, C2, C3, C4, C5]: the output tensor of each stage
    (used later as feature pyramid inputs).
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2 (stride (1, 1): no extra downsampling after the max-pool)
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # Blocks are named 'b', 'c', ... via chr(98 + i).
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Apply refinement deltas to boxes (graph-mode TF).

    boxes: [N, (y1, x1, y2, x2)]
    deltas: [N, (dy, dx, log(dh), log(dw))]
    Returns refined boxes [N, (y1, x1, y2, x2)].
    """
    # Switch to center/size parameterization.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center (relative to size) and scale the size.
    cy = cy + deltas[:, 0] * h
    cx = cx + deltas[:, 1] * w
    h = h * tf.exp(deltas[:, 2])
    w = w * tf.exp(deltas[:, 3])
    # Back to corner parameterization.
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    y2 = y1 + h
    x2 = x1 + w
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes so they lie inside the given window.

    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form (y1, x1, y2, x2)
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)

    def _clamp(v, lo, hi):
        # Clamp v into [lo, hi].
        return tf.maximum(tf.minimum(v, hi), lo)

    y1 = _clamp(y1, wy1, wy2)
    x1 = _clamp(x1, wx1, wx2)
    y2 = _clamp(y2, wy1, wy2)
    x2 = _clamp(x2, wx1, wx2)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    # Re-assert the static [N, 4] shape lost by split/concat.
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.

    Inputs:
        rpn_probs: [batch, anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]

    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """

    def __init__(self, proposal_count, nms_threshold, anchors,
                 config=None, **kwargs):
        """
        proposal_count: max number of proposals to keep per image.
        nms_threshold: IoU threshold for non-max suppression.
        anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates.
        """
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold
        # Stored as float32 so the box arithmetic below stays in float.
        self.anchors = anchors.astype(np.float32)

    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        # Undo the normalization applied when the RPN targets were built.
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Base anchors
        anchors = self.anchors
        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = min(6000, self.anchors.shape[0])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        # batch_slice applies the per-image op to each image in the batch.
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
                                    self.config.IMAGES_PER_GPU,
                                    names=["pre_nms_anchors"])
        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
        # Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
        height, width = self.config.IMAGE_SHAPE[:2]
        window = np.array([0, 0, height, width]).astype(np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])
        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.

        # Normalize dimensions to range of 0 to 1.
        normalized_boxes = boxes / np.array([[height, width, height, width]])

        # Non-max suppression
        def nms(normalized_boxes, scores):
            indices = tf.image.non_max_suppression(
                normalized_boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(normalized_boxes, indices)
            # Pad with zero boxes if fewer than proposal_count survive NMS,
            # so the output shape is fixed.
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([normalized_boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals

    def compute_output_shape(self, input_shape):
        # Always exactly proposal_count rows (NMS output is zero-padded).
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Base-2 logarithm via natural logs (this TF version lacks a log2 op)."""
    numerator = tf.log(x)
    denominator = tf.log(2.0)
    return numerator / denominator
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Params:
    - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
    - image_shape: [height, width, channels]. Shape of input image in pixels

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
      coordinates. Possibly padded with zeros if not enough
      boxes to fill the array.
    - Feature maps: List of feature maps from different levels of the
      pyramid. Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
    constructor.
    """

    def __init__(self, pool_shape, image_shape, **kwargs):
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)
        self.image_shape = tuple(image_shape)

    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Feature Maps. List of feature maps from different levels of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[1:]

        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(
            self.image_shape[0] * self.image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp the level into the available range P2..P5.
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)

        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            # Indices (batch, box) of the boxes assigned to this level.
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)

            # Box indicies for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)

            # Keep track of which box is mapped to which level
            box_to_level.append(ix)

            # Stop gradient propogation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)

            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))

        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)

        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)

        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # (Works as long as there are fewer than 100000 boxes per image.)
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)

        # Re-add the batch dimension
        pooled = tf.expand_dims(pooled, 0)
        return pooled

    def compute_output_shape(self, input_shape):
        # [batch, num_boxes] + pool_shape + [channels of the feature maps]
        return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.

    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns a [len(boxes1), len(boxes2)] matrix of IoU values.
    """
    # 1. Tile boxes2 and repeate boxes1. This allows us to compare
    # every boxes1 against every boxes2 without loops.
    # TF doesn't have an equivalent to np.repeate() so simulate it
    # using tf.tile() and tf.reshape.
    b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                            [1, 1, tf.shape(boxes2)[0]]), [-1, 4])
    b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
    # 2. Compute intersections
    b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
    b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
    y1 = tf.maximum(b1_y1, b2_y1)
    x1 = tf.maximum(b1_x1, b2_x1)
    y2 = tf.minimum(b1_y2, b2_y2)
    x2 = tf.minimum(b1_x2, b2_x2)
    # Clamp at zero so disjoint boxes contribute no intersection area.
    intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
    # 3. Compute unions
    b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = b1_area + b2_area - intersection
    # 4. Compute IoU and reshape to [boxes1, boxes2]
    iou = intersection / union
    overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
    return overlaps
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
            Class-specific bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Assertions: fail fast if the proposal tensor is empty.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)
    # Remove zero padding from proposals and GT, keeping class IDs and
    # masks aligned with the surviving boxes.
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)
    # Compute overlaps with crowd boxes [proposals, crowds]
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    # Proposals that barely touch any crowd box may be used as negatives.
    no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
    # Subsample ROIs. Aim for 33% positive
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    # NOTE(review): when positive_count is 0 this yields 0 negatives too,
    # so the image contributes only zero padding -- confirm this is intended.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)
    # Assign positive ROIs to GT boxes: each ROI takes the GT with max IoU.
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Compute bbox refinement targets for positive ROIs
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV
    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space (mini masks cover only the GT box).
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    # Crop each ROI out of its assigned mask and resize to MASK_SHAPE.
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)
    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)
    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
                    (dy, dx, log(dh), log(dw), class_id)]
                   Class-specific bbox refinements.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        # Model configuration; supplies TRAIN_ROIS_PER_IMAGE, MASK_SHAPE,
        # IMAGES_PER_GPU, etc. consumed by detection_targets_graph().
        self.config = config

    def call(self, inputs):
        proposals = inputs[0]
        gt_class_ids = inputs[1]
        gt_boxes = inputs[2]
        gt_masks = inputs[3]
        # Slice the batch and run the per-image target graph on each slice.
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        outputs = utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda w, x, y, z: detection_targets_graph(
                w, x, y, z, self.config),
            self.config.IMAGES_PER_GPU, names=names)
        return outputs

    def compute_output_shape(self, input_shape):
        # NOTE(review): the class_ids entry is declared as (None, 1) while
        # the actual output is [batch, TRAIN_ROIS_PER_IMAGE] -- confirm
        # before relying on these declared shapes.
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, 1),  # class_ids
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
             self.config.MASK_SHAPE[1])  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        # This layer does not propagate Keras masks for any of its outputs.
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
    """Clip boxes so they fit inside the given window.

    window: (y1, x1, y2, x2). The window in the image we want to clip to.
    boxes: [N, (y1, x1, y2, x2)]

    Returns the clipped boxes. Note: `boxes` is modified in place and the
    same array is returned, matching the original behavior.
    """
    # np.clip(a, lo, hi) is the idiomatic equivalent of
    # np.maximum(np.minimum(a, hi), lo) used previously.
    boxes[:, 0] = np.clip(boxes[:, 0], window[0], window[2])  # y1
    boxes[:, 1] = np.clip(boxes[:, 1], window[1], window[3])  # x1
    boxes[:, 2] = np.clip(boxes[:, 2], window[0], window[2])  # y2
    boxes[:, 3] = np.clip(boxes[:, 3], window[1], window[3])  # x2
    return boxes
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
    rois: [N, (y1, x1, y2, x2)] in normalized coordinates
    probs: [N, num_classes]. Class probabilities.
    deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
            bounding box deltas.
    window: (y1, x1, y2, x2) in image coordinates. The part of the image
        that contains the image excluding the padding.

    Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
    coordinates are in image domain.
    """
    # Class IDs per ROI: take the arg-max over class probabilities.
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas (only the top class's deltas)
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
    # TODO: better to keep them normalized until later
    height, width = config.IMAGE_SHAPE[:2]
    refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # Round and cast to int since we're dealing with pixels now
    refined_rois = tf.to_int32(tf.rint(refined_rois))
    # TODO: Filter out boxes with zero area
    # Filter out background boxes (class 0 is background)
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        # Intersect the two keep lists via tf.sets (operates on rank-2
        # tensors, hence the expand_dims / sparse-to-dense round trip).
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
            tf.to_float(tf.gather(pre_nms_rois, ixs)),
            tf.gather(pre_nms_scores, ixs),
            max_output_size=config.DETECTION_MAX_INSTANCES,
            iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back into the `keep` index space
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep

    # 2. Map over class IDs (one NMS pass per class)
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections by score, capped at DETECTION_MAX_INSTANCES
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are in image domain.
    detections = tf.concat([
        tf.to_float(tf.gather(refined_rois, keep)),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
    ], axis=1)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are in image domain
    """

    def __init__(self, config=None, **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        # Model configuration; supplies BATCH_SIZE, DETECTION_MAX_INSTANCES,
        # IMAGES_PER_GPU, etc. used below.
        self.config = config

    def call(self, inputs):
        rois = inputs[0]
        mrcnn_class = inputs[1]
        mrcnn_bbox = inputs[2]
        image_meta = inputs[3]
        # Run detection refinement graph on each item in the batch.
        # The image window (non-padded region) comes from the image meta.
        _, _, window, _ = parse_image_meta_graph(image_meta)
        # Note the lambda arg names: w is the bbox slice and z the window,
        # matching refine_detections_graph(rois, probs, deltas, window, ...).
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)
        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    def compute_output_shape(self, input_shape):
        # 6 = (y1, x1, y2, x2, class_id, class_score)
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional base of the RPN.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)
    # Objectness scores: [batch, height, width, anchors per location * 2].
    class_raw = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                          activation='linear', name='rpn_class_raw')(shared)
    # Flatten spatial dims to [batch, anchors, 2].
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(class_raw)
    # BG/FG probabilities via softmax on the last dimension.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)
    # Bounding box refinement: [batch, H, W, anchors per location * 4]
    # where the 4 values are [x, y, log(w), log(h)].
    bbox_raw = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    # Flatten spatial dims to [batch, anchors, 4].
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(bbox_raw)
    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.

    It wraps the RPN graph so it can be used multiple times with shared
    weights.

    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.

    Returns a Keras Model object. The model outputs, when called, are:
        rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # Height and width are left unspecified so the same RPN can be applied
    # to every pyramid level.
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location,
                            anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from diffent layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns:
        logits: [N, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Align pooling: [batch, num_boxes, pool_size, pool_size, channels]
    net = PyramidROIAlign([pool_size, pool_size], image_shape,
                          name="roi_align_classifier")([rois] + feature_maps)
    # First 1024-unit FC layer, expressed as a pool_size x pool_size
    # valid convolution for consistency with the conv backbone.
    net = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                             name="mrcnn_class_conv1")(net)
    net = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(net)
    net = KL.Activation('relu')(net)
    # Second 1024-unit FC layer as a 1x1 convolution.
    net = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                             name="mrcnn_class_conv2")(net)
    net = KL.TimeDistributed(BatchNorm(axis=3),
                             name='mrcnn_class_bn2')(net)
    net = KL.Activation('relu')(net)
    # Drop the 1x1 spatial dims: [batch, num_boxes, 1024]
    shared = KL.Lambda(lambda t: K.squeeze(K.squeeze(t, 3), 2),
                       name="pool_squeeze")(net)
    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)
    # BBox head: one (dy, dx, log(dh), log(dw)) set per class.
    bbox_flat = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                                   name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    num_rois = K.int_shape(bbox_flat)[1]
    mrcnn_bbox = KL.Reshape((num_rois, num_classes, 4),
                            name="mrcnn_bbox")(bbox_flat)
    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from diffent layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns: Masks [batch, roi_count, height, width, num_classes]
    """
    # ROI Align pooling: [batch, num_boxes, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_mask")([rois] + feature_maps)
    # Four identical conv blocks (3x3 conv -> batch norm -> relu).
    # Layer names match the original mrcnn_mask_conv1..4 / mrcnn_mask_bn1..4
    # so pretrained weights still load.
    for i in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(i))(x)
        x = KL.TimeDistributed(BatchNorm(axis=3),
                               name='mrcnn_mask_bn{}'.format(i))(x)
        x = KL.Activation('relu')(x)
    # 2x upsampling, then a per-class sigmoid mask prediction.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.

    y_true and y_pred are typically: [N, 4], but could be any shape.
    Quadratic for |error| < 1, linear beyond, so large residuals don't
    dominate the gradient.
    """
    error = K.abs(y_true - y_pred)
    in_quadratic_zone = K.cast(K.less(error, 1.0), "float32")
    quadratic = 0.5 * error ** 2
    linear = error - 0.5
    return in_quadratic_zone * quadratic + (1 - in_quadratic_zone) * linear
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Drop the trailing singleton dim: [batch, anchors].
    match = tf.squeeze(rpn_match, -1)
    # Anchor class labels: positive anchors (+1) become 1, all others 0.
    labels = K.cast(K.equal(match, 1), tf.int32)
    # Only non-neutral anchors (match != 0) contribute to the loss.
    contributing = tf.where(K.not_equal(match, 0))
    logits = tf.gather_nd(rpn_class_logits, contributing)
    labels = tf.gather_nd(labels, contributing)
    # Cross-entropy over the selected anchors.
    loss = K.sparse_categorical_crossentropy(target=labels,
                                             output=logits,
                                             from_logits=True)
    # Guard against an empty selection (every anchor neutral).
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))
    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)
    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)
    # Reuse the shared smooth_l1_loss() helper instead of duplicating its
    # formula here (resolves the long-standing TODO).
    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    # Guard against an image with no positive anchors.
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    target_class_ids = tf.cast(target_class_ids, 'int64')
    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)
    # Per-ROI cross-entropy loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)
    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    # NOTE(review): if no prediction falls in an active class the
    # denominator is 0 and this yields NaN -- confirm callers can't hit it.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Flatten the batch and ROI dimensions together for simpler indexing.
    num_classes = K.int_shape(pred_bbox)[2]
    class_ids_flat = K.reshape(target_class_ids, (-1,))
    target_flat = K.reshape(target_bbox, (-1, 4))
    pred_flat = K.reshape(pred_bbox, (-1, num_classes, 4))
    # Loss comes only from positive ROIs, and only from the predicted
    # deltas of each ROI's ground-truth class.
    positive_roi_ix = tf.where(class_ids_flat > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(class_ids_flat, positive_roi_ix), tf.int64)
    gather_ix = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
    # Select the contributing target and predicted deltas.
    y_true = tf.gather(target_flat, positive_roi_ix)
    y_pred = tf.gather_nd(pred_flat, gather_ix)
    # Smooth-L1 loss, or 0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    smooth_l1_loss(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))
    # Return as [1, 1] to match how the other head losses are shaped.
    return K.reshape(K.mean(loss), [1, 1])
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width] so a
    # (roi, class) index pair selects one [height, width] mask below.
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)
    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)
    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape: [batch, roi, num_classes]
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    loss = K.mean(loss)
    # Return as [1, 1] to match how the other head losses are shaped.
    loss = K.reshape(loss, [1, 1])
    return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: If true, apply random image augmentation. Currently, only
        horizontal flipping is offered.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # Record the original shape before any resizing, for the image meta.
    shape = image.shape
    image, window, scale, padding = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        max_dim=config.IMAGE_MAX_DIM,
        padding=config.IMAGE_PADDING)
    # Resize the mask with the same scale/padding so it stays aligned.
    mask = utils.resize_mask(mask, scale, padding)
    # Random horizontal flips.
    if augment:
        if random.randint(0, 1):
            # Flip image and masks together to keep them consistent.
            image = np.fliplr(image)
            mask = np.fliplr(mask)
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)
    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1
    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
    # Image meta data
    image_meta = compose_image_meta(image_id, shape, window, active_class_ids)
    return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)
    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.
    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]
    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])
    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)
    # Assign ROIs to GT boxes: each ROI takes the GT with max IoU.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.
        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            # NOTE(review): this uses the same < 0.5 threshold as above,
            # despite the comment -- confirm whether a looser threshold
            # was intended here.
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            # NOTE(review): np.random.choice raises if keep_bg_ids is empty
            # (all kept ROIs are fg) -- presumably unreachable in practice,
            # but worth confirming.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0
    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]
    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV
    # Generate class-specific target masks.
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]
        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 --
            # this code requires an older SciPy.
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
                                             interp='nearest') / 255.0).astype(bool)
            # Place the mini mask in the placeholder
            class_mask = placeholder
        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = scipy.misc.imresize(
            m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
        masks[i, :, :, class_id] = mask
    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    image_shape: [height, width, depth] (unused here; kept for interface parity
        with the other target builders).
    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs. Negative IDs mark COCO
        "crowd" boxes, which are excluded from training.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
    config: model config providing RPN_TRAIN_ANCHORS_PER_IMAGE and
        RPN_BBOX_STD_DEV.

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. Only the
              first `len(positive anchors)` rows are filled; the rest stay 0.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors sitting on a crowd box must not be marked negative below.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)
    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # This may override a -1 from step 1; assignment order matters.
    # TODO: If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argmax(overlaps, axis=0)
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1
    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Rest the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]
        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w
        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1
    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs (unused; kept for
        interface compatibility with callers such as data_generator()).
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # FIX: guard against images with no GT boxes. The previous code divided
    # by gt_boxes.shape[0] unconditionally, raising ZeroDivisionError for an
    # empty gt_boxes array. With no GT boxes, all ROIs are sampled globally.
    num_gt = 0 if gt_boxes is None else gt_boxes.shape[0]
    if num_gt > 0:
        # Generate random ROIs around GT boxes (90% of count)
        rois_per_box = int(0.9 * count / num_gt)
        for i in range(num_gt):
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
            h = gt_y2 - gt_y1
            w = gt_x2 - gt_x1
            # random boundaries: a window up to one box-size around the GT box,
            # clipped to the image.
            r_y1 = max(gt_y1 - h, 0)
            r_y2 = min(gt_y2 + h, image_shape[0])
            r_x1 = max(gt_x1 - w, 0)
            r_x2 = min(gt_x2 + w, image_shape[1])
            # To avoid generating boxes with zero area, we generate double what
            # we need and filter out the extra. If we get fewer valid boxes
            # than we need, we loop and try again.
            while True:
                y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
                x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
                # Filter out zero area boxes
                threshold = 1
                y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                            threshold][:rois_per_box]
                x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                            threshold][:rois_per_box]
                if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                    break
            # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
            # into x1, y1, x2, y2 order
            x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
            y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
            box_rois = np.hstack([y1, x1, y2, x2])
            rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
        # NOTE: rois_per_box = floor(0.9*count/num_gt), so remaining_count >= 1
        # whenever count >= 1 and the slice below never wraps via rois[-0:].
        remaining_count = count - (rois_per_box * num_gt)
    else:
        remaining_count = count

    # Generate random ROIs anywhere in the image (the remaining ~10% of count,
    # or all of them if there were no GT boxes).
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break
    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
                   batch_size=1, detection_targets=False):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: If True, applies image augmentation to images (currently only
             horizontal flips are supported)
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, size of image meta]
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    error_count = 0  # consecutive-failure counter; the generator gives up after 5
    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             config.BACKBONE_SHAPES,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)
            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(dataset, config, image_id, augment=augment,
                              use_mini_mask=config.USE_MINI_MASK)
            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue
            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)
            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
            # Init batch arrays (lazily, sized from the first sample's shapes)
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                if config.USE_MINI_MASK:
                    batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
                                               config.MAX_GT_INSTANCES))
                else:
                    batch_gt_masks = np.zeros(
                        (batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]
            # Add to batch (GT arrays are zero-padded out to MAX_GT_INSTANCES)
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1
            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []
                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
                yield inputs, outputs
                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            # Let generator shutdown / user interrupt propagate untouched.
            raise
        except:
            # Log it and skip the image (best-effort: bad samples shouldn't
            # kill a long training run, but repeated failures should).
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        # Initialize self.log_dir / self.checkpoint_path and reset self.epoch.
        self.set_log_dir()
        # Build the Keras graph for the requested mode (stored in keras_model).
        self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build Mask R-CNN architecture.

        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        config: model configuration object (image shape, anchor settings,
            ROI counts, etc.).

        Returns a keras.models.Model. In training mode the model carries the
        loss layers as extra outputs; in inference mode it outputs detections
        and masks.
        """
        assert mode in ['training', 'inference']
        # Image size must be dividable by 2 multiple times
        # (the FPN downscales by 2 six times and upsamples back).
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")
        # Inputs
        input_image = KL.Input(
            shape=config.IMAGE_SHAPE.tolist(), name="input_image")
        input_image_meta = KL.Input(shape=[None], name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates to the 0-1 range used internally.
            h, w = K.shape(input_image)[1], K.shape(input_image)[2]
            image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)
            gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the thead (stage 5), so we pick the 4th item in the list.
        _, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
        # Top-down Layers (FPN): each level is the upsampled level above plus a
        # 1x1-conv lateral connection from the matching bottom-up stage.
        # TODO: add assert to varify feature map sizes match what's in config
        P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]
        # Generate Anchors
        self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                                      config.RPN_ANCHOR_RATIOS,
                                                      config.BACKBONE_SHAPES,
                                                      config.BACKBONE_STRIDES,
                                                      config.RPN_ANCHOR_STRIDE)
        # RPN Model: one shared model applied to every pyramid level.
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), 256)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]
        rpn_class_logits, rpn_class, rpn_bbox = outputs
        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(proposal_count=proposal_count,
                                 nms_threshold=config.RPN_NMS_THRESHOLD,
                                 name="ROI",
                                 anchors=self.anchors,
                                 config=config)([rpn_class, rpn_bbox])
        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            _, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
                                                  mask=[None, None, None, None])(input_image_meta)
            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input
                # (debug/ablation path for training heads without the RPN).
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates to 0-1 range.
                target_rois = KL.Lambda(lambda x: K.cast(
                    x, tf.float32) / image_scale[:4])(input_rois)
            else:
                target_rois = rpn_rois
            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
                                     config.POOL_SIZE, config.NUM_CLASSES)
            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              config.IMAGE_SHAPE,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES)
            # TODO: clean up (use tf.identify if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
            # Losses (each wrapped as a named Lambda layer so compile() can
            # find them by name and add them via add_loss()).
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])
            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
                                     config.POOL_SIZE, config.NUM_CLASSES)
            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
            # Convert boxes to normalized coordinates
            # TODO: let DetectionLayer return normalized coordinates to avoid
            # unnecessary conversions
            h, w = config.IMAGE_SHAPE[:2]
            detection_boxes = KL.Lambda(
                lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)
            # Create masks for detections
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                              config.IMAGE_SHAPE,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES)
            model = KM.Model([input_image, input_image_meta],
                             [detections, mrcnn_class, mrcnn_bbox,
                                 mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')
        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)
        return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
    def load_weights(self, filepath, by_name=False, exclude=None):
        """Modified version of the corresponding Keras function with
        the addition of multi-GPU support and the ability to exclude
        some layers from loading.

        filepath: path to the HDF5 weights file.
        by_name: If True, load weights by layer name. Forced to True
            when `exclude` is given (excluding only works by name).
        exclude: list of layer names to exclude from loading.
        """
        import h5py
        from keras.engine import topology
        if exclude:
            by_name = True
        # NOTE(review): this check looks vestigial — a failed `import h5py`
        # above would raise ImportError before reaching it. Kept as-is.
        if h5py is None:
            raise ImportError('`load_weights` requires h5py.')
        f = h5py.File(filepath, mode='r')
        # Full-model HDF5 files nest the weights under 'model_weights';
        # weights-only files have 'layer_names' at the top level.
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']
        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        keras_model = self.keras_model
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers
        # Exclude some layers
        if exclude:
            layers = filter(lambda l: l.name not in exclude, layers)
        if by_name:
            topology.load_weights_from_hdf5_group_by_name(f, layers)
        else:
            topology.load_weights_from_hdf5_group(f, layers)
        if hasattr(f, 'close'):
            f.close()
        # Update the log directory (resumes epoch counting from the file name).
        self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
    def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.

        learning_rate: SGD learning rate.
        momentum: SGD momentum.
        """
        # Optimizer object. Gradients are clipped by norm to stabilize training.
        optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
                                         clipnorm=5.0)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        # (compile() may be called more than once on the same model).
        # NOTE(review): this touches Keras private attributes; tied to the
        # Keras version the file was written against.
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        loss_names = ["rpn_class_loss", "rpn_bbox_loss",
                      "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            if layer.output in self.keras_model.losses:
                continue
            # keep_dims is the pre-TF-1.5 spelling of keepdims.
            self.keras_model.add_loss(
                tf.reduce_mean(layer.output, keep_dims=True))
        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
                      for w in self.keras_model.trainable_weights
                      if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))
        # Compile. All losses are added via add_loss() above, so every model
        # output gets a None loss here.
        self.keras_model.compile(optimizer=optimizer, loss=[
            None] * len(self.keras_model.outputs))
        # Add metrics for losses so they show up in training progress output.
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            self.keras_model.metrics_tensors.append(tf.reduce_mean(
                layer.output, keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainble layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6)) + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done alreay, so this actually determines
the epochs to train in total rather than in this particaular
call.
layers: Allows selecting wich layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heaads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE,
augment=False)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name is 'nt':
workers = 0
else:
workers = max(self.config.BATCH_SIZE // 2, 2)
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=next(val_generator),
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
#workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matricies [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matricies:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image to fit the model expected size
# TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
max_dim=self.config.IMAGE_MAX_DIM,
padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
        """Reformats the detections of one image from the format of the neural
        network output to a format suitable for use in the rest of the
        application.

        detections: [N, (y1, x1, y2, x2, class_id, score)]
        mrcnn_mask: [N, height, width, num_classes]
        image_shape: [height, width, depth] Original size of the image before resizing
        window: [y1, x1, y2, x2] Box in the image where the real image is
        excluding the padding.

        Returns:
        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
        class_ids: [N] Integer class IDs for each bounding box
        scores: [N] Float probability scores of the class_id
        masks: [height, width, num_instances] Instance masks
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # Fancy indexing: for detection i keep only the mask channel of its
        # predicted class, yielding [N, height, width].
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]
        # Compute scale and shift to translate coordinates to image domain.
        # Boxes are in window coordinates; min() keeps the aspect ratio
        # consistent with how the image was resized.
        h_scale = image_shape[0] / (window[2] - window[0])
        w_scale = image_shape[1] / (window[3] - window[1])
        scale = min(h_scale, w_scale)
        shift = window[:2]  # y, x
        scales = np.array([scale, scale, scale, scale])
        shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
        # Translate bounding boxes to image domain
        boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
        # Filter out detections with zero area. Often only happens in early
        # stages of training when the network weights are still a bit random.
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            N = class_ids.shape[0]
        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
            full_masks.append(full_mask)
        # If nothing was detected, return an empty [h, w, 0] array instead of
        # calling np.stack on an empty list (which would raise).
        full_masks = np.stack(full_masks, axis=-1)\
            if full_masks else np.empty((0,) + masks.shape[1:3])
        return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox =\
self.keras_model.predict([molded_images, image_metas], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs):
"""Runs a sub-set of the computation graph that computes the given
outputs.
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Run inference
molded_images, image_metas, windows = self.mold_inputs(images)
# TODO: support training mode?
# if TEST_MODE == "training":
# model_in = [molded_images, image_metas,
# target_rpn_match, target_rpn_bbox,
# gt_boxes, gt_masks]
# if not config.USE_RPN_ROIS:
# model_in.append(target_rois)
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(1.)
# outputs_np = kf(model_in)
# else:
model_in = [molded_images, image_metas]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
# Pack the generated Numpy arrays into a a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
    """Takes attributes of an image and puts them in one 1D array.

    image_id: An int ID of the image. Useful for debugging.
    image_shape: [height, width, channels]
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
        image is (excluding the padding)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    # Flatten everything into one 1D vector:
    # [image_id (1), image_shape (3), window (4), active_class_ids (num_classes)]
    parts = [image_id]
    parts.extend(image_shape)
    parts.extend(window)
    parts.extend(active_class_ids)
    return np.array(parts)
def parse_image_meta_graph(meta):
    """Parses a tensor that contains image attributes to its components.
    See compose_image_meta() for more details.

    meta: [batch, meta length] where meta length depends on NUM_CLASSES
    """
    # Slice the packed meta vector back into its components. Only basic
    # slicing is used, so this works on both TF tensors and Numpy arrays.
    return [
        meta[:, 0],    # image_id
        meta[:, 1:4],  # image_shape (height, width, channels)
        meta[:, 4:8],  # window (y1, x1, y2, x2) of image in in pixels
        meta[:, 8:],   # active_class_ids
    ]
def mold_image(images, config):
    """Takes RGB images with 0-255 values and subtraces
    the mean pixel and converts it to float. Expects image
    colors in RGB order.
    """
    # Cast first so the subtraction happens in floating point, then center
    # the pixel values around the dataset mean.
    as_float = images.astype(np.float32)
    return np.subtract(as_float, config.MEAN_PIXEL)
def unmold_image(normalized_images, config):
    """Takes a image normalized with mold() and returns the original."""
    # Add the mean pixel back, then truncate to the uint8 pixel range.
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
    """Often boxes are represented with matricies of shape [N, 4] and
    are padded with zeros. This removes zero boxes.

    boxes: [N, 4] matrix of boxes.
    non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    # A row is "real" when the sum of its absolute coordinates is non-zero;
    # casting the float sum to bool maps 0.0 -> False, anything else -> True.
    row_magnitude = tf.reduce_sum(tf.abs(boxes), axis=1)
    non_zeros = tf.cast(row_magnitude, tf.bool)
    kept_boxes = tf.boolean_mask(boxes, non_zeros, name=name)
    return kept_boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Picks different number of values from each row
    in x depending on the values in counts.
    """
    # Take the first counts[row] values of each row, then concatenate the
    # picked slices into a single 1D tensor.
    picked = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(picked, axis=0)
| [
"will.crawford@live.com"
] | will.crawford@live.com |
235af1bbc670e956e37e472b363d092d53a2e10f | 7927424f1983eecc7c7b2f0ebaf61ad552d2a7e7 | /zigzag.py | 1e4ea4b1030d84d3446c45f2f19960e1f1f9aafc | [] | no_license | 6reg/automate | 295931d3ecf0e69e01921cc45d452fadfd1e6581 | 11e5de461ece3d8d111f3dc13de088788baf19a2 | refs/heads/main | 2023-03-08T18:39:42.991280 | 2021-02-22T20:53:13 | 2021-02-22T20:53:13 | 334,780,031 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | import time, sys
indent = 0 # How many spaces to indent
indentIncreasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop.
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for the 1/10 of a second.
if indentIncreasing:
# Increase the number of spaces:
indent = indent + 1
if indent == 20:
# Change direction:
indentIncreasing = False
else:
# Decrease the number of spaces:
indent = indent - 1
if indent == 0:
# Change direction:
indentIncreasing = True
except KeyboardInterrupt:
sys.exit()
| [
"mathiasgreg@gmail.com"
] | mathiasgreg@gmail.com |
caebf84579717f9af88612898b4b4390d7755b86 | f62be83925849ab2841565ab264dedf1ee74a689 | /S&PTimeTest.py | 3e69a50e2aaa5ea747781acb9fb522f9a524ad58 | [] | no_license | evy555/Stock-day-of-week-return-analysis | 043501581615bfe1979878ad01cada990eb9cb08 | 28cee2a04475db6801a4fe8c8f0a51c9a5f2959b | refs/heads/master | 2020-04-01T20:59:05.613334 | 2016-06-08T22:46:08 | 2016-06-08T22:46:08 | 60,735,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,653 | py | import pandas as pd
import numpy as np
import os
import pandas.io.data
from pandas import Series, DataFrame
from pandas import ExcelWriter
from pandas import read_csv
import matplotlib.pyplot as plt
import datetime
from scipy.stats import ttest_1samp
import matplotlib.pyplot as plt
from random import randint
now = datetime.datetime.now()
list = '^GSPC'
start = None
while start is None:
try:
start = datetime.datetime(randint(1950,2015), randint(1,12), randint(1,31))
except:
pass
end = datetime.datetime(now.year, now.month, now.day)
df = pd.io.data.get_data_yahoo(list, start, end)['Adj Close']
df = DataFrame(df)
df['Returns'] = df.pct_change()
df['Date'] = df.index
df['Date'] = [time.date() for time in df['Date']]
l = df.index.values
for i in range(0,len(l)):
df.loc[l[i], 'DayoftheWeek'] = datetime.datetime.strptime(str(df.loc[l[i], 'Date']), '%Y-%m-%d').strftime('%A')
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
Monday = 0
MonCount = 0
Mon = []
Tuesday = 0
TueCount = 0
Tue = []
Wednesday = 0
WedCount = 0
Wed = []
Thursday = 0
ThuCount = 0
Thu = []
Friday = 0
FriCount = 0
Fri = []
#Need to loop through days and then loop through df to sum up all returns while also summing the total count. Then create average
for i in range(1,len(l)):
dump = 0
if df.loc[l[i], 'DayoftheWeek'] == 'Monday':
Monday = Monday + df.loc[l[i], "Returns"]
MonCount = MonCount + 1
Mon.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Tuesday':
Tuesday = Tuesday + df.loc[l[i], "Returns"]
TueCount = TueCount + 1
Tue.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Wednesday':
Wednesday = Wednesday + df.loc[l[i], "Returns"]
WedCount = WedCount + 1
Wed.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Thursday':
Thursday = Thursday + df.loc[l[i], "Returns"]
ThuCount = ThuCount + 1
Thu.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Friday':
Friday = Friday + df.loc[l[i], "Returns"]
FriCount = FriCount + 1
Fri.append(df.loc[l[i],'Returns'])
else:
dump = dump + df.loc[l[i], 'Returns']
dict = {'Monday': Monday/MonCount, 'Tuesday': Tuesday/TueCount, 'Wednesday': Wednesday/WedCount, 'Thursday': Thursday/ThuCount, 'Friday': Friday/FriCount}
dg = pd.Series(dict, name='DailyValue')
dff = DataFrame(dg)
dff['Day'] = dff.index
dff['Sorter'] = [5,1,4,2,3]
dff.sort_values(by = ['Sorter'], inplace = True)
#dff.sort(['Day'], ascending = True)
#dff.plot(kind='bar', grid = True, y = ['DailyValue'])
plt.show()
# Buy/Sell decision
for i in range(1,len(l)):
if df.loc[l[i], 'DayoftheWeek'] == 'Friday':
df.loc[l[i], "Signal"] = "Sell"
df.loc[l[i], "Market"] = 1
elif df.loc[l[i], 'DayoftheWeek'] == 'Monday':
df.loc[l[i], "Signal"] = "Buy"
df.loc[l[i], "Market"] = 0
else:
df.loc[l[i], 'Signal'] = "Hold"
df.loc[l[i], "Market"] = 1
# Investment calculations
df['Investment'] = ""
df['S&P500 Investment'] = ''
df['Investment'][0] = 10000
df['S&P500 Investment'][0] = 10000
for i in range(1,len(l)):
df.loc[l[i], 'S&P500 Investment'] = df.loc[l[i-1], 'S&P500 Investment'] * (1 + df.loc[l[i], 'Returns'])
if df.loc[l[i], "Signal"] == "Sell":
df.loc[l[i], "Investment"] = df.loc[l[i-1], 'Investment'] * (1 + df.loc[l[i], "Returns"])
elif df.loc[l[i], "Signal"] == "Buy":
df.loc[l[i], "Investment"] = df.loc[l[i-1], 'Investment']
elif df.loc[l[i], 'Signal'] == "Hold":
df.loc[l[i], 'Investment'] = df.loc[l[i-1], 'Investment'] * (1 + df.loc[l[i], "Returns"])
print(df.head())
#Excess Return over S&P500 Column
#for i in range(1,len(l)):
# df.loc[l[i], 'Excess Return'] = df.loc[l[i], 'Investment'] - df.loc[l[i], 'S&P500 Investment']
file = ExcelWriter('Time1.xlsx')
df.to_excel(file, 'Data')
file.close()
os.startfile('Time1.xlsx')
df.plot(y = ['Investment', 'S&P500 Investment'])
plt.show()
print("Average Monday return: %s" % (Monday/MonCount))
print("Average Tuesday return: %s" % (Tuesday/TueCount))
print("Average Wednesday return: %s" % (Wednesday/WedCount))
print("Average Thursday return: %s" % (Thursday/ThuCount))
print("Average Friday return: %s" % (Friday/FriCount))
print("1 sample t-tests for each day to test significance of daily returns against 0 are as follows:")
print(ttest_1samp(Mon,0))
print(ttest_1samp(Tue,0))
print(ttest_1samp(Wed,0))
print(ttest_1samp(Thu,0))
print(ttest_1samp(Fri,0))
| [
"evansj556@yahoo.com"
] | evansj556@yahoo.com |
a8518cb7746c3200f3217bba2498fb3fe7e3c877 | 878eb4b539d77051dd7330389b90d988d9aef8f3 | /CAPITULO 7/Exercicio R.py | af3ef2eb5c93a5dc7848b95c4831130451504429 | [
"MIT"
] | permissive | LarmIg/Algoritmos-Python | a031badc9f607cbbc109ee4ca8bfe60d5636d867 | f2c9889705cacac007833f6ab9a413b06213f882 | refs/heads/master | 2022-11-25T05:36:26.313469 | 2020-07-25T11:59:36 | 2020-07-25T11:59:36 | 282,436,201 | 0 | 0 | MIT | 2020-07-25T12:08:13 | 2020-07-25T12:08:12 | null | UTF-8 | Python | false | false | 767 | py | # Elaborar um programa que efetue a leitura de dados em duas matrizes (A e B) de uma dimensão do tipo vetor, sendo a matriz A com dez elementos e a matriz B com cinco elementos. Os elementos a serem armazenados nas matrizes devem ser do tipo cadeia. Construir uma matriz C com a capacidade de armazenar um total de 15 elementos e executar a junção das matrizes A e B na matriz C. Apresentar os dados da matriz C em ordem alfabética descendente
A = []
B = []
C = []
for i in range(10):
A.append(str(input('Informe um valor para a Matriz A[{}]'.format(i))))
for i in range(5):
B.append(str(input('Informe um valor para a Matriz B[{}]'.format(i))))
C = A + B
C.sort(reverse = True)
for i in range(len(C)):
print('C[{}] = {}'.format(i, C[i]))
| [
"noreply@github.com"
] | LarmIg.noreply@github.com |
696193e4863c900c995b49d8854b2fd947ef2ebd | 9dc21ebb553fd116826c7cbae7d8c5eba47423d1 | /cloneGraph.py | 81681ac2a31cf11b69ac78e24d755d692f4aee77 | [] | no_license | KJSui/leetcode-2020 | a475a8b8481231757222c5afaad2856a92572f89 | 37cf89e7fb1351b1deff09271d9bb5852395054e | refs/heads/main | 2023-04-05T19:46:25.647605 | 2021-05-06T20:40:06 | 2021-05-06T20:40:06 | 365,031,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class Solution:
def __init__(self):
self.copy = {}
def cloneGraph(self, node):
if not node:
return None
newNode = Node(node.val)
neight = []
for i in neight:
if i in self.copy:
neight.append(self.copy[i])
else:
neight.append(self.cloneGraph(i))
newNode.neighbors = neight
return newNode
| [
"jsui@digitalocean.com"
] | jsui@digitalocean.com |
ebc3ed1ffe0e2caca9b9f1ca118b77aa614a399c | 04a0ff31201c67a0e6a9654369ddd3f712303584 | /module_5/pages/basket_page.py | 9458b62174897720b13b36a5b9c95bcc2af01498 | [] | no_license | titun9/stepik_lessons | 58c73e136a7be2950f7071d637a495e182a1291f | 26515d9edb2a2b8f7a09a598405d5cecb6a94f7d | refs/heads/master | 2023-06-09T07:21:20.696296 | 2021-06-30T14:00:25 | 2021-06-30T14:00:25 | 366,700,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | from .base_page import BasePage
from .locators import BasketPageLocators
from .locators import BasePageLocators
class BasketPage(BasePage):
def get_correct_message_empty_basket(self):
language = self.browser.find_element(*BasePageLocators.LANGUAGE_PAGE).get_attribute("lang")
dictionary_message = {"ru": "Ваша корзина пуста", "en-gb": "Your basket is empty", \
"es": "Tu carrito esta vacío", "fr": "Votre panier est vide"}
message = dictionary_message[language]
return message
def should_be_message_empty_basket(self):
message = self.get_correct_message_empty_basket()
message_basket_empty = self.browser.find_element(*BasketPageLocators.MESSAGE_EMPTY_BASKET).text
assert message in message_basket_empty, f"No message that basket is empty"
def should_be_product_in_basket_page(self):
assert self.is_element_present(*BasketPageLocators.TABLE_ADDED_PRODUCT), \
"Basket is not empty"
def should_not_be_product_in_basket_page(self):
assert self.is_not_element_present(*BasketPageLocators.TABLE_ADDED_PRODUCT), \
"Basket is empty"
| [
"butkevichas@cheops-edu.ru"
] | butkevichas@cheops-edu.ru |
2a77ffc8692138609c559a9df4f3206508debd09 | 0c153f489e523afdc33b950a6b9ee21af09e968e | /cpp/run_scripts/run_fom_basis.py | 57249f1dc5a1dc4ca8146c3f4d9c3f9afc8a949f | [] | no_license | Pressio/pressio-sisc-burgers1d | 86f1acb31d40d1aefa83b61bb4e8a7d70621cf1a | 671f45b7abd5dc59d574b6d26cc4a5f23ee90306 | refs/heads/master | 2021-01-26T01:04:20.594259 | 2020-04-26T11:32:00 | 2020-04-26T11:32:00 | 243,249,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | #!/usr/bin/env python
import sys, os, time
import subprocess
import numpy as np
import os.path
from argparse import ArgumentParser
import re
import myutils, constants
def main(exeName):
  """Run the FOM executable once per (mesh size, ROM size) pair to collect
  POD basis data, then move the outputs into meshSize<N>/basis<M> folders.

  exeName: name of the FOM executable in the current directory.
  Relies on the project-local `constants` (mesh_sizes, rom_sizes,
  numStepsBasis) and `myutils` (input-file generation) modules.
  NOTE(review): commands are built with os.system and unquoted paths;
  fine for trusted local names, not for arbitrary input.
  """
  # args for the executable
  args = ("./"+exeName, "input.txt")
  print("Starting basis runs")
  # loop over mesh sizes
  for iMesh in range(0, constants.num_meshes):
    currentMeshSize = constants.mesh_sizes[iMesh]
    print("Current currentMeshSize = ", currentMeshSize)
    # create folder
    parentDir='meshSize' + str(currentMeshSize)
    if not os.path.exists(parentDir):
      os.system('mkdir ' + parentDir)
    # loop over various basis size
    for i in range(0, constants.num_rom_sizes):
      romSize = constants.rom_sizes[i]
      print("Current romSize = ", romSize)
      # based on the size of rom and number of ode steps,
      # compute the sampling frequency
      assert(constants.numStepsBasis % romSize == 0)
      samplingFreq = int(constants.numStepsBasis/romSize)
      # create input file
      myutils.createInputFileFomForBasis(currentMeshSize, samplingFreq)
      os.system("./" + exeName + " input.txt")
      #popen = subprocess.Popen(args, stdout=subprocess.PIPE)
      #popen.wait()
      #output = popen.stdout.read()
      # create dir for this number of basis
      childDir=parentDir + '/basis' + str(romSize)
      if not os.path.exists(childDir): os.system('mkdir ' + childDir)
      # copy files there
      os.system('mv input.txt ' + childDir)
      os.system('mv basis.txt ' + childDir)
      os.system('mv snapshots.txt ' + childDir)
      os.system('mv yFom.txt ' + childDir)
  print("Done with basis runs")
# Command-line entry point: -exe/--exe names the FOM executable to drive.
if __name__== "__main__":
  parser = ArgumentParser()
  parser.add_argument("-exe", "--exe", dest="exeName",
                      help="generate basis for fom")
  args = parser.parse_args()
  main(args.exeName)
| [
"fnrizzi@sandia.gov"
] | fnrizzi@sandia.gov |
95b2abdf3b691a753c2587061a681df8fd8851d1 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/messenger/proto/xmpp/extensions/chat.py | 567a173fdee232fd567d9e3a472d0a0c272f68b0 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 9,509 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/messenger/proto/xmpp/extensions/chat.py
import calendar
from datetime import datetime
import json
import time
from debug_utils import LOG_CURRENT_EXCEPTION
from messenger.proto.xmpp.extensions import PyExtension, PyHandler, PyQuery
from messenger.proto.xmpp.extensions.dataform import DataForm, Field
from messenger.proto.xmpp.extensions.ext_constants import XML_NAME_SPACE as _NS
from messenger.proto.xmpp.extensions.ext_constants import XML_TAG_NAME as _TAG
from messenger.proto.xmpp.extensions.shared_handlers import IQHandler
from messenger.proto.xmpp.extensions.shared_queries import MessageQuery
from messenger.proto.xmpp.extensions.shared_queries import PresenceQuery
from messenger.proto.xmpp.extensions.wg_items import WgSharedExtension
from messenger.proto.xmpp.gloox_constants import IQ_TYPE, CHAT_STATE, MESSAGE_TYPE_ATTR, PRESENCE
from messenger.proto.xmpp.wrappers import ChatMessage
class ChatStateExtension(PyExtension):
    """XEP-0085 chat-state extension (<active/>, <composing/>, ...).

    The element name itself carries the chat state, so the extension is
    constructed with the state value as its tag name.
    """

    def __init__(self, state=CHAT_STATE.UNDEFINED):
        super(ChatStateExtension, self).__init__(state)
        self.setXmlNs(_NS.CHAT_STATES)

    @classmethod
    def getDefaultData(cls):
        # Value used when no recognizable chat-state tag is present.
        return CHAT_STATE.UNDEFINED

    def getXPath(self, index=None, suffix='', name=None):
        # When the state is undefined, build an XPath for every known state
        # so that any of them can be matched; otherwise build only the path
        # for this specific state.
        if self.getName() == CHAT_STATE.UNDEFINED:
            paths = []
            getXPath = super(ChatStateExtension, self).getXPath
            for state in CHAT_STATE.RANGE:
                paths.append(getXPath(index, suffix, state))
            # NOTE(review): `name` is reused here as the return value
            # (a list of paths), not as a tag name.
            name = paths
        else:
            name = super(ChatStateExtension, self).getXPath(index, suffix, name)
        return name

    def parseTag(self, pyGlooxTag):
        # Extract the chat state from the stanza; unknown or missing states
        # fall back to UNDEFINED.
        result = pyGlooxTag.filterXPath('|'.join(CHAT_STATE.RANGE))
        if result:
            state = result[0].getTagName()
            if state not in CHAT_STATE.RANGE:
                state = self.getDefaultData()
        else:
            state = self.getDefaultData()
        return state
class DelayExtension(PyExtension):
    """XEP-0203 delayed-delivery extension; parses the 'stamp' attribute
    into a POSIX (UTC epoch) timestamp."""

    def __init__(self):
        super(DelayExtension, self).__init__(_TAG.DELAY)
        self.setXmlNs(_NS.DELAY)

    @classmethod
    def getDefaultData(cls):
        # No stamp available -> treat the message as sent "now".
        return time.time()

    def parseTag(self, pyGlooxTag):
        stamp = pyGlooxTag.findAttribute('stamp')
        if stamp:
            try:
                # First try the format without fractional seconds,
                # e.g. 2001-01-01T00:00:00Z.
                tm = time.strptime(stamp, '%Y-%m-%dT%H:%M:%SZ')
                # Zero the DST flag before converting to epoch seconds.
                tm = tm[0:8] + (0,)
                sentAt = calendar.timegm(tm)
            except ValueError:
                try:
                    # Retry with fractional seconds,
                    # e.g. 2001-01-01T00:00:00.123Z.
                    dt = datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S.%fZ')
                    sentAt = calendar.timegm(dt.timetuple()) + dt.microsecond / 1000000.0
                except ValueError:
                    # Unparseable stamp: log and fall back to "now".
                    LOG_CURRENT_EXCEPTION()
                    sentAt = self.getDefaultData()
        else:
            sentAt = self.getDefaultData()
        return sentAt
class MessageIDExtension(PyExtension):
    """Wargaming message-id extension; exposes the message 'uuid' attribute."""

    def __init__(self):
        super(MessageIDExtension, self).__init__(_TAG.WG_MESSAGE_ID)
        self.setXmlNs(_NS.WG_MESSAGE_ID)

    @classmethod
    def getDefaultData(cls):
        # No uuid available -> None (the bare `pass` keeps that behavior).
        pass

    def parseTag(self, pyGlooxTag):
        return pyGlooxTag.findAttribute('uuid')


class ChatHistoryQuery(PyExtension):
    """Query element requesting up to `limit` private-history messages
    exchanged with `jid`."""

    def __init__(self, jid, limit):
        super(ChatHistoryQuery, self).__init__(_TAG.QUERY)
        self.setXmlNs(_NS.WG_PRIVATE_HISTORY)
        self.setAttribute('with', str(jid))
        self.setAttribute('limit', limit)
class PrivateHistoryItem(PyExtension):
    """Per-message private-history marker: carries the originating request id
    and whether this is the final message of the requested history."""

    def __init__(self):
        super(PrivateHistoryItem, self).__init__(_TAG.WG_PRIVATE_HISTORY)
        self.setXmlNs(_NS.WG_PRIVATE_HISTORY)

    @classmethod
    def getDefaultData(cls):
        # (requestID, isFinal)
        return ('', False)

    def parseTag(self, pyGlooxTag):
        requestID = pyGlooxTag.findAttribute('request-id')
        isFinal = pyGlooxTag.findAttribute('final')
        if isFinal:
            # The attribute holds a JSON boolean literal ("true"/"false").
            isFinal = json.loads(isFinal)
        else:
            isFinal = False
        return (requestID, isFinal)


class _MucPrivilegesExtension(PyExtension):
    """Multi-user-chat privileges (affiliation/role) attached to a message."""

    def __init__(self, affiliation='', role=''):
        super(_MucPrivilegesExtension, self).__init__(_TAG.WG_MUC_PRIVILEGES)
        self.setAttribute('affiliation', affiliation)
        self.setAttribute('role', role)

    @classmethod
    def getDefaultData(cls):
        # No privileges information -> None.
        pass

    def parseTag(self, pyGlooxTag):
        # Missing attributes default to the XMPP 'none' affiliation/role.
        affiliation = pyGlooxTag.findAttribute('affiliation') or 'none'
        role = pyGlooxTag.findAttribute('role') or 'none'
        return (affiliation, role)
class MessageWgSharedExtension(WgSharedExtension):
    """Shared WG account extension for messages; augments the base account
    info dict with the sender's MUC affiliation and role."""

    def __init__(self, includeNS=True):
        super(MessageWgSharedExtension, self).__init__(includeNS)
        self.setChild(_MucPrivilegesExtension())

    @classmethod
    def getDefaultData(cls):
        return super(MessageWgSharedExtension, cls).getDefaultData()

    def parseTag(self, pyGlooxTag):
        info = super(MessageWgSharedExtension, self).parseTag(pyGlooxTag)
        # Child 0 is the _MucPrivilegesExtension set in __init__.
        # NOTE(review): _MucPrivilegesExtension.getDefaultData() returns None;
        # if _getChildData ever returns that default, this unpack would raise.
        # Presumably _getChildData falls back to parseTag output — confirm.
        affiliation, role = self._getChildData(pyGlooxTag, 0, _MucPrivilegesExtension.getDefaultData())
        info['affiliation'] = affiliation
        info['role'] = role
        return info
class _MessageCustomExtension(PyExtension):
    """Composite <message> extension that parses an incoming stanza into a
    ChatMessage wrapper.

    Child order (relied on by index in parseTag):
    0 ChatStateExtension, 1 MessageWgSharedExtension, 2 DelayExtension,
    3 MessageIDExtension, 4 PrivateHistoryItem.
    """

    def __init__(self, msgType, state=CHAT_STATE.UNDEFINED):
        super(_MessageCustomExtension, self).__init__(_TAG.MESSAGE)
        self.setAttribute('type', msgType)
        self.setChild(ChatStateExtension(state))
        self.setChild(MessageWgSharedExtension(False))
        self.setChild(DelayExtension())
        self.setChild(MessageIDExtension())
        self.setChild(PrivateHistoryItem())

    @classmethod
    def getDefaultData(cls):
        return ChatMessage()

    def parseTag(self, pyGlooxTag):
        message = ChatMessage()
        message.state = self._getChildData(pyGlooxTag, 0, ChatStateExtension.getDefaultData())
        info = self._getChildData(pyGlooxTag, 1, MessageWgSharedExtension.getDefaultData())
        if info:
            # Copy the sender's account details into the message wrapper.
            message.accountDBID = info['dbID']
            message.accountName = info['name']
            message.accountRole = info['role']
            message.accountAffiliation = info['affiliation']
        message.sentAt = self._getChildData(pyGlooxTag, 2, DelayExtension.getDefaultData())
        message.uuid = self._getChildData(pyGlooxTag, 3, MessageIDExtension.getDefaultData())
        message.requestID, message.isFinalInHistory = self._getChildData(pyGlooxTag, 4, PrivateHistoryItem.getDefaultData())
        return message
class ChatMessageHolder(MessageQuery):
    """Outgoing chat message; optionally carries a chat-state extension
    when a non-empty state is given."""

    def __init__(self, msgType, to, msgBody='', state=CHAT_STATE.UNDEFINED):
        ext = ChatStateExtension(state) if state else None
        super(ChatMessageHolder, self).__init__(msgType, to, msgBody, ext)
class MessageHandler(PyHandler):
    """Handler for <message type='...'> stanzas of one specific type."""
    __slots__ = ('_typeAttr',)

    def __init__(self, typeAttr):
        self._typeAttr = typeAttr
        super(MessageHandler, self).__init__(_MessageCustomExtension(self._typeAttr, CHAT_STATE.UNDEFINED))

    def getFilterString(self):
        # e.g. /message[@type='chat']
        return "/{0}[@type='{1}']".format(self._ext.getName(), self._typeAttr)


class ChatMessageHandler(MessageHandler):
    """Handler specialized for one-to-one chat messages."""

    def __init__(self):
        super(ChatMessageHandler, self).__init__(MESSAGE_TYPE_ATTR.CHAT)
class GetChatHistoryQuery(PyQuery):
    """IQ 'get' requesting up to `limit` private-history messages with `jid`."""

    def __init__(self, jid, limit):
        super(GetChatHistoryQuery, self).__init__(IQ_TYPE.GET, ChatHistoryQuery(jid, limit))


class MUCEntryQuery(PresenceQuery):
    """Presence stanza announcing entry into a MUC room (`to` is the room JID)."""

    def __init__(self, to):
        super(MUCEntryQuery, self).__init__(PRESENCE.AVAILABLE, to)


class MUCLeaveQuery(PresenceQuery):
    """Presence stanza announcing departure from a MUC room."""

    def __init__(self, to):
        super(MUCLeaveQuery, self).__init__(PRESENCE.UNAVAILABLE, to)
class OwnerConfigurationForm(PyExtension):
    """MUC owner configuration query wrapping a data form (XEP-0045/XEP-0004)."""

    def __init__(self, fields=None):
        super(OwnerConfigurationForm, self).__init__(_TAG.QUERY)
        self.setXmlNs(_NS.MUC_OWNER)
        self.setChild(DataForm(fields))

    @classmethod
    def getDefaultData(cls):
        return DataForm.getDefaultData()

    def parseTag(self, pyGlooxTag):
        # Delegate parsing to the embedded DataForm (child 0).
        return self._getChildData(pyGlooxTag, 0, DataForm.getDefaultData())
class OwnerConfigurationFormQuery(PyQuery):
    """IQ 'get' fetching the current room configuration form from `to`."""

    def __init__(self, to):
        super(OwnerConfigurationFormQuery, self).__init__(IQ_TYPE.GET, OwnerConfigurationForm(), to)


class OwnerConfigurationFormSet(PyQuery):
    """IQ 'set' submitting a filled room configuration form to `to`."""

    def __init__(self, to, fields):
        super(OwnerConfigurationFormSet, self).__init__(IQ_TYPE.SET, OwnerConfigurationForm(fields), to)


class OwnerConfigurationFormHandler(IQHandler):
    """IQ handler for incoming owner configuration forms."""

    def __init__(self):
        super(OwnerConfigurationFormHandler, self).__init__(OwnerConfigurationForm())
class UserRoomConfigurationFormSet(OwnerConfigurationFormSet):
    """Room configuration submitted when a user creates a room: persistent,
    public, open (not members-only), invites allowed, reboot-surviving.
    Password-protection fields are appended only when a password is given."""

    def __init__(self, to, room, password=''):
        fields = (Field('text-single', 'muc#roomconfig_roomname', room),
                  Field('boolean', 'muc#roomconfig_persistentroom', 1),
                  Field('boolean', 'muc#roomconfig_publicroom', 1),
                  Field('boolean', 'muc#roomconfig_membersonly', 0),
                  Field('boolean', 'muc#roomconfig_allowinvites', 1),
                  Field('boolean', 'muc#roomconfig_survive_reboot', 1))
        if password:
            fields += (Field('boolean', 'muc#roomconfig_passwordprotectedroom', 1), Field('text-single', 'muc#roomconfig_roomsecret', password))
        super(UserRoomConfigurationFormSet, self).__init__(to, fields)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
97d55e2aec24c8c3c273787b6a0bfb6e207c6ee0 | c261f0e98eedb4f0d85e92bd6ab8f4ae47096269 | /lifeservice/schedule117/04美食下载团购糯米/getNuomiOtherCinemaMap.py | 7e6d7d90119847ca9a6a6e964889df38e7707452 | [] | no_license | ShenDezhou/CPP | 24379fe24f3c8588a7859ee586527d5cc6bfbe73 | 933c1e764a6ed2879b26aa548ff67153ca026bf6 | refs/heads/master | 2021-01-11T22:09:24.900695 | 2017-04-05T02:04:07 | 2017-04-05T02:04:07 | 78,928,291 | 0 | 1 | null | null | null | null | GB18030 | Python | false | false | 1,328 | py |
#coding=gb2312
nuomiCinemaMap = dict()
otherCinemaMap = dict()
input = '/fuwu/Merger/Output/movie/cinema_movie_rel.table'
for line in open(input):
segs = line.strip('\n').decode('gb2312', 'ignore').split('\t')
cinemaid, source, ting = segs[1], segs[3], segs[9]
if source.find(u'糯米') != -1:
if cinemaid not in nuomiCinemaMap:
nuomiCinemaMap[cinemaid] = []
if ting not in nuomiCinemaMap[cinemaid]:
nuomiCinemaMap[cinemaid].append(ting)
else:
if cinemaid not in otherCinemaMap:
otherCinemaMap[cinemaid] = []
if ting not in otherCinemaMap[cinemaid]:
otherCinemaMap[cinemaid].append(ting)
# 糯米影院的厅名称是否都被包含
for cinemaid in otherCinemaMap:
if cinemaid not in nuomiCinemaMap:
#print ('#%s\t%s\t%s' % (cinemaid, u'糯米', '\t'.join(nuomiCinemaMap[cinemaid]))).encode('gb2312', 'ignore')
continue
noMatchTingList = []
for ting in nuomiCinemaMap[cinemaid]:
if ting not in otherCinemaMap[cinemaid]:
noMatchTingList.append(ting)
if len(noMatchTingList) == 0:
continue
# 存在不一致的情况
normTing = '\t'.join(otherCinemaMap[cinemaid])
noMatchTing = '\t'.join(noMatchTingList)
print ('%s\t%s\t%s' % (cinemaid, u'非糯米', normTing)).encode('gb2312', 'ignore')
print ('%s\t%s\t%s' % (cinemaid, u'糯米', noMatchTing)).encode('gb2312', 'ignore')
| [
"bangtech@sina.com"
] | bangtech@sina.com |
b6e8f2be226188fbb1defabbcc1d134f8fc8e070 | 3570f2e7b8d5666cbd2d29a4c75965a75699a3e2 | /pyodbc/run_test.py | 1b0460f4bd5adc94625a5a8b380978050e9a9c4a | [] | no_license | ilanschnell/recipes | 7876225db2eb08b21d4d1ab426d40f94650192fd | c946b446a002d55ecffff6ce789cf9dcb57a65a6 | refs/heads/master | 2023-08-19T19:40:17.750037 | 2022-01-21T00:27:38 | 2022-01-21T00:27:38 | 119,077,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | import sys
from os.path import isfile
# Smoke test for pyodbc's fetchdictarray() extension against a local SQLite
# database reached through the platform's SQLite ODBC driver.
print(sys.version)
print(sys.executable)
import pyodbc
print(pyodbc)
# Locate the platform-specific SQLite ODBC driver library.
if sys.platform == 'darwin':
    driver_path = '/Users/ilan/a/envs/py38/lib/libsqlite3odbc.dylib'
elif sys.platform.startswith('linux'):
    driver_path = '/home/osboxes/bin/libsqlite3odbc-0.9996.so'
else:
    # Fail with a clear message instead of a NameError on the check below.
    raise Exception('unsupported platform: %r' % sys.platform)
if not isfile(driver_path):
    raise Exception('no such file: %r' % driver_path)
connect_string = (
    "DRIVER={%s};SERVER=localhost;DATABASE=./test.sqlite;Trusted_connection=yes"
    % driver_path
)
cnxn = pyodbc.connect(connect_string)
cursor = cnxn.cursor()
# Best-effort cleanup from a previous run; the table may not exist yet.
try:
    cursor.execute('drop table foo')
except Exception:
    pass
cursor.execute('create table foo (symbol varchar(5), price float)')
N = 1000
for i in range(N):
    cursor.execute("insert into foo (symbol, price) values (?, ?)",
                   (str(i), float(i)))
cursor.execute("commit")
cursor.execute("select * from foo")
# fetchdictarray() returns a dict mapping column name -> array of values.
dictarray = cursor.fetchdictarray()
cursor.close()
for i in range(N):
    assert dictarray['symbol'][i] == str(i)
    # abs() so a value *below* the expected price also fails the check.
    assert abs(dictarray['price'][i] - float(i)) < 1E-10
# tab completion fails in ipython for pyodbc.Cursor
assert pyodbc.Cursor.fetchdictarray.__doc__
print("Done.")
| [
"ilanschnell@gmail.com"
] | ilanschnell@gmail.com |
05fd2afde8a2efa035b5c2ee861b1f0e9b62fc97 | 8bdf78e902a02e3bd175e759fc98fd37277247af | /youtube_dl/extractor/mangomolo.py | 2db503f2b13dc8499a6f665ef97d3e09cfcdf35b | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | oxidius2/youtube-dl | 191f5bde4992313308d2ab010cdb82ecd0d1b654 | 30d9e20938fa91ece09c376b67030647215d48df | refs/heads/master | 2017-03-20T13:01:36.106539 | 2016-09-16T21:06:55 | 2016-09-16T21:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | # coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
int_or_none,
)
class MangomoloBaseIE(InfoExtractor):
    """Shared extraction logic for Mangomolo embed pages.

    Subclasses must define _IS_LIVE and may override _get_real_id() to
    decode the id captured from the URL.
    """

    def _get_real_id(self, page_id):
        # By default the id taken from the URL is already the real one.
        return page_id

    def _real_extract(self, url):
        video_id = self._get_real_id(self._match_id(url))
        webpage = self._download_webpage(url, video_id)
        hidden_inputs = self._hidden_inputs(webpage)
        # Live streams use the plain m3u8 protocol, VOD the native variant.
        entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native'
        format_url = self._html_search_regex(
            [
                r'file\s*:\s*"(https?://[^"]+?/playlist.m3u8)',
                r'<a[^>]+href="(rtsp://[^"]+)"'
            ], webpage, 'format url')
        formats = self._extract_wowza_formats(
            format_url, video_id, entry_protocol, ['smil'])
        self._sort_formats(formats)
        info = {
            'id': video_id,
            'title': self._live_title(video_id) if self._IS_LIVE else video_id,
            'uploader_id': hidden_inputs.get('userid'),
            'duration': int_or_none(hidden_inputs.get('duration')),
            'is_live': self._IS_LIVE,
            'formats': formats,
        }
        return info
class MangomoloVideoIE(MangomoloBaseIE):
    # Video-on-demand embeds: numeric id in the URL, not a live stream.
    IE_NAME = 'mangomolo:video'
    _VALID_URL = r'https?://admin\.mangomolo.com/analytics/index\.php/customers/embed/video\?.*?\bid=(?P<id>\d+)'
    _IS_LIVE = False
class MangomoloLiveIE(MangomoloBaseIE):
    """Live-channel embeds; the channel id in the URL is URL-quoted,
    base64-encoded text."""
    IE_NAME = 'mangomolo:live'
    _VALID_URL = r'https?://admin\.mangomolo.com/analytics/index\.php/customers/embed/index\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)'
    _IS_LIVE = True

    def _get_real_id(self, page_id):
        # Undo the URL quoting first, then the base64 encoding.
        unquoted = compat_urllib_parse_unquote(page_id)
        decoded = base64.b64decode(unquoted.encode())
        return decoded.decode()
| [
"remitamine@gmail.com"
] | remitamine@gmail.com |
a31be73325befa7634569a9b289ebac7e238c219 | f4bdd0d988ed63ed314f5703abd3543cded9f49e | /Amazon/Reviews & Big Data Analytics/Amazon_LDA.py | 32ae2a94f52d0aa94ba4eaf229433dab27abf4ff | [] | no_license | jessicakaye/Python-Projects | 643f0e1808163187cfe3db7d5adff800e2e3a98c | 8365e84f110b53df2bd54604f2206e9bc1f09617 | refs/heads/master | 2022-05-02T07:37:09.591545 | 2022-03-10T01:28:39 | 2022-03-10T01:28:39 | 253,980,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,244 | py | # Amazon_LDA.py
# 4/28/20
# @jessicakaye
# Used to conduct LDA on the top 10 most reviewed Amazon products in a dataset
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from time import time
# NOTE(review): everything from here down to the wordcloud import duplicates
# the block above; harmless, but the duplicates could be pruned.
from time import time
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from wordcloud import WordCloud
# Show every column when printing DataFrames.
pd.set_option('display.max_columns', None)
# Load the dataset!
# (line-delimited JSON of Amazon reviews; assumes columns such as 'overall',
# 'reviewText', 'asin', 'nps_category' and tokenized 'filtered' -- TODO confirm)
df = pd.read_json('AmazonData_text_processed_full.json', lines = True)
print(df)
print(df.describe())
# Let's drop those duplicates
# A review counts as duplicate when rating/text/time/product/reviewer all match.
df.drop_duplicates(['overall', 'reviewText', 'reviewTime', 'asin', 'reviewerID'], inplace=True)
#plot for all of the products
plt.figure(figsize=(16,10))
ax = sns.countplot(x='asin', data = df, palette = 'Set1', order=df['asin'].value_counts().index)
plt.xlabel('ASIN', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
# Annotate each bar with its absolute review count.
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x()+p.get_width()/2.,
            height + 10,
            '{}'.format(height),
            ha="center")
plt.title("Count of Reviews Per ASIN")
plt.savefig("Count of Reviews Per ASIN.png")
#Distribution of Ratings!
plt.figure()
ax = sns.countplot(x='overall', data=df, palette='Set1', order=df['overall'].value_counts().index)
plt.xlabel('overall', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
# Annotate each bar with its percentage share of all reviews.
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.,
        height + 10,
        '{0:.0%}'.format(height / total),
        ha="center")
plt.title("Count of Reviews Per Rating")
plt.savefig("Count of Reviews Per Rating.png")
# Distribution of NPS Categories!
plt.figure()
ax = sns.countplot(x='nps_category', data=df, palette='Set1', order=df['nps_category'].value_counts().index)
plt.xlabel('nps_category', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.,
        height + 10,
        '{0:.0%}'.format(height / total),
        ha="center")
plt.title("Count of Reviews Per NPS Category")
plt.savefig("Count of Reviews Per NPS Category.png")
# Let's create a wordcloud!
wordcloud = WordCloud(background_color="white", max_words=5000, contour_width=3, contour_color='steelblue')
wordcloud.generate(df['filtered'].to_string())
# plot the wordcloud!
plt.figure(figsize=(16,10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.savefig('wordcloudoftop10products')
# Let's optimize our df and try using CountVectorizer
# I already have these columns from text processing in Spark, but I want to try the following in sklearn
amazon_df = df.drop(labels=['raw_features', 'features'], axis=1)
# Let's create a list of all of the different ASINs
list_asins = amazon_df.asin.unique()
sns.set_style('whitegrid')
def plot_10_most_common_words(asin, count_data, count_vectorizer):
words = count_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in count_data:
total_counts += t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[0:10]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(2, figsize=(15, 15 / 1.6180))
plt.subplot(title=f'10 most common words for {asin}')
sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
sns.barplot(x_pos, counts, palette='husl')
plt.xticks(x_pos, words, rotation=90)
plt.xlabel('words')
plt.ylabel('counts')
plt.tight_layout()
plt.savefig(f'{asin}_topwords.png')
def print_top_words(model, feature_names, n_top_words):
    """Print the n_top_words highest-weighted terms of each topic in *model*,
    one 'Topic #i: ...' line per topic, followed by a blank line."""
    for topic_idx, topic in enumerate(model.components_):
        # argsort ascending, then take the last n_top_words in reverse order.
        top_terms = (feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1])
        print("Topic #%d: " % topic_idx + " ".join(top_terms))
    print()
def topics_words(model, feature_names, n_top_words):
    """Return a DataFrame with one row per LDA topic: its integer id
    ('topicID') and the list of its n_top_words strongest terms ('words')."""
    topic_ids = []
    topic_terms = []
    for topic_idx, topic in enumerate(model.components_):
        topic_ids.append(topic_idx)
        topic_terms.append([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])
    return pd.DataFrame(list(zip(topic_ids, topic_terms)), columns=['topicID', 'words'])
# LDA hyper-parameters: words reported per topic, topics fitted per product.
n_top_words = 6
n_components = 7
# Accumulators filled across all products.
all_words_and_topics = pd.DataFrame(columns=['topicID', 'words', 'asin', 'num documents'])
all_asins_df = pd.DataFrame(columns=list(amazon_df.columns.values))
# We want to find the top words per product. Let's create a loop.
for asin in list_asins:
    asin_df = amazon_df.loc[amazon_df['asin'] == str(asin)]
    asin_df.reset_index(inplace=True)
    # Initialise the count vectorizer with the English stop words
    # We are going to use the raw term count for LDA
    print("Extracting tf features for LDA...")
    stop_words = ENGLISH_STOP_WORDS
    # NOTE(review): with a callable analyzer sklearn ignores stop_words='english';
    # the lambda below is what actually filters stop words from the token lists.
    cv = CountVectorizer(stop_words='english', analyzer=lambda x:[w for w in x if w not in stop_words])
    # Fit and transform the processed titles
    t0 = time()
    count_vector = cv.fit_transform(asin_df['filtered'])
    print("done in %0.3fs." % (time() - t0))
    print()
    # Materialize the sparse data
    data_dense = count_vector.todense()
    # Compute Sparsicity = Percentage of Non-Zero cells
    print("Sparsicity: ", ((data_dense > 0).sum() / data_dense.size) * 100, "%")
    # Visualise the 10 most common words
    plot_10_most_common_words(asin, count_vector, cv)
    print("Fitting LDA models with tf features...")
    lda = LatentDirichletAllocation(n_components=n_components, learning_method='online')
    t0 = time()
    # This is the Document - Topic Matrix
    lda_output = lda.fit_transform(count_vector)
    print("done in %0.3fs." % (time() - t0))
    print("\nTopics in LDA model:")
    tf_feature_names = cv.get_feature_names()
    print_top_words(lda, tf_feature_names, n_top_words)
    # Log Likelihood: Higher the better
    print("Log Likelihood: ", lda.score(count_vector))
    # Perplexity: Lower the better. Perplexity = exp(-1. * log-likelihood per word)
    print("Perplexity: ", lda.perplexity(count_vector))
    # See model parameters
    # print(lda.get_params())
    # column names
    topicnames = ["Topic" + str(i) for i in range(lda.n_components)]
    # index names
    docnames = ["Doc" + str(i) for i in range(asin_df.shape[0])]
    # Make the pandas dataframe
    df_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames)#, index=docnames)
    # Get dominant topic for each document
    dominant_topic = np.argmax(df_document_topic.values, axis=1)
    df_document_topic['dominant_topic_weight'] = np.amax(df_document_topic, axis=1)
    df_document_topic['dominant_topic'] = dominant_topic
    print(df_document_topic)
    # Attach the dominant topic (and its weight) back onto the review rows.
    asin_df = asin_df.join(df_document_topic['dominant_topic'].astype('int'), how = 'inner')
    asin_df = asin_df.join(df_document_topic['dominant_topic_weight'], how='inner')
    all_asins_df = pd.concat([all_asins_df, asin_df])
    #What is the topic distribution across documents?
    df_topic_distribution = df_document_topic['dominant_topic'].value_counts().reset_index(name="num documents")
    df_topic_distribution.columns = ['topicID', 'num documents']
    print(df_topic_distribution)
    asintw = topics_words(lda, tf_feature_names, n_top_words)
    asintw['asin'] = asin
    asintw = asintw.merge(df_topic_distribution, on = "topicID", how = "inner")
    all_words_and_topics = pd.concat([all_words_and_topics, asintw])
print(all_words_and_topics)
print(all_asins_df)
# Persist the combined per-review and per-topic results.
all_asins_df.to_csv('all_asins_and_indices.csv')
all_words_and_topics.to_csv('all_words_and_topics.csv')
#
#
# # plt.show()
| [
"noreply@github.com"
] | jessicakaye.noreply@github.com |
897350387fa941830a98c5edbca3834b1d382a04 | 77e0adf27f8ce8ada31937045d31d063f6661434 | /noteapp/serializers.py | d79624bd60e6d29c39a0ea99f8d0c5c9c37ab2a7 | [] | no_license | naveenijeri/urbanstop_drf | f84185d6e1ba043e96535e67429d1cf421430eee | 33dfe71507cc02d85e5e1b1e19efc40eed24c4f4 | refs/heads/master | 2021-09-23T09:22:58.472057 | 2020-03-14T08:31:26 | 2020-03-14T08:31:26 | 247,235,337 | 0 | 0 | null | 2021-09-22T18:43:36 | 2020-03-14T07:56:29 | Python | UTF-8 | Python | false | false | 1,354 | py | from .models import NoteModel,UserModel
from rest_framework import serializers
class UserModelSerializer(serializers.ModelSerializer):
    # Minimal nested representation of UserModel: only the username.
    class Meta:
        model=UserModel
        fields=('username',)
class NoteModelSerializer(serializers.ModelSerializer):
    """Note serializer with a writable nested list of users ('user_note')."""
    user_note = UserModelSerializer(many=True)

    class Meta:
        model=NoteModel
        fields=('id','note_text','created_date','updated_date','user_note')

    def create(self, validated_data):
        """Create the note, then one UserModel row per nested user entry."""
        users_data = validated_data.pop('user_note')
        note = NoteModel.objects.create(**validated_data)
        # Distinct names: the original shadowed the list it was iterating.
        for user_item in users_data:
            UserModel.objects.create(notemodel=note, **user_item)
        return note

    def update(self, instance, validated_data):
        """Update the note's scalar fields, then sync nested users pairwise.

        zip() stops at the shorter sequence, so a payload with more user
        entries than existing rows no longer raises IndexError (the extra
        entries are ignored instead of crashing mid-update).
        """
        users_data = validated_data.pop('user_note')
        users = list(instance.user_note.all())
        instance.note_text = validated_data.get('note_text', instance.note_text)
        instance.created_date = validated_data.get('created_date', instance.created_date)
        instance.updated_date = validated_data.get('updated_date', instance.updated_date)
        instance.save()
        for user, user_item in zip(users, users_data):
            user.username = user_item.get('username', user.username)
            user.save()
        return instance
| [
"naveen.ijeri123@gmail.com"
] | naveen.ijeri123@gmail.com |
2cbf9ce5648b670ee81e72a542610d78690a54f4 | 1097ed333a4000634e68a590ee6ffc6129ae61e3 | /written_examination/matrix8.py | 017cb25ae0dcc0f546bd9b3cf05825723bb344a7 | [
"MIT"
] | permissive | AutuanLiu/Code-Storm2019 | 1bbe890c7ca0d033c32348173bfebba612623a90 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | refs/heads/master | 2020-04-23T07:03:08.975232 | 2019-10-24T08:56:26 | 2019-10-24T08:56:26 | 170,995,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | def getSum(i, j, n, m, maps): # [i, j]单阵入口,[n,m]矩阵维度数,maps矩阵
queue, sump, maps[i][j] = [[i, j]], maps[i][j], 0 # 初始化队列
while queue:
x, y = queue[0][0], queue[0][1] # 获取队列头元素
for dx, dy in zip((-1, -1, 0, 1, 1, 1, 0, -1), (0, 1, 1, 1, 0, -1, -1, -1)): # 8个方向
nx, ny = x + dx, y + dy
if -1 < nx < n and -1 < ny < m and maps[nx][ny] != 0:
queue.append([nx, ny]) # 入队
sump += maps[nx][ny] # 累计兵力
maps[nx][ny] = 0 # 累计过的单个区域兵力为0
del queue[0] # 出队
return sump # 返回单阵的兵力总和
if __name__ == '__main__':
    # 20x20 battlefield grid; each non-zero cell is the troop strength of one
    # occupied square. 8-connected runs of non-zero cells form a "formation"
    # whose total strength getSum() accumulates (zeroing visited cells).
    maps = [[34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 30], [0, 23, 10, 5, 5, 0, 0, 0, 5, 5, 5, 5, 5, 0, 0, 0, 30, 0, 40, 0],
            [0, 9, 0, 0, 5, 0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 30, 0, 0], [0, 8, 7, 7, 0, 5, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 7, 0, 9, 0],
            [0, 9, 0, 0, 5, 0, 5, 0, 0, 12, 12, 0, 0, 0, 0, 10, 0, 0, 0, 9], [0, 0, 0, 0, 5, 0, 0, 5, 0, 12, 12, 0, 0, 5, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 0, 5, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0], [40, 30, 3, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 10, 0],
            [0, 0, 20, 0, 0, 6, 6, 0, 0, 0, 0, 0, 0, 0, 5, 6, 5, 10, 10, 0], [40, 30, 3, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 10, 0],
            [0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 17, 0, 0, 6, 5, 7, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0], [0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0],
            [0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0], [0, 30, 0, 7, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 10, 0, 50],
            [0, 40, 7, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 50, 0], [43, 30, 25, 10, 50, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0, 0, 0, 50, 0, 0]]
    n, m = 20, 20 # grid dimensions (rows, columns)
    army = []
    for i in range(20):
        for j in range(20):
            if maps[i][j] != 0:
                army.append(getSum(i, j, n, m, maps)) # total strength of the formation containing (i, j)
    print('每个单阵兵力和:', army)
    print('单阵兵力最多为:', max(army))
    print('单阵兵力最少为:', min(army))
| [
"autuanliu@163.com"
] | autuanliu@163.com |
b11a8a7651e0f8dc115584ee90faf956ed6a1f89 | 997449072baa8e50a143ae1152fd4fa83c8e1068 | /devel/.private/rrtplanner/lib/python2.7/dist-packages/rrtplanner/msg/_rrtResult.py | 7672fe8883172dee48ff70b467d5d95c919942d0 | [] | no_license | idrissahil/catkin_ws | c547a6f7be812cc0bb1a93042026f746d34e7e70 | b5d8b60c882b60bb19b8d4529257ca513b8256e3 | refs/heads/master | 2022-01-24T12:51:28.038620 | 2019-06-02T16:05:45 | 2019-06-02T16:05:45 | 175,048,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,030 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rrtplanner/rrtResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import nav_msgs.msg
import std_msgs.msg
class rrtResult(genpy.Message):
  # Autogenerated by genpy from rrtplanner/rrtResult.msg -- regenerate from
  # the .msg/.action definition rather than editing this class by hand.
  _md5sum = "58d6f138c7de7ef47c75d4b7e5df5472"
  _type = "rrtplanner/rrtResult"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Define the result
nav_msgs/Path path
================================================================================
MSG: nav_msgs/Path
#An array of poses that represents a Path for a robot to follow
Header header
geometry_msgs/PoseStamped[] poses
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
  # Message fields (and their ROS types) in serialization order.
  __slots__ = ['path']
  _slot_types = ['nav_msgs/Path']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       path
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(rrtResult, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.path is None:
        self.path = nav_msgs.msg.Path()
    else:
      self.path = nav_msgs.msg.Path()
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
      _x = self.path.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # Variable-length pose array: uint32 count followed by each PoseStamped.
      length = len(self.path.poses)
      buff.write(_struct_I.pack(length))
      for val1 in self.path.poses:
        _v1 = val1.header
        buff.write(_get_struct_I().pack(_v1.seq))
        _v2 = _v1.stamp
        _x = _v2
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v1.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _v3 = val1.pose
        _v4 = _v3.position
        _x = _v4
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v5 = _v3.orientation
        _x = _v5
        buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: the parameter name 'str' shadows the builtin (genpy convention).
    try:
      if self.path is None:
        self.path = nav_msgs.msg.Path()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.path.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.path.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.path.poses = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.PoseStamped()
        _v6 = val1.header
        start = end
        end += 4
        (_v6.seq,) = _get_struct_I().unpack(str[start:end])
        _v7 = _v6.stamp
        _x = _v7
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          _v6.frame_id = str[start:end].decode('utf-8')
        else:
          _v6.frame_id = str[start:end]
        _v8 = val1.pose
        _v9 = _v8.position
        _x = _v9
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
        _v10 = _v8.orientation
        _x = _v10
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
        self.path.poses.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Same wire format as serialize(); this message has no array fields that
    # use numpy, so the 'numpy' argument is not referenced in the body.
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
      _x = self.path.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.path.poses)
      buff.write(_struct_I.pack(length))
      for val1 in self.path.poses:
        _v11 = val1.header
        buff.write(_get_struct_I().pack(_v11.seq))
        _v12 = _v11.stamp
        _x = _v12
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v11.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _v13 = val1.pose
        _v14 = _v13.position
        _x = _v14
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v15 = _v13.orientation
        _x = _v15
        buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Mirrors deserialize(); the 'numpy' argument is not referenced in the body.
    try:
      if self.path is None:
        self.path = nav_msgs.msg.Path()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.path.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.path.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.path.poses = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.PoseStamped()
        _v16 = val1.header
        start = end
        end += 4
        (_v16.seq,) = _get_struct_I().unpack(str[start:end])
        _v17 = _v16.stamp
        _x = _v17
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          _v16.frame_id = str[start:end].decode('utf-8')
        else:
          _v16.frame_id = str[start:end]
        _v18 = val1.pose
        _v19 = _v18.position
        _x = _v19
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
        _v20 = _v18.orientation
        _x = _v20
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
        self.path.poses.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Lazily-created struct.Struct packers shared by the (de)serialize methods
# above; each is built on first use and cached in a module-level global.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_4d = None
def _get_struct_4d():
    # Four little-endian float64 values (e.g. a quaternion).
    global _struct_4d
    if _struct_4d is None:
        _struct_4d = struct.Struct("<4d")
    return _struct_4d
_struct_3I = None
def _get_struct_3I():
    # Three little-endian uint32 values (header seq + stamp secs/nsecs).
    global _struct_3I
    if _struct_3I is None:
        _struct_3I = struct.Struct("<3I")
    return _struct_3I
_struct_2I = None
def _get_struct_2I():
    # Two little-endian uint32 values (stamp secs/nsecs).
    global _struct_2I
    if _struct_2I is None:
        _struct_2I = struct.Struct("<2I")
    return _struct_2I
_struct_3d = None
def _get_struct_3d():
    # Three little-endian float64 values (a 3D point).
    global _struct_3d
    if _struct_3d is None:
        _struct_3d = struct.Struct("<3d")
    return _struct_3d
| [
"idrissahil3@gmail.com"
] | idrissahil3@gmail.com |
78e09543d9fe810959a5f9c88d88fc9890e0a11d | 228a253a698fd8ceb0af4e63187ee201004aca4e | /IotServer.py | d6306058174631582c8a438fc2b709bd31389722 | [] | no_license | mtpajula/iotLocalNetworkServer | 4b16a5d93f5dcaab98afaec1e37a317d35bb4649 | aa3c0187dff14c4bf568afa554f82cf13a2500f5 | refs/heads/master | 2021-05-11T14:34:57.921236 | 2018-02-23T17:40:29 | 2018-02-23T17:40:29 | 117,707,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,580 | py | # -*- coding: utf-8 -*-
from IotServerDevice import *
from time import sleep
import copy
import sys
class IotServer:
    """Console front-end for the local IoT network: loads the device tree,
    dispatches commands/schedules to devices and persists their messages
    and statuses through the server device's db object."""
    wait = 10

    def __init__(self):
        # The server's own device object; also the gateway to db and devices.
        self.d = IotServerDevice()

    def printer(self, category, message):
        """Print *message* formatted by *category*: "t1"/"t2" are headings
        (with '='/'-' underline), "p" a plain line, "error" an error line.
        Unknown categories print nothing."""
        if category == "p":
            print(message)
        elif category == "error":
            print(" ! ERROR: " + message)
        elif category == "t1":
            print("\n")
            print(message)
            print("======================================")
        elif category == "t2":
            print("\n")
            print(message)
            print("--------------------------------------")

    def send_command(self, device, command):
        """Terminal command mode: deliver *command* to every device (including
        the server itself) whose name equals *device*, then persist messages."""
        self.printer("p", "Run in terminal command mode")
        self.d.collect_iot(True)
        for target in list(self.d.c.devices) + [self.d]:
            if target.name == device:
                target.receive_command('command', command)
        self.send_message()

    def close_db(self):
        """Close the underlying database connection."""
        self.d.db.con.conn.close()

    def send_message(self):
        """Persist the pending messages of all devices and of the server."""
        self.printer("t1", "Send messages to db")
        self.d.db.set_messages(self.d.c.devices)
        self.d.db.set_messages([self.d])

    def run(self, schedule=False):
        """Normal mode: load devices, apply queued commands, store messages.
        (*schedule* is accepted for backward compatibility but unused.)"""
        self.printer("p", "Run in normal mode")
        self.d.collect_iot(True)
        self.printer("t1", "Get commands")
        self.d.db.get_commands(self.d.c.devices)
        self.d.db.get_commands([self.d])
        self.send_message()

    def runSchedule(self):
        """Schedule mode: apply scheduled commands first, then queued ones."""
        self.printer("p", "Run in schedule mode")
        self.d.collect_iot(True)
        self.printer("t1", "Get scheduled commands")
        self.d.db.get_schedules(self.d.c.devices)
        self.d.db.get_schedules([self.d])
        self.printer("t1", "Get commands")
        self.d.db.get_commands(self.d.c.devices)
        self.d.db.get_commands([self.d])
        self.send_message()

    def runStatus(self):
        """Status mode: snapshot every device's status into the db."""
        self.printer("p", "Run in status mode")
        self.d.collect_iot(True)
        self.printer("t1", "Save statuses to db")
        self.d.db.set_status(self.d.c.devices)
        self.d.db.set_status([self.d])
        self.send_message()
if __name__ == '__main__':
    # CLI entry point. Modes (chosen from sys.argv):
    #   schedule                       -> runSchedule()
    #   status                         -> runStatus()
    #   device=<name> command=<cmd>    -> send_command(name, cmd)
    #   (no arguments)                 -> run()
    iot = IotServer()
    if "schedule" in sys.argv:
        iot.runSchedule()
        iot.close_db()
        sys.exit()
    if "status" in sys.argv:
        iot.runStatus()
        iot.close_db()
        sys.exit()
    # Collect the optional command/device key=value arguments.
    c = None
    d = None
    for ar in sys.argv:
        if "command=" in ar:
            arp = ar.split("=")
            c = arp[1]
        elif "device=" in ar:
            arp = ar.split("=")
            d = arp[1]
    # NOTE(review): prefer 'is not None' over '!= None'.
    if c != None and d != None:
        iot.send_command(d,c)
        iot.close_db()
        sys.exit()
    iot.run()
    iot.close_db()
| [
"mtpajula@gmail.com"
] | mtpajula@gmail.com |
a497ba217122e7b18367fa57adc6a0602064311d | eb333acea85364d39f2811ae368dd35bc84392f0 | /exts/counting.py | 0b1623741328e7c6745febe4359c2f8f373a044b | [] | no_license | blueeidk/vendetta | 7312b37e469ba2abbb46be07ba84365086f0cac3 | e697dd3ebc224d50399dd8c4c0ee1d8f67085151 | refs/heads/master | 2023-04-12T19:22:13.009886 | 2021-05-10T20:29:42 | 2021-05-10T20:29:42 | 366,365,871 | 0 | 0 | null | 2021-05-11T12:01:11 | 2021-05-11T11:58:46 | null | UTF-8 | Python | false | false | 1,939 | py | import discord
from discord.ext import commands, tasks
from discord import Webhook, AsyncWebhookAdapter
class Counting(commands.Cog):
    """Counting-channel cog: every user message in the configured channel is
    deleted and re-posted through a webhook under the author's identity, and
    must contain the next integer; a wrong message resets the count to 0."""

    def __init__(self, bot):
        self.bot = bot
        # -1 is a sentinel meaning "current number not fetched yet".
        self.current_num = -1
        self.fetch_num.start()

    def cog_unload(self):
        self.fetch_num.cancel()

    async def _repost(self, message):
        """Mirror *message* into the channel via the counting webhook so it
        appears under the original author's name and avatar (the original
        message has already been deleted)."""
        webhook = Webhook.from_url(self.bot.config["counting_webhookurl"],
                                   adapter=AsyncWebhookAdapter(self.bot.session))
        await webhook.send(message.content, username=message.author.name,
                           avatar_url=message.author.avatar_url)

    @tasks.loop(seconds=60*1)
    async def fetch_num(self):
        """Periodically resync current_num from the newest numeric message."""
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel(self.bot.config["counting_channel"])
        async for message in channel.history(limit=100):
            try:
                self.current_num = int(message.content)
                break
            except ValueError:
                continue
        if self.current_num == -1:
            # No numeric message among the last 100: start from scratch.
            self.current_num = 0

    @commands.Cog.listener()
    async def on_message(self, message):
        # Only human messages in the counting channel are handled.
        if message.channel.id != self.bot.config["counting_channel"] or message.author.bot:
            return
        await message.delete()
        try:
            if int(message.content) != self.current_num + 1:
                raise ValueError
        except ValueError:
            # Wrong number (or not a number): mirror it, then reset the count.
            await self._repost(message)
            self.current_num = 0
            await message.channel.send("Looks like someone made a mistake! Lets start again:")
            await message.channel.send("0")
            return
        await self._repost(message)
        self.current_num += 1
def setup(bot):
bot.add_cog(Counting(bot)) | [
"niteblock@gmail.com"
] | niteblock@gmail.com |
e2a2d639b617529303a24cb365818a069f9e4628 | 423e396e226494c34f99851cc050d929f3f144c8 | /posts/admin.py | cb3ff4597adc8ff8a87e027e420a3d4c0b3387da | [] | no_license | Marihuana-Kox/hw05_final | 1ff1a34cdcb9d66fe715ffbf8d9f5fb0d0ca2820 | 77a20ac2571fec13b979e763859de6f2bce43537 | refs/heads/master | 2022-12-09T13:53:21.195711 | 2020-03-10T17:45:21 | 2020-03-10T17:45:21 | 243,992,895 | 0 | 0 | null | 2022-12-08T07:24:27 | 2020-02-29T15:27:50 | Python | UTF-8 | Python | false | false | 1,129 | py | from django.contrib import admin
from .models import Post, Group, Comment
class PostAdmin(admin.ModelAdmin):
    # Columns displayed in the admin changelist.
    list_display = ("pk", "text", "pub_date", "author")
    # Enable full-text search over the post body.
    search_fields = ("text",)
    # Sidebar filters on publication date and author.
    list_filter = ("pub_date", "author")
    # Placeholder shown in any column whose value is empty ('-пусто-' = '-empty-').
    empty_value_display = '-пусто-'
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for the Comment model."""
    # Columns shown in the admin changelist.
    list_display = ("pk", "text", "author", "created")
    # Enable the search box over the comment text.
    search_fields = ("text",)
    # Sidebar filters by creation date and author.
    list_filter = ("created", "author")
# Register Post with its custom admin class as the configuration source.
admin.site.register(Post, PostAdmin)
# Group uses the default ModelAdmin.
admin.site.register(Group)
admin.site.register(Comment, CommentAdmin)
| [
"yakuhs@yandex.ru"
] | yakuhs@yandex.ru |
f4fb165252962fe02564d44fc8d8a6cb9eaef1e9 | c591f5676468a7447f0e4f104c4889debb35c051 | /resources/idc/__init__.py | 4a6431ad2c6890dd3d7348b37981f6a9a2f2b983 | [] | no_license | zhagyilig/Adahome | 3f3bc1b664bd65964b8befa78405c07da3c8a228 | 76f08be7c21e90bb58803aa1c11be59f66332f42 | refs/heads/dev | 2022-12-12T11:51:30.341859 | 2019-07-10T04:22:12 | 2019-07-10T04:22:12 | 149,948,322 | 2 | 4 | null | 2022-12-08T01:01:36 | 2018-09-23T04:39:23 | HTML | UTF-8 | Python | false | false | 3,671 | py | # coding=utf-8
# author: zhangyiling
from django.shortcuts import render
from django.views.generic import TemplateView, ListView
from django.contrib.auth.mixins import LoginRequiredMixin # 登陆验证
from django.shortcuts import redirect # 页面跳转
from django.shortcuts import reverse # 反转解析url的'name='
from django.http import HttpResponse
from resources.models import Idc
import json
from resources.forms import CreateIdcForm
'''
1. 添加idc, 使用模版视图
'''
class AddidcTemView(LoginRequiredMixin, TemplateView):
    """View for adding an IDC record (login required).

    GET renders the add-IDC template; POST validates the submitted data with
    ``CreateIdcForm`` and persists a new ``Idc`` row, redirecting to the
    success page or to the error page with a JSON-encoded message.
    """
    template_name = 'resources/idc/add_idc.html'

    def post(self, request):
        """Validate the add-IDC form and create the Idc record.

        On validation failure, redirect to the error page with the form's
        errors serialized as JSON; on save failure, redirect with the
        exception args; on success, redirect to the IDC list success page.
        """
        # Bind the submitted data so Django form validation can run.
        idcform = CreateIdcForm(request.POST)
        if idcform.is_valid():
            # cleaned_data maps directly onto the Idc model fields.
            idc = Idc(**idcform.cleaned_data)
            try:
                idc.save()
                return redirect('success', next='idc_list')
            except Exception as e:
                # Database errors (e.g. uniqueness violations) are surfaced
                # on the error page rather than raised to the user.
                return redirect('error', next='idc_list', msg=e.args)
        else:
            # errors.as_json() keeps field names; ensure_ascii=False
            # preserves non-ASCII validation messages in the redirect.
            error_msg = json.dumps(json.loads(idcform.errors.as_json()), ensure_ascii=False)
            return redirect('error', next='idc_list', msg=error_msg)
'''
2.idc 详细信息列表, 使用ListView
'''
class IdcListView(LoginRequiredMixin, ListView):
    """Paginated detail listing of Idc records (login required)."""
    template_name = 'resources/idc/idc_list.html'
    model = Idc
    # Entries per page (value is 10; the original comment claiming 5 was stale).
    paginate_by = 10
    # Order the listing by primary key.
    ordering = 'id'
| [
"YilingZhang@YilingZhang.local"
] | YilingZhang@YilingZhang.local |
ada7809ed008445486cb53ed74ffb2f3f533ab06 | c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171 | /daily-coding-problems/problem429.py | f131f4e79b05103324b498c75f6d6f5240e45cd3 | [] | no_license | carlhinderer/python-exercises | c8367517fdf835fa1117f96dbfee3dccc596afa6 | 4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7 | refs/heads/master | 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 | Python | UTF-8 | Python | false | false | 533 | py | # Problem 429
# Medium
# Asked by Stitch Fix
#
# Pascal's triangle is a triangular array of integers constructed with the
# following formula:
#
# The first row consists of the number 1.
#
# For each subsequent row, each element is the sum of the numbers directly
# above it, on either side.
#
# For example, here are the first few rows:
#
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
#
# Given an input k, return the kth row of Pascal's triangle.
#
# Bonus: Can you do this using only O(k) space?
# | [
"carl.hinderer4@gmail.com"
] | carl.hinderer4@gmail.com |
254a54f04d7e2527304887a3982a7456e97068b4 | a088c5e4c4c2e6c722ba2df47c35f4f98d540412 | /eduzen_bot/plugins/messages/inline.py | 3469090624de031336b06b61a3e51716ad9cbd40 | [] | no_license | mikael85/bot | c884602363dba9efb716940981494987fa37e3d3 | 86751cf57061ae317804cfc19806ebb15d9ac8b4 | refs/heads/master | 2020-11-30T02:15:42.221636 | 2019-08-24T16:39:01 | 2019-08-24T16:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | import logging
from uuid import uuid4
from telegram import InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.utils.helpers import escape_markdown
logger = logging.getLogger()
def code_markdown(bot, update):
    """Answer an inline query with four renderings of the query text:
    a fenced code block, upper-case, bold, and italic.

    Does nothing when the query string is empty.
    """
    query = update.inline_query.query
    if not query:
        return

    def _article(title, content):
        # Each inline result needs a unique id; content is pre-rendered.
        return InlineQueryResultArticle(
            id=uuid4(), title=title, input_message_content=content
        )

    results = [
        _article(
            "code",
            InputTextMessageContent(f"```\n{query}\n```", parse_mode=ParseMode.MARKDOWN),
        ),
        _article("Caps", InputTextMessageContent(query.upper())),
        _article(
            "Bold",
            InputTextMessageContent(
                f"*{escape_markdown(query)}*", parse_mode=ParseMode.MARKDOWN
            ),
        ),
        _article(
            "Italic",
            InputTextMessageContent(
                f"_{escape_markdown(query)}_", parse_mode=ParseMode.MARKDOWN
            ),
        ),
    ]
    bot.answer_inline_query(update.inline_query.id, results)
| [
"eduardo.a.enriquez@gmail.com"
] | eduardo.a.enriquez@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.