text stringlengths 8 6.05M |
|---|
"""
Consider the following algorithm to generate a sequence of numbers.
Start with an integer n. If n is even, divide by 2. If n is odd,
multiply by 3 and add 1. Repeat this process with the new value of n,
terminating when n = 1.
"""
def three_plus_one(n):
    """Return the Collatz (3n+1) sequence starting at *n*, ending at 1.

    Args:
        n: positive integer starting value.
    Returns:
        list of ints: [n, ..., 1] including both endpoints.
    """
    result = [n]
    while n != 1:
        if n % 2 == 0:
            # Floor division keeps the sequence in ints; the original `/=`
            # silently turned every value after the first into a float.
            n //= 2
        else:
            n = n * 3 + 1
        result.append(n)
    return result
# Demo: print the Collatz sequence starting at 22.
# (Fixed from the Python 2 print statement, which is a SyntaxError in Python 3.)
print(three_plus_one(22))
|
""" Authorize the SMART API.
"""
import itertools
import logging
import time
from urllib import parse
import uuid
from selenium import webdriver
from selenium.common.exceptions import (
NoAlertPresentException,
NoSuchElementException,
StaleElementReferenceException,
TimeoutException,
UnexpectedAlertPresentException,
WebDriverException,
)
from selenium.webdriver.remote.remote_connection import RemoteConnection
from selenium.webdriver.support.expected_conditions import visibility_of
from selenium.webdriver.support.ui import WebDriverWait
AUTHORIZE_TIMEOUT = 15
CONNECTION_TIMEOUT = 60
IMPLICIT_TIMEOUT = 10
VISIBILITY_TIMEOUT = 10
class StepRunner(object):
    """ I know how to run steps!

    Thin wrapper around a Selenium Firefox webdriver that executes
    declarative "step" dicts (locate an element, then send keys, click,
    or run a script).
    """
    def __init__(self, config=None):
        # The webdriver is created lazily by open(); None until then.
        self.browser = None
        self.config = config
        if not self.config:
            self.config = {}
    def open(self):
        """ Connect to the selenium webdriver.

        Returns the new webdriver (also stored on self.browser).
        """
        self.browser = self._browser()
        return self.browser
    def close(self):
        """ Close the virtual browser.
        """
        self.browser.quit()
        return None
    def get(self, url):
        """ Tell the browser to load a URL.
        """
        self.browser.get(url)
    def accept_alerts(self):
        """ Accept any alerts that pop up.

        Loops until switching to an alert raises NoAlertPresentException.
        NOTE(review): switch_to_alert() is deprecated in newer Selenium
        releases in favor of switch_to.alert -- confirm the pinned version.
        """
        while True:
            try:
                alert = self.browser.switch_to_alert()
                alert.accept()
            except NoAlertPresentException:
                break
    def execute_step(self, step):
        """ Execute a provided step.

        A step is a dict with:
          - 'element': CSS selector of the target element (required),
          - exactly one action key: 'send_keys', 'click', 'execute_script',
          - optional 'wait' (seconds to sleep first) and 'optional' flag
            (skip silently when the element is absent/hidden).

        Raises:
            ElementNotFoundException: element missing or never visible.
            AuthorizationException: alert interrupted the wait, or click failed.
            NoStepCommandException: step carries no action key.
        """
        if 'wait' in step:
            time.sleep(step['wait'])
        try:
            elem = self.browser.find_element_by_css_selector(step['element'])
            # Make sure the element is visible before we continue
            wait = WebDriverWait(self.browser, VISIBILITY_TIMEOUT)
            wait.until(visibility_of(elem))
        except (NoSuchElementException, StaleElementReferenceException) as err:
            if step.get('optional'):
                return
            raise ElementNotFoundException(str(err), self.browser)
        except TimeoutException:
            # The element exists but did not become visible in time.
            if not elem.is_displayed() and step.get('optional'):
                return
            elif not elem.is_displayed():
                msg = 'Element is hidden: {0}'.format(step['element'])
                raise ElementNotFoundException(msg, self.browser)
        except AttributeError as err:
            # This happens when an alert pops up during `wait.until`.
            # For some reason, `elem` gets returned as a string containing
            # error message instead of an element.
            self.accept_alerts()
            raise AuthorizationException(str(err), self.browser)
        # Apply the action to the matched element.
        # Only one action can be applied per step.
        if 'send_keys' in step:
            elem.send_keys(step['send_keys'])
        elif 'click' in step:
            try:
                elem.click()
            except WebDriverException as err:
                raise AuthorizationException(str(err), self.browser)
        elif 'execute_script' in step:
            self.browser.execute_script(step['execute_script'])
        else:
            raise NoStepCommandException(step, self.browser)
    def get_query(self, base_url=None):
        """ Get a parsed query from the current URL.
        If base_url is provided, wait until the current URL matches it.

        Returns:
            dict: query parameters mapped to lists of values
            (urllib.parse.parse_qs semantics).
        """
        if base_url:
            # Only return the query if the base_url is what we expect it to be.
            try:
                wait = WebDriverWait(self.browser, AUTHORIZE_TIMEOUT)
                wait.until(CurrentUrlContains(base_url))
            except UnexpectedAlertPresentException as err:
                self.accept_alerts()
                raise AuthorizationException(str(err), self.browser)
            except TimeoutException:
                raise AuthorizationException('Authorization timed out.', self.browser)
        url = parse.urlparse(self.browser.current_url)
        return parse.parse_qs(url.query)
    @property
    def current_url(self):
        """ Return the browser's current URL.
        """
        return self.browser.current_url
    def _browser(self):
        """ Initialize a Firefox webdriver.

        Applies 'preferences' from the config to the Firefox profile and
        sets the connection and implicit-wait timeouts.
        """
        RemoteConnection.set_timeout(CONNECTION_TIMEOUT)
        profile = webdriver.FirefoxProfile()
        preferences = self.config.get('preferences', {})
        for key, value in preferences.items():
            profile.set_preference(key, value)
        driver = webdriver.Firefox(profile)
        # Wait for UI events to complete before failing to find an element.
        driver.implicitly_wait(IMPLICIT_TIMEOUT)
        return driver
class Authorizer(object):
    """ Orchestrate the authorization path.
    Attributes:
        config (dict): The oauth config for this vendor.
        authorize_url (string): The vendor's authorize endpoint.
    Example:
        Implements the context manager methods.
        authorizer = Authorizer()
        with authorizer:
            token = authorizer.authorize()
        Is equivalent to:
        authorizer = Authorizer()
        try:
            authorizer.runner.open()
            token = authorizer.authorize()
        finally:
            authorizer.runner.close()
    """
    # Class-level defaults; real values are assigned in __init__.
    authorize_url = None
    config = None
    runner = None
    _state = None  # OAuth "state" nonce sent with the authorize request
    def __init__(self, config, authorize_url, step_runner=None):
        self.config = config
        self.authorize_url = authorize_url
        self.runner = step_runner
        self.log = logging.getLogger(__name__)
        if not self.runner:
            # Default to a StepRunner driven by the vendor's browser config.
            self.runner = StepRunner(self.config.get('browser', {}))
    def authorize(self):
        """ The actual authorization method.

        Runs the full SMART authorization flow and returns the
        authorization code extracted from the redirect URI.

        Raises:
            Exception: if the webdriver was never opened.
            ValidationErrorException: state/code validation failed.
        """
        if not self.runner.browser:
            raise Exception('Webdriver must be connected first.')
        parameters = self.launch_params
        self.ask_for_authorization(parameters)
        response = self.provide_user_input()
        try:
            self._validate_state(response)
            self._validate_code(response)
        except AssertionError as err:
            raise ValidationErrorException(str(err), self.runner.browser)
        # parse_qs maps each key to a list; validation guarantees one code.
        return response['code'][0]
    def ask_for_authorization(self, parameters):
        """ Ask for authorization.
        Step 1 of the SMART authorization process.
        """
        self.log.info('Ask for authorization')
        # Store the "state" parameter so that we can validate it later
        self._state = parameters['state']
        self.log.info('STATE: %s', self._state)
        authorize_url = self.authorize_url + '?' + parse.urlencode(parameters)
        self.log.info('AUTHORIZE URL: %s', authorize_url)
        # in some cases, we might need to rewrite the authorize URL that comes
        # from the conformance statement, such as when all the components are
        # interacting through the docker network
        authorize_url_rewrite = self.config.get('authorize_url_rewrite')
        if authorize_url_rewrite:
            authorize_url = authorize_url.replace(
                authorize_url_rewrite['from_host'],
                authorize_url_rewrite['to_host']
            )
            self.log.info('Rewriting authorize URL to: %s', authorize_url)
        self.runner.get(authorize_url)
    def provide_user_input(self):
        """ Provide end-user input to EHR.
        Step 2 of the SMART authorization process. Usually this would include
        logging in and clicking an "authorize" button.

        Returns:
            dict: parsed query parameters from the redirect URI.
        Raises:
            ReturnedErrorException: the redirect carried an "error" param.
        """
        self.log.info('Provide user input')
        steps = itertools.chain(self.config.get('sign_in_steps', []),
                                self.config.get('authorize_steps', []))
        for step in steps:
            self.runner.execute_step(step)
        # HTTPS is the recommended protocol, but in development we don't have
        # a certificate installed. If we get redirected to an https URL
        # instead of an http URL, just accept it.
        base_url = self.config['redirect_uri'].replace('http://', '')
        query = self.runner.get_query(base_url=base_url)
        self.log.info('REDIRECT URI: %s', self.runner.current_url)
        if 'error' in query:
            raise ReturnedErrorException(query['error'],
                                         query.get('error_description'),
                                         self.runner.browser)
        return query
    @property
    def launch_params(self):
        """ The params to send to the authorize url.

        Generates a fresh random "state" nonce on every access.
        """
        state = str(uuid.uuid4())
        params = {
            'response_type': 'code',
            'client_id': self.config['client_id'],
            'redirect_uri': self.config['redirect_uri'],
            'scope': self.config['scope'],
            'state': state,
            'aud': self.config['aud'],
        }
        if 'extra_launch_params' in self.config:
            params.update(self.config['extra_launch_params'])
        return params
    def __enter__(self):
        self.runner.open()
    def __exit__(self, exc_type, exc_value, traceback):
        self.runner.close()
    def _validate_state(self, query):
        # Raises AssertionError; authorize() converts to ValidationErrorException.
        assert 'state' in query, 'Missing state parameter.'
        assert len(query['state']) == 1, 'Too many state parameters.'
        assert query['state'][0] == self._state, \
            'Returned state parameter does not match sent state.'
    def _validate_code(self, query):
        assert 'code' in query, 'Missing code parameter.'
        assert len(query['code']) == 1, 'Too many code parameters.'
class AuthorizationRevoker(object):
    """ Orchestrate the revoke authorization path.
    Attributes:
        config (dict): The oauth config for this vendor.
        revoke_url (str): The vendor's "manage" endpoint.
    Example:
        Implements the context manager methods.
        revoker = AuthorizationRevoker()
        with revoker:
            token = revoker.revoke_authorization()
    """
    def __init__(self, config, revoke_url, step_runner=None):
        self.config = config
        self.revoke_url = revoke_url
        # Fall back to a StepRunner built from the vendor's browser config.
        self.runner = step_runner or StepRunner(self.config.get('browser', {}))
    def revoke_authorization(self):
        """ Drive the browser through sign-in and revocation steps. """
        if not self.runner.browser:
            raise Exception('Webdriver must be connected first.')
        self.runner.get(self.revoke_url)
        all_steps = itertools.chain(self.config.get('sign_in_steps', []),
                                    self.config.get('revoke_steps', []))
        for current_step in all_steps:
            self.runner.execute_step(current_step)
    def __enter__(self):
        self.runner.open()
    def __exit__(self, exc_type, exc_value, traceback):
        self.runner.close()
class CurrentUrlContains(object):
    """ An expectation that the browser's current url contains a
    case-sensitive substring.
    @see: http://selenium-python.readthedocs.io/waits.html
    Returns:
        True when the URL matches, False otherwise
    """
    def __init__(self, substring):
        self.substring = substring
    def __call__(self, driver):
        # Evaluated repeatedly by WebDriverWait until truthy.
        current = driver.current_url
        return self.substring in current
class AuthorizationException(Exception):
    """ An error occurred during the authorization process.

    Saves a browser screenshot for debugging; the exception args are
    (message, screenshot path relative to the web root, current URL).
    """
    def __init__(self, message, browser):
        # Save a screenshot so we can see what's up
        path = 'testsuite/static/screenshots/{0}.png'.format(uuid.uuid4())
        browser.save_screenshot(path)
        super().__init__(message,
                         path.replace('testsuite', ''),
                         browser.current_url)
class ValidationErrorException(AuthorizationException):
    """ An error occurred validating the response.
    """
class ReturnedErrorException(AuthorizationException):
    """ The redirect URI contained an "error" parameter.
    """
    def __init__(self, error, description, browser):
        # Collapse the error code and optional description into one message.
        message = 'Error: {0}\nDescription: {1}'.format(error, description)
        super().__init__(message, browser)
class ElementNotFoundException(AuthorizationException):
    """ An element could not be found for a step.
    """
class NoStepCommandException(AuthorizationException):
    """ A step was provided without an action.
    """
    def __init__(self, step, browser):
        message = 'No command was provided for step: {0}'.format(step)
        super().__init__(message, browser)
|
import sys
from bs4 import BeautifulSoup
import re
# Extract plain text from an HTML page (path given as argv[1]) and strip
# site chrome; appears tailored to the Bibliotheca Augustana Heliand pages.
soup = BeautifulSoup(open(sys.argv[1]), 'html.parser')
#doc=soup.find('div', {'class':'contentus'})
#text=doc.get_text()
text=soup.get_text()
# NOTE(review): in a non-raw replacement string '\1' is the control
# character \x01, not a backreference, so digit runs become "\x01 ".
# If the intent was to delete digits, the replacement should be '' — confirm.
text = re.sub('[0-9]+', '\1 ',text)
for c in '[|]':
    text=text.replace(c,'')
# Normalize bracketed glyph markers and remove navigation/boilerplate strings.
text=text.replace('{b}','ƀ').replace('{d}','đ').replace('. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ','').replace('Heliand','').replace('Fitte','').replace('______________________________________________________________________________','').replace('Die Evangelisten (Book of Kells, um )','').replace('______________________________________________________________________________','').replace('<<< Übersicht <<< vorige Seite','').replace('BIBLIOTHECA AUGUSTANA','').replace('nächste Seite >>>','').replace('bibliotheca Augustana','').replace('\num\n','').strip()
#text=" ".join(text.split()) #this optional step removes a lot of whitespace
print(text)
#call this on hel.txt
#####
#extra_symbols = ['(',')','[',']','{','}','{','}','1','2','3','4','5','6','7','8','9','10']
#text.replace().replace(.replace(.replace(.replace(.replace(.replace(
#print(text)
|
from collections import Counter
file = "./2020/Day10/mattinput.txt"
def jolt_adapter(int_list):
    """Return the jolt differences between consecutive adapters.

    The first entry is the first adapter itself (difference from the
    0-jolt outlet); a trailing 3 is appended for the device's built-in
    adapter.  Input is assumed to be sorted ascending.
    """
    diffs = [int_list[0]] if int_list else []
    diffs.extend(b - a for a, b in zip(int_list, int_list[1:]))
    diffs.append(3)
    return diffs
def compute_adapter_configurations(difference_list):
    """Count valid adapter arrangements (AoC 2020 day 10, part 2).

    Splits the difference string on the mandatory 3-jolt gaps; each run of
    1-jolt steps contributes independently, so the answer is the product of
    the per-run arrangement counts.

    Generalized from the original hard-coded table (2/4/7 for runs of
    length 2/3/4) to arbitrary run lengths via a tribonacci-style DP;
    results are identical for runs up to length 4.
    """
    runs = [r for r in ''.join(str(n) for n in difference_list).split('3') if r]
    total_configs = 1
    for run in runs:
        total_configs *= _run_arrangements(len(run))
    return total_configs

def _run_arrangements(length):
    """Ways to cross `length` consecutive 1-jolt steps skipping at most 2 at a time."""
    # ways(k) = ways(k-1) + ways(k-2) + ways(k-3); ways(0) = 1.
    a, b, c = 0, 0, 1
    for _ in range(length):
        a, b, c = b, c, a + b + c
    return c
# Read the puzzle input (one integer per line) and solve both parts.
int_list = []
with open(file, 'r') as f:
    for row in f:
        int_list.append(int(row.replace('\n', '')))
int_list.sort()
difference_list = jolt_adapter(int_list)
# Part 1: product of the counts of 1-jolt and 3-jolt differences.
count = Counter(difference_list)
print(f"#1 {count[1]*count[3]}")
# Part 2: number of distinct valid adapter arrangements.
print(f"#2 {compute_adapter_configurations(difference_list)}")
|
# Bus fare calculator: R$0.50/km up to 200 km, R$0.45/km beyond that.
km = int(input('Quantos km até o destino da sua viagem? '))
if km <= 200:
    p = 0.50*km
    print('A sua passagem vai custar R${}'.format(p))
else:
    # Longer trips get the discounted per-km rate on the whole distance.
    p2 = 0.45*km
    print('A sua passagem vai custar R${}'.format(p2))
"""
Let's call an array A a mountain if the following properties hold:
A.length >= 3
There exists some 0 < i < A.length - 1 such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]
Given an array that is definitely a mountain,
return any i such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1].
Input: [0,1,0]
Output: 1
"""
from typing import List
class Solution:
    """Find the peak index of a strict mountain array (LeetCode 852)."""
    def peakIndexInMountainArray(self, A: List[int]) -> int:
        """Binary search for the peak in O(log n).

        Requires a strict mountain: A[0] < ... < A[i] > ... > A[-1].
        With duplicates (plateaus) no binary search can be correct.

        Fixes the original implementation, which set `left = mid` and
        could therefore loop forever once `right == left + 1` (e.g. on
        inputs with equal neighbours), and relied on a `break` leaving
        `mid` with the right value.
        """
        left, right = 0, len(A) - 1
        while left < right:
            mid = (left + right) // 2
            if A[mid] < A[mid + 1]:
                # Still ascending: the peak is strictly right of mid.
                left = mid + 1
            else:
                # Descending (or at the peak): the peak is mid or left of it.
                right = mid
        return left
if __name__ == "__main__":
    my_solution = Solution()
    # NOTE(review): this demo input contains plateaus (repeated values), so
    # it is not a strict mountain; binary-search solutions are not
    # guaranteed to return the true peak (index 10) on it — confirm intent.
    print(my_solution.peakIndexInMountainArray([0,0,1,2,2,2,2,2,3,3,8,3,3,3,2,1,0]))
|
/Users/daniel/anaconda/lib/python3.6/tokenize.py |
#!/usr/bin/python3
def uppercase(str):
    """Print `str` with ASCII lowercase letters converted to uppercase.

    Characters outside 'a'..'z' pass through unchanged; a trailing
    newline is printed at the end.
    """
    for ch in str:
        if 'a' <= ch <= 'z':
            # Shift into the uppercase ASCII range ('a' - 'A' == 32).
            ch = chr(ord(ch) - 32)
        print(ch, end="")
    print()
|
from .models import *
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Expose a User's name and phone number."""
    class Meta:
        model=User
        fields=('name','phone',)
class DocumentSerializer(serializers.ModelSerializer):
    """Serialize a Document together with a nested owner representation.

    Fixes the original `owner=UserSerializer`, which assigned the
    serializer *class* instead of an instance, so the field was never
    bound and `owner` fell back to the default representation.
    """
    owner=UserSerializer()
    class Meta:
        model=Document
        fields=('owner','created_time','type','source_type','source_id',)
#!/usr/bin/python3
import brlapi
from subprocess import Popen, PIPE
from re import findall
def batinfo():
    """Show battery status (via `acpi`) on a braille display.

    Runs `acpi`, translates its output to French, writes it to the
    braille terminal through brlapi, and returns the key the user
    pressed (or 0 if brlapi failed before a key was read).
    """
    key = 0
    acpi = Popen("acpi", stdout = PIPE, stderr = PIPE, shell = True)
    batteryInfo = str(acpi.stdout.read())
    # Anything beyond the b'' wrapper on stderr means acpi is missing/broken.
    if len(acpi.stderr.read()) > 3:
        batteryInfo = 'Erreur : acpi non trouvé. Veuillez l\'installer pour utiliser ce module.'
    else:
        # Strip the bytes-literal artifacts left by str() on raw output.
        batteryInfo = batteryInfo.replace('b\'' ,'')
        batteryInfo = batteryInfo.replace('\\n\'', '')
        # Only drop the "Battery 0:" prefix when a single battery is present.
        if len(findall('Battery', batteryInfo)) == 1:
            batteryInfo = batteryInfo.replace('Battery 0: ','')
        # English -> French translation of acpi status phrases.
        translate_fr = {
            'remaining':'restant',
            'Charging':'En charge',
            'until charged':'avant charge complète',
            'Full':'Complète',
            'will never fully charge':'ne jamais la charger complètement',
            'Battery':'Batterie',
            'Unknown':'Inconnu',
            'Discharging':'Sur batterie'
        }
        for i in translate_fr:
            batteryInfo = batteryInfo.replace(i, translate_fr[i])
    try:
        b = brlapi.Connection()
        b.enterTtyMode()
        b.writeText(batteryInfo)
        key = b.readKey()
    # NOTE(review): a bare `finally: return` swallows any brlapi exception
    # raised above and silently returns 0 — confirm this is intentional.
    finally:
        return key
def main():
    """Entry point: display battery info once; always returns 0."""
    batinfo()
    return 0
main()  # executed on import as well as when run as a script
|
#!/usr/bin/env python3
# Advent of Code 2017 day 15 part 2: count matching low-16-bit pairs
# over 5 million "picky" generator values.
icnt = 5000000
#icnt = 5 # Test
count = 0  # number of pairs whose low 16 bits match
def genAgen(count=5000000, seed=699):
    """Yield `count` values from "picky" generator A (AoC 2017 day 15).

    Each value is produced by repeatedly multiplying by 16807 modulo
    2147483647 (the Lehmer/MINSTD generator), keeping only multiples of 4.

    Generalized from the hard-coded module globals: the no-argument call
    genAgen() behaves exactly as before (count=icnt, seed 699); the test
    seed 65 can now be passed directly.
    """
    value = seed
    for _ in range(count):
        # Advance until the candidate is divisible by 4.
        value = value * 16807 % 2147483647
        while value % 4 != 0:
            value = value * 16807 % 2147483647
        yield value
def genBgen(count=5000000, seed=124):
    """Yield `count` values from "picky" generator B (AoC 2017 day 15).

    Each value is produced by repeatedly multiplying by 48271 modulo
    2147483647, keeping only multiples of 8.

    Generalized from the hard-coded module globals: the no-argument call
    genBgen() behaves exactly as before (count=icnt, seed 124); the test
    seed 8921 can now be passed directly.
    """
    value = seed
    for _ in range(count):
        # Advance until the candidate is divisible by 8.
        value = value * 48271 % 2147483647
        while value % 8 != 0:
            value = value * 48271 % 2147483647
        yield value
# The "judge": compare the low 16 bits of each generator pair.
for i, j in zip(genAgen(), genBgen()):
    #print(i, j) # Test
    if (i & 0xFFFF) == (j & 0xFFFF):
        count += 1
print(count)
|
from .a_scan import AScan as ReshapeAScan
import src.basic_correct.b_scan as bbscan
import src.ADC.contrast_full_range_stretch_ADC as ADC
import numpy as np
import PIL
from PIL import ImageFilter
from PIL import Image
import math
import pywt
import os
import os.path as path
import pdb
class BScan(bbscan.BScan):
    """B-scan builder with tunable despeckling on top of the basic B-scan.

    Assembles A-scans into a 2-D B-scan image, converts it through the
    configured ADC, and optionally despeckles it.
    """
    def __init__(self,dir,ADC=ADC.ADC()):
        # NOTE(review): the default ADC() instance is created once at class
        # definition time and shared across calls — confirm that is intended.
        super(BScan,self).__init__(dir, ADC)
    def save_all_b_scans(self):
        """Render every B-scan in the dataset to tuned-B-Scan/BScan/<n>.png."""
        folder='tuned-B-Scan/BScan'
        out_dir = path.join(self.test_dir,folder)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        for num in range(self.conf.numB):
            pil_B_scan = Image.fromarray(self.b_scan(num))
            out_file = path.join(out_dir, str(num)+".png")
            pil_B_scan.save(out_file, 'png')
    def b_scan(self,index,despeckle='NAWT'):
        """Build B-scan `index` as a 2-D numpy array (grayscale).

        despeckle: 'NAWT' (wavelet threshold), 'bilateral', 'Median',
        or anything else for no despeckling.
        """
        spectrums = self.read_B_spectrums(self.test_dir,index)
        a_scans = []
        # NOTE(review): this loops over numB (number of B-scans) to build the
        # A-scans of a single B-scan — presumably numB equals the A-scan
        # count per frame here; verify against the config.
        for i in range(self.conf.numB):
            a_scan_i = ReshapeAScan(self.conf.ref_spectrum,self.conf.resampling_table,self.conf.range).a_scan(spectrums[i])
            a_scans.append(a_scan_i)
        # Transpose so depth runs along rows and lateral position along columns.
        range_data = np.transpose(a_scans)
        image = self.ADC.to_img(range_data)
        # NOTE(review): the local imports below rebind the `despeckle`
        # parameter name to a function, so later string comparisons only
        # work because the earlier branch already matched — fragile.
        if(despeckle == 'NAWT'):
            from .despeckle.wavelet_NAWT_thresold import despeckle
            image = despeckle(image)
        if(despeckle == 'bilateral'):
            from .despeckle.bilateral_filter import despeckle
            image = despeckle(image)
        if(despeckle == 'Median'):
            pilimg = PIL.Image.fromarray(image.astype("float")).convert('RGB')
            median_filtered = pilimg.filter(ImageFilter.MedianFilter(5))
            image = np.asarray(median_filtered.convert('L'))
        # Round-trip through PIL 'L' mode to get a uint8 grayscale array.
        pilimg = PIL.Image.fromarray(image.astype("float")).convert('L')
        numpy_img = np.asarray(pilimg)
        return numpy_img
import math
def mergesort(a):
    """Return a new list containing the elements of `a` in ascending order.

    Classic stable top-down merge sort. Replaces the original's
    `i is not len(al)` identity comparisons on integers (a SyntaxWarning
    on Python 3.8+ and only correct by accident of CPython's small-int
    cache) with ordinary slicing; ordering behavior is unchanged.
    """
    if len(a) <= 1:
        return list(a)
    mid = len(a) // 2
    left = mergesort(a[:mid])
    right = mergesort(a[mid:])
    merged = []
    i = j = 0
    # Merge: take from `left` on ties to keep the sort stable.
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    # At most one of these extends with anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def split(ax,ay,a,b):
    """Find the closest pair among the points whose x-coords are in `ax`.

    ax/ay: x-coordinates / y-coordinates (sorted lists); a: dict x -> y;
    b: dict y -> x. Returns ([x, y], [x, y]) for the closest pair found.

    NOTE(review): despite the recursive divide step, the results of the
    recursive calls are overwritten — p1/q1 are discarded and p2/q2 only
    seed the initial distance before the full O(n^2) brute-force scan
    below, so this is effectively brute force. Confirm before relying on
    its asymptotic complexity.
    """
    p1=list();q1=list();p2=list();q2=list()
    if len(ax)>8:
        qx=ax[:int(len(ax)/2)]
        rx=ax[int(len(ax)/2):]
        qy=ay[:int(len(ay)/2)]
        ry=ay[int(len(ay)/2):]
        p2,q2=split(rx,ry,a,b)
        p1,q1=split(qx,qy,a,b)
    else:
        # Seed with the first two points of this slice.
        p2,q2=[ax[0],a[ax[0]]],[ax[1],a[ax[1]]]
    ddd=distance(p2,q2);
    pmin=list();
    qmin=list();
    # Brute-force scan of all pairs in this slice (j iterates x-values).
    for i in range(len(ax)):
        for j in ax[i+1:]:
            if ddd>distance([ax[i],a[ax[i]]],[j,a[j]]):
                ddd=distance([ax[i],a[ax[i]]],[j,a[j]])
                pmin=[ax[i],a[ax[i]]];qmin=[j,a[j]]
    return pmin,qmin;
def splitd(x,y,d,a,b):
    """Check the vertical strip of width 2d around the median x-coordinate.

    x: sorted x-coords; y: sorted y-coords; d: best distance so far;
    a: dict x -> y; b: dict y -> x. Returns a closer pair ([x,y], [x,y])
    from the strip, or (0, 0) if none beats `d`.
    """
    ps=list()
    _x=x[int(len(x)/2)]
    # Collect y-values of points within the strip |xi - median| < d.
    for i in x:
        if i < (_x+d) and i>(_x-d):
            ps.append(a[i])
    bestd=d
    p,q=0,0
    # Classic strip argument: only the next 7 strip neighbours can be closer.
    # NOTE(review): ps is collected in x-order, not sorted by y, which the
    # 7-neighbour bound formally requires — confirm correctness.
    for i in range((len(ps)-1)):
        for j in range(min(7,(len(ps)-i-1))):
            dis=distance([b[ps[i]],ps[i]],[b[ps[i+j+1]],ps[i+j+1]])
            if dis< bestd:
                bestd=dis
                p,q=[b[ps[i]],ps[i]],[b[ps[i+j+1]],ps[i+j+1]]
    return p,q
def distance(a1, a2):
    """Euclidean distance between 2-D points a1 and a2 ([x, y] pairs)."""
    dx = a1[0] - a2[0]
    dy = a1[1] - a2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def closestpairs(a):#points will be in form of dictionary as all x are unique
    """Return the closest pair of points from dict `a` mapping x -> y.

    Builds sorted x/y orders, runs the divide step, then checks the
    middle strip; returns whichever pair is closer as ([x,y], [x,y]).

    Fixes the original `p3 is not 0` / `q3 is not 0` identity comparisons
    (a SyntaxWarning on Python 3.8+, and correct only thanks to CPython's
    small-int cache) by using ordinary equality.
    """
    b=dict()
    # Reverse lookup y -> x (all x unique per the input contract).
    for i in a:
        b[a[i]]=i
    px=list(a)
    py=list(a.values())
    px=mergesort(px)
    py=mergesort(py)
    p1,q1=split(px,py,a,b)
    d=distance(p1,q1)
    # splitd returns (0, 0) when no strip pair beats d.
    p3,q3=splitd(px,py,d,a,b)
    if p3 != 0 and q3 != 0:
        return p3,q3
    else:
        return p1,q1
# Interactive driver: read "x y" pairs until a non-numeric line, then report
# the closest pair and its distance.
inp=dict()
while True:
    print("enter x and then y")
    try:
        x,y=list(map(int,input().split()))
    except:
        # Any malformed line (or EOF) ends input collection.
        break
    inp[x]=y
p,q=closestpairs(inp)
print('(',p,',',q,') distance -',distance(p,q))
|
import unittest
from katas.kyu_8.rock_paper_scissors import rps
class RockPaperScissorsTestCase(unittest.TestCase):
    """Exhaustive tests for rps(): all 9 move combinations.

    rps returns 'Player 1 won!', 'Player 2 won!' or 'Draw!'.
    """
    def test_equals(self):
        self.assertEqual(rps('rock', 'scissors'), 'Player 1 won!')
    def test_equals_2(self):
        self.assertEqual(rps('scissors', 'paper'), 'Player 1 won!')
    def test_equals_3(self):
        self.assertEqual(rps('paper', 'rock'), 'Player 1 won!')
    def test_equals_4(self):
        self.assertEqual(rps('scissors', 'rock'), 'Player 2 won!')
    def test_equals_5(self):
        self.assertEqual(rps('paper', 'scissors'), 'Player 2 won!')
    def test_equals_6(self):
        self.assertEqual(rps('rock', 'paper'), 'Player 2 won!')
    def test_equals_7(self):
        self.assertEqual(rps('rock', 'rock'), 'Draw!')
    def test_equals_8(self):
        self.assertEqual(rps('scissors', 'scissors'), 'Draw!')
    def test_equals_9(self):
        self.assertEqual(rps('paper', 'paper'), 'Draw!')
|
# encoding:utf-8
#!/usr/bin/python
#-*-coding:utf-8-*-
# Python 2 script (xrange, MySQLdb): seed 20 numbered students into the
# coolSignIn database.
import MySQLdb
db = MySQLdb.connect(host="localhost",user="root",passwd="4242",\
                     db="coolSignIn",charset="utf8",use_unicode=True)
cursor = db.cursor()
# [name, student number, sheet id]; name gets a numeric suffix per row.
data = ["学生","201226630205",1]
length = 20
for i in xrange(length):
    data[0] = data[0] + str(i)
    data[1] = str(int(data[1]) + 1)
    # NOTE(review): SQL built via % interpolation — fine for these fixed
    # values but unsafe for external input; prefer parameterized queries.
    sql ="insert into Student(studentName,studentNO,sheetID)\
    values('%s','%s','%d')" % tuple(data)
    try:
        cursor.execute(sql)
        db.commit()
    except:
        # NOTE(review): bare except hides the failure reason; at minimum
        # log the exception before rolling back.
        db.rollback()
    data[0] = "学生"
db.close()
|
"""Syncronizes cell Zookeeper with LDAP data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import hashlib
import json
import io
import logging
import sqlite3
import tempfile
from treadmill import admin
from treadmill import context
from treadmill import fs
from treadmill import utils
from treadmill import zknamespace as z
from treadmill import zkutils
from treadmill.scheduler import masterapi
_LOGGER = logging.getLogger(__name__)
def _match_appgroup(group):
    """Match if appgroup belongs to the cell.

    Returns True when the current cell appears in the group's 'cells' list.
    """
    cells = group.get('cells', [])
    return context.GLOBAL.cell in cells
def _sync_collection(zkclient, entities, zkpath, match=None):
    """Sync ldap collection to Zookeeper.

    Args:
        zkclient: Zookeeper client.
        entities: iterable of dicts, each carrying an '_id' key
            (popped here — the caller's dicts are mutated).
        zkpath: parent Zookeeper path mirroring the collection.
        match: optional predicate; entities failing it are skipped.

    Deletes Zookeeper children absent from the collection, then writes
    only the entries whose content actually changed.
    """
    _LOGGER.info('Sync: %s', zkpath)
    zkclient.ensure_path(zkpath)
    in_zk = zkclient.get_children(zkpath)
    to_sync = {}
    for entity in entities:
        name = entity.pop('_id')
        if match and not match(entity):
            _LOGGER.debug('Skip: %s', name)
            continue
        to_sync[name] = entity
    # Remove nodes that no longer exist in the source collection.
    for to_del in set(in_zk) - set(to_sync):
        _LOGGER.info('Delete: %s', to_del)
        zkutils.ensure_deleted(zkclient, z.join_zookeeper_path(zkpath, to_del))
    # Add or update current app-groups
    for name, entity in to_sync.items():
        # put() returns truthy only when content actually changed.
        if zkutils.put(zkclient, z.join_zookeeper_path(zkpath, name),
                       entity, check_content=True):
            _LOGGER.info('Update: %s', name)
        else:
            _LOGGER.info('Up to date: %s', name)
def _appgroup_group_by_proid(cell_app_groups):
    """Group list of app groups by proid pattern.

    Returns:
        tuple: (dict proid -> list of (pattern, group_type, endpoints, data)
        rows, dict proid -> sha1 object hashing those rows). The checksum
        lets callers skip re-uploading unchanged lookup databases.
    """
    # create reverse lookup of appgroups by proid.
    def _key(item):
        # Sort key keeps row order (and therefore the checksum) stable.
        return (item.get('pattern'),
                item.get('group-type'),
                item.get('endpoints'),
                item.get('data'))
    groups_by_proid = collections.defaultdict(list)
    checksum_by_proid = collections.defaultdict(hashlib.sha1)
    for group in sorted(cell_app_groups, key=_key):
        data = json.dumps(utils.equals_list2dict(group.get('data', [])))
        pattern = group.get('pattern')
        if not pattern:
            _LOGGER.warning('Invalid app-group, no pattern: %r', group)
            continue
        # The proid is the pattern prefix before the first dot.
        proid, _rest = pattern.split('.', 1)
        # Create a flat table, and endpoints is a list.
        endpoints = ','.join(group.get('endpoints', []))
        group_type = group.get('group-type')
        row = (pattern, group_type, endpoints, data)
        groups_by_proid[proid].append(row)
        for item in row:
            if item:
                checksum_by_proid[proid].update(item.encode('utf8'))
    return groups_by_proid, checksum_by_proid
def _create_lookup_db(rows):
"""Create lookup db file."""
with tempfile.NamedTemporaryFile(delete=False) as f:
pass
conn = sqlite3.connect(f.name)
with conn:
conn.execute(
"""
CREATE TABLE appgroups (
pattern text,
group_type text,
endpoints text,
data text
)
"""
)
conn.executemany(
"""
INSERT INTO appgroups (
pattern, group_type, endpoints, data
) VALUES(?, ?, ?, ?)
""",
rows
)
conn.close()
return f.name
def _sync_appgroup_lookups(zkclient, cell_app_groups):
    """Sync app group lookup databases.

    For each proid, uploads a fresh SQLite lookup database unless a node
    with the same content checksum already exists.
    """
    groups_by_proid, checksum_by_proid = _appgroup_group_by_proid(
        cell_app_groups
    )
    for proid in groups_by_proid:
        if not groups_by_proid[proid]:
            _LOGGER.debug('Appgroups not defined for proid: %s', proid)
            # BUGFIX: ensure_deleted takes (zkclient, path); the original
            # passed the z.path.appgroup_lookup function itself as the
            # client and the proid as the path (cf. calls at
            # _sync_collection / _remove_extra_appgroup_lookup).
            zkutils.ensure_deleted(zkclient, z.path.appgroup_lookup(proid))
            continue
        # If node already exists with the proper checksum, ensure that others
        # are removed, but not recreate.
        digest = checksum_by_proid[proid].hexdigest()
        if zkclient.exists(z.path.appgroup_lookup(proid, digest)):
            _LOGGER.debug('Appgroup lookup for proid %s is up to date: %s',
                          proid, digest)
            continue
        db_file = _create_lookup_db(groups_by_proid[proid])
        try:
            _save_appgroup_lookup(zkclient, db_file, proid, digest)
        finally:
            # Always remove the temp database file, even on upload failure.
            fs.rm_safe(db_file)
def _save_appgroup_lookup(zkclient, db_file, proid, digest):
    """Upload the lookup database to Zookeeper, then prune stale versions."""
    with io.open(db_file, 'rb') as db:
        payload = db.read()
    zkutils.put(zkclient, z.path.appgroup_lookup(proid, digest), payload)
    _remove_extra_appgroup_lookup(zkclient, proid, digest)
def _remove_extra_appgroup_lookup(zkclient, proid, digest):
    """Delete every appgroup lookup node for `proid` except `digest`."""
    parent = z.path.appgroup_lookup(proid)
    stale = [node for node in zkclient.get_children(parent) if node != digest]
    for node in stale:
        zkutils.ensure_deleted(zkclient, z.path.appgroup_lookup(proid, node))
def sync_appgroups():
    """Sync app-groups from LDAP to Zookeeper.

    Filters the global app-group list down to this cell, mirrors it under
    the appgroup path, and refreshes the per-proid lookup databases.
    """
    _LOGGER.info('Sync appgroups.')
    admin_app_group = admin.AppGroup(context.GLOBAL.ldap.conn)
    app_groups = admin_app_group.list({})
    cell_app_groups = [group for group in app_groups if _match_appgroup(group)]
    _sync_collection(context.GLOBAL.zk.conn,
                     cell_app_groups, z.path.appgroup())
    _sync_appgroup_lookups(context.GLOBAL.zk.conn, cell_app_groups)
def sync_partitions():
    """Syncs partitions to Zookeeper.

    Removes Zookeeper partition nodes absent from LDAP, then writes each
    partition (normalizing 'reboot-schedule' when present and valid).
    """
    _LOGGER.info('Sync: partitions.')
    zkclient = context.GLOBAL.zk.conn
    admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
    partitions = admin_cell.partitions(context.GLOBAL.cell)
    zkclient.ensure_path(z.path.partition())
    in_zk = zkclient.get_children(z.path.partition())
    names = [partition['_id'] for partition in partitions]
    for extra in set(in_zk) - set(names):
        _LOGGER.debug('Delete: %s', extra)
        zkutils.ensure_deleted(zkclient, z.path.partition(extra))
    # Add or update current partitions
    for partition in partitions:
        zkname = partition['_id']
        if 'reboot-schedule' in partition:
            try:
                partition['reboot-schedule'] = utils.reboot_schedule(
                    partition['reboot-schedule']
                )
            except ValueError:
                # Invalid schedules are kept as-is rather than dropped.
                _LOGGER.info('Invalid reboot schedule, ignoring.')
        if zkutils.put(zkclient, z.path.partition(zkname),
                       partition, check_content=True):
            _LOGGER.info('Update: %s', zkname)
        else:
            _LOGGER.info('Up to date: %s', zkname)
def sync_allocations():
    """Syncronizes allocations from LDAP to the scheduler master.

    Strips the trailing '/<cell>' from each allocation id into a 'name'
    field before handing the list to masterapi.
    """
    _LOGGER.info('Sync allocations.')
    zkclient = context.GLOBAL.zk.conn
    admin_alloc = admin.CellAllocation(context.GLOBAL.ldap.conn)
    allocations = admin_alloc.list({'cell': context.GLOBAL.cell})
    filtered = []
    for alloc in allocations:
        _LOGGER.info('Sync allocation: %s', alloc)
        # _id is '<name>/<cell>'; keep only the allocation name.
        name, _cell = alloc['_id'].rsplit('/', 1)
        alloc['name'] = name
        filtered.append(alloc)
    masterapi.update_allocations(zkclient, filtered)
def sync_servers():
    """Sync global servers list.

    Publishes the ids of all LDAP-known servers under the globals node.
    """
    _LOGGER.info('Sync servers.')
    admin_srv = admin.Server(context.GLOBAL.ldap.conn)
    global_servers = admin_srv.list({})
    zkutils.ensure_exists(
        context.GLOBAL.zk.conn,
        z.path.globals('servers'),
        data=[server['_id'] for server in global_servers]
    )
|
import sys, os
import inspect
import http
import re
from dotenv import load_dotenv
from flask import Flask, request, make_response
import json
from functools import reduce
from App.app_cors.functions import valid_origin, preflight_request_response
from App.type_info.functions import members_names, is_hashable, is_iterable
import logging
from . import logger
load_dotenv()
def _api_success(payload):
    """Build a 200 JSON response: {'status': 'ok', 'data': payload}.

    Ensures payload carries a 'message' key (default 'ready') and sets
    the CORS allow-origin header from the validated request origin.
    NOTE: mutates the caller's payload dict.
    """
    if 'message' not in payload:
        payload['message'] = 'ready'
    response = make_response({'status': 'ok',
                              'data': payload})
    response.access_control_allow_origin = valid_origin()
    return response
def _api_error(payload, message='error', status=http.HTTPStatus.INTERNAL_SERVER_ERROR):
    """Build an error JSON response: {'status': 'error', 'data': payload}.

    `message` is stored inside the payload; `status` becomes the HTTP
    status code (500 by default). Mutates the caller's payload dict.
    """
    payload['message'] = message
    response = make_response({'status': 'error',
                              'data': payload})
    response.status_code = status
    response.access_control_allow_origin = valid_origin()
    return response
def create_backend():
    """Build and return the Flask app with its routes registered.

    Routes: GET / (handshake), GET /python-version,
    OPTIONS+POST /python-eval (evaluate a Python expression).
    """
    logger.info('create_backend')
    rest_server = Flask(__name__, template_folder = '../templates')
    # GET / (Handshake route)
    @rest_server.route('/', methods=['GET'])
    def hello():
        return _api_success({})
    # GET /python_version
    @rest_server.route('/python-version', methods=['GET'])
    def python_version():
        return _api_success({'version': sys.version})
    # /python-eval preflight route
    @rest_server.route('/python-eval', methods=['OPTIONS'])
    def preflight_route():
        return preflight_request_response()
    # POST /python-eval
    @rest_server.route('/python-eval', methods=['POST'])
    def python_eval():
        payload = json.loads(request.data)
        if 'expression' in payload:
            try:
                # SECURITY: eval() on client-supplied input executes
                # arbitrary code. This service is explicitly a Python-eval
                # sandbox, but it must never be exposed to untrusted callers.
                result = eval(payload['expression'])
            except BaseException as exc:
                # Evaluation errors are reported as a successful response
                # carrying the exception args, not as an HTTP error.
                return _api_success({'exception': exc.args})
            else:
                return _api_success({'result': str(result),
                                     'type': type(result).__name__,
                                     'iterable': is_iterable(result),
                                     'hashable': is_hashable(result),
                                     'details': members_names(result)})
        else:
            return _api_error({})
    return rest_server
# Run the development server when executed directly.
if __name__ == "__main__":
    logger.debug('***** Starting backend')
    backend = create_backend()
    backend.run()
# Task 2: read a ';'-separated string and print its longest word.
print('2 задание')
line = input('Введите сторку: ')
words = line.split(';')
longest = words[0]
for word in words:
    if len(word) > len(longest):
        longest = word
print('Самое длинное слово: ', longest)
#python task2.py
from uff import converters, model # noqa
from uff.converters.tensorflow.conversion_helpers import from_tensorflow # noqa
from uff.converters.tensorflow.conversion_helpers import from_tensorflow_frozen_model # noqa
'''
uff
~~~
Universal Framework Format Toolkit.
Convert models from common frameworks (e.g. tensorflow, caffe2)
to a single format
:copyright: (c) 2017 NVIDIA Corporation
'''
__version__ = '0.6.3'
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from asyncio import sleep
from dazl import Network, connect
from dazl.testing import SandboxLauncher
import pytest
from .dars import UploadTest
@pytest.mark.asyncio
async def test_dar_uploads_near_startup(sandbox: SandboxLauncher) -> None:
    """Uploading DARs shortly after startup results in known packages."""
    package_ids = []
    network = Network()
    network.set_config(url=sandbox.url)
    async def upload_dars_and_verify():
        await upload_test_dars(network)
        # NOTE(review): `metadata` is fetched but unused — presumably kept
        # to force a metadata refresh before reading package_ids; confirm.
        metadata = await network.aio_global().metadata()
        package_ids.extend(network.lookup.package_ids())
    await network.aio_run(upload_dars_and_verify(), keep_open=False)
    # Because we use a single sandbox process, it's somewhat difficult to assert that the specific
    # DAR we are attempting to upload has indeed been uploaded, because packages are global and
    # other tests will upload packages as well. However, we know that we HAVE indeed uploaded
    # SOMETHING, and the Sandbox tests are started without any packages at all. So assume that a
    # non-zero package ID list means that DAR uploading works.
    assert len(package_ids) > 0
@pytest.mark.asyncio
@pytest.mark.skip(
    "Background package polling will soon be disabled, and packages will be loaded on an as-needed "
    "basis. When this happens, PackagesAddedEvent will be dropped. If this is still a use-case you "
    "need, please write your own poller around lookup.package_ids."
)
async def test_package_events(sandbox: SandboxLauncher) -> None:
    """Package-added events fire both for initial and later uploads.

    (Currently skipped — see the skip reason above.)
    """
    initial_events = []
    follow_up_events = []
    async with connect(url=sandbox.url, admin=True) as conn:
        party_info = await conn.allocate_party()
    network = Network()
    network.set_config(url=sandbox.url)
    client = network.aio_party(party_info.party)
    async def upload_dars_and_verify():
        # make sure the client is "ready" before uploading DARs, because we are explicitly
        # checking to make sure proper reporting of packages that are uploaded after a
        # client is running and # operational
        await client.ready()
        await upload_test_dars(network)
        await (await network.aio_global().metadata()).package_loader.load_all()
        # give the client some time to pick up the new packages; unfortunately there isn't
        # much more to do here except wait
        await sleep(10)
    # initial=True replays packages that existed before registration.
    client.add_ledger_packages_added(lambda _: initial_events.append(_), initial=True)
    client.add_ledger_packages_added(lambda _: follow_up_events.append(_))
    await network.aio_run(upload_dars_and_verify(), keep_open=False)
    assert len(initial_events) == 2
    assert len(follow_up_events) == 1
async def upload_test_dars(network: Network) -> None:
    """Make sure the UploadTest DAR is present on the ledger."""
    await network.aio_global().ensure_dar(UploadTest)
|
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator
from django.forms import inlineformset_factory
from PIL import Image
from .models import AdvUser, Review, AdditionalImage, Rebuttal, AdditionalImageReb, ComplaintsAndSuggestions
from django.utils.translation import gettext_lazy as _
# Longest allowed side (in pixels) for uploaded images; larger images are
# shrunk proportionally by CustomModelForm.save().
_MAX_SIZE = 800
# ------------Authorization-------------
class LoginForm(AuthenticationForm):
    """Login form; labels are blank because the widgets carry placeholder
    text instead."""
    username = forms.CharField(label='', widget=forms.TextInput(attrs={'placeholder': 'Введите логин', 'class': 'form-text--input', 'id': 'field1'},))
    password = forms.CharField(label='', widget=forms.PasswordInput(attrs={'placeholder': 'Введите пароль', 'class': 'form-text--input'}),)
class ChangeUserInfoForm(forms.ModelForm):
    """Profile-editing form for AdvUser (username, email, mailing opt-in)."""
    username = forms.CharField(widget=forms.TextInput(attrs={"placeholder": "Введите логин", "class": "form-text--input2"}))
    email = forms.EmailField(required=True, label='Email', widget=forms.EmailInput(attrs={"placeholder": "Введите e-mail", "class": "form-text--input2"}))

    class Meta:
        model = AdvUser
        fields = ('username', 'email', 'send_messages')
class ProfPasswordChangeForm(PasswordChangeForm):
    """Password-change form restyled with placeholder widgets."""
    old_password = forms.CharField(
        label=_("Old password"),
        widget=forms.PasswordInput(
            attrs={"placeholder": "Старый пароль", "class": "form-text--input2", 'autofocus': True}),
    )
    new_password1 = forms.CharField(
        label=_("New password"),
        widget=forms.PasswordInput(attrs={"placeholder": "Новый пароль", "class": "form-text--input2"}),
        help_text=password_validation.password_validators_help_text_html(),
    )
    # NOTE(review): label is empty here while the sibling fields are labelled —
    # confirm the blank label is the intended rendering.
    new_password2 = forms.CharField(
        label=_(""),
        widget=forms.PasswordInput(attrs={"placeholder": "Подтверждение нового пароля", "class": "form-text--input2"}),
    )
class UserPasswordResetForm(PasswordResetForm):
    """Password-reset form that additionally verifies that some AdvUser is
    registered with the submitted email address."""
    email = forms.EmailField(
        max_length=254,
        widget=forms.EmailInput(attrs={'placeholder': 'Введите email', 'class': 'form-text--input'})
    )

    def clean(self):
        """Reject emails that do not belong to any AdvUser.

        Returns cleaned_data per Django's documented ``Form.clean`` contract
        (the original returned None, which Django tolerates but is
        non-idiomatic). Raises ValidationError keyed on 'email' otherwise.
        """
        super().clean()
        data = self.cleaned_data.get('email')
        # exists() avoids fetching the rows just to test membership.
        if not AdvUser.objects.filter(email=data).exists():
            raise ValidationError({'email': ValidationError('Нет пользователей с таким Email')})
        return self.cleaned_data
class ComSugForm(forms.ModelForm):
    """Form for submitting a complaint or suggestion (title + message body)."""
    title = forms.CharField(widget=forms.TextInput(attrs={"placeholder": "Введите название сообщения", "class": "form-text--input2 form-text--input2--position"}))
    # FIX: the original declared label='Email' on this field — a copy-paste
    # leftover from an email field in another form. A message-body field must
    # not be labelled 'Email'; blank label matches the placeholder-only style
    # used elsewhere (e.g. LoginForm).
    body = forms.CharField(required=True, label='', widget=forms.Textarea(attrs={"placeholder": "Напишите текст сообщения", "class": "form-text--input3"}))

    class Meta:
        model = ComplaintsAndSuggestions
        fields = '__all__'
        # The submitting user is set by the view, not typed by the visitor.
        widgets = {'user_id': forms.HiddenInput}
class UserSetPasswordForm(SetPasswordForm):
    """Set-new-password form with Russian labels and placeholder-styled
    widgets; strip=False preserves leading/trailing spaces in passwords."""
    new_password1 = forms.CharField(
        label=_("Новый пароль"),
        widget=forms.PasswordInput(attrs={"placeholder": "Введите новый пароль", "class": "form-text--input2"}),
        strip=False,
        help_text=password_validation.password_validators_help_text_html(),
    )
    new_password2 = forms.CharField(
        label=_("Подтвердите пароль"),
        strip=False,
        widget=forms.PasswordInput(attrs={"placeholder": "Подтвердите пароль", "class": "form-text--input2"}),
    )
class RegisterUserForm(forms.ModelForm):
    """Registration form for AdvUser: login (min 5 chars), email, and a
    double password entry checked with Django's password validators."""
    username = forms.CharField(widget=forms.TextInput(attrs={"placeholder": "Введите логин", "class": "form-text--input2"}), validators=[MinLengthValidator(limit_value=5)],
                               help_text='Минимальная длина логина 5 символов')
    email = forms.EmailField(widget=forms.EmailInput(attrs={"placeholder": "Введите e-mail", "class": "form-text--input2"}))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={"placeholder": "Введите пароль", "class": "form-text--input2"}),
                                help_text=password_validation.password_validators_help_text_html(),
                                )
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={"placeholder": "Подтвердите пароль", "class": "form-text--input2"}),
                                help_text='Введите тот же самый пароль еще раз для проверки',)

    def clean_password1(self):
        # Run the configured password validators against the first entry;
        # raises ValidationError (and thus removes the key from cleaned_data)
        # if the password is too weak.
        password1 = self.cleaned_data['password1']
        if password1:
            password_validation.validate_password(password1)
        return password1

    def clean(self):
        """Cross-field check: both passwords present and equal."""
        super().clean()
        try:
            password1 = self.cleaned_data['password1']
            password2 = self.cleaned_data['password2']
            if password1 and password2 and password1 != password2:
                errors = {'password2': ValidationError('Введенные пароли не совпадают', code='password_mismatch')}
                raise ValidationError(errors)
        except KeyError:
            # Missing key: a password did not survive field-level validation
            # (e.g. rejected in clean_password1), so report it as too weak.
            errors = {'password1': ValidationError('Придумайте пароль понадёжнее', code='password_mismatch')}
            raise ValidationError(errors)

    def save(self, commit=True):
        """Create the user with a hashed password and mark it active."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data['password1'])
        user.is_active = True
        user.is_activated = True
        if commit:
            user.save()
        return user

    class Meta:
        model = AdvUser
        fields = ('username', 'email', 'password1', 'password2', 'send_messages')
# -----------Review--------------
class CustomModelForm(forms.ModelForm):
    """ModelForm for image-bearing models that shrinks the uploaded image so
    its largest side does not exceed _MAX_SIZE pixels.

    Fixes over the original:
    - ``super().save(self)`` passed ``self`` (always truthy) as the ``commit``
      argument, silently ignoring the caller's ``commit``; now forwarded.
    - the saved instance was not returned; ModelForm.save() (and the formsets
      built on this form) rely on the return value, so it is now returned.
    """

    def save(self, commit=True):
        instance = super().save(commit)
        if commit:
            # Only touch the file once it has actually been written to storage.
            filepath = instance.image.path
            width = instance.image.width
            height = instance.image.height
            max_size = max(width, height)
            if max_size > _MAX_SIZE:
                # Re-open with PIL and scale down proportionally, in place.
                img = Image.open(filepath)
                img = img.resize(
                    (round(width / max_size * _MAX_SIZE),
                     round(height / max_size * _MAX_SIZE)),
                    Image.LANCZOS
                )
                img.save(filepath)
        return instance

    class Meta:
        model = AdditionalImageReb
        fields = '__all__'
class ReviewForm(forms.ModelForm):
    """Form for creating/editing a Review; server-managed fields (author,
    payment and state flags) are rendered as hidden inputs."""
    class Meta:
        model = Review
        fields = '__all__'
        widgets = {
            'title': forms.Textarea(
                attrs={"placeholder": "Напишите название ПРАВЕДНОГО ГНЕВА...", "id": "name_anger", "spellcheck": "true",
                       "class": "name_anger", "maxlength": "2000", "rows": "2", "cols": "200"}),
            'content': forms.Textarea(
                attrs={"placeholder": "Напишите текст ПРАВЕДНОГО ГНЕВА...", "id": "text_anger", "spellcheck": "true",
                       "class": "user_rebuttal", "maxlength": "10000", "rows": "2", "cols": "200"}),
            'country': forms.Textarea(
                attrs={"placeholder": "Страна", "id": "pay_dop1", "class": "find_width2"}),
            'city': forms.Textarea(
                attrs={"placeholder": "Город", "id": "pay_dop2", "class": "find_width2"}),
            'address': forms.Textarea(
                attrs={"placeholder": "Адрес далее", "id": "pay_dop3", "class": "find_width3"}),
            # The fields below are populated by the views / payment flow,
            # never typed by the user.
            'author': forms.HiddenInput,
            'price': forms.HiddenInput,
            'is_active': forms.HiddenInput,
            'is_paid': forms.HiddenInput,
            'braintree_id': forms.HiddenInput,
            'event_date': forms.HiddenInput,
        }
# Inline formset: up to 5 AdditionalImage uploads attached to a Review; each
# image is downsized on save by CustomModelForm.
AIFormSet = inlineformset_factory(
    Review, AdditionalImage,
    fields='__all__', extra=5,
    form=CustomModelForm
)
# -----------Rebuttal--------------
class RebuttalForm(forms.ModelForm):
    """Form for a Rebuttal; author/review/payment/state fields are hidden
    inputs populated by the views."""
    class Meta:
        model = Rebuttal
        fields = '__all__'
        widgets = {'author': forms.HiddenInput,
                   'review': forms.HiddenInput,
                   'is_active': forms.HiddenInput,
                   'price': forms.HiddenInput,
                   'braintree_id': forms.HiddenInput,
                   'is_paid': forms.HiddenInput}
# Inline formset: up to 5 AdditionalImageReb uploads attached to a Rebuttal;
# each image is downsized on save by CustomModelForm.
AIRebFormSet = inlineformset_factory(
    Rebuttal, AdditionalImageReb,
    fields='__all__', extra=5,
    form=CustomModelForm
)
|
import requests
from bs4 import BeautifulSoup as bs
from datetime import datetime
import re
class RssFeeder(object):
    """Fetch an RSS feed and normalize its <item> entries into dicts keyed by
    link, with the publication date parsed into a datetime."""

    def __init__(self, _url, _collection, _category):
        self.url = _url
        self.collection = _collection
        self.data = {}
        self.category = _category

    def runCrawl(self):
        """Download the feed and return ``{'collection', 'category', 'items'}``.

        Each entry of ``items`` carries ``_id`` (the item link), ``pubdate``
        (datetime), ``title``, ``description`` and ``category``.
        """
        response = requests.get(self.url)
        soup = bs(response.content, "html.parser")
        items_soup = soup.find_all("item")
        items = []
        self.data['collection'] = self.collection
        self.data['category'] = self.category
        for item in items_soup:
            title = item.title.text
            link = item.link.text
            desc = item.description.text
            # html.parser lowercases tag names, so <pubDate> is found as 'pubdate'.
            if item.pubdate is not None:
                pubdate = self.dateParsing(item.pubdate.text)
            else:
                # No pubDate in the feed: fall back to the crawl time.
                pubdate = datetime.now()
            insert_data = {
                "_id": link,
                "pubdate": pubdate,
                "title": title,
                "description": desc,
                "category": self.category
            }
            # Progress/debug output for the crawl.
            print("=" * 30)
            print(pubdate)
            print(self.collection)
            print(desc[:100])
            items.append(insert_data)
        self.data['items'] = items
        return self.data

    def dateParsing(self, _pubdate):
        """Parse a pubdate string into a :class:`datetime.datetime`.

        Supported formats: RFC-822 with trailing 'GMT', RFC-822 with a +0900
        offset, ``YYYY-MM-DD HH:MM:SS``, and compact ``YYYYMMDDHHMMSS+0900``.
        Raises ValueError for anything else (the original printed an error and
        killed the whole interpreter with ``exit()``, which is hostile in
        library code).
        """
        # Raw strings: '\d' in a plain literal is an invalid escape sequence
        # (DeprecationWarning on modern CPython).
        parser_type1 = re.compile(r'[a-zA-Z]+, \d+ [a-zA-Z]+ \d+ \d+:\d+:\d+ GMT')
        parser_type2 = re.compile(r'[a-zA-Z]+, \d+ [a-zA-Z]+ \d+ \d+:\d+:\d+ .0900')
        parser_type3 = re.compile(r'\d+-\d+-\d+ \d+:\d+:\d+')
        parser_type4 = re.compile(r'\d+.0900')
        result1 = parser_type1.search(_pubdate)
        result2 = parser_type2.search(_pubdate)
        result3 = parser_type3.search(_pubdate)
        result4 = parser_type4.search(_pubdate)
        if result1 is not None:
            pubdate = result1.group()[:-4]  # strip trailing ' GMT'
            pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S")
        elif result2 is not None:
            pubdate = result2.group()[:-6]  # strip trailing ' +0900'
            pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S")
        elif result3 is not None:
            pubdate = datetime.strptime(result3.group(), "%Y-%m-%d %H:%M:%S")
        elif result4 is not None:
            raw = result4.group()
            # Fixed-width compact timestamp: YYYYMMDDHHMMSS.
            pubdate = datetime(int(raw[:4]), int(raw[4:6]), int(raw[6:8]),
                               int(raw[8:10]), int(raw[10:12]), int(raw[12:14]))
        else:
            raise ValueError("parsing error! unrecognized pubdate: {!r}".format(_pubdate))
        return pubdate
|
from django.db import models
import datetime
from django.contrib.auth.models import User
from django.utils import timezone
from functools import reduce
# Create your models here.
class Topic(models.Model):
    """A subject area (defaulting to the 'Development' category) that groups
    related courses."""
    name = models.CharField(max_length=200)
    category = models.CharField(max_length=200, blank=False, default='Development')

    def __str__(self):
        return self.name
class Course(models.Model):
    """A purchasable course belonging to a Topic."""
    topic = models.ForeignKey(Topic, related_name='courses', on_delete=models.CASCADE)
    name = models.CharField(max_length=200)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    hours = models.PositiveIntegerField(default=0)
    for_everyone = models.BooleanField(default=True)
    description = models.TextField(max_length=300, null=True, blank=True)
    interested = models.PositiveIntegerField(default=0)
    stages = models.PositiveIntegerField(default=3)

    def __str__(self):
        return self.name

    # If price is greater than 150.00 => discount 10%
    # NOTE(review): despite the comment above, no >150.00 check is made — the
    # 10% discount is applied (and persisted) unconditionally. Confirm intent.
    def discount(self):
        self.price = self.price - self.price*10/100 # discount 10%
        self.save()
class Student(User):
    """Site user extended with school/address/city and topic interests."""
    # Cities are stored as 2-letter codes and displayed as full names.
    # FIX: corrected the display label typo 'Calgery' -> 'Calgary'
    # (the stored code 'CG' is unchanged, so existing rows are unaffected).
    CITY_CHOICES = [('WS', 'Windsor'), ('CG', 'Calgary'), ('MR', 'Montreal'), ('VC', 'Vancouver')]
    school = models.CharField(max_length=50, null=True, blank=True)
    address = models.CharField(max_length=300, null=True, blank=True)
    city = models.CharField(max_length=2, choices=CITY_CHOICES, default='WS')
    interested_in = models.ManyToManyField(Topic)

    def __str__(self):
        # first_name / last_name are inherited from django.contrib.auth User.
        return '{} {}'.format(self.first_name, self.last_name)
class Order(models.Model):
    """A student's order for one or more courses."""
    STATUS_CHOICES = [(0, 'Cancelled'), (1, 'Order Confirmed')]
    courses = models.ManyToManyField(Course)
    # NOTE: field name is capitalized for backward compatibility with callers.
    Student = models.ForeignKey(Student, related_name='student', on_delete=models.CASCADE)
    levels = models.PositiveIntegerField()
    order_status = models.IntegerField(choices=STATUS_CHOICES, default=1)
    order_date = models.DateField(default=datetime.date.today)

    def __str__(self):
        details = (self.Student.first_name, self.Student.last_name,
                   self.levels, self.order_status, self.order_date,
                   self.total_cost(), self.combined_course_names())
        return '{} {} {} {} {} {} {}'.format(*details)

    def combined_course_names(self):
        """Return the ordered course names as one ' - name1 - name2' string."""
        return ''.join(' - ' + course.name for course in self.courses.all())

    def total_cost(self):
        """Sum of the prices of every course on this order."""
        return sum(course.price for course in self.courses.all())
|
# -*- coding: utf-8 -*-
#
# This file is part of Flask-AppExts
# Copyright (C) 2015 CERN.
#
# Flask-AppExts is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Admin extension."""
from __future__ import absolute_import, unicode_literals, print_function
from flask_admin import Admin
from flask_registry import ModuleAutoDiscoveryRegistry
class AdminDiscoveryRegistry(ModuleAutoDiscoveryRegistry):
    """Registry that auto-discovers admin modules and calls their setup hook."""
    # Name of the callable each discovered module is expected to expose.
    setup_func_name = 'setup_app'

    def register(self, module, *args, **kwargs):
        # Inject the Flask app as an extra positional argument for the parent
        # registry's register().
        super(AdminDiscoveryRegistry, self).register(
            module, self.app, *args, **kwargs
        )
def setup_app(app):
    """Initialize Admin."""
    # Attach Flask-Admin to the application, then expose a discovery registry
    # under the 'admin' key so modules can register their admin views.
    Admin(app, template_mode='bootstrap3')
    registry = AdminDiscoveryRegistry('admin', app=app, with_setup=True)
    app.extensions['registry']['admin'] = registry
|
from restaurant import Restaurant
from User import *
from Admin import *
# Demo: open the restaurant, then show what the admin account can do.
drink_bar = Restaurant("xiaocao", "drinking")
drink_bar.open_restaurant()

site_admin = Admin("BB", "Z")
site_admin.show_privileges()
|
def oddTuples(aTup):
    '''
    aTup: a tuple

    returns: tuple, every other element of aTup.
    '''
    # Extended slicing takes elements 0, 2, 4, ... directly. This also handles
    # the empty tuple, making the original in-loop `if len(aTup) == 0: break`
    # (which was unreachable — the while condition already excludes it) moot.
    return aTup[::2]
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
# Shared table of test cases for the vpp/deinterlace suite.
spec = load_test_spec("vpp", "deinterlace")
@slash.requires(have_gst)
@slash.requires(*have_gst_element("vaapi"))
@slash.requires(*have_gst_element("vaapipostproc"))
@slash.requires(*have_gst_element("checksumsink2"))
@slash.parametrize(*gen_vpp_deinterlace_parameters(spec, ["bob", "weave", "motion-adaptive", "motion-compensated"]))
@platform_tags(VPP_PLATFORMS)
def test_default(case, method):
    """Run a gst-launch vaapipostproc deinterlace for the given case/method
    and validate the dumped raw output against the case's metric (md5 by
    default)."""
    params = spec[case].copy()
    params.update(
        method = map_deinterlace_method(method),
        mformat = mapformat(params["format"]),
        tff = params.get("tff", 1))
    # A None mapping means this deinterlace method isn't supported here.
    if params["method"] is None:
        slash.skip_test("{} method not supported".format(method))
    params["decoded"] = get_media()._test_artifact(
        "{}_deinterlace_{method}_{format}_{width}x{height}"
        ".yuv".format(case, **params))
    call(
        "gst-launch-1.0 -vf filesrc location={source} num-buffers={frames}"
        " ! rawvideoparse format={mformat} width={width} height={height}"
        " interlaced=true top-field-first={tff}"
        " ! vaapipostproc format={mformat} width={width} height={height}"
        " deinterlace-mode=1 deinterlace-method={method} ! checksumsink2"
        " file-checksum=false frame-checksum=false plane-checksum=false"
        " dump-output=true dump-location={decoded}".format(**params))
    params.setdefault("metric", dict(type = "md5"))
    check_metric(**params)
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import UserSerializer, UserLogSerializer
from .models import UsersMan
from rest_framework import status
from django.db.models import F
#For posting the information about users and saving it in db and getting the information about all users from db
#For posting the information about users and saving it in db and getting the information about all users from db
class UsersView(APIView):
    """Create users (POST) and list basic user info (GET)."""

    def post(self, request):
        """Validate and persist a new user; respond 201 with the saved data.

        With raise_exception=True, is_valid() raises a DRF ValidationError
        (rendered as a 400 response by the framework), so the original
        ``return Response(ser.errors, ...)`` line was unreachable and has
        been removed.
        """
        ser = UserSerializer(data=request.data)
        ser.is_valid(raise_exception=True)
        ser.save()
        return Response(ser.data, status=status.HTTP_201_CREATED)

    def get(self, request):
        """Return user_id/name/time_zone for every stored user."""
        data = UsersMan.objects.values(
            'user_id',
            'name',
            'time_zone'
        )
        ser = UserSerializer(data, many=True)
        return Response(ser.data)
#for displaying the final result.
#for displaying the final result.
class MembersActivity(APIView):
    """Expose the serialized activity log for every user."""

    def get(self, request):
        users = UsersMan.objects.all()
        serialized = UserLogSerializer(users, many=True)
        return Response(serialized.data)
from import_export import resources
from import_export.fields import Field
from .models import Finding
class FindingResource(resources.ModelResource):
    """django-import-export resource describing how Finding rows are
    imported/exported."""
    # Flatten the FK relations to their human-readable text values.
    severity = Field(attribute='severity__severity', column_name='severity')
    finding_type = Field(attribute='finding_type__finding_type', column_name='finding_type')

    class Meta:
        model = Finding
        # Skip rows whose values are unchanged on import.
        skip_unchanged = True
        # NOTE(review): severity/finding_type are listed in export_order but
        # not in `fields` — confirm both columns export as intended.
        fields = (
            'id', 'title', 'description', 'impact', 'mitigation',
            'replication_steps', 'host_detection_techniques',
            'network_detection_techniques', 'references',
            'finding_guidance'
        )
        export_order = (
            'id', 'severity', 'finding_type', 'title', 'description',
            'impact', 'mitigation', 'replication_steps',
            'host_detection_techniques', 'network_detection_techniques',
            'references', 'finding_guidance'
        )
|
import speech_recognition as sr
# Voice "login" demo: capture the user's name from the default microphone and
# confirm the transcription interactively.
r = sr.Recognizer()
mic = sr.Microphone(device_index=1)

with mic as source:
    # Sample one second of background noise so the energy threshold adapts.
    r.adjust_for_ambient_noise(source, duration=1)
    print("What is your name: ")
    audio = r.listen(source, timeout=7)

print("Wait till your voice is recognised......\n")
try:
    # Recognize once and reuse the result (the original hit the API twice).
    name = r.recognize_google(audio)
    print("You are entering as " + name + "?")
    val = input("Yes or No?\n")
    # BUG FIX: `val == "yes" and "YES" and "Yes"` always reduced to
    # val == "yes" (non-empty string literals are truthy); test membership
    # against each accepted spelling instead.
    if val in ("yes", "Yes", "YES"):
        print("You are entering as " + name + ".")
except Exception:
    # recognize_google raises UnknownValueError / RequestError; the original
    # bare `except:` also swallowed KeyboardInterrupt — narrowed to Exception.
    print("Unable to Understand,Please try it once again")
from aiogram import types
import logging
from aiogram.dispatcher.filters import Command
from aiogram.types import CallbackQuery
from keyboards.inline.callback_datas import buy_callback
from keyboards.inline.choice_buttons import choice, pear_keyboard_terrain, pear_keyboard_route
from loader import dp
@dp.message_handler(Command("items"))
async def show_items(message: types.Message):
    """Reply to /items with the inline keyboard offering the two maps."""
    await message.answer(text="Мы можем предложить карту местности,и карту маршрутов.\n"
                              "Если же вам ничего не нужно жмите - Отмена"
                         , reply_markup=choice
                         )
@dp.callback_query_handler(buy_callback.filter(item_name="terrain_map"))
async def buying_terrain_map(call: CallbackQuery):
    """Handle the 'terrain map' purchase button: acknowledge the callback and
    show the terrain-map keyboard."""
    # Cache the answer for 60s so repeated taps don't re-trigger the handler.
    await call.answer(cache_time=60)
    # The message has no placeholders, so the f-string prefix was unnecessary;
    # dead commented-out logging removed.
    await call.message.answer("Вы выбрали купить карту местности!\n", reply_markup=pear_keyboard_terrain)
@dp.callback_query_handler(buy_callback.filter(item_name="route_map"))
async def buying_route_map(call: CallbackQuery):
    """Handle the 'route map' purchase button: acknowledge the callback and
    show the route-map keyboard."""
    # Cache the answer for 60s so repeated taps don't re-trigger the handler.
    await call.answer(cache_time=60)
    # The message has no placeholders, so the f-string prefix was unnecessary;
    # dead commented-out logging removed.
    await call.message.answer("Вы выбрали купить карту маршрутов!\n", reply_markup=pear_keyboard_route)
|
class Transform:
    """A node in a 2-D transform hierarchy: local coordinates plus an
    optional parent whose global position offsets this node's."""

    def __init__(self, coords=(0, 0), parent=None):
        self.local_x, self.local_y = coords
        self.parent = parent
        self.children = []

    def get_global_coords(self):
        """Return this node's world-space (x, y) as ints."""
        x, y = int(self.local_x), int(self.local_y)
        if self.parent is None:
            return x, y
        px, py = self.parent.get_global_coords()
        return x + px, y + py

    def attach(self, child):
        """Re-parent `child` onto self, preserving its global position."""
        if child.parent:
            # Detach from the previous parent, remembering its world origin.
            child.parent.children.remove(child)
            old_x, old_y = child.parent.get_global_coords()
        else:
            old_x, old_y = 0, 0
        new_x, new_y = self.get_global_coords()
        # Compensate local coords for the change of reference frame.
        child.local_x += old_x - new_x
        child.local_y += old_y - new_y
        self.children.append(child)
        child.parent = self

    def move_to(self, coords, delta_time):
        """Ease the local position toward `coords`; delta_time == 300 lands
        exactly on the target."""
        target_x, target_y = coords
        step = delta_time / 300
        self.local_x = self.local_x - (self.local_x - target_x) * step
        self.local_y = self.local_y - (self.local_y - target_y) * step
|
from django.conf.urls import include, url
from django.contrib import admin
from . import views
# Namespace used when reversing these routes (e.g. 'main:detail').
app_name = 'main'

# Routes: /list/, the index page, and slug-based detail pages.
urlpatterns = [
    url(r'^list/$', views.list, name='list'),
    url(r'^$', views.index, name='index'),
    url(r'^(?P<slug>[-\w]+)/$' ,views.detail, name='detail'),
]
import torch
from utils import one_hot
class ExponentialFamilyArray(torch.nn.Module):
"""
ExponentialFamilyArray computes log-densities of exponential families in parallel. ExponentialFamilyArray is
abstract and needs to be derived, in order to implement a concrete exponential family.
The main use of ExponentialFamilyArray is to compute the densities for FactorizedLeafLayer, which computes products
of densities over single RVs. All densities over single RVs are computed in parallel via ExponentialFamilyArray.
Note that when we talk about single RVs, these can in fact be multi-dimensional. A natural use-case is RGB image
data: it is natural to consider pixels as single RVs, which are, however, 3-dimensional vectors each.
Although ExponentialFamilyArray is not derived from class Layer, it implements a similar interface. It is intended
that ExponentialFamilyArray is a helper class for FactorizedLeafLayer, which just forwards calls to the Layer
interface.
Best to think of ExponentialFamilyArray as an array of log-densities, of shape array_shape, parallel for each RV.
When evaluated, it returns a tensor of shape (batch_size, num_var, *array_shape) -- for each sample in the batch and
each RV, it evaluates an array of array_shape densities, each with their own parameters. Here, num_var is the number
of random variables, i.e. the size of the set (boldface) X in the paper.
The boolean use_em indicates if we want to use the on-board EM algorithm (alternatives would be SGD, Adam,...).
After the ExponentialFamilyArray has been generated, we need to initialize it. There are several options for
initialization (see also method initialize(...) below):
'default': use the default initializer (to be written in derived classes).
Tensor: provide a custom initialization.
In order to implement a concrete exponential family, we need to derive this class and implement
sufficient_statistics(self, x)
log_normalizer(self, theta)
log_h(self, x)
expectation_to_natural(self, phi)
default_initializer(self)
project_params(self, params)
reparam_function(self)
_sample(self, *args, **kwargs)
Please see docstrings of these functions below, for further details.
"""
def __init__(self, num_var, num_dims, array_shape, num_stats, use_em):
"""
:param num_var: number of random variables (int)
:param num_dims: dimensionality of random variables (int)
:param array_shape: shape of log-probability tensor, (tuple of ints)
log-probability tensor will be of shape (batch_size, num_var,) + array_shape
:param num_stats: number of sufficient statistics of exponential family (int)
:param use_em: use internal EM algorithm? (bool)
"""
super(ExponentialFamilyArray, self).__init__()
self.num_var = num_var
self.num_dims = num_dims
self.array_shape = array_shape
self.num_stats = num_stats
self.params_shape = (num_var, *array_shape, num_stats)
self.params = None
self.ll = None
self.suff_stats = None
self.marginalization_idx = None
self.marginalization_mask = None
self._use_em = use_em
self._p_acc = None
self._stats_acc = None
self._online_em_frequency = None
self._online_em_stepsize = None
self._online_em_counter = 0
# if em is switched off, we re-parametrize the expectation parameters
# self.reparam holds the function object for this task
# self.reparam = None
# if not self._use_em:
# self.reparam = self.reparam_function()
# --------------------------------------------------------------------------------
# The following functions need to be implemented to specify an exponential family.
def sufficient_statistics(self, x):
"""
The sufficient statistics function for the implemented exponential family (called T(x) in the paper).
:param x: observed data (Tensor).
If self.num_dims == 1, this can be either of shape (batch_size, self.num_var, 1) or
(batch_size, self.num_var).
If self.num_dims > 1, this must be of shape (batch_size, self.num_var, self.num_dims).
:return: sufficient statistics of the implemented exponential family (Tensor).
Must be of shape (batch_size, self.num_var, self.num_stats)
"""
raise NotImplementedError
def log_normalizer(self, theta):
"""
Log-normalizer of the implemented exponential family (called A(theta) in the paper).
:param theta: natural parameters (Tensor). Must be of shape (self.num_var, *self.array_shape, self.num_stats).
:return: log-normalizer (Tensor). Must be of shape (self.num_var, *self.array_shape).
"""
raise NotImplementedError
def log_h(self, x):
"""
The log of the base measure (called h(x) in the paper).
:param x: observed data (Tensor).
If self.num_dims == 1, this can be either of shape (batch_size, self.num_var, 1) or
(batch_size, self.num_var).
If self.num_dims > 1, this must be of shape (batch_size, self.num_var, self.num_dims).
:return: log(h) of the implemented exponential family (Tensor).
Can either be a scalar or must be of shape (batch_size, self.num_var)
"""
raise NotImplementedError
def expectation_to_natural(self, phi):
"""
Conversion from expectations parameters phi to natural parameters theta, for the implemented exponential
family.
:param phi: expectation parameters (Tensor). Must be of shape (self.num_var, *self.array_shape, self.num_stats).
:return: natural parameters theta (Tensor). Same shape as phi.
"""
raise NotImplementedError
def default_initializer(self):
"""
Default initializer for params.
:return: initial parameters for the implemented exponential family (Tensor).
Must be of shape (self.num_var, *self.array_shape, self.num_stats)
"""
raise NotImplementedError
def project_params(self, params):
"""
Project onto parameters' constraint set.
Exponential families are usually defined on a constrained domain, e.g. the second parameter of a Gaussian needs
to be non-negative. The EM algorithm takes the parameters sometimes out of their domain. This function projects
them back onto their domain.
:param params: the current parameters, same shape as self.params.
:return: projected parameters, same shape as self.params.
"""
raise NotImplementedError
def reparam_function(self):
"""
Re-parameterize parameters, in order that they stay in their constrained domain.
When we are not using the EM, we need to transform unconstrained (real-valued) parameters to the constrained set
of the expectation parameter. This function should return such a function (i.e. the return value should not be
a projection, but a function which does the projection).
:return: function object f which takes as input unconstrained parameters (Tensor) and returns re-parametrized
parameters.
"""
raise NotImplementedError
def _sample(self, num_samples, params, **kwargs):
"""
Helper function for sampling the exponential family.
:param num_samples: number of samples to be produced
:param params: expectation parameters (phi) of the exponential family, of shape
(self.num_var, *self.array_shape, self.num_stats)
:param kwargs: keyword arguments
Depending on the implementation, kwargs can also contain further arguments.
:return: i.i.d. samples of the exponential family (Tensor).
Should be of shape (num_samples, self.num_var, self.num_dims, *self.array_shape)
"""
raise NotImplementedError
def _argmax(self, params, **kwargs):
"""
Helper function for getting the argmax of the exponential family.
:param params: expectation parameters (phi) of the exponential family, of shape
(self.num_var, *self.array_shape, self.num_stats)
:param kwargs: keyword arguments
Depending on the implementation, kwargs can also contain further arguments.
:return: argmax of the exponential family (Tensor).
Should be of shape (self.num_var, self.num_dims, *self.array_shape)
"""
raise NotImplementedError
# --------------------------------------------------------------------------------
def initialize(self, initializer='default'):
"""
Initialize the parameters for this ExponentialFamilyArray.
:param initializer: denotes the initialization method.
If 'default' (str): use the default initialization, and store the parameters locally.
If Tensor: provide custom initial parameters.
:return: None
"""
if type(initializer) == str and initializer == 'default':
# default initializer; when em is switched off, we reparametrize and use Gaussian noise as init values.
if self._use_em:
self.params = torch.nn.Parameter(self.default_initializer())
else:
self.params = torch.nn.Parameter(torch.randn(self.params_shape))
elif type(initializer) == torch.Tensor:
# provided initializer
if initializer.shape != self.params_shape:
raise AssertionError("Incorrect parameter shape.")
self.params = torch.nn.Parameter(initializer)
else:
raise AssertionError("Unknown initializer.")
def forward(self, x):
"""
Evaluates the exponential family, in log-domain. For a single log-density we would compute
log_h(X) + <params, T(X)> + A(params)
Here, we do this in parallel and compute an array of log-densities of shape array_shape, for each sample in the
batch and each RV.
:param x: input data (Tensor).
If self.num_dims == 1, this can be either of shape (batch_size, self.num_var, 1) or
(batch_size, self.num_var).
If self.num_dims > 1, this must be of shape (batch_size, self.num_var, self.num_dims).
:return: log-densities of implemented exponential family (Tensor).
Will be of shape (batch_size, self.num_var, *self.array_shape)
"""
if self._use_em:
with torch.no_grad():
theta = self.expectation_to_natural(self.params)
else:
phi = self.reparam(self.params)
theta = self.expectation_to_natural(phi)
# suff_stats: (batch_size, self.num_var, self.num_stats)
self.suff_stats = self.sufficient_statistics(x)
# reshape for broadcasting
shape = self.suff_stats.shape
shape = shape[0:2] + (1,) * len(self.array_shape) + (shape[2],)
self.suff_stats = self.suff_stats.reshape(shape)
# log_normalizer: (self.num_var, *self.array_shape)
log_normalizer = self.log_normalizer(theta)
# log_h: scalar, or (batch_size, self.num_var)
log_h = self.log_h(x)
if len(log_h.shape) > 0:
# reshape for broadcasting
log_h = log_h.reshape(log_h.shape[0:2] + (1,) * len(self.array_shape))
# compute the exponential family tensor
# (batch_size, self.num_var, *self.array_shape)
self.ll = log_h + (theta.unsqueeze(0) * self.suff_stats).sum(-1) - log_normalizer
if self._use_em:
# EM needs the gradient with respect to self.ll
self.ll.requires_grad_()
# Marginalization in PCs works by simply setting leaves corresponding to marginalized variables to 1 (0 in
# (log-domain). We achieve this by a simple multiplicative 0-1 mask, generated here.
# TODO: the marginalization mask doesn't need to be computed every time; only when marginalization_idx changes.
if self.marginalization_idx is not None:
with torch.no_grad():
self.marginalization_mask = torch.ones(self.num_var, dtype=self.ll.dtype, device=self.ll.device)
self.marginalization_mask.data[self.marginalization_idx] = 0.0
shape = (1, self.num_var) + (1,) * len(self.array_shape)
self.marginalization_mask = self.marginalization_mask.reshape(shape)
self.marginalization_mask.requires_grad_(False)
else:
self.marginalization_mask = None
if self.marginalization_mask is not None:
output = self.ll * self.marginalization_mask
else:
output = self.ll
return output
def sample(self, num_samples=1, **kwargs):
if self._use_em:
params = self.params
else:
with torch.no_grad():
params = self.reparam(self.params)
return self._sample(num_samples, params, **kwargs)
def argmax(self, **kwargs):
if self._use_em:
params = self.params
else:
with torch.no_grad():
params = self.reparam(self.params)
return self._argmax(params, **kwargs)
def em_set_hyperparams(self, online_em_frequency, online_em_stepsize, purge=True):
"""Set new setting for online EM."""
if purge:
self.em_purge()
self._online_em_counter = 0
self._online_em_frequency = online_em_frequency
self._online_em_stepsize = online_em_stepsize
def em_purge(self):
""" Discard em statistics."""
if self.ll is not None and self.ll.grad is not None:
self.ll.grad.zero_()
self._p_acc = None
self._stats_acc = None
    def em_process_batch(self):
        """
        Accumulate EM statistics of current batch. This should typically be called via EinsumNetwork.em_process_batch().
        """
        if not self._use_em:
            raise AssertionError("em_process_batch called while _use_em==False.")
        if self.params is None:
            return
        with torch.no_grad():
            # Gradient of the objective w.r.t. self.ll -- presumably the expected
            # posterior counts per leaf; verify against the backward pass that fills it.
            p = self.ll.grad
            # Sufficient statistics weighted by those counts, summed over the batch axis.
            weighted_stats = (p.unsqueeze(-1) * self.suff_stats).sum(0)
            p = p.sum(0)
            # Lazily create the accumulators on the first processed batch.
            if self._p_acc is None:
                self._p_acc = torch.zeros_like(p)
            self._p_acc += p
            if self._stats_acc is None:
                self._stats_acc = torch.zeros_like(weighted_stats)
            self._stats_acc += weighted_stats
            # Clear the gradient so the next backward pass starts from zero.
            self.ll.grad.zero_()
            # In online mode, trigger an update every _online_em_frequency batches.
            if self._online_em_frequency is not None:
                self._online_em_counter += 1
                if self._online_em_counter == self._online_em_frequency:
                    self.em_update(True)
                    self._online_em_counter = 0
    def em_update(self, _triggered=False):
        """
        Do an EM update. If the setting is online EM (online_em_stepsize is not None), then this function does nothing,
        since updates are triggered automatically. (Thus, leave the private parameter _triggered alone)
        :param _triggered: for internal use, don't set
        :return: None
        """
        if not self._use_em:
            raise AssertionError("em_update called while _use_em==False.")
        # In online mode, only the automatic trigger from em_process_batch may update.
        if self._online_em_stepsize is not None and not _triggered:
            return
        with torch.no_grad():
            if self._online_em_stepsize is None:
                # Full-batch EM: accumulated statistics normalized by accumulated
                # counts; the epsilon guards against division by zero.
                self.params.data = self._stats_acc / (self._p_acc.unsqueeze(-1) + 1e-12)
            else:
                # Online EM: convex combination of current params and batch estimate.
                s = self._online_em_stepsize
                self.params.data = (1. - s) * self.params + s * (self._stats_acc / (self._p_acc.unsqueeze(-1) + 1e-12))
            # Project back onto the valid parameter domain.
            self.params.data = self.project_params(self.params.data)
        # Reset the accumulators for the next EM round.
        self._p_acc = None
        self._stats_acc = None
    def set_marginalization_idx(self, idx):
        """Set indices of variables to be marginalized out."""
        self.marginalization_idx = idx
    def get_marginalization_idx(self):
        """Get indices of variables to be marginalized out."""
        return self.marginalization_idx
def shift_last_axis_to(x, i):
    """Move the last axis of tensor x so that it ends up at position i."""
    last = len(x.shape) - 1
    # Build the permutation by inserting the last axis at position i.
    order = list(range(last))
    order.insert(i, last)
    return x.permute(tuple(order))
class NormalArray(ExponentialFamilyArray):
    """Implementation of Normal distribution.

    Expectation parameters phi are stored as (mu, E[x^2]) = (mu, var + mu^2),
    concatenated along the last axis (num_dims entries each).
    """
    def __init__(self, num_var, num_dims, array_shape, min_var=0.0001, max_var=10., use_em=True):
        # num_stats = 2 * num_dims: sufficient statistics are (x, x^2) per dimension.
        super(NormalArray, self).__init__(num_var, num_dims, array_shape, 2 * num_dims, use_em=use_em)
        # Pre-computed constant log(2*pi).
        self.log_2pi = torch.tensor(1.8378770664093453)
        # Variance is kept within [min_var, max_var] by project_params / reparam.
        self.min_var = min_var
        self.max_var = max_var
    def default_initializer(self):
        """Random expectation parameters: mu ~ N(0, 1) and E[x^2] = 1 + mu^2 (unit variance)."""
        phi = torch.empty(self.num_var, *self.array_shape, 2*self.num_dims)
        with torch.no_grad():
            phi[..., 0:self.num_dims] = torch.randn(self.num_var, *self.array_shape, self.num_dims)
            phi[..., self.num_dims:] = 1. + phi[..., 0:self.num_dims]**2
        return phi
    def project_params(self, phi):
        """Clamp the variance implied by phi into [min_var, max_var], leaving mu unchanged."""
        phi_project = phi.clone()
        mu2 = phi_project[..., 0:self.num_dims] ** 2
        # Convert E[x^2] to variance, clamp, then convert back.
        phi_project[..., self.num_dims:] -= mu2
        phi_project[..., self.num_dims:] = torch.clamp(phi_project[..., self.num_dims:], self.min_var, self.max_var)
        phi_project[..., self.num_dims:] += mu2
        return phi_project
    # remove function wrapper to avoid pickling error
    def reparam(self, params_in):
        """Map unconstrained parameters to expectation parameters (mu, var + mu^2),
        squashing the variance into [min_var, max_var] via a sigmoid."""
        mu = params_in[..., 0:self.num_dims].clone()
        var = self.min_var + torch.sigmoid(params_in[..., self.num_dims:]) * (self.max_var - self.min_var)
        return torch.cat((mu, var + mu**2), -1)
    def sufficient_statistics(self, x):
        """Return statistics (x, x^2): stacked for 2-D input, concatenated for 3-D input."""
        if len(x.shape) == 2:
            stats = torch.stack((x, x ** 2), -1)
        elif len(x.shape) == 3:
            stats = torch.cat((x, x**2), -1)
        else:
            raise AssertionError("Input must be 2 or 3 dimensional tensor.")
        return stats
    def expectation_to_natural(self, phi):
        """Natural parameters of the Gaussian: (mu / var, -1 / (2 * var))."""
        var = phi[..., self.num_dims:] - phi[..., 0:self.num_dims] ** 2
        theta1 = phi[..., 0:self.num_dims] / var
        theta2 = - 1. / (2. * var)
        return torch.cat((theta1, theta2), -1)
    def log_normalizer(self, theta):
        """Gaussian log-partition -theta1^2/(4*theta2) - 0.5*log(-2*theta2), summed over dims."""
        log_normalizer = -theta[..., 0:self.num_dims] ** 2 / (4 * theta[..., self.num_dims:]) - 0.5 * torch.log(-2. * theta[..., self.num_dims:])
        log_normalizer = torch.sum(log_normalizer, -1)
        return log_normalizer
    def log_h(self, x):
        """Constant base measure: -(num_dims / 2) * log(2*pi)."""
        return -0.5 * self.log_2pi * self.num_dims
    def _sample(self, num_samples, params, std_correction=1.0):
        """Draw Gaussian samples; std_correction scales the std (0.0 yields the mean)."""
        with torch.no_grad():
            mu = params[..., 0:self.num_dims]
            var = params[..., self.num_dims:] - mu**2
            std = torch.sqrt(var)
            shape = (num_samples,) + mu.shape
            samples = mu.unsqueeze(0) + std_correction * std.unsqueeze(0) * torch.randn(shape, dtype=mu.dtype, device=mu.device)
            return shift_last_axis_to(samples, 2)
    def _argmax(self, params, **kwargs):
        """The mode of a Gaussian is its mean."""
        with torch.no_grad():
            mu = params[..., 0:self.num_dims]
            return shift_last_axis_to(mu, 1)
class BinomialArray(ExponentialFamilyArray):
    """Implementation of Binomial distribution.

    Expectation parameters phi are the means N*p, one entry per dimension.
    """
    def __init__(self, num_var, num_dims, array_shape, N, use_em=True):
        super(BinomialArray, self).__init__(num_var, num_dims, array_shape, num_dims, use_em=use_em)
        # Number of trials, stored as a float tensor.
        self.N = torch.tensor(float(N))
    def default_initializer(self):
        """Random means in (0.01*N, 0.99*N), away from the degenerate endpoints."""
        phi = (0.01 + 0.98 * torch.rand(self.num_var, *self.array_shape, self.num_dims)) * self.N
        return phi
    def project_params(self, phi):
        """Clamp the means into the valid range [0, N]."""
        return torch.clamp(phi, 0.0, self.N)
    def reparam_function(self):
        """Return a mapping from unconstrained parameters to means in (0, N)."""
        def reparam(params):
            # The 0.1 factor flattens the sigmoid, presumably to soften
            # gradients -- confirm against the training setup.
            return torch.sigmoid(params * 0.1) * float(self.N)
        return reparam
    def sufficient_statistics(self, x):
        """The sufficient statistic of a Binomial is x itself."""
        if len(x.shape) == 2:
            stats = x.unsqueeze(-1)
        elif len(x.shape) == 3:
            stats = x
        else:
            raise AssertionError("Input must be 2 or 3 dimensional tensor.")
        return stats
    def expectation_to_natural(self, phi):
        """Natural parameter is the log-odds logit(p), with p = phi/N clamped away from {0, 1}."""
        theta = torch.clamp(phi / self.N, 1e-6, 1. - 1e-6)
        theta = torch.log(theta) - torch.log(1. - theta)
        return theta
    def log_normalizer(self, theta):
        """Binomial log-partition N * log(1 + exp(theta)), summed over dimensions."""
        return torch.sum(self.N * torch.nn.functional.softplus(theta), -1)
    def log_h(self, x):
        """Log binomial coefficient log(N choose x); zero in the Bernoulli case N == 1."""
        if self.N == 1:
            return torch.zeros([], device=x.device)
        else:
            log_h = torch.lgamma(self.N + 1.) - torch.lgamma(x + 1.) - torch.lgamma(self.N + 1. - x)
            if len(x.shape) == 3:
                log_h = log_h.sum(-1)
            return log_h
    def _sample(self, num_samples, params, dtype=torch.float32, memory_efficient_binomial_sampling=True):
        """Sample counts as sums of N Bernoulli draws with success probability params / N.

        The memory-efficient variant loops over the N trials; the alternative
        materializes all N trials at once.
        """
        with torch.no_grad():
            params = params / self.N
            if memory_efficient_binomial_sampling:
                samples = torch.zeros((num_samples,) + params.shape, dtype=dtype, device=params.device)
                for n in range(int(self.N)):
                    rand = torch.rand((num_samples,) + params.shape, device=params.device)
                    samples += (rand < params).type(dtype)
            else:
                rand = torch.rand((num_samples,) + params.shape + (int(self.N),), device=params.device)
                samples = torch.sum(rand < params.unsqueeze(-1), -1).type(dtype)
            return shift_last_axis_to(samples, 2)
    def _argmax(self, params, dtype=torch.float32):
        """Binomial mode floor((N + 1) * p), clamped into [0, N]."""
        with torch.no_grad():
            params = params / self.N
            mode = torch.clamp(torch.floor((self.N + 1.) * params), 0.0, self.N).type(dtype)
            return shift_last_axis_to(mode, 1)
class CategoricalArray(ExponentialFamilyArray):
    """Implementation of Categorical distribution.

    Expectation parameters are the K category probabilities per dimension,
    flattened into a last axis of length num_dims * K.
    """
    def __init__(self, num_var, num_dims, array_shape, K, use_em=True):
        super(CategoricalArray, self).__init__(num_var, num_dims, array_shape, num_dims * K, use_em=use_em)
        # Number of categories per dimension.
        self.K = K
    def default_initializer(self):
        """Random (unnormalized) probabilities in (0.01, 0.99)."""
        phi = (0.01 + 0.98 * torch.rand(self.num_var, *self.array_shape, self.num_dims * self.K))
        return phi
    def project_params(self, phi):
        """Note that this is not actually l2-projection. For simplicity, we simply renormalize."""
        phi = phi.reshape(self.num_var, *self.array_shape, self.num_dims, self.K)
        phi = torch.clamp(phi, min=1e-12)
        phi = phi / torch.sum(phi, -1, keepdim=True)
        return phi.reshape(self.num_var, *self.array_shape, self.num_dims * self.K)
    def reparam_function(self):
        """Return a mapping from unconstrained parameters to probabilities via softmax."""
        def reparam(params):
            return torch.nn.functional.softmax(params, -1)
        return reparam
    def sufficient_statistics(self, x):
        """One-hot encode the (integer-valued) input."""
        if len(x.shape) == 2:
            stats = one_hot(x.long(), self.K)
        elif len(x.shape) == 3:
            # NOTE(review): this reshape collapses ALL leading axes into one,
            # unlike the 2-D branch -- confirm downstream code expects this shape.
            stats = one_hot(x.long(), self.K).reshape(-1, self.num_dims * self.K)
        else:
            raise AssertionError("Input must be 2 or 3 dimensional tensor.")
        return stats
    def expectation_to_natural(self, phi):
        """Natural parameters are the log-probabilities (renormalized per dimension first)."""
        theta = torch.clamp(phi, 1e-12, 1.)
        theta = theta.reshape(self.num_var, *self.array_shape, self.num_dims, self.K)
        theta /= theta.sum(-1, keepdim=True)
        theta = theta.reshape(self.num_var, *self.array_shape, self.num_dims * self.K)
        theta = torch.log(theta)
        return theta
    def log_normalizer(self, theta):
        """The Categorical log-partition is zero in this parameterization."""
        return 0.0
    def log_h(self, x):
        """Base measure is constant 1, so log h(x) = 0."""
        return torch.zeros([], device=x.device)
    def _sample(self, num_samples, params, dtype=torch.float32):
        """Inverse-CDF sampling: count how many CDF bins each uniform draw exceeds."""
        with torch.no_grad():
            dist = params.reshape(self.num_var, *self.array_shape, self.num_dims, self.K)
            cum_sum = torch.cumsum(dist[..., 0:-1], -1)
            rand = torch.rand((num_samples,) + cum_sum.shape[0:-1] + (1,), device=cum_sum.device)
            samples = torch.sum(rand > cum_sum, -1).type(dtype)
            return shift_last_axis_to(samples, 2)
    def _argmax(self, params, dtype=torch.float32):
        """Mode is the most probable category per dimension."""
        with torch.no_grad():
            dist = params.reshape(self.num_var, *self.array_shape, self.num_dims, self.K)
            mode = torch.argmax(dist, -1).type(dtype)
            return shift_last_axis_to(mode, 1)
|
from django.db import models
class Department(models.Model):
    # Display name of the department ("部门名称" = department name).
    name = models.CharField(max_length=10, verbose_name='部门名称')
    # Optional free-text description ("部门简介" = department introduction).
    brief_introduction = models.CharField(max_length=500, verbose_name='部门简介', null=True, blank=True)
    # Soft-delete flag ("是否删除" = is deleted). NOTE(review): no default is
    # declared, so every create must supply a value explicitly.
    is_delete = models.BooleanField(verbose_name='是否删除')
    def __str__(self):
        return '<Department: {}>'.format(self.name)
class StaffMember(models.Model):
    """A staff member belonging to a Department.

    The verbose_name values are the Chinese UI labels (name, phone number,
    grade, school department, major, personal signature, brief introduction,
    entry/quit dates, and several status flags). Deleting a Department
    cascades to its members.
    """
    name = models.CharField(max_length=10, verbose_name='名字')
    work_department = models.ForeignKey(Department, on_delete=models.CASCADE)
    phone_num = models.CharField(max_length=11, verbose_name='手机号码', null=True, blank=True)
    grade = models.CharField(max_length=4, verbose_name='年级')
    school_department = models.CharField(max_length=10, verbose_name='系别')
    major = models.CharField(max_length=20, verbose_name='专业')
    personal_signature = models.CharField(max_length=30, verbose_name='个性签名', null=True, blank=True)
    brief_introduction = models.CharField(max_length=500, verbose_name='个人简介', null=True, blank=True)
    start_entry = models.DateField(verbose_name='起始任职', null=True, blank=True)
    end_quit = models.DateField(verbose_name='结束任职', null=True, blank=True)
    is_incumbent = models.BooleanField(verbose_name='是否在任')
    is_first_generation = models.BooleanField(verbose_name='是否初代')
    is_man = models.BooleanField(verbose_name='性别男')
    is_delisting = models.BooleanField(verbose_name='是否除名')
    def __str__(self):
        # Added for consistency with Department.__str__, so admin/debug output
        # is readable instead of the default "StaffMember object (pk)".
        return '<StaffMember: {}>'.format(self.name)
|
import random
from secret_words import word_list
import json
import score_board
def get_secret_word():
    """Pick a random word from the secret word list, lower-cased."""
    return random.choice(word_list).lower()
def play_hanman(word):
    """Run one interactive round of hangman for `word` and update the score board."""
    allowed_errors = 5
    guesses = []
    solved = False
    player_name = input("Please enter your name: ")
    player_score = score_board.get_player_score(player_name)
    print(player_name, " Score :", player_score)
    while not solved:
        # Reveal the letters guessed so far, mask the rest.
        for letter in word:
            print(letter if letter.lower() in guesses else "-", end=" ")
        print("")
        guess = input(f"Allowed Error Left {allowed_errors},Guess your Letter : ")
        guesses.append(guess.lower())
        if guess.lower() not in word.lower():
            allowed_errors -= 1
            if allowed_errors == 0:
                break
        # The round is solved once every letter of the word has been guessed.
        solved = all(letter.lower() in guesses for letter in word)
    if solved:
        player_score += 10
        print("hurray You found the word!,It was ", word)
        print("Score obtained in this round is : ", 10)
    else:
        print("The Game is over! the word was ", word)
        print("Score obtained in this round is : ", 0)
    print("Your updated score is : ", player_score)
    score_board.update_player_score(player_name, player_score)
    score_board.update_top_10_player_list(player_name, player_score)
if __name__ == "__main__":
    # Play rounds until the player declines, then print the score tables.
    play_hanman(get_secret_word())
    while input("Play Again? (Y/N) ").lower() == "y":
        play_hanman(get_secret_word())
    score_board.print_top_10_players()
    score_board.print_all_players_scores()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 14:49:27 2020
@author: nerohmot
"""
import struct
from .common import command_ABC
class GET_BOOT_SEQUENCE(command_ABC):
    '''
    Description: With this we can get the current boot sequence and timing.
    Input: None
    Output:
    | Index | Name       | Type    | Default | BOOT_STAGE | Description                        |
    |:-----:|:-----------|:--------|:--------|:-----------|:-----------------------------------|
    |0      | RAIL1_SEQ  | uint8   | 1       | 1          | RAIL1 sequence number              |
    |1      | RAIL1_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on RAIL1  |
    |2      | RAIL2_SEQ  | uint8   | 2       | 1          | RAIL2 sequence number              |
    |3      | RAIL2_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on RAIL2  |
    |4      | P25V0D_SEQ | uint8   | 3       | 1          | P25V0D sequence number             |
    |5      | P25V0D_DEL | uint8   | 100     | 1          | Wait 100ms after turning on P25V0D |
    |6      | P17V0D_SEQ | uint8   | 3       | 1          | P17V0D sequence number             |
    |7      | P17V0D_DEL | uint8   | 100     | 1          | Wait 100ms after turning on P17V0D |
    |8      | N7V0D_SEQ  | uint8   | 3       | 1          | N7V0D sequence number              |
    |9      | N7V0D_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on N7V0D  |
    |10     | P15V0A_SEQ | uint8   | 4       | 1          | P15V0A sequence number             |
    |11     | P15V0A_DEL | uint8   | 100     | 1          | Wait 100ms after turning on P15V0A |
    |12     | N15V0A_SEQ | uint8   | 4       | 1          | N15V0A sequence number             |
    |13     | N15V0A_DEL | uint8   | 100     | 1          | Wait 100ms after turning on N15V0A |
    |14     | P5V0D_SEQ  | uint8   | 5       | 1          | P5V0D sequence number              |
    |15     | P5V0D_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on P5V0D  |
    |16     | P5V0A_SEQ  | uint8   | 5       | 1          | P5V0A sequence number              |
    |17     | P5V0A_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on P5V0A  |
    |18     | N5V0A_SEQ  | uint8   | 5       | 1          | N5V0A sequence number              |
    |19     | N5V0A_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on N5V0A  |
    |20     | P3V3D_SEQ  | uint8   | 6       | 1          | P3V3D sequence number              |
    |21     | P3V3D_DEL  | uint8   | 100     | 1          | Wait 100ms after turning on P3V3D  |
    |22     | PVLB_SEQ   | uint8   | 7       | 2          | PVLB sequence number               |
    |23     | PVLB_DEL   | uint8   | 100     | 2          | Wait 100ms after turning on PVLB   |
    |24     | P5V0R_SEQ  | uint8   | 8       | 2          | P5V0R sequence number              |
    |25     | P5V0R_DEL  | uint8   | 100     | 2          | Wait 100ms after turning on P5V0R  |
    Note: It is possible to switch some rails on together! In such case one needs to wait the longest of the delays before continuing
          (in case the the DELays are not the same).
    '''
    command = 0x03
    sub_command = 0x04
    payload = b''
    def receive(self, DA, ACK, RXTX, PAYLOAD):
        """Decode the 26-byte boot-sequence payload and append a report to the parent's output widget."""
        # 13 rails x (sequence number, delay) = 26 unsigned bytes.
        (RAIL1_SEQ, RAIL1_DEL,
         RAIL2_SEQ, RAIL2_DEL,
         P25V0D_SEQ, P25V0D_DEL,
         P17V0D_SEQ, P17V0D_DEL,
         N7V0D_SEQ, N7V0D_DEL,
         P15V0A_SEQ, P15V0A_DEL,
         N15V0A_SEQ, N15V0A_DEL,
         P5V0D_SEQ, P5V0D_DEL,
         P5V0A_SEQ, P5V0A_DEL,
         N5V0A_SEQ, N5V0A_DEL,
         P3V3D_SEQ, P3V3D_DEL,
         PVLB_SEQ, PVLB_DEL,
         P5V0R_SEQ, P5V0R_DEL) = struct.unpack('26B', PAYLOAD)
        line = "Get Boot Sequence Reply :"
        line += f" RAIL1 sequence # is {RAIL1_SEQ} and delay is {RAIL1_DEL} ms"
        line += f" RAIL2 sequence # is {RAIL2_SEQ} and delay is {RAIL2_DEL} ms"
        line += f" P25V0D sequence # is {P25V0D_SEQ} and delay is {P25V0D_DEL} ms"
        line += f" P17V0D sequence # is {P17V0D_SEQ} and delay is {P17V0D_DEL} ms"
        line += f" N7V0D sequence # is {N7V0D_SEQ} and delay is {N7V0D_DEL} ms"
        line += f" P15V0A sequence # is {P15V0A_SEQ} and delay is {P15V0A_DEL} ms"
        line += f" N15V0A sequence # is {N15V0A_SEQ} and delay is {N15V0A_DEL} ms"
        line += f" P5V0D sequence # is {P5V0D_SEQ} and delay is {P5V0D_DEL} ms"
        line += f" P5V0A sequence # is {P5V0A_SEQ} and delay is {P5V0A_DEL} ms"
        line += f" N5V0A sequence # is {N5V0A_SEQ} and delay is {N5V0A_DEL} ms"
        line += f" P3V3D sequence # is {P3V3D_SEQ} and delay is {P3V3D_DEL} ms"
        line += f" PVLB sequence # is {PVLB_SEQ} and delay is {PVLB_DEL} ms"
        line += f" P5V0R sequence # is {P5V0R_SEQ} and delay is {P5V0R_DEL} ms"
        # BUG FIX: this line previously used `line = ...`, which discarded the
        # entire report assembled above; append the raw-frame info instead.
        line += f" DA={DA} CMD={self.command} SCMD={self.sub_command} ACK={ACK} RXTX={RXTX} PAYLOAD={PAYLOAD}"
        self.parent.output_te.append(line)
|
import socket
import sys
import struct
import fcntl
import array
import threading
import time
import json
import multiprocessing
import datetime
import serial
import servo.servo as servo
import peltier.peltier as peltier
# UDP endpoint of the remote client that receives telemetry.
CLIENT_ADDR = ('10.22.214.188', 8000)
# Latest accelerometer/gyro x, y readings, shared between server threads.
accel_gyro = ["", ""]
# Latest three servo readings, shared between server threads.
servo_data = ["","",""]
# Placeholder; each server thread creates and binds its own socket in run().
sock = 'nil'
# Initialize once for reading and writing
ser = serial.Serial('/dev/ttyACM0', 9600)
class server (threading.Thread):
def __init__(self, threadID, name, port):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.port = port
if (name == "peltier"):
peltier.Peltier.init()
if (name == "servo"):
servo.Servo()
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = get_ip(sock)
if (self.name is 'accel_gyro' or self.name is 'servo_read'):
ip = "127.0.0.1"
server_address = (ip, self.port)
sock.bind(server_address)
print '%s %s %s \n' % (ip, self.port, self.name)
servo_exec = None
peltier_exec = None
while True:
data, addr = sock.recvfrom(1024)
print self.port
# JSON Info
print(data)
info = json.loads(data)
print datetime.datetime.now().time()
if (self.name is 'servo'):
#handle servo data in seperate thread
# { 'timestamp': aabbccxxyyzz, 'angle': 300 }
if (servo_exec is None or (servo_exec is not None and not servo_exec.is_alive())):
if (info["angle"] is not 0):
servo_exec = threading.Thread(target=servo.Servo.tilt, args=(info["angle"], "finger", ))
else:
servo_exec = threading.Thread(target=servo.Servo.stop, args=())
servo_exec.start()
elif (self.name is 'accel_gyro'):
# {'accel_gyro_x': xxx.xxx, 'accel_gyro_y': xxx.xxx}
print(info["accel_gyro"])
accel_gyro[0] = info["accel_gyro"]["x"]
accel_gyro[1] = info["accel_gyro"]["y"]
sock_d = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_d.sendto("{ \"timestamp\": " + str(int(time.time())) + ", \"servo\": [" + str(servo_data[0]) + ", " + str(servo_data[1]) + ", " + str(servo_data[2]) + "], \"accel_gyro\": {\"x\": " + str(accel_gyro[0]) + ", \"y\": " + str(accel_gyro[1]) + "}}", ("10.22.214.188", 8000))
elif (self.name is 'servo_read'):
servo_data[0] = info["servo"][0]
servo_data[1] = info["servo"][1]
servo_data[2] = info["servo"][2]
else:
# { 'timestamp': aabbccxxyyzz, 'temperature': 5 }
if (peltier_exec is None or (peltier_exec is not None and not peltier_exec.is_alive())):
if (info["temperature"] > 0):
peltier_exec = threading.Thread(target=peltier.Peltier.hot, args=())
elif (info["temperature"] == 0):
peltier_exec = threading.Thread(target=peltier.Peltier.stop, args=())
else:
peltier_exec = threading.Thread(target=peltier.Peltier.cold, args=())
peltier_exec.start()
def get_ip(sock):
    """Return the IPv4 address of eth0 or wlan0 (last match wins), or 0 if neither exists.

    BUG FIX: the condition was `interface[0] is 'eth0' or 'wlan0'`, which parses
    as `(interface[0] is 'eth0') or 'wlan0'` and is therefore always truthy --
    every interface's address (including loopback) could be returned.
    """
    ip = 0
    for interface in get_interfaces(sock):
        if interface[0] in ('eth0', 'wlan0'):
            ip = interface[1]
    return ip
def format_ip(ip):
    """Convert a 4-character packed address string into dotted-quad notation."""
    return '.'.join(str(ord(octet)) for octet in ip[:4])
def get_interfaces(sock):
    """Enumerate network interfaces as (name, dotted-quad ip) tuples.

    Python 2 only: fills a raw buffer via the SIOCGIFCONF ioctl (0x8912) and
    parses the returned ifreq records with a fixed 32-byte stride.
    """
    total_bytes = 256 * 32;
    # NOTE(review): array.array('B', <str>) and .tostring() are Python-2 APIs.
    interface_info = array.array('B', '\0' * total_bytes)
    # The ioctl returns (bytes_written, buffer_pointer); keep only the length.
    output_bytes = struct.unpack('iL', fcntl.ioctl(
        sock.fileno(),
        0x8912,
        struct.pack('iL', total_bytes, interface_info.buffer_info()[0])
    ))[0]
    interfaces = []
    str_interface_info = interface_info.tostring()
    for i in range (0, output_bytes, 32):
        # Interface name: NUL-terminated string at the start of each record.
        name = str_interface_info[i:i+32].split('\0', 1)[0]
        # IPv4 address: 4 raw bytes at offset 20 within the record.
        ip = str_interface_info[i+20:i+24]
        interfaces.append((
            name,
            format_ip(ip)
        ))
    return interfaces
# One server thread per function, each on its own UDP port. All are daemons
# so they terminate together with the main thread.
servo_server = server(1, "servo", 3000)
peltier_server = server(2, "peltier", 3001)
accel_gyro_server = server(3, "accel_gyro", 3002)
servo_read_server = server(4, "servo_read", 3003)
servo_server.daemon = True
peltier_server.daemon = True
accel_gyro_server.daemon = True
servo_read_server.daemon = True
# Running on separate thread
accel_gyro_server.start()
# Running on separate thread
servo_server.start()
# Running on separate thread
servo_read_server.start()
# Running on main thread (run(), not start(): this call blocks forever)
peltier_server.run()
|
"""A Community is a thin wrapper around a long-form time-series geodataframe."""
import tempfile
from pathlib import PurePath
from warnings import warn
import contextily as ctx
import geopandas as gpd
import mapclassify.classifiers as classifiers
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scikitplot as skplt
from ._data import _Map, datasets
from .analyze import cluster as _cluster
from .analyze import regionalize as _cluster_spatial
from .analyze import sequence as _sequence
from .analyze import transition as _transition
from .analyze import predict_labels as _predict_labels
from .harmonize import harmonize as _harmonize
from .io import _fips_filter, _fipstable, _from_db, get_lehd, adjust_inflation
from .util import gif_from_path as _gif_from_path
from .visualize import plot_transition_matrix as _plot_transitions
from .visualize import plot_transition_graphs as _plot_transition_graphs
# Map each mapclassify classifier name (lower-cased) to its classifier class.
schemes = {
    classifier.lower(): getattr(classifiers, classifier)
    for classifier in classifiers.CLASSIFIERS
}
class Community:
"""Spatial and tabular data for a collection of "neighborhoods" over time.
A community is a collection of "neighborhoods" represented by spatial
boundaries (e.g. census tracts, or blocks in the US), and tabular data
which describe the composition of each neighborhood (e.g. data from
surveys, sensors, or geocoded misc.). A Community can be large (e.g. a
metropolitan region), or small (e.g. a handfull of census tracts) and
may have data pertaining to multiple discrete points in time.
Parameters
----------
gdf : geopandas.GeoDataFrame
long-form geodataframe that holds spatial and tabular data.
harmonized : bool
Whether neighborhood boundaries have been harmonized into a set of
time-consistent units
Attributes
----------
gdf : geopandas.GeoDataFrame
long-form geodataframe that stores neighborhood-level attributes
and geometries for one or more time periods
harmonized : bool
Whether neighborhood boundaries have been harmonized into
consistent units over time
models : dict of geosnap.analyze.ModelResults
Dictionary of model instances that have been fitted on the community.
The model name is the key and the value is an instance of geosnap.analyze.ModelResults.
For cluster models, the model name will match a column on the Community.gdf.
"""
def __init__(self, gdf=None, harmonized=None, **kwargs):
"""Initialize a new Community.
Parameters
----------
gdf : geopandas.GeoDataFrame
long-form geodataframe that stores neighborhood-level attributes
and geometries for one or more time periods
harmonized : bool
Whether neighborhood boundaries have been harmonized into
consistent units over time
**kwargs : kwargs
extra keyword arguments `**kwargs`.
"""
self.gdf = gdf
self.harmonized = harmonized
self.models = _Map()
    def harmonize(
        self,
        target_year=None,
        weights_method="area",
        extensive_variables=None,
        intensive_variables=None,
        allocate_total=True,
        raster=None,
        codes="developed",
        force_crs_match=True,
    ):
        """Standardize inconsistent boundaries into time-static ones.

        Parameters
        ----------
        target_year: int
            Polygons from this year will become the target boundaries for
            spatial interpolation.
        weights_method : string
            The method that the harmonization will be conducted. This can be set to
                * area : harmonization using simple area-weighted interprolation.
                * dasymetric : harmonization using area-weighted interpolation with raster-based
                  ancillary data such as <https://www.mrlc.gov/data/nlcd-2016-land-cover-conus>
                  to mask out uninhabited land.
        extensive_variables : list
            The names of variables in each dataset of raw_community that contains
            extensive variables to be harmonized (see (2) in Notes).
        intensive_variables : list
            The names of variables in each dataset of raw_community that contains
            intensive variables to be harmonized (see (2) in Notes).
        allocate_total : bool
            True if total value of source area should be allocated.
            False if denominator is area of i. Note that the two cases
            would be identical when the area of the source polygon is
            exhausted by intersections. See (3) in Notes for more details.
        raster : str
            the path to a local raster image to be used as a dasymetric mask. If using
            "dasymetric" as the weights method, this is a required argument.
        codes : list of ints
            list of raster pixel values that should be considered as
            'populated'. Default values are consistent with the National Land Cover
            Database (NLCD), and include
                * 21 (Developed, Open Space)
                * 22 (Developed, Low Intensity)
                * 23 (Developed, Medium Intensity)
                * 24 (Developed, High Intensity)
            The description of each code can be found here:
            <https://www.mrlc.gov/sites/default/files/metadata/landcover.html>
            Ignored if not using dasymetric harmonizatiton.
        force_crs_match : bool. Default is True.
            Wheter the Coordinate Reference System (CRS) of the polygon will be
            reprojected to the CRS of the raster file. It is recommended to
            leave this argument True.
            Only taken into consideration for harmonization raster based.

        Notes
        -----
        1) A quick explanation of extensive and intensive variables can be found
        here: <http://ibis.geog.ubc.ca/courses/geob370/notes/intensive_extensive.htm>

        2) For an extensive variable, the estimate at target polygon j (default case) is:

            v_j = \sum_i v_i w_{i,j}

            w_{i,j} = a_{i,j} / \sum_k a_{i,k}

            If the area of the source polygon is not exhausted by intersections with
            target polygons and there is reason to not allocate the complete value of
            an extensive attribute, then setting allocate_total=False will use the
            following weights:

            v_j = \sum_i v_i w_{i,j}

            w_{i,j} = a_{i,j} / a_i

            where a_i is the total area of source polygon i.

            For an intensive variable, the estimate at target polygon j is:

            v_j = \sum_i v_i w_{i,j}

            w_{i,j} = a_{i,j} / \sum_k a_{k,j}

        Returns
        -------
        geosnap.Community
            a new Community whose gdf holds the harmonized data; the input
            Community is left unmodified.
        """
        # convert the long-form into a list of dataframes
        # data = [x[1] for x in self.gdf.groupby("year")]
        if codes == "developed":
            codes = [21, 22, 23, 24]
        gdf = _harmonize(
            self.gdf,
            target_year=target_year,
            weights_method=weights_method,
            extensive_variables=extensive_variables,
            intensive_variables=intensive_variables,
            allocate_total=allocate_total,
            raster=raster,
            codes=codes,
            force_crs_match=force_crs_match,
        )
        return Community(gdf, harmonized=True)
    def cluster(
        self,
        n_clusters=6,
        method=None,
        best_model=False,
        columns=None,
        verbose=False,
        scaler="std",
        pooling="fixed",
        **kwargs,
    ):
        """Create a geodemographic typology by running a cluster analysis on the study area's neighborhood attributes.

        Parameters
        ----------
        n_clusters : int, required
            the number of clusters to model. The default is 6).
        method : str in ['kmeans', 'ward', 'affinity_propagation', 'spectral', 'gaussian_mixture', 'hdbscan'], required
            the clustering algorithm used to identify neighborhood types
        best_model : bool, optional
            if using a gaussian mixture model, use BIC to choose the best
            n_clusters. (the default is False).
        columns : array-like, required
            subset of columns on which to apply the clustering
        verbose : bool, optional
            whether to print warning messages (the default is False).
        scaler : None or scaler from sklearn.preprocessing, optional
            a scikit-learn preprocessing class that will be used to rescale the
            data. Defaults to sklearn.preprocessing.StandardScaler
        pooling : ["fixed", "pooled", "unique"], optional (default='fixed')
            How to treat temporal data when applying scaling. Options include:
                * fixed : scaling is fixed to each time period
                * pooled : data are pooled across all time periods
                * unique : if scaling, apply the scaler to each time period, then generate clusters unique to each time period.

        Returns
        -------
        geosnap.Community
            a copy of input Community with neighborhood cluster labels appended
            as a new column. If the cluster is already present, the name will be incremented
        """
        harmonized = self.harmonized
        # Cluster a copy, so the input Community's gdf is never mutated.
        gdf, model, model_name = _cluster(
            gdf=self.gdf.copy(),
            n_clusters=n_clusters,
            method=method,
            best_model=best_model,
            columns=columns,
            verbose=verbose,
            scaler=scaler,
            pooling=pooling,
            **kwargs,
        )
        comm = Community(gdf, harmonized=harmonized)
        comm.models.update(
            self.models
        )  # keep any existing models in the input Community
        comm.models[model_name] = model
        return comm
    def regionalize(
        self,
        n_clusters=6,
        spatial_weights="rook",
        method=None,
        best_model=False,
        columns=None,
        threshold_variable="count",
        threshold=10,
        return_model=False,
        scaler=None,
        weights_kwargs=None,
        **kwargs,
    ):
        """Create a *spatial* geodemographic typology by running a cluster analysis on the metro area's neighborhood attributes and including a contiguity constraint.

        Parameters
        ----------
        n_clusters : int, required
            the number of clusters to model. The default is 6).
        spatial_weights : str ('queen' or 'rook') or libpysal.weights.W instance, optional
            spatial weights matrix specification` (the default is "rook"). If 'rook' or 'queen'
            then contiguity weights will be constructed internally, otherwise pass a
            libpysal.weights.W with additional arguments specified in weights_kwargs
        weights_kwargs : dict, optional
            If passing a libpysal.weights.W instance to spatial_weights, these additional
            keyword arguments that will be passed to the weights constructor
        method : str in ['ward_spatial', 'spenc', 'skater', 'azp', 'max_p'], required
            the clustering algorithm used to identify neighborhood types
        columns : array-like, required
            subset of columns on which to apply the clustering
        threshold_variable : str, required if using max-p, optional otherwise
            for max-p, which variable should define `p`. The default is "count",
            which will grow regions until the threshold number of polygons have
            been aggregated
        threshold : numeric, optional
            threshold to use for max-p clustering (the default is 10).
        return_model : bool, optional
            passed through to the regionalization routine
            (NOTE(review): exact semantics depend on analyze.regionalize -- confirm there).
        scaler : None or scaler from sklearn.preprocessing, optional
            a scikit-learn preprocessing class that will be used to rescale the
            data. Defaults to sklearn.preprocessing.StandardScaler

        Returns
        -------
        geosnap.Community
            a copy of input Community with neighborhood cluster labels appended
            as a new column. If the cluster is already present, the name will be incremented
        """
        harmonized = self.harmonized
        # Cluster a copy, so the input Community's gdf is never mutated.
        gdf, model, model_name = _cluster_spatial(
            gdf=self.gdf.copy(),
            n_clusters=n_clusters,
            spatial_weights=spatial_weights,
            method=method,
            best_model=best_model,
            columns=columns,
            threshold_variable=threshold_variable,
            threshold=threshold,
            return_model=return_model,
            scaler=scaler,
            weights_kwargs=weights_kwargs,
            **kwargs,
        )
        comm = Community(gdf, harmonized=harmonized)
        comm.models.update(
            self.models
        )  # keep any existing models in the input Community
        comm.models[model_name] = model
        return comm
def plot_transition_matrix(
self,
cluster_col=None,
w_type="queen",
w_options=None,
figsize=(13, 12),
n_rows=3,
n_cols=3,
suptitle=None,
savefig=None,
dpi=300,
**kwargs,
):
"""Plot global and spatially-conditioned transition matrices as heatmaps
Parameters
----------
cluster_col : str
column on the Community.gdf containing neighborhood type labels
w_type : str {'queen', 'rook'}
which type of libpysal spatial weights objects to encode connectivity
w_options : dict
additional options passed to a libpysal weights constructor (e.g. `k` for a KNN weights matrix)
figsize : tuple, optional
size of the resulting figure (13, 12)
n_rows : int, optional
rows in the plot; n_rows * n_cols must be >= the number of neighborhood types
n_cols : int, optional
columns in the plot; n_rows * n_cols must be >= the number of neighborhood types
suptitle : str, optional
title of the figure
title_kwds : dict, optional
additional keyword options for formatting the title
savefig : str, optional
location the plot will be saved
dpi : int, optional
dpi of the resulting image, default is 300
Returns
-------
matplotlib Axes
the axes on which the plots are drawn
"""
ax = _plot_transitions(
self,
cluster_col=cluster_col,
w_type=w_type,
w_options=w_options,
figsize=figsize,
n_rows=n_rows,
n_cols=n_cols,
suptitle=suptitle,
savefig=savefig,
dpi=dpi,
**kwargs,
)
return ax
def plot_transition_graphs(
self,
cluster_col=None,
w_type="queen",
layout="dot",
args="-n -Groot=0 -Goverlap=false -Gmindist=3.5 -Gsize=30,30!",
output_dir=".",
):
"""Plot a network graph representation of global and spatially-conditioned transition matrices.
This function requires pygraphviz to be installed. For linux and macos, it can be installed with
`conda install -c conda-forge pygraphviz`. At the time of this writing there is no pygraphviz build
available for Windows from mainstream conda channels, but it can be installed with
`conda install -c alubbock pygraphviz`
Parameters
----------
cluster_col : str
column on the Community.gdf containing neighborhood type labels
output_dir : str
the location that output images will be placed
w_type : str {'queen', 'rook'}
which type of libpysal spatial weights objects to encode connectivity
layout : str, 'dot'
graphviz layout for plotting
args : str, optional
additional arguments passed to graphviz.
default is "-n -Groot=0 -Goverlap=false -Gnodesep=0.01 -Gfont_size=1 -Gmindist=3.5 -Gsize=30,30!"
Returns
------
None
"""
_plot_transition_graphs(
self,
cluster_col=cluster_col,
w_type=w_type,
layout=layout,
args=args,
output_dir=output_dir,
)
def plot_silhouette(self, model_name=None, year=None, **kwargs):
    """Make a silhouette plot of a Community cluster model.

    Parameters
    ----------
    model_name : str, required
        name of the model (key into ``self.models``) to plot
    year : int, optional
        year to plot if the model was created with pooling=='unique'
        (in that case ``self.models[model_name]`` is keyed by year)
    kwargs : **kwargs, optional
        passed through to ``skplt.metrics.plot_silhouette``

    Returns
    -------
    matplotlib Axes
        silhouette plot of the given model
    """
    # resolve the model first: pooled models are stored directly, while
    # pooling=='unique' stores a mapping of year -> model
    model = self.models[model_name]
    if year:
        model = model[year]
    return skplt.metrics.plot_silhouette(model.X.values, model.labels, **kwargs)
def plot_silhouette_map(
    self,
    model_name=None,
    year=None,
    ctxmap=ctx.providers.Stamen.TonerLite,
    save_fig=None,
    figsize=(12, 3),
    alpha=0.5,
    cmap="bwr",
    title="",
    dpi=500,
    time_var="year",
    id_var="geoid",
    **kwargs,
):
    """Plot of the silhouette scores of a Community model.

    Draws two panels: a histogram of silhouette scores and a choropleth map
    of those scores on the community geography.

    Parameters
    ----------
    model_name : str , required
        model to be plotted
    year : int, optional
        year to be plotted if model created with pooling=='unique'
    ctxmap : contextily map provider, optional
        contextily basemap. Set to False for no basemap.
        Default is ctx.providers.Stamen.TonerLite
    save_fig : str, optional
        path to save figure if desired.
    figsize : tuple, optional
        an order tuple where x is width and y is height
        default is 12 inches wide and 3 inches high
    alpha : float, optional
        how transparent the plotted objects are
        Default is 0.5
    cmap : string, optional
        cmap to be plotted
        default is 'bwr'
    title : string, optional
        title of figure
        default is no title
    dpi : int, optional
        dpi of the saved image if save_fig=True
        default is 500
    time_var : string, optional
        the column in the community gdf that identifies time period
        default is 'year' from US census data
    id_var : string, optional
        column in gdf that identifies geographic units
        default is 'geoid' from US census data
    kwargs : **kwargs, optional
        pass through to matplotlib pyplot

    Returns
    -------
    silhouette scores mapped onto community geography
    """
    # Check for and use previously calculated values for graphs
    # Checking both arrays at the same time would be more efficient, but
    # comparing NumPy arrays with `and` is not allowed, and many solutions that try to compare numpy arrays
    # directly require error handling, so check if objects contain numpy arrays separately.
    # Work on a copy so the mercator reprojection below never mutates self.gdf.
    df = self.gdf.copy()
    if not year:
        # pooled model: silhouettes live directly on the model object
        if self.models[model_name].silhouettes is None:
            self.models[model_name].sil_scores()
    else:
        # pooling=='unique': one model per time period, keyed by year
        if self.models[model_name][year].silhouettes is None:
            self.models[model_name][year].sil_scores()
    # panel [0]: histogram of scores; panel [1]: choropleth map
    f, ax = plt.subplots(1, 2, figsize=figsize)
    if ctxmap:  # need to convert crs to mercator before graphing
        if df.crs != 3857:
            df = df.to_crs(epsg=3857)
    if not year:
        ax[0].hist(self.models[model_name].silhouettes["silhouettes"])
        # join per-unit/per-period scores onto the geometries, then map them;
        # vmin/vmax pinned to [-1, 1], the theoretical silhouette range
        df.join(self.models[model_name].silhouettes, on=[id_var, time_var]).plot(
            "silhouettes",
            ax=ax[1],
            alpha=alpha,
            legend=True,
            vmin=-1,
            vmax=1,
            cmap=cmap,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[1], source=ctxmap)
    else:
        ax[0].hist(self.models[model_name][year].silhouettes["silhouettes"])
        # restrict the map to the requested year before joining scores
        df[df.year == year].join(
            self.models[model_name][year].silhouettes, on=[id_var, time_var]
        ).plot(
            "silhouettes",
            ax=ax[1],
            alpha=alpha,
            legend=True,
            vmin=-1,
            vmax=1,
            cmap=cmap,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[1], source=ctxmap)
    ax[1].axis("off")
    # using both tight_layout() and passing title makes plots and title overlap, so only use one
    if title:
        f.suptitle(title)
    else:
        f.tight_layout()
    if save_fig:
        f.savefig(save_fig, dpi=dpi, bbox_inches="tight")
    return ax
def plot_next_best_label(
    self,
    model_name=None,
    year=None,
    ctxmap=ctx.providers.Stamen.TonerLite,
    save_fig=None,
    figsize=(12, 3),
    title="",
    alpha=0.5,
    dpi=500,
    time_var="year",
    id_var="geoid",
    **kwargs,
):
    """Plot the nearest_labels of the community model.

    Draws two panels: the model's own labels on the left and the
    next-best (second-nearest) cluster label for each unit on the right.

    Parameters
    ----------
    model_name : str , required
        model to be plotted
    year : int, optional
        year to be plotted if model created with pooling=='unique'
    ctxmap : contextily map provider, optional
        contextily basemap. Set to False for no basemap.
        Default is ctx.providers.Stamen.TonerLite
    save_fig : str, optional
        path to save figure if desired.
    figsize : tuple, optional
        an order tuple where x is width and y is height
        default is 12 inches wide and 3 inches high
    title : string, optional
        title of figure
        default is no title
    alpha : float, optional
        how transparent the plotted objects are
        Default is 0.5
    dpi : int, optional
        dpi of the saved image if save_fig=True
        default is 500
    time_var : string, optional
        the column in the community gdf that identifies time period
        default is 'year' from US census data
    id_var : string, optional
        column in gdf that identifies geographic units
        default is 'geoid' from US census data
    kwargs : **kwargs, optional
        pass through to matplotlib pyplot

    Returns
    -------
    nearest_labels scores of the passed model plotted onto community geography
    and an array made up of the the model labels and nearest labels that was used to graph the values
    """
    df = self.gdf.copy()
    if isinstance(self.models[model_name], dict) and not year:
        raise InputError(
            "This model has unique results for each time period; You must supply a value for `year`"
        )
    # If the user has already calculated, respect already calculated values
    if not year:
        if self.models[model_name].nearest_labels is None:
            self.models[model_name].nearest_label().astype(int)
    else:
        if self.models[model_name][year].nearest_labels is None:
            self.models[model_name][year].nearest_label().astype(int)
    f, ax = plt.subplots(1, 2, figsize=figsize)
    if ctxmap:
        if df.crs == 3857:
            pass
        else:  # need to convert crs to mercator before graphing
            df = df.to_crs(epsg=3857)
    if not year:
        temp_df = df.join(
            self.models[model_name].nearest_labels, on=[id_var, time_var]
        )
        temp_df = temp_df[["nearest_label", "geometry", model_name]]
        temp_df.set_index(model_name, inplace=True)
        # BUG FIX: was hard-coded alpha=0.5 here, ignoring the `alpha`
        # parameter (the year-specific branch below already uses it)
        df.plot(model_name, ax=ax[0], alpha=alpha, legend=True, categorical=True)
        temp_df.plot(
            "nearest_label",
            ax=ax[1],
            legend=True,
            categorical=True,
            alpha=alpha,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[0], source=ctxmap)
            ctx.add_basemap(ax[1], source=ctxmap)
    else:
        temp_df = df.join(
            self.models[model_name][year].nearest_labels, on=[id_var, time_var]
        )
        temp_df = temp_df[["nearest_label", time_var, "geometry", model_name]]
        temp_df.set_index(model_name, inplace=True)
        df[df.year == year].plot(
            model_name, ax=ax[0], alpha=alpha, legend=True, categorical=True
        )
        temp_df[temp_df.year == year].plot(
            "nearest_label",
            ax=ax[1],
            alpha=alpha,
            legend=True,
            categorical=True,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[0], source=ctxmap)
            ctx.add_basemap(ax[1], source=ctxmap)
    ax[0].axis("off")
    ax[1].axis("off")
    # tight_layout() and suptitle overlap, so only use one of the two
    if title:
        f.suptitle(title)
    else:
        f.tight_layout()
    if save_fig:
        f.savefig(save_fig, dpi=dpi, bbox_inches="tight")
    return ax
def plot_path_silhouette(
    self,
    model_name=None,
    year=None,
    ctxmap=ctx.providers.Stamen.TonerLite,
    save_fig=None,
    figsize=(12, 3),
    title="",
    alpha=0.5,
    cmap="bwr",
    dpi=500,
    time_var="year",
    id_var="geoid",
    **kwargs,
):
    """Plot the path_silhouettes of a Community model.

    Draws two panels: a histogram of path-silhouette scores and a
    choropleth map of those scores on the community geography.

    Parameters
    ----------
    model_name : str , required
        model to be plotted
    year : int, optional
        year to be plotted if model created with pooling=='unique'
    ctxmap : contextily map provider, optional
        contextily basemap. Set to False for no basemap.
        Default is ctx.providers.Stamen.TonerLite
    save_fig : str, optional
        path to save figure if desired.
    figsize : tuple, optional
        an order tuple where x is width and y is height
        default is 12 inches wide and 3 inches high
    title : string, optional
        title of figure
        default is no title
    alpha : float, optional
        how transparent the plotted objects are
        Default is 0.5
    cmap : string, optional
        cmap to be plotted
        default is 'bwr'
    dpi : int, optional
        dpi of the saved image if save_fig=True
        default is 500
    time_var : string, optional
        the column in the community gdf that identifies time period
        default is 'year' from US census data
    id_var : string, optional
        column in gdf that identifies geographic units
        default is 'geoid' from US census data
    kwargs : **kwargs, optional
        pass through to matplotlib pyplot

    Returns
    -------
    path_silhouette scores of the passed model plotted onto community geography
    """
    # Respect previously calculated values; compute only when missing
    if not year:
        if self.models[model_name].path_silhouettes is None:
            self.models[model_name].path_sil()
    else:
        if self.models[model_name][year].path_silhouettes is None:
            self.models[model_name][year].path_sil()
    f, ax = plt.subplots(1, 2, figsize=figsize)
    # work on a copy so reprojection never mutates self.gdf
    # (consistent with plot_silhouette_map)
    df = self.gdf.copy()
    if ctxmap:  # need to convert crs to mercator before graphing
        if df.crs != 3857:
            df = df.to_crs(epsg=3857)
    if not year:
        ax[0].hist(self.models[model_name].path_silhouettes["path_silhouettes"])
        # BUG FIX: previously indexed self.models[model_name][year] here,
        # which fails when `year` is None (pooled models are not year-keyed)
        df.join(
            self.models[model_name].path_silhouettes, on=[id_var, time_var]
        ).plot(
            "path_silhouettes",
            ax=ax[1],
            alpha=alpha,
            legend=True,
            vmin=-1,
            vmax=1,
            cmap=cmap,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[1], source=ctxmap)
    else:
        ax[0].hist(
            self.models[model_name][year].path_silhouettes["path_silhouettes"]
        )
        df[df.year == year].join(
            self.models[model_name][year].path_silhouettes, on=[id_var, time_var]
        ).plot(
            "path_silhouettes",
            ax=ax[1],
            alpha=alpha,
            legend=True,
            vmin=-1,
            vmax=1,
            cmap=cmap,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[1], source=ctxmap)
    ax[1].axis("off")
    # tight_layout() and suptitle overlap, so only use one of the two
    if title:
        f.suptitle(title)
    else:
        f.tight_layout()
    if save_fig:
        f.savefig(save_fig, dpi=dpi, bbox_inches="tight")
    return ax
def plot_boundary_silhouette(
    self,
    model_name=None,
    year=None,
    ctxmap=ctx.providers.Stamen.TonerLite,
    save_fig=None,
    figsize=(12, 3),
    title="",
    alpha=0.5,
    cmap="bwr",
    dpi=500,
    time_var="year",
    id_var="geoid",
    **kwargs,
):
    """Plot boundary_silhouettes of a Community model.

    Draws two panels: a histogram of the nonzero boundary-silhouette
    scores and a choropleth map of the scores on the community geography.

    Parameters
    ----------
    model_name : str , required
        model to be silhouette plotted
    year : int, optional
        year to be plotted if model created with pooling=='unique'
    ctxmap : contextily map provider, optional
        contextily basemap. Set to False for no basemap.
        Default is ctx.providers.Stamen.TonerLite
    figsize : tuple, optional
        an order tuple where x is width and y is height
        default is 12 inches wide and 3 inches high
    title : string, optional
        title of figure
        default is no title
    save_fig : str, optional
        path to save figure if desired.
    alpha : float, optional
        how transparent the plotted objects are
        Default is 0.5
    cmap : string, optional
        cmap to be plotted
        default is 'bwr'
    dpi : int, optional
        dpi of the saved image if save_fig=True
        default is 500
    time_var : string, optional
        the column in the community gdf that identifies time period
        default is 'year' from US census data
    id_var : string, optional
        column in gdf that identifies geographic units
        default is 'geoid' from US census data
    kwargs : **kwargs, optional
        pass through to matplotlib pyplot

    Returns
    -------
    boundary_silhouette scores of the passed model plotted onto community geography
    """
    # If the user has already calculated, respect already calculated values
    if not year:
        if self.models[model_name].boundary_silhouettes is None:
            self.models[model_name].boundary_sil()
    else:
        if self.models[model_name][year].boundary_silhouettes is None:
            self.models[model_name][year].boundary_sil()
    f, ax = plt.subplots(1, 2, figsize=figsize)
    # work on a copy so reprojection never mutates self.gdf
    # (consistent with plot_silhouette_map)
    df = self.gdf.copy()
    if ctxmap:  # need to convert crs to mercator before graphing
        if df.crs != 3857:
            df = df.to_crs(epsg=3857)
    # To make visualization of boundary_silhouettes informative we need to
    # remove the graphing of zero values (interior, non-boundary units)
    if not year:
        scores = self.models[model_name].boundary_silhouettes[
            "boundary_silhouettes"
        ]
        ax[0].hist(scores[scores != 0])
        # BUG FIX: previously indexed self.models[model_name][year] here,
        # which fails when `year` is None (pooled models are not year-keyed)
        df.join(
            self.models[model_name].boundary_silhouettes,
            on=[id_var, time_var],
        ).plot(
            "boundary_silhouettes",
            ax=ax[1],
            legend=True,
            alpha=alpha,
            vmin=-1,
            vmax=1,
            cmap=cmap,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[1], source=ctxmap)
    else:
        scores = self.models[model_name][year].boundary_silhouettes[
            "boundary_silhouettes"
        ]
        ax[0].hist(scores[scores != 0])
        df[df.year == year].join(
            self.models[model_name][year].boundary_silhouettes,
            on=[id_var, time_var],
        ).plot(
            "boundary_silhouettes",
            ax=ax[1],
            legend=True,
            alpha=alpha,
            vmin=-1,
            vmax=1,
            cmap=cmap,
            **kwargs,
        )
        if ctxmap:
            ctx.add_basemap(ax[1], source=ctxmap)
    ax[1].axis("off")
    # tight_layout() and suptitle overlap, so only use one of the two
    if title:
        f.suptitle(title)
    else:
        f.tight_layout()
    if save_fig:
        f.savefig(save_fig, dpi=dpi, bbox_inches="tight")
    return ax
def plot_timeseries(
    self,
    column,
    title="",
    years=None,
    scheme="quantiles",
    k=5,
    pooled=True,
    cmap=None,
    legend=True,
    categorical=False,
    save_fig=None,
    dpi=200,
    legend_kwds="default",
    missing_kwds="default",
    figsize=None,
    ncols=None,
    nrows=None,
    ctxmap=ctx.providers.Stamen.TonerLite,
    alpha=1,
    **kwargs,
):
    """Plot an attribute from a Community arranged as a timeseries.

    Parameters
    ----------
    column : str
        column to be graphed in a time series
    title : str, optional
        desired title of figure
    years : list, optional
        years to be graphed, displayed in the order given;
        default is every year in the dataframe, sorted.
    scheme : string, optional
        matplotlib scheme to be used
        default is 'quantiles'
    k : int, optional
        number of bins to graph. k may be ignored
        or unnecessary for some schemes, like headtailbreaks, maxp, and maximum_breaks
        Default is 5.
    pooled : bool, optional
        whether the classification should be pooled across time periods or unique to each.
        E.g. with a 'quantile' scheme, pooled=True indicates that quantiles should be identified
        on the entire time series, whereas pooled=False indicates that they should be calculated
        independently for each time period
    cmap : string, optional
        matplotlib colormap; defaults to 'Accent' for categorical data
        and 'Blues' otherwise
    legend : bool, optional
        whether to display a legend on the plot
    categorical : bool, optional
        whether the data should be plotted as categorical as opposed to continuous
    save_fig : str, optional
        path to save figure if desired.
    dpi : int, optional
        dpi of the saved image if save_fig=True
        default is 200
    legend_kwds : dictionary, optional
        parameters for the legend;
        default is 1 column at the bottom of the graph.
    missing_kwds : dictionary, optional
        parameters for plotting missing data
    ncols : int, optional
        number of columns in the figure
        if passing ncols, nrows must also be passed
        default is None
    nrows : int, optional
        number of rows in the figure
        if passing nrows, ncols must also be passed
        default is None
    figsize : tuple, optional
        the desired size of the matplotlib figure
    ctxmap : contextily map provider, optional
        contextily basemap. Set to False for no basemap.
        Default is Stamen.TonerLite
    alpha : int (optional)
        Transparency parameter passed to matplotlib

    Returns
    -------
    proplot Axes
        the subplot grid the time series was drawn on
    """
    # proplot needs to be used as a function-level import,
    # as it influences all figures when imported at the top of the file
    import proplot as plot

    if categorical:  # there's no pooled classification for categorical
        pooled = False
    df = self.gdf
    if categorical and not cmap:
        cmap = "Accent"
    elif not cmap:
        cmap = "Blues"
    if legend_kwds == "default":
        legend_kwds = {"ncols": 1, "loc": "b"}
    if missing_kwds == "default":
        missing_kwds = {
            "color": "lightgrey",
            "edgecolor": "red",
            "hatch": "///",
            "label": "Missing values",
        }
    if ctxmap:  # need to convert crs to mercator before graphing
        if df.crs != 3857:
            df = df.to_crs(epsg=3857)
    if (
        pooled
    ):  # if pooling the classifier, create one from scratch and pass to user defined
        classifier = schemes[scheme](self.gdf[column].dropna().values, k=k)
    # BUG FIX: the two previous branches (all years vs. explicit `years`)
    # duplicated the plotting loop, and the explicit-years copy silently
    # dropped `figsize`, `missing_kwds`, and the `pooled` classifier.
    # Normalize the year list once, then use a single loop for both cases.
    if not years:
        year_list = sorted(df.year.unique())  # sort to prevent graphing out of order
    else:
        year_list = years  # display in whatever order list is passed in
    if nrows is None and ncols is None:
        f, axs = plot.subplots(ncols=len(year_list), figsize=figsize)
    else:
        f, axs = plot.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for i, year in enumerate(year_list):
        subset = df[df.year == year]
        if categorical:
            subset.plot(
                column=column,
                ax=axs[i],
                categorical=True,
                cmap=cmap,
                legend=legend,
                legend_kwds=legend_kwds,
                missing_kwds=missing_kwds,
                alpha=alpha,
            )
        elif pooled:
            # pooled: classify against bins computed on the full series
            subset.plot(
                column=column,
                ax=axs[i],
                scheme="user_defined",
                classification_kwds={"bins": classifier.bins},
                k=k,
                cmap=cmap,
                **kwargs,
                legend=legend,
                legend_kwds=legend_kwds,
                alpha=alpha,
            )
        else:
            # unpooled: classify each time period independently
            subset.plot(
                column=column,
                ax=axs[i],
                scheme=scheme,
                k=k,
                cmap=cmap,
                **kwargs,
                legend=legend,
                legend_kwds=legend_kwds,
                alpha=alpha,
            )
        if ctxmap:  # need set basemap of each graph
            ctx.add_basemap(axs[i], source=ctxmap)
        axs[i].format(title=year)
    if not title:  # only use title when passed
        axs.format(suptitle=column)
    else:
        axs.format(suptitle=title)
    axs.axis("off")
    if save_fig:
        f.savefig(save_fig, dpi=dpi, bbox_inches="tight")
    return axs
def animate_timeseries(
    self,
    column=None,
    filename=None,
    title="",
    time_col="year",
    time_periods=None,
    scheme="quantiles",
    k=5,
    cmap=None,
    legend=True,
    alpha=0.6,
    categorical=False,
    dpi=200,
    fps=0.5,
    interval=500,
    repeat_delay=1000,
    title_fontsize=40,
    subtitle_fontsize=38,
    figsize=(20, 20),
    ctxmap=ctx.providers.Stamen.TonerLite,
):
    """Create an animated gif from a Community timeseries.

    Renders one PNG frame per time period into a temporary directory,
    then stitches them into a gif with `_gif_from_path`.

    Parameters
    ----------
    column : str
        column to be graphed in a time series
    filename : str, required
        output file name
    title : str, optional
        desired title of figure
    time_col : str, required
        column on the Community.gdf that stores time periods
    time_periods: list, optional
        subset of time periods to include in the animation. If None, then all times will be used
    scheme : string, optional
        matplotlib scheme to be used
        default is 'quantiles'
    k : int, optional
        number of bins to graph. k may be ignored
        or unnecessary for some schemes, like headtailbreaks, maxp, and maximum_breaks
        Default is 5.
    cmap : string, optional
        matplotlib colormap; defaults to 'Accent' for categorical data
        and 'Blues' otherwise
    legend : bool, optional
        whether to display a legend on the plot
    alpha : float, optional
        transparency parameter passed to matplotlib
    categorical : bool, optional
        whether the data should be plotted as categorical as opposed to continuous
    dpi : int, optional
        dpi of the saved frames, default is 200
    fps : float, optional
        frames per second, used to speed up or slow down animation
    interval : int, optional
        interval between frames in miliseconds, default 500
    repeat_delay : int, optional
        time before animation repeats in miliseconds, default 1000
    title_fontsize : int, optional
        font size of the figure title, default 40
    subtitle_fontsize : int, optional
        font size of each frame's time-period subtitle, default 38
    figsize : tuple, optional
        output figure size passed to matplotlib.pyplot
    ctxmap : contextily map provider, optional
        contextily basemap. Set to False for no basemap.
    """
    gdf = self.gdf.copy()
    if categorical and not cmap:
        cmap = "Accent"
    elif not cmap:
        cmap = "Blues"
    if not gdf.crs == 3857:
        # basemap tiles are web mercator; reproject the copy once up front
        gdf = gdf.to_crs(3857)
    if not time_periods:
        time_periods = list(gdf[time_col].unique())
    time_periods = sorted(time_periods)
    if not categorical:
        # the classification is pooled over the full series, so the
        # classifier is loop-invariant: build it once, not once per frame
        classifier = schemes[scheme](gdf[column].dropna().values, k=k)
    with tempfile.TemporaryDirectory() as tmpdirname:
        for i, time in enumerate(time_periods):
            fig, ax = plt.subplots(figsize=figsize)
            outpath = PurePath(tmpdirname, f"file_{i}.png")
            if categorical:
                gdf[gdf[time_col] == time].plot(
                    column,
                    categorical=True,
                    ax=ax,
                    alpha=alpha,
                    legend=legend,
                    cmap=cmap,
                )
            else:
                gdf[gdf[time_col] == time].plot(
                    column,
                    scheme="user_defined",
                    classification_kwds={"bins": classifier.bins},
                    k=k,
                    ax=ax,
                    alpha=alpha,
                    legend=legend,
                    cmap=cmap,
                )
            ctx.add_basemap(ax=ax, source=ctxmap)
            ax.axis("off")
            ax.set_title(f"{time}", fontsize=subtitle_fontsize)
            fig.suptitle(f"{title}", fontsize=title_fontsize)
            plt.tight_layout()
            plt.savefig(outpath, dpi=dpi)
            # BUG FIX: close each frame's figure; previously every frame
            # left an open matplotlib figure for the animation's lifetime
            plt.close(fig)
        _gif_from_path(
            tmpdirname,
            interval=interval,
            repeat_delay=repeat_delay,
            filename=filename,
            fps=fps,
            dpi=dpi,
        )
def transition(
    self, cluster_col, time_var="year", id_var="geoid", w_type=None, permutations=0
):
    """Model neighborhood change as a (Spatial) Markov process.

    Should be run after neighborhood segmentation, since a column of
    neighborhood labels is a required input.

    Parameters
    ----------
    cluster_col : string or int
        Column name holding the neighborhood segmentation labels,
        e.g. "ward" or "kmeans".
    time_var : string, optional
        Column defining time and or sequencing of the long-form data.
        Default is "year".
    id_var : string, optional
        Column identifying the unique id of spatial units.
        Default is "geoid".
    w_type : string, optional
        Spatial weights type ("rook", "queen", "knn" or "kernel") used to
        encode spatial structure. Default is None, which yields
        non-spatial (classic) Markov transition rates.
    permutations : int, optional
        number of permutations for use in randomization based
        inference (the default is 0).

    Returns
    -------
    giddy.markov.Markov or giddy.markov.Spatial_Markov
        a classic Markov instance when w_type is None, otherwise a
        Spatial_Markov instance
    """
    # fail fast if the id column is absent; downstream reshaping needs it
    assert id_var in self.gdf.columns.to_list(), (
        f"id_var: {id_var} is not in the columns."
        " Please use an appropriate index that properly identifies spatial units."
    )
    return _transition(
        self.gdf,
        cluster_col,
        time_var=time_var,
        id_var=id_var,
        w_type=w_type,
        permutations=permutations,
    )
def sequence(
    self,
    cluster_col,
    seq_clusters=5,
    subs_mat=None,
    dist_type=None,
    indel=None,
    time_var="year",
    id_var="geoid",
):
    """Run pairwise sequence analysis on neighborhood label histories.

    Evaluates the distance/dissimilarity between every pair of
    neighborhood sequences. Should be run after neighborhood
    segmentation, since a column of neighborhood labels is a required
    input.

    Parameters
    ----------
    cluster_col : string or int
        Column name holding the neighborhood segmentation labels,
        e.g. "ward" or "kmeans".
    seq_clusters : int, optional
        Number of neighborhood sequence clusters. Agglomerative
        Clustering with Ward linkage is now used for clustering
        the sequences. Default is 5.
    subs_mat : array
        (k,k), substitution cost matrix. Should be hollow (
        0 cost between the same type), symmetric and non-negative.
    dist_type : string
        "hamming": hamming distance (substitution only
        and its cost is constant 1) from sklearn.metrics;
        "markov": utilize empirical transition
        probabilities to define substitution costs;
        "interval": differences between states are used
        to define substitution costs, and indel=k-1;
        "arbitrary": arbitrary distance if there is not a
        strong theory guidance: substitution=0.5, indel=1.
        "tran": transition-oriented optimal matching. Sequence of
        transitions. Based on :cite:`Biemann:2011`.
    indel : float, optional
        insertion/deletion cost.
    time_var : string, optional
        Column defining time and or sequencing of the long-form data.
        Default is "year".
    id_var : string, optional
        Column identifying the unique id of spatial units.
        Default is "geoid".

    Returns
    -------
    gdf_new : Community instance
        New Community instance whose "gdf" carries an added column of
        sequence labels.
    df_wide : pandas.DataFrame
        Wide-form DataFrame with k (k is the number of periods)
        columns of neighborhood types and 1 column of sequence
        labels.
    seq_dis_mat : array
        (n,n), distance/dissimilarity matrix for each pair of
        sequences
    """
    # delegate the heavy lifting, then wrap the labeled gdf in a fresh Community
    labeled_gdf, df_wide, seq_dis_mat = _sequence(
        self.gdf,
        cluster_col,
        seq_clusters=seq_clusters,
        subs_mat=subs_mat,
        dist_type=dist_type,
        indel=indel,
        time_var=time_var,
        id_var=id_var,
    )
    return Community(labeled_gdf), df_wide, seq_dis_mat
def simulate(
    self,
    model_name=None,
    index_col="geoid",
    w_type="queen",
    w_options=None,
    base_year=2010,
    new_colname="predicted",
    increment=10,
    time_steps=3,
    time_col="year",
    seed=None,
):
    """Simulate community dynamics using spatial Markov transition rules.

    Parameters
    ----------
    model_name : str, optional
        name of the cluster model (key into ``self.models``) whose labels
        drive the simulation, by default None
    index_col : str, optional
        column on the community gdf that denotes the unique index of geographic units
        for U.S. census data this is "geoid" (which is the default)
    w_type : str {'queen', 'rook'}
        which type of libpysal spatial weights objects to encode connectivity
    w_options : dict
        additional options passed to a libpysal weights constructor (e.g. `k` for a KNN weights matrix)
    base_year : int, optional
        time period to begin the simulation. Typically this is the last time period for which
        labels have been estimated by a cluster model.
    new_colname : str, optional
        name of new column holding predicted neighorhood labels. Default is "predicted"
    increment : int, optional
        number of units in each time step (e.g. for a model based on decennial census data, this is 10)
    time_steps : int, optional
        number of time periods to simulate
    time_col : str, optional
        column on the community gdf that denotes the time index. For builtin data, this is "year"
    seed: int, optional
        seed passed to numpy random number generator

    Returns
    -------
    geosnap.Community if time_steps > 1, else geopandas.GeoDataFrame
        If simulating multiple timesteps, the return is a new Community instance with simulated label values as its gdf.
        If simulating a single time step, the return is a single geodataframe
    """
    np.random.seed(seed)
    # The prediction call is identical for both horizons (previously the
    # same call was duplicated in two branches); only the post-processing
    # of the result differs, so call the helper exactly once.
    predicted = _predict_labels(
        self,
        model_name=model_name,
        w_type=w_type,
        w_options=w_options,
        base_year=base_year,
        new_colname=new_colname,
        index_col=index_col,
        increment=increment,
        time_steps=time_steps,
        time_col=time_col,
        seed=seed,
    )
    if time_steps == 1:
        # single step: the helper returns one geodataframe
        return predicted
    # multiple steps: the helper returns one geodataframe per step;
    # stack them, drop units with no prediction, and rewrap as a Community
    gdfs = pd.concat(predicted)
    gdfs = gdfs.dropna(subset=[model_name])
    gdfs[model_name] = gdfs[model_name].astype(int)
    return Community.from_geodataframes(gdfs=[gdfs])
###### Constructor Methods ######
#################################
@classmethod
def from_ltdb(
    cls,
    state_fips=None,
    county_fips=None,
    msa_fips=None,
    fips=None,
    boundary=None,
    years="all",
):
    """Create a new Community from LTDB data.

    Instantiate a new Community from pre-harmonized LTDB data. To use,
    you must first download and register LTDB data with geosnap using
    the `store_ltdb` function. Pass lists of states, counties, or any
    arbitrary FIPS codes to create a community. All fips code arguments
    are additive, so geosnap will include the largest unique set.
    Alternatively, you may provide a boundary to use as a clipping
    feature.

    Parameters
    ----------
    state_fips : list or str
        string or list of strings of two-digit fips codes defining states
        to include in the study area.
    county_fips : list or str
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    msa_fips : list or str
        string or list of strings of fips codes defining
        MSAs to include in the study area.
    fips : list or str
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    boundary : geopandas.GeoDataFrame
        geodataframe that defines the total extent of the study area.
        This will be used to clip tracts lazily by selecting all
        `GeoDataFrame.representative_point()`s that intersect the
        boundary gdf
    years : list of ints
        list of years (decades) to include in the study data
        (the default "all" is [1970, 1980, 1990, 2000, 2010]).

    Returns
    -------
    Community
        Community with LTDB data
    """
    if years == "all":
        years = [1970, 1980, 1990, 2000, 2010]
    if isinstance(boundary, gpd.GeoDataFrame):
        # clip lazily: keep tracts whose representative point falls in the boundary
        tract_geoms = datasets.tracts_2010()[["geoid", "geometry"]]
        ltdb = datasets.ltdb().reset_index()
        if boundary.crs != tract_geoms.crs:
            warn(
                "Unable to determine whether boundary CRS is WGS84 "
                "if this produces unexpected results, try reprojecting"
            )
        in_extent = tract_geoms.representative_point().intersects(
            boundary.unary_union
        )
        tract_geoms = tract_geoms[in_extent]
        subset = ltdb[ltdb["geoid"].isin(tract_geoms["geoid"])]
        gdf = gpd.GeoDataFrame(subset.merge(tract_geoms, on="geoid", how="left"))
    else:
        # no boundary: filter the stored database by the fips arguments
        gdf = _from_db(
            data=datasets.ltdb(),
            state_fips=state_fips,
            county_fips=county_fips,
            msa_fips=msa_fips,
            fips=fips,
            years=years,
        )
    return cls(gdf=gdf.reset_index(), harmonized=True)
@classmethod
def from_ncdb(
    cls,
    state_fips=None,
    county_fips=None,
    msa_fips=None,
    fips=None,
    boundary=None,
    years="all",
):
    """Create a new Community from NCDB data.

    Instantiate a new Community from pre-harmonized NCDB data. To use,
    you must first download and register NCDB data with geosnap using
    the `store_ncdb` function. Pass lists of states, counties, or any
    arbitrary FIPS codes to create a community. All fips code arguments
    are additive, so geosnap will include the largest unique set.
    Alternatively, you may provide a boundary to use as a clipping
    feature.

    Parameters
    ----------
    state_fips : list or str
        string or list of strings of two-digit fips codes defining states
        to include in the study area.
    county_fips : list or str
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    msa_fips : list or str
        string or list of strings of fips codes defining
        MSAs to include in the study area.
    fips : list or str
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    boundary : geopandas.GeoDataFrame
        geodataframe that defines the total extent of the study area.
        This will be used to clip tracts lazily by selecting all
        `GeoDataFrame.representative_point()`s that intersect the
        boundary gdf
    years : list of ints
        list of years (decades) to include in the study data
        (the default is all available [1970, 1980, 1990, 2000, 2010]).

    Returns
    -------
    Community
        Community with NCDB data
    """
    if years == "all":
        years = [1970, 1980, 1990, 2000, 2010]
    if isinstance(boundary, gpd.GeoDataFrame):
        # clip lazily: keep tracts whose representative point intersects the boundary
        tracts = datasets.tracts_2010()[["geoid", "geometry"]]
        ncdb = datasets.ncdb().reset_index()
        if boundary.crs != tracts.crs:
            warn(
                "Unable to determine whether boundary CRS is WGS84 "
                "if this produces unexpected results, try reprojecting"
            )
        tracts = tracts[
            tracts.representative_point().intersects(boundary.unary_union)
        ]
        gdf = ncdb[ncdb["geoid"].isin(tracts["geoid"])]
        gdf = gpd.GeoDataFrame(gdf.merge(tracts, on="geoid", how="left"))
    else:
        # no boundary: filter the stored database by the fips arguments
        gdf = _from_db(
            data=datasets.ncdb(),
            state_fips=state_fips,
            county_fips=county_fips,
            msa_fips=msa_fips,
            fips=fips,
            years=years,
        )
    return cls(gdf=gdf.reset_index(), harmonized=True)
@classmethod
def from_census(
    cls,
    state_fips=None,
    county_fips=None,
    msa_fips=None,
    fips=None,
    boundary=None,
    years="all",
    constant_dollars=True,
    currency_year=2015,
):
    """Create a new Community from original vintage US Census data.

    Instantiate a new Community from unharmonized decennial census data.
    To use you must first download and register census data with geosnap
    using the `store_census` function. Pass lists of states, counties, or
    any arbitrary FIPS codes to create a community. All fips code
    arguments are additive, so geosnap will include the largest unique
    set. Alternatively, you may provide a boundary to use as a clipping
    feature.

    Parameters
    ----------
    state_fips : list or str, optional
        string or list of strings of two-digit fips codes defining states
        to include in the study area.
    county_fips : list or str, optional
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    msa_fips : list or str, optional
        string or list of strings of fips codes defining
        MSAs to include in the study area.
    fips : list or str, optional
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    boundary : geopandas.GeoDataFrame, optional
        geodataframe that defines the total extent of the study area.
        This will be used to clip tracts lazily by selecting all
        `GeoDataFrame.representative_point()`s that intersect the
        boundary gdf
    years : list of ints, optional
        list of years to include in the study data
        (the default is "all", which resolves to [1990, 2000, 2010]).
    constant_dollars : bool, optional
        whether to standardize currency columns to constant dollars. If true,
        each year will be expressed in dollars set by the `currency_year` parameter
    currency_year : int, optional
        If adjusting for inflation, this parameter sets the year in which dollar values will
        be expressed

    Returns
    -------
    Community
        Community with unharmonized census data
    """
    # Resolve the "all" sentinel / scalar year into a concrete list.
    if years == "all":
        years = [1990, 2000, 2010]
    if isinstance(years, (str, int)):
        years = [years]

    msa_states = []
    if msa_fips:
        # Puerto Rico tracts are not shipped with geosnap, so reject any
        # MSA whose title marks it as a PR metro.
        pr_metros = set(
            datasets.msa_definitions()[
                datasets.msa_definitions()["CBSA Title"].str.contains("PR")
            ]["CBSA Code"].tolist()
        )
        if msa_fips in pr_metros:
            raise Exception(
                "geosnap does not yet include built-in data for Puerto Rico"
            )
        msa_states += datasets.msa_definitions()[
            datasets.msa_definitions()["CBSA Code"] == msa_fips
        ]["stcofips"].tolist()
    # The first two digits of a county fips code are its state fips.
    msa_states = [i[:2] for i in msa_states]

    # build a list of states in the dataset
    allfips = []
    for i in [state_fips, county_fips, fips, msa_states]:
        if i:
            if isinstance(i, (str,)):
                i = [i]
            for each in i:
                allfips.append(each[:2])
    states = list(set(allfips))
    # if using a boundary there will be no fips, so reset states to None
    if len(states) == 0:
        states = None

    # Load only the tract vintages actually requested.
    df_dict = {
        1990: datasets.tracts_1990(states=states),
        2000: datasets.tracts_2000(states=states),
        2010: datasets.tracts_2010(states=states),
    }
    tracts = []
    for year in years:
        tracts.append(df_dict[year])
    tracts = pd.concat(tracts, sort=False)

    if isinstance(boundary, gpd.GeoDataFrame):
        # Lazy clip: keep tracts whose representative point falls inside
        # the union of the boundary geometries.
        if boundary.crs != tracts.crs:
            warn(
                "Unable to determine whether boundary CRS is WGS84 "
                "if this produces unexpected results, try reprojecting"
            )
        tracts = tracts[
            tracts.representative_point().intersects(boundary.unary_union)
        ]
        gdf = tracts.copy()
    else:
        gdf = _fips_filter(
            state_fips=state_fips,
            county_fips=county_fips,
            msa_fips=msa_fips,
            fips=fips,
            data=tracts,
        )

    # adjust for inflation if necessary
    if constant_dollars:
        newtracts = []
        inflate_cols = [
            "median_home_value",
            "median_contract_rent",
            "per_capita_income",
            "median_household_income",
        ]
        for year in years:
            df = gdf[gdf.year == year]
            df = adjust_inflation(df, inflate_cols, year, currency_year)
            newtracts.append(df)
        gdf = pd.concat(newtracts)

    return cls(gdf=gdf, harmonized=False)
@classmethod
def from_lodes(
    cls,
    state_fips=None,
    county_fips=None,
    msa_fips=None,
    fips=None,
    boundary=None,
    years=2015,
    dataset="wac",
):
    """Create a new Community from Census LEHD/LODES data.

    Instantiate a new Community from LODES data. Pass lists of states,
    counties, or any arbitrary FIPS codes to create a community. All fips
    code arguments are additive, so geosnap will include the largest
    unique set. Alternatively, you may provide a boundary to use as a
    clipping feature.

    Parameters
    ----------
    state_fips : list or str, optional
        string or list of strings of two-digit fips codes defining states
        to include in the study area.
    county_fips : list or str, optional
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    msa_fips : list or str, optional
        string or list of strings of fips codes defining
        MSAs to include in the study area.
    fips : list or str, optional
        string or list of strings of five-digit fips codes defining
        counties to include in the study area.
    boundary : geopandas.GeoDataFrame, optional
        geodataframe that defines the total extent of the study area.
        This will be used to clip tracts lazily by selecting all
        `GeoDataFrame.representative_point()`s that intersect the
        boundary gdf
    years : list of ints, required
        list of years to include in the study data
        (the default is 2015).
    dataset : str, required
        which LODES dataset should be used to create the Community.
        Options are 'wac' for workplace area characteristics or 'rac' for
        residence area characteristics. The default is "wac" for workplace.

    Returns
    -------
    Community
        Community with LODES data
    """
    # Normalize `years` into a de-duplicated list of ints.
    if isinstance(years, (str,)):
        years = int(years)
    if isinstance(years, (int,)):
        years = [years]
    years = list(set(years))

    if msa_fips:
        # Puerto Rico data is not shipped with geosnap; reject PR metros.
        pr_metros = set(
            datasets.msa_definitions()[
                datasets.msa_definitions()["CBSA Title"].str.contains("PR")
            ]["CBSA Code"].tolist()
        )
        if msa_fips in pr_metros:
            raise Exception(
                "geosnap does not yet include built-in data for Puerto Rico"
            )
        msa_counties = datasets.msa_definitions()[
            datasets.msa_definitions()["CBSA Code"] == msa_fips
        ]["stcofips"].tolist()
    else:
        msa_counties = None

    # build a list of states in the dataset
    allfips = []
    stateset = []
    for i in [state_fips, county_fips, msa_counties, fips]:
        if i:
            if isinstance(i, (str,)):
                i = [i]
            for each in i:
                allfips.append(each)
                # first two digits of any fips code are the state fips
                stateset.append(each[:2])
    states = list(set(stateset))

    # Pre-2010 LODES years are tabulated on 2000 blocks; later years on
    # 2010 blocks, so load both geographies when needed.
    if any(year < 2010 for year in years):
        gdf00 = datasets.blocks_2000(states=states, fips=(tuple(allfips)))
        gdf00 = gdf00.drop(columns=["year"])
        gdf00 = _fips_filter(
            state_fips=state_fips,
            county_fips=county_fips,
            msa_fips=msa_fips,
            fips=fips,
            data=gdf00,
        )
        if isinstance(boundary, gpd.GeoDataFrame):
            if boundary.crs != gdf00.crs:
                warn(
                    "Unable to determine whether boundary CRS is WGS84 "
                    "if this produces unexpected results, try reprojecting"
                )
            gdf00 = gdf00[
                gdf00.representative_point().intersects(boundary.unary_union)
            ]

    gdf = datasets.blocks_2010(states=states, fips=(tuple(allfips)))
    gdf = gdf.drop(columns=["year"])
    gdf = _fips_filter(
        state_fips=state_fips,
        county_fips=county_fips,
        msa_fips=msa_fips,
        fips=fips,
        data=gdf,
    )
    if isinstance(boundary, gpd.GeoDataFrame):
        if boundary.crs != gdf.crs:
            warn(
                "Unable to determine whether boundary CRS is WGS84 "
                "if this produces unexpected results, try reprojecting"
            )
        gdf = gdf[gdf.representative_point().intersects(boundary.unary_union)]

    # grab state abbreviations
    names = (
        _fipstable[_fipstable["FIPS Code"].isin(states)]["State Abbreviation"]
        .str.lower()
        .tolist()
    )
    if isinstance(names, str):
        names = [names]

    dfs = []
    for name in names:
        # BUG FIX: `names` is lower-cased above, so the original
        # comparison against the upper-case "PR" could never match and
        # the guard was dead code.
        if name == "pr":
            raise Exception("does not yet include built-in data for Puerto Rico")
        for year in years:
            try:
                df = get_lehd(dataset=dataset, year=year, state=name)
                df["year"] = year
                # Join onto the block geography matching the vintage.
                if year < 2010:
                    df = gdf00.merge(df, on="geoid", how="inner")
                else:
                    df = gdf.merge(df, on="geoid", how="inner")
                df = df.set_index(["geoid", "year"])
                dfs.append(df)
            except ValueError:
                # Missing state/year combinations are skipped with a warning.
                warn(f"{name.upper()} {year} not found!")
    out = pd.concat(dfs, sort=True)
    # Overlapping fips arguments can produce duplicate (geoid, year) rows.
    out = out[~out.index.duplicated(keep="first")]
    out = out.reset_index()
    return cls(gdf=out, harmonized=False)
@classmethod
def from_geodataframes(cls, gdfs=None):
    """Create a new Community from a list of geodataframes.

    Parameters
    ----------
    gdfs : list-like of geopandas.GeoDataFrames
        list of geodataframes that hold attribute and geometry data for
        a study area. Each geodataframe must have neighborhood
        attribute data, geometry data, and a time column that defines
        how the geodataframes are sequenced. The geometries may be
        stable over time (in which case the dataset is harmonized) or
        may be unique for each time. If the data are harmonized, the
        dataframes must also have an ID variable that indexes
        neighborhood units over time.

    Returns
    -------
    Community
        Community holding the concatenated geodataframes.

    Raises
    ------
    AssertionError
        If the geodataframes are not all in the same CRS.
    """
    # All inputs must share a single CRS before they can be concatenated.
    crss = {gdf.crs for gdf in gdfs}
    assert len(crss) == 1, (
        f"These geodataframes have {len(crss)} different CRS: "
        f"{[i.to_string() for i in crss]}."
        " To continue, reproject the geodataframes into a single consistent system."
        # BUG FIX: corrected "inforamtion" typo in the user-facing message.
        " See: https://geopandas.org/projections.html for more information."
    )
    gdf = pd.concat(gdfs, sort=True)
    return cls(gdf=gdf)
|
def binary_to_string(binary):
    """Decode a string of '0'/'1' characters into text, 8 bits per char.

    Each consecutive 8-character slice is parsed as a base-2 integer and
    converted to the character with that code point.

    BUG FIX: the original used the Python 2-only ``xrange``; ``range``
    behaves identically here and works on Python 3.
    """
    return ''.join(chr(int(binary[a:a + 8], 2))
                   for a in range(0, len(binary), 8))
|
# Basic attributes for the person "xiaoming", collected in one dict.
xiaoming = dict(
    name="xiaoming",
    height=1.75,
    age=18,
    weight=60,
    gender=True,
)
print(xiaoming)
from numpy import *

# Project Euler 11: find the greatest product of four adjacent numbers
# (up/down, left/right, or diagonally) in a 20x20 grid read from
# euler11.txt (one space-separated row per line).
filename = 'euler11.txt'
with open(filename, "r") as ins:
    array = []
    for line in ins:
        array.append(line)
print(array)

# Parse each row into a list of ints (int() tolerates the trailing newline).
newArray = []
for i in array:
    j = i.split(' ')
    k = [int(n) for n in j]
    newArray.append(k)
print(newArray)

problemMatrix = matrix(newArray)
print(problemMatrix)

# BUG FIX: the original iterated range(16) for BOTH indices, which
# skipped vertical runs starting in columns 16-19 and horizontal runs
# starting in rows 16-19. A run of four starting at index s fits in a
# 20-wide axis iff s + 4 <= 20, so each direction gets its own guard.
SIZE = 20
RUN = 4
maxProd = 1
for i in range(SIZE):
    for j in range(SIZE):
        if i + RUN <= SIZE:
            # vertical run
            prod1 = problemMatrix[i, j] * problemMatrix[i + 1, j] * problemMatrix[i + 2, j] * problemMatrix[i + 3, j]
            if prod1 > maxProd:
                maxProd = prod1
        if j + RUN <= SIZE:
            # horizontal run
            prod2 = problemMatrix[i, j] * problemMatrix[i, j + 1] * problemMatrix[i, j + 2] * problemMatrix[i, j + 3]
            if prod2 > maxProd:
                maxProd = prod2
        if i + RUN <= SIZE and j + RUN <= SIZE:
            # down-right diagonal run
            prod3 = problemMatrix[i, j] * problemMatrix[i + 1, j + 1] * problemMatrix[i + 2, j + 2] * problemMatrix[i + 3, j + 3]
            if prod3 > maxProd:
                maxProd = prod3
            # up-right (anti-diagonal) run, mirrored from the bottom rows
            prod4 = problemMatrix[19 - i, j] * problemMatrix[18 - i, j + 1] * problemMatrix[17 - i, j + 2] * problemMatrix[16 - i, j + 3]
            if prod4 > maxProd:
                maxProd = prod4
print(maxProd)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module consists of the key-value store abstract class
and an implementation for memcached using python memcached
"""
import memcache
class KVStoreClient(object):
    """Abstract KV client class for interacting with a single-node KV store.

    Initialization takes an address, a port number and optional keyword
    arguments. Derived classes must establish the actual connection in
    their __init__ (assigning ``self.KVconnection``) and may override
    get, set, and delete.
    """
    _type = "Generic KV Store"

    def __init__(self, address, port, **kwargs):
        self.address = address
        self.port = port
        # Derived classes must replace this with a live connection object.
        self.KVconnection = None

    def insert(self, key, value):
        """Alias for set()."""
        return self.set(key, value)

    def get(self, key):
        """Return the value stored under key (backend-defined on miss)."""
        return self.KVconnection.get(key)

    def set(self, key, value):
        """Store value under key; return the backend's status result."""
        return self.KVconnection.set(key, value)

    def delete(self, key):
        """Remove key from the store; return the backend's status result."""
        return self.KVconnection.delete(key)

    def __str__(self):
        return type(self)._type + ": " \
            + str(self.address) + ":" + str(self.port)

    def __del__(self):
        # BUG FIX: the original `return NotImplementedError` returned the
        # exception *class* from __del__ (whose return value is ignored
        # anyway) instead of raising or doing nothing. The base class has
        # no connection to clean up, so this is a deliberate no-op.
        pass
class MemcachedClient(KVStoreClient):
    """Memcached KV client class for interacting with a single-node Memcached.
    """
    _type = "Memcached"
    # memcached protocol verbs, grouped by the kind of response they need
    _STORE_COMMANDS = ['add', 'store', 'set', 'replace', 'cas']
    _GET_COMMANDS = ['get', 'gets']
    _META_COMMANDS = ['version']

    def __init__(self, address, port, **kwargs):
        KVStoreClient.__init__(self, address, port)
        # debug=0 silences python-memcached's internal error printing
        self.KVconnection = memcache.Client(
            [address + ':' + str(port)], debug=0)

    def process_memcached_query(self, query):
        ''' expects query.MemacachedQuery type with all fields parsed

        Executes the parsed command against the backing memcached server
        and returns a raw memcached-protocol response string
        (e.g. "STORED\\r\\n", "VALUE ... END\\r\\n", "VERSION ...").
        '''
        ### DEBUG
        #print 'FORWARDING query to ' + self.address + " : " + str(self.port)
        returnstring = ''
        if not query.exptime:
            query.exptime = 0
        if query.command in MemcachedClient._STORE_COMMANDS:
            # NOTE(review): expiry is hard-coded to 0 (never expire) --
            # query.exptime is normalized above but deliberately not
            # forwarded; confirm this is intended.
            result = self.KVconnection.set(
                query.key, query.data, time=0)  # query.exptime)
            if not result:
                returnstring += 'NOT_STORED\r\n'
            else:
                returnstring += 'STORED\r\n'
        elif query.command in MemcachedClient._GET_COMMANDS:
            result = self.KVconnection.get(query.key)  # query.exptime)
            if result:
                # response layout: VALUE <key> <flags> <bytes>\r\n<data>\r\n
                returnstring += 'VALUE ' + query.key + \
                    ' 0 ' + str(len(result)) + '\r\n'
                returnstring += str(result) + '\r\n'
            returnstring += 'END\r\n'
        elif query.command in MemcachedClient._META_COMMANDS:
            # static version string; not queried from the server
            returnstring += 'VERSION 1.4.25 Ubuntu\r\n'
        # DEBUG print returnstring
        return returnstring

    def __del__(self):
        return self.KVconnection.disconnect_all()  # Clean-up
|
from django.db import models
# Create your models here.
class Maker(models.Model):
    """A worker (tailor/engineer/cutter); referenced by Machine."""
    name_maker = models.CharField(max_length=20)
    date_of_birth = models.DateField()
    salary = models.IntegerField()
    # NOTE(review): phone numbers are usually CharFields -- an
    # IntegerField drops leading zeros; confirm before relying on this.
    telephone = models.IntegerField()
    # (stored value, human-readable label) pairs for `position`
    pos = (('T', 'tailor'), ('E', 'engineer'), ('C', 'cutter'))
    position = models.CharField(max_length=1, choices=pos)
    address = models.CharField(max_length=30)

    def __str__(self):
        return self.name_maker
class Machine(models.Model):
    """A production machine, operated by a single Maker."""
    # (stored value, human-readable label) pairs for `name_machine`
    machine = (
        ('Sm', 'Shima Seiki Computrized'),
        ('Sc', 'Scheller Brand Tricot '),
        ('St', 'STOLL ANVH BLM '),
        ('Kn', 'Knitting Machine'),
        ('Em', 'embroidery'),
    )
    name_machine = models.CharField(choices=machine, max_length=2)
    maker = models.ForeignKey(Maker, on_delete=models.CASCADE)

    # Price per machine code; any code not listed falls back to the
    # default below (the original if/elif chain's else branch).
    _PRICES = {'Sm': 100000, 'Sc': 200000, 'St': 450050, 'Kn': 800860}
    _DEFAULT_PRICE = 332000

    def __str__(self):
        return self.name_machine

    def price_machine(self):
        """Return the price for this machine's type.

        Same values as before; the if/elif chain is replaced with a
        dictionary lookup for clarity.
        """
        return self._PRICES.get(self.name_machine, self._DEFAULT_PRICE)
class Raw(models.Model):
    """A raw material sourced from a trader, usable on several machines."""
    # (stored value, label) choices for `raw`.
    # NOTE(review): `type` shadows the builtin; kept as-is because the
    # name is referenced by the field definition below.
    type = (
        ('A', 'alpha'),
        ('S', 'Asher Ramadan'),
        ('M', 'Masrien'),
        ('G', 'Gawada'),
    )
    raw = models.CharField(max_length=1, choices=type)
    address_trade = models.CharField(max_length=30)
    color = models.CharField(max_length=15)
    machine = models.ManyToManyField(Machine)

    def __str__(self):
        return self.raw
class Model(models.Model):
    """A garment design/model, produced on one or more machines."""
    name = models.CharField(max_length=11)
    # (stored value, human-readable label) pairs for `size`
    SIZE = (('sm', 'small'), ('ch', 'Chields'), ('M', 'medium'), ('gr', 'Girl'),)
    size = models.CharField(max_length=2, choices=SIZE)
    machine = models.ManyToManyField(Machine)

    def __str__(self):
        return self.name
def hundred():
    """Interactively tell the user what year they turn 100, then spam.

    Prompts for name, age, current year and a repeat count on stdin, and
    prints the results to stdout. Purely interactive; returns None.
    """
    # input() already returns a str; the original wrapped it in a
    # redundant str() call.
    name = input("What's your name my dude? : ")
    age = int(input("How old are you? : "))
    year = int(input("I lose track of time, what year is it? : "))
    dif = 100 - age
    answer = str(year + dif)
    print(name + ", you'll turn 100 in " + answer + ". Congratulations on being so old!" + '\n')
    numOfTimes = int(input("But one last question... what's your favourite number? (Think BIG ;]) : "))
    # Loop variable is unused; `_` signals that.
    for _ in range(numOfTimes):
        print("SPAMMING TIME" + '\n')
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/18 14:54
# @Author : Monica
# @Email : 498194410@qq.com
# @File : my_log.py
import logging
from Common import project_path
class MyLog:
    """Thin wrapper around the stdlib logging module.

    Each call attaches a FileHandler to the shared "Monica" logger,
    emits a single record at the requested level, then detaches the
    handler so repeated calls do not produce duplicate log lines.
    """

    def my_log(self, level, msg):
        """Write msg to the project log file at the given level name."""
        # Named log collector, shared across calls.
        my_logger = logging.getLogger("Monica")
        # Accept everything down to DEBUG.
        my_logger.setLevel("DEBUG")
        # Record layout: time-level-file-logger-message.
        formatter = logging.Formatter('%(asctime)s-%(levelname)s-%(filename)s-%(name)s-日志信息:%(message)s')
        # Console output is intentionally disabled; kept for reference:
        # ch = logging.StreamHandler()  # log to the console
        # ch.setLevel("DEBUG")  # level name must be upper case
        # ch.setFormatter(formatter)
        fh = logging.FileHandler(project_path.logs_path, encoding='UTF-8')  # log to the project file
        fh.setLevel("DEBUG")  # level name must be upper case
        fh.setFormatter(formatter)
        # Attach the output channel to the logger.
        # my_logger.addHandler(ch)
        my_logger.addHandler(fh)
        # Emit the record at the requested level.
        if level == 'DEBUG':
            my_logger.debug(msg)
        elif level == 'INFO':
            my_logger.info(msg)
        elif level == 'WARNING':
            my_logger.warning(msg)
        elif level == 'ERROR':
            my_logger.error(msg)
        elif level == 'CRITICAL':
            my_logger.critical(msg)
        elif level == 'exception':
            my_logger.exception(msg)
        # Remember to remove the handler, otherwise every call adds
        # another one and log output is duplicated.
        # my_logger.removeHandler(ch)
        my_logger.removeHandler(fh)

    def debug(self, msg):
        # BUG FIX: was "DEBGU", which matched no branch above, so debug
        # messages were silently dropped.
        self.my_log("DEBUG", msg)

    def info(self, msg):
        self.my_log("INFO", msg)

    def warning(self, msg):
        # BUG FIX: was logging warnings at ERROR level (swapped with error()).
        self.my_log("WARNING", msg)

    def error(self, msg):
        # BUG FIX: was logging errors at WARNING level (swapped with warning()).
        self.my_log("ERROR", msg)

    def critical(self, msg):
        self.my_log("CRITICAL", msg)

    def exception(self, msg):
        self.my_log("exception", msg)
|
import pandas as pd
import numpy as np
import math
import sys
import argparse
import json
from urllib import request
from bs4 import BeautifulSoup
import hashlib
import os
import requests
import csv
genji_dir = "/Users/nakamurasatoru/git/d_genji"
prefix = "https://utda.github.io/genji"

manifests = []
id = "utokyo"
collection_label = "東大本"

# Fetch each of the 54 volume manifests and collect a summary entry
# for every one that downloads successfully.
for i in range(1, 55):
    manifest = "https://utda.github.io/genji/data/iiif/org/東大本/"+str(i).zfill(2)+"/manifest.json"
    print(manifest)
    try:
        m_data = requests.get(manifest).json()
    except Exception as e:
        print(i, e)
        # BUG FIX: the original fell through after a failed download and
        # reused `m_data` from the previous iteration (or raised
        # NameError on the first); skip this volume instead.
        continue
    uuid = m_data["related"].split("/")[-1]
    metadata = m_data["metadata"]
    label = m_data["label"]
    # Prefer the explicit "Title" metadata entry over the manifest label.
    for m in metadata:
        if m["label"] == "Title":
            label = m["value"]
    manifests.append({
        "@id": "https://iiif.dl.itc.u-tokyo.ac.jp/repo/iiif/"+uuid+"/manifest",
        "@type": "sc:Manifest",
        "attribution": "東京大学総合図書館 General Library, The University of Tokyo, JAPAN",
        "label": label,
        "license": m_data["license"],
        "thumbnail": m_data["thumbnail"]
    })

# IIIF Presentation-API collection document referencing every manifest.
collection = {
    "@context": "http://iiif.io/api/presentation/2/context.json",
    "@id": prefix + "/iiif/"+id+"/top.json",
    "@type": "sc:Collection",
    "label": collection_label,
    "manifests" : manifests,
    "vhint": "use-thumb"
}

# Write the collection JSON into the static site tree.
opath = genji_dir + "/genji/static/iiif/"+id+"/top.json"
dirname = os.path.dirname(opath)
os.makedirs(dirname, exist_ok=True)
with open(opath, 'w') as f:
    json.dump(collection, f, ensure_ascii=False, indent=4,
              sort_keys=True, separators=(',', ': '))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/30 16:12
# @Author : Jason
# @Site :
# @File : test2.py
# @Software: PyCharm
from multiprocessing import Pool
from multiprocessing import Process
def f2(nane):
    """Print a greeting for *nane*; used as a Process target."""
    greeting = "hello"
    print(greeting, nane)
def f(x):
    """Return the square of x; used as the Pool.map worker."""
    return x ** 2
if __name__ == '__main__':
    # Distribute f over the inputs across a pool of 5 worker processes
    # and print the collected results.
    with Pool(5) as p:
        print(p.map(f, [1, 2, 3]))
    # Separately spawn a single child process running f2 and wait for it.
    p = Process(target=f2, args=('bbb',))
    p.start()
    p.join()
|
from .client import Client
class Stats(Client):
    """Client for the Etherscan-style 'stats' API module."""

    def __init__(self, api_key='KMK786MB5AZYQSFS5CW3JQ9AAW4DCX3AX4'):
        # NOTE(review): a real-looking API key is hard-coded as the
        # default -- consider requiring callers to supply their own.
        Client.__init__(self, address='', api_key=api_key)
        self.module = self.URL_BASES['module'] + 'stats'

    def make_url(self, call_type=''):
        # Assemble self.url from the prefix/module/action/key pieces.
        # NOTE(review): self.url is only assigned when call_type ==
        # 'stats'; any other value leaves a stale or missing url.
        if call_type == 'stats':
            self.url = self.URL_BASES['prefix'] \
                + self.module \
                + self.action \
                + self.key

    def get_total_ether_supply(self):
        """Return the total ether supply reported by the API."""
        self.action = self.URL_BASES['action'] + 'ethsupply'
        self.make_url(call_type='stats')
        req = self.connect()
        return req['result']

    def get_ether_last_price(self):
        """Return the latest ether price reported by the API."""
        self.action = self.URL_BASES['action'] + 'ethprice'
        self.make_url(call_type='stats')
        req = self.connect()
        return req['result']
|
import numpy as np
import param
from boundingregion import BoundingBox, BoundingRegion
from dataviews import Stack, Histogram, DataStack, find_minmax
from ndmapping import NdMapping, Dimension
from options import options
from sheetcoords import SheetCoordinateSystem, Slice
from views import View, Overlay, Annotation, GridLayout
class SheetLayer(View):
    """
    A SheetLayer is a data structure for holding one or more numpy
    arrays embedded within a two-dimensional space. The array(s) may
    correspond to a discretisation of an image (i.e. a rasterisation)
    or vector elements such as points or lines. Lines may be linearly
    interpolated or correspond to control nodes of a smooth vector
    representation such as Bezier splines.
    """

    bounds = param.ClassSelector(class_=BoundingRegion, default=BoundingBox(), doc="""
        The bounding region in sheet coordinates containing the data.""")

    roi_bounds = param.ClassSelector(class_=BoundingRegion, default=None, doc="""
        The ROI can be specified to select only a sub-region of the bounds to
        be stored as data.""")

    _abstract = True

    def __init__(self, data, bounds, **kwargs):
        super(SheetLayer, self).__init__(data, bounds=bounds, **kwargs)

    def __mul__(self, other):
        """Overlay this layer with another layer, overlay or stack."""
        # Overlaying with a stack distributes the overlay across every
        # stack element.
        if isinstance(other, SheetStack):
            items = [(k, self * v) for (k, v) in other.items()]
            return other.clone(items=items)

        # Flatten existing overlays so the result stays one level deep.
        self_layers = self.data if isinstance(self, SheetOverlay) else [self]
        other_layers = other.data if isinstance(other, SheetOverlay) else [other]
        combined_layers = self_layers + other_layers

        # Annotations carry no bounds of their own; keep self's.
        if isinstance(other, Annotation):
            return SheetOverlay(combined_layers, self.bounds,
                                roi_bounds=self.roi_bounds,
                                metadata=self.metadata)

        # Fill in missing bounds from the other operand (note: mutates
        # the operand lacking bounds).
        if self.bounds is None:
            self.bounds = other.bounds
        elif other.bounds is None:
            other.bounds = self.bounds

        # Prefer an explicit ROI from either side; fall back to bounds.
        roi_bounds = self.roi_bounds if self.roi_bounds else other.roi_bounds
        roi_bounds = self.bounds if roi_bounds is None else roi_bounds
        return SheetOverlay(combined_layers, self.bounds,
                            metadata=self.metadata,
                            roi_bounds=roi_bounds)

    @property
    def xlim(self):
        # (left, right) extent in sheet coordinates
        l, _, r, _ = self.bounds.lbrt()
        return (l, r)

    @property
    def ylim(self):
        # (bottom, top) extent in sheet coordinates
        _, b, _, t = self.bounds.lbrt()
        return (b, t)
class SheetOverlay(SheetLayer, Overlay):
    """
    SheetOverlay extends a regular Overlay with bounds checking and an
    ROI property, which applies the roi_bounds to all SheetLayer
    objects it contains. When adding SheetLayers to an Overlay, a
    common ROI bounds is enforced.

    A SheetOverlay may be used to overlay lines or points over a
    SheetView. In addition, if an overlay consists of three or four
    SheetViews of depth 1, the overlay may be converted to an RGB(A)
    SheetView via the rgb property.
    """

    def add(self, layer):
        """
        Overlay a single layer on top of the existing overlay.
        """
        # Annotations are exempt from bounds checking.
        if isinstance(layer, Annotation):
            self.data.append(layer)
            return
        elif layer.bounds.lbrt() != self.bounds.lbrt():
            if layer.bounds is None:
                # A layer without bounds adopts the overlay's bounds.
                layer.bounds = self.bounds
            else:
                raise Exception("Layer must have same bounds as SheetOverlay")
        self.data.append(layer)

    @property
    def roi(self):
        """
        Apply the roi_bounds to all elements in the SheetOverlay
        """
        return SheetOverlay([el.get_roi(self.roi_bounds) for el in self.data],
                            bounds=self.roi_bounds if self.roi_bounds else self.bounds,
                            metadata=self.metadata)

    @property
    def range(self):
        # Combined (min, max) over the contained SheetViews; mixing
        # cyclic and non-cyclic views has no meaningful joint range.
        range = self[0].range
        cyclic = self[0].cyclic_range is not None
        for view in self:
            if isinstance(view, SheetView):
                if cyclic != (self[0].cyclic_range is not None):
                    raise Exception("Overlay contains cyclic and non-cyclic "
                                    "SheetViews, cannot compute range.")
                range = find_minmax(range, view.range)
        return range

    def __len__(self):
        return len(self.data)
class SheetView(SheetLayer, SheetCoordinateSystem):
    """
    SheetView is the atomic unit as which 2D data is stored, along with its
    bounds object. Allows slicing operations of the data in sheet coordinates or
    direct access to the data, via the .data attribute.

    Arrays with a shape of (X,Y) or (X,Y,Z) are valid. In the case of
    3D arrays, each depth layer is interpreted as a channel of the 2D
    representation.
    """

    cyclic_range = param.Number(default=None, bounds=(0, None), allow_None=True, doc="""
        For a cyclic quantity, the range over which the values repeat. For
        instance, the orientation of a mirror-symmetric pattern in a plane is
        pi-periodic, with orientation x the same as orientation x+pi (and
        x+2pi, etc.) A cyclic_range of None declares that the data are not
        cyclic. This parameter is metadata, declaring properties of the data
        that can be useful for automatic plotting and/or normalization, and is
        not used within this class itself.""")

    _deep_indexable = True

    def __init__(self, data, bounds=None, **kwargs):
        bounds = bounds if bounds else BoundingBox()
        data = np.array([[0]]) if data is None else data
        (l, b, r, t) = bounds.lbrt()
        (dim1, dim2) = data.shape[0], data.shape[1]
        # Density is samples per unit of sheet distance along each axis.
        xdensity = dim1/(r-l)
        ydensity = dim2/(t-b)
        SheetLayer.__init__(self, data, bounds, **kwargs)
        SheetCoordinateSystem.__init__(self, bounds, xdensity, ydensity)

    def __getitem__(self, coords):
        """
        Slice the underlying numpy array in sheet coordinates.
        """
        # BUG FIX: the original `coords is ()` relied on CPython's
        # interning of the empty tuple (a SyntaxWarning on 3.8+); use
        # equality instead.
        if coords == () or coords == slice(None, None):
            return self

        # Scalar (x, y) coordinates index a single sample.
        if not any([isinstance(el, slice) for el in coords]):
            return self.data[self.sheet2matrixidx(*coords)]
        if all([isinstance(c, slice) for c in coords]):
            # Clamp the requested slice to the view's own bounds.
            l, b, r, t = self.bounds.lbrt()
            xcoords, ycoords = coords
            xstart = l if xcoords.start is None else max(l, xcoords.start)
            xend = r if xcoords.stop is None else min(r, xcoords.stop)
            ystart = b if ycoords.start is None else max(b, ycoords.start)
            yend = t if ycoords.stop is None else min(t, ycoords.stop)
            bounds = BoundingBox(points=((xstart, ystart), (xend, yend)))
        else:
            raise IndexError('Indexing requires x- and y-slice ranges.')

        return SheetView(Slice(bounds, self).submatrix(self.data),
                         bounds, cyclic_range=self.cyclic_range,
                         label=self.label, style=self.style, metadata=self.metadata)

    def normalize(self, min=0.0, max=1.0, norm_factor=None):
        """Return a copy with data rescaled into [min, max].

        If norm_factor is not given, the cyclic range (when set) or the
        data's own span is used; an explicit norm_factor forces the
        target interval to (0, 1).
        """
        norm_factor = self.cyclic_range if norm_factor is None else norm_factor
        if norm_factor is None:
            norm_factor = self.data.max() - self.data.min()
        else:
            min, max = (0.0, 1.0)
        norm_data = (((self.data - self.data.min())/norm_factor) * abs((max-min))) + min
        return SheetView(norm_data, self.bounds, cyclic_range=self.cyclic_range,
                         metadata=self.metadata, roi_bounds=self.roi_bounds,
                         style=self.style)

    def hist(self, num_bins=20, bin_range=None, individually=True, style_prefix=None):
        """
        Returns a Layout of the SheetView with an attached histogram.
        num_bins allows customizing the bin number. The container_name
        can additionally be specified to set a common cmap when viewing
        a Stack or Overlay.
        """
        range = find_minmax(self.range, (0, None)) if bin_range is None else bin_range
        # Avoids range issues including zero bin range and empty bins
        if range == (0, 0):
            range = (0.0, 0.1)
        try:
            # NOTE(review): np.histogram's normed= keyword was removed in
            # modern numpy (density= is the replacement) -- confirm the
            # numpy version pinned by this project before upgrading.
            hist, edges = np.histogram(self.data.flatten(), normed=True,
                                       range=range, bins=num_bins)
        except Exception:
            # BUG FIX (narrowed): the original bare `except:` also
            # swallowed KeyboardInterrupt/SystemExit. On ordinary errors
            # fall back to an empty histogram over the requested range.
            edges = np.linspace(range[0], range[1], num_bins+1)
            hist = np.zeros(num_bins)
        hist[np.isnan(hist)] = 0
        hist_view = Histogram(hist, edges, cyclic_range=self.cyclic_range,
                              label=self.label + " Histogram",
                              metadata=self.metadata)

        # Set plot and style options
        style_prefix = 'Custom[<' + self.name + '>]_' if style_prefix is None else style_prefix
        opts_name = style_prefix + hist_view.label.replace(' ', '_')
        hist_view.style = opts_name
        options[opts_name] = options.plotting(self)(**dict(rescale_individually=individually))
        return hist_view

    @property
    def range(self):
        # (0, cyclic_range) for cyclic data, else the data's own span.
        if self.cyclic_range:
            return (0, self.cyclic_range)
        else:
            return (self.data.min(), self.data.max())

    @property
    def depth(self):
        # Number of channels: 1 for 2D arrays, else the third axis length.
        return 1 if len(self.data.shape) == 2 else self.data.shape[2]

    @property
    def mode(self):
        """
        Mode specifying the color space for visualizing the array data
        and is a function of the depth. For a depth of one, a colormap
        is used as determined by the style. If the depth is 3 or 4,
        the mode is 'rgb' or 'rgba' respectively.
        """
        if self.depth == 1:
            return 'cmap'
        elif self.depth == 3:
            return 'rgb'
        elif self.depth == 4:
            return 'rgba'
        else:
            raise Exception("Mode cannot be determined from the depth")

    @property
    def N(self):
        # Shorthand for the normalized view.
        return self.normalize()

    @property
    def roi(self):
        # The view clipped to its ROI bounds (full bounds when unset).
        bounds = self.roi_bounds if self.roi_bounds else self.bounds
        return self.get_roi(bounds)

    def get_roi(self, roi_bounds):
        """Return a new SheetView covering only roi_bounds."""
        if self.depth == 1:
            data = Slice(roi_bounds, self).submatrix(self.data)
        else:
            # Slice each channel separately and restack along depth.
            data = np.dstack([Slice(roi_bounds, self).submatrix(
                self.data[:, :, i]) for i in range(self.depth)])
        return SheetView(data, roi_bounds, cyclic_range=self.cyclic_range,
                         style=self.style, metadata=self.metadata)
class Points(SheetLayer):
    """
    Allows sets of points to be positioned over a sheet coordinate
    system.

    The input data is an Nx2 Numpy array where each point in the numpy
    array corresponds to an X,Y coordinate in sheet coordinates,
    within the declared bounding region.
    """

    def __init__(self, data, bounds=None, **kwargs):
        bounds = bounds if bounds else BoundingBox()
        # Default to an empty (0, 2) point array.
        data = np.array([[], []]).T if data is None else data
        super(Points, self).__init__(data, bounds, **kwargs)

    def resize(self, bounds):
        """Return a copy of the points embedded in the given bounds."""
        # BUG FIX: the original passed self.points, an attribute that is
        # never defined anywhere; the point array lives on self.data.
        return Points(self.data, bounds, style=self.style, metadata=self.metadata)

    def __len__(self):
        return self.data.shape[0]

    @property
    def roi(self):
        # BUG FIX: resolve the bounds fallback *before* filtering, so a
        # missing roi_bounds no longer tests membership against None.
        roi_bounds = self.roi_bounds if self.roi_bounds else self.bounds
        (N, _) = self.data.shape
        roi_data = self.data[[n for n in range(N)
                              if self.data[n, :] in roi_bounds]]
        return Points(roi_data, roi_bounds, style=self.style,
                      metadata=self.metadata)

    def __iter__(self):
        # Yield each point as an (x, y) tuple.
        i = 0
        while i < len(self):
            yield tuple(self.data[i, :])
            i += 1
class Contours(SheetLayer):
    """
    Allows sets of contour lines to be defined over a
    SheetCoordinateSystem.

    The input data is a list of Nx2 numpy arrays where each array
    corresponds to a contour in the group. Each point in the numpy
    array corresponds to an X,Y coordinate.
    """

    def __init__(self, data, bounds=None, **kwargs):
        bounds = bounds if bounds else BoundingBox()
        data = [] if data is None else data
        super(Contours, self).__init__(data, bounds, **kwargs)

    def resize(self, bounds):
        """Return a copy of the contours embedded in the given bounds."""
        # BUG FIX: the original passed self.contours, an attribute that
        # is never defined; the contour list lives on self.data.
        return Contours(self.data, bounds, style=self.style)

    def __len__(self):
        # BUG FIX: the data is a *list* of Nx2 arrays which has no
        # .shape attribute; the original `self.data.shape[0]` raised
        # AttributeError. The length is the number of contours.
        return len(self.data)

    @property
    def roi(self):
        # Note: Data returned is not sliced to ROI because vertices
        # outside the bounds need to be snapped to the bounding box
        # edges.
        bounds = self.roi_bounds if self.roi_bounds else self.bounds
        return Contours(self.data, bounds, style=self.style,
                        metadata=self.metadata)
class SheetStack(Stack):
"""
A SheetStack is a stack of SheetLayers over some dimensions. The
dimension may be a spatial dimension (i.e., a ZStack), time
(specifying a frame sequence) or any other dimensions along
which SheetLayers may vary.
"""
bounds = None
data_type = (SheetLayer, Annotation)
overlay_type = SheetOverlay
def drop_dimension(self, dim, val):
"""
Drop dimension from the NdMapping using the supplied
dimension name and value.
"""
slices = [slice(None) for i in range(self.ndims)]
slices[self.dim_index(dim)] = val
dim_labels = [d for d in self.dimension_labels if d != dim]
return self[tuple(slices)].reindex(dim_labels)
def _compute_samples(self, samples):
"""
Transform samples as specified to a format suitable for _get_sample.
May be overridden to compute transformation from sheetcoordinates to matrix
coordinates in single pass as an optimization.
"""
return [tuple(self.top.sheet2matrixidx(*s)) for s in samples]
def _get_sample(self, view, sample):
"""
Given a sample as processed by _compute_sample to extract a scalar sample
value from the view. Uses __getitem__ by default but can operate on the view's
data attribute if this helps optimize performance.
"""
return view.data[sample]
def _curve_labels(self, x_axis, sample, ylabel):
"""
Subclasses _curve_labels in regular Stack to correctly label curves
sampled from a SheetStack.
"""
curve_label = " ".join(["Coord:", str(sample), x_axis.capitalize(), ylabel])
return curve_label, x_axis.capitalize(), ylabel
def grid_sample(self, rows, cols, lbrt=None, **kwargs):
"""
Creates a CoordinateGrid of curves according sampled according to
the supplied rows and cols. A sub-region to be sampled can be specified
using the lbrt argument, which expresses the subsampling in sheet
coordinates. The usual sampling semantics apply.
"""
dim1, dim2 = self.top.shape
if lbrt is None:
l, t = self.top.matrixidx2sheet(0, 0)
r, b = self.top.matrixidx2sheet(dim1-1, dim2-1)
else:
l, b, r, t = lbrt
x, y = np.meshgrid(np.linspace(l, r, cols),
np.linspace(b, t, rows))
coords = zip(x.flat, y.flat)
shape = (rows, cols)
bounds = BoundingBox(points=[(l, b), (r, t)])
grid = self.sample(coords, **kwargs)
return DataGrid(bounds, shape, initial_items=zip(coords, grid.values()))
def map(self, map_fn, **kwargs):
"""
Map a function across the stack, using the bounds of first
mapped item.
"""
mapped_items = [(k, map_fn(el, k)) for k, el in self.items()]
return self.clone(mapped_items, bounds=mapped_items[0][1].bounds, **kwargs)
@property
def empty_element(self):
return self._type(None, self.bounds)
@property
def rgb(self):
if self.type == self.overlay_type:
return self.map(lambda x, _: x.rgb)
else:
raise Exception("Can only convert %s of overlays to RGB(A)" % self.__class__.__name__)
@property
def N(self):
return self.normalize()
@property
def roi(self):
return self.map(lambda x, _: x.roi)
def hist(self, num_bins=20, individually=False, bin_range=None):
histstack = DataStack(dimensions=self.dimensions, title=self.title,
metadata=self.metadata)
stack_range = None if individually else self.range
bin_range = stack_range if bin_range is None else bin_range
for k, v in self.items():
histstack[k] = v.hist(num_bins=num_bins, bin_range=bin_range,
individually=individually,
style_prefix='Custom[<' + self.name + '>]_')
return histstack
@property
def range(self):
range = self.top.range
for view in self._data.values():
range = find_minmax(range, view.range)
return range
def _item_check(self, dim_vals, data):
if isinstance(data, Annotation): pass
elif self.bounds is None:
self.bounds = data.bounds
elif not data.bounds.lbrt() == self.bounds.lbrt():
raise AssertionError("All SheetLayer elements must have matching bounds.")
super(SheetStack, self)._item_check(dim_vals, data)
    def normalize_elements(self, **kwargs):
        # Normalize each element independently, forwarding keyword options.
        return self.map(lambda x, _: x.normalize(**kwargs))
def normalize(self, min=0.0, max=1.0):
data_max = np.max([el.data.max() for el in self.values()])
data_min = np.min([el.data.min() for el in self.values()])
norm_factor = data_max-data_min
return self.map(lambda x, _: x.normalize(min=min, max=max,
norm_factor=norm_factor))
class CoordinateGrid(NdMapping, SheetCoordinateSystem):
    """
    CoordinateGrid indexes other NdMapping objects, containing projections
    onto coordinate systems. The X and Y dimensions are mapped onto the bounds
    object, allowing for bounds checking and grid-snapping.
    """

    dimensions = param.List(default=[Dimension(name="X"),
                                     Dimension(name="Y")])

    def __init__(self, bounds, shape, initial_items=None, **kwargs):
        # Derive grid densities from the bounds extent and requested shape;
        # fall back to 1 for degenerate (zero-extent) bounds to avoid
        # division by zero.
        (l, b, r, t) = bounds.lbrt()
        (dim1, dim2) = shape
        xdensity = dim1 / (r-l) if (r-l) else 1
        ydensity = dim2 / (t-b) if (t-b) else 1
        self._style = None
        SheetCoordinateSystem.__init__(self, bounds, xdensity, ydensity)
        super(CoordinateGrid, self).__init__(initial_items, **kwargs)

    def _add_item(self, coords, data, sort=True):
        """
        Subclassed to provide bounds checking.
        """
        if not self.bounds.contains(*coords):
            # NOTE(review): self.lbrt is presumably supplied by
            # SheetCoordinateSystem -- confirm it is not meant to be
            # self.bounds.lbrt().
            self.warning('Specified coordinate %s is outside grid bounds %s' % (coords, self.lbrt))
        self._item_check(coords, data)
        # Snap the coordinates onto the grid before storing.
        coords = self._transform_indices(coords)
        super(CoordinateGrid, self)._add_item(coords, data, sort=sort)

    def _transform_indices(self, coords):
        # Snap each coordinate component independently (0 -> x, 1 -> y).
        return tuple([self._transform_index(i, coord)
                      for (i, coord) in enumerate(coords)])

    def _transform_index(self, dim, index):
        # Slices have both endpoints snapped; scalar values are snapped
        # directly to the nearest cell centre.
        if isinstance(index, slice):
            [start, stop] = [self._transform_value(el, dim)
                             for el in (index.start, index.stop)]
            return slice(start, stop)
        else:
            return self._transform_value(index, dim)

    def _transform_value(self, val, dim):
        """
        Subclassed to discretize grid spacing.
        """
        if val is None: return None
        # closest_cell_center expects an (x, y) pair; pad the other axis
        # with 0 and select the snapped component for this dimension.
        return self.closest_cell_center(*((0, val) if dim else (val, 0)))[dim]

    def update(self, other):
        """
        Adds bounds checking to the default update behavior.
        """
        if hasattr(other, 'bounds') and (self.bounds.lbrt() != other.bounds.lbrt()):
            raise Exception('Cannot combine %ss with different'
                            ' bounds.' % self.__class__)
        super(CoordinateGrid, self).update(other)

    def clone(self, items=None, **kwargs):
        """
        Returns an empty duplicate of itself with all parameter values and
        metadata copied across.
        """
        settings = dict(self.get_param_values(), **kwargs)
        # metadata is passed explicitly below, so drop any duplicate.
        settings.pop('metadata', None)
        return CoordinateGrid(bounds=self.bounds, shape=self.shape,
                              initial_items=items,
                              metadata=self.metadata, **settings)

    def __mul__(self, other):
        # Overlay 'other' onto every element; a length-1 SheetStack is
        # unwrapped to its single element first.
        if isinstance(other, SheetStack) and len(other) == 1:
            other = other.top
        overlayed_items = [(k, el * other) for k, el in self.items()]
        return self.clone(overlayed_items)

    @property
    def top(self):
        """
        The top of a ProjectionGrid is another ProjectionGrid
        constituted of the top of the individual elements. To access
        the elements by their X,Y position, either index the position
        directly or use the items() method.
        """
        # NOTE(review): v.keys()[-1] relies on Python 2 dict-style keys()
        # returning a list.
        top_items = [(k, v.clone(items=(v.keys()[-1], v.top)))
                     for (k, v) in self.items()]
        return self.clone(top_items)

    def __len__(self):
        """
        The maximum depth of all the elements. Matches the semantics
        of __len__ used by SheetStack. For the total number of
        elements, count the full set of keys.
        """
        return max([len(v) for v in self.values()] + [0])

    def __add__(self, obj):
        if not isinstance(obj, GridLayout):
            return GridLayout(initial_items=[self, obj])
        # NOTE(review): falls through and returns None when obj is already
        # a GridLayout -- confirm this is intended.

    def map(self, map_fn, **kwargs):
        """
        Map a function across the stack, using the bounds of first
        mapped item.
        """
        mapped_items = [(k, map_fn(el, k)) for k, el in self.items()]
        if isinstance(mapped_items[0][1], tuple):
            # map_fn returned tuples: split into parallel item groups and
            # clone one grid per tuple position.
            split = [[(k, v) for v in val] for (k, val) in mapped_items]
            item_groups = [list(el) for el in zip(*split)]
        else:
            item_groups = [mapped_items]
        clones = tuple(self.clone(els, **kwargs)
                       for (i, els) in enumerate(item_groups))
        return clones if len(clones) > 1 else clones[0]

    @property
    def xlim(self):
        # Combined x-limits over all elements.
        # NOTE(review): .values()[-1] relies on Python 2 list-returning values().
        xlim = self.values()[-1].xlim
        for data in self.values():
            xlim = find_minmax(xlim, data.xlim)
        return xlim

    @property
    def ylim(self):
        # Combined y-limits over all elements; a degenerate range is
        # widened by 1 so plots always have non-zero height.
        ylim = self.values()[-1].ylim
        for data in self.values():
            ylim = find_minmax(ylim, data.ylim)
        if ylim[0] == ylim[1]: ylim = (ylim[0], ylim[0]+1.)
        return ylim

    @property
    def style(self):
        """
        The name of the style that may be used to control display of
        this view. If a style name is not set and but a label is
        assigned, then the closest existing style name is returned.
        """
        if self._style:
            return self._style
        class_name = self.__class__.__name__
        matches = options.fuzzy_match_keys(class_name)
        return matches[0] if matches else class_name

    @style.setter
    def style(self, val):
        # Explicitly-set style name takes precedence over fuzzy matching.
        self._style = val
class DataGrid(CoordinateGrid):
    """
    DataGrid is mostly the same as CoordinateGrid, however it contains
    DataLayers or DataStacks as elements and can therefore not be overlaid
    with SheetLayer elements.
    """

    def __add__(self, obj):
        # Layout composition is not supported for DataGrids.
        raise NotImplementedError

    def __mul__(self, other):
        # Overlaying is not supported for DataGrids.
        raise NotImplementedError
# Public API: every class defined in this module that is an NdMapping or
# View subclass, collected automatically.
__all__ = list(set([_k for _k,_v in locals().items() if isinstance(_v,type) and
                    (issubclass(_v, NdMapping) or issubclass(_v, View))]))
|
from django.db import models
from django.contrib.auth.models import AbstractUser
import datetime
# Create your models here.
class Teacher(AbstractUser):
    """Custom user model representing a teacher (surname, name, patronymic)."""
    surname = models.CharField(verbose_name='Фамилия', max_length=20)
    name = models.CharField(verbose_name='Имя', max_length=20)
    second_name = models.CharField(verbose_name='Отчество', max_length=20, blank=True)

    def __str__(self):
        # "Surname Name Patronymic"
        return ' '.join([str(self.surname), str(self.name), str(self.second_name)])
class Subject(models.Model):
    """A school subject, identified by its name."""
    name = models.CharField(verbose_name='Название предмета', max_length=50)

    def __str__(self):
        return '%s' % self.name
class Group(models.Model):
    """A student group: a course year plus a group number."""
    # (stored value, human-readable label); values 5 and 6 are master's years.
    COURSE = (
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
        (5, '1 маг.'),
        (6, '2 маг.'),
    )
    course = models.IntegerField(verbose_name='Курс', choices=COURSE, default=1)
    number = models.PositiveIntegerField(verbose_name='Номер группы', default=1)

    def __str__(self):
        return '%s курс %s гр.' % (self.course, self.number)
class Student(models.Model):
    """A student with a full name and an (optional) group membership."""
    surname = models.CharField(verbose_name='Фамилия', max_length=20)
    name = models.CharField(verbose_name='Имя', max_length=20)
    second_name = models.CharField(verbose_name='Отчество', max_length=20, blank=True)
    group = models.ForeignKey(Group, on_delete=models.SET_NULL, null=True, verbose_name='Группа')

    def __str__(self):
        parts = [str(self.surname), str(self.name), str(self.second_name), str(self.group)]
        return ' '.join(parts)
# A course of lectures: links a teacher to the subject they teach.
class Curriculum(models.Model):
    """Pairs a teacher with a subject."""
    teacher = models.ForeignKey(Teacher, on_delete=models.SET_NULL, null=True, verbose_name='Преподаватель')
    subject = models.ForeignKey(Subject, on_delete=models.SET_NULL, null=True, verbose_name='Предмет')

    def __str__(self):
        return 'Преподаватель: %s\nПредмет: %s' % (self.teacher, self.subject)
class GroupCurriculum(models.Model):
    """Assigns a curriculum (teacher + subject) to a group for a semester,
    with the type of class (seminar / lecture / practical)."""
    group = models.ForeignKey(Group, on_delete=models.SET_NULL, null=True, verbose_name='Группа')
    curriculum = models.ForeignKey(Curriculum, on_delete=models.SET_NULL, null=True, verbose_name='Учебный курс')
    semester = models.PositiveIntegerField(verbose_name='Семестр')
    TYPE_OF_CLASS = (
        (1, 'Семинар'),
        (2, 'Лекция'),
        (3, 'Практика')
    )
    type = models.IntegerField(choices=TYPE_OF_CLASS, default=2, verbose_name='Тип занятия')

    def __str__(self):
        # get_type_display() is Django's documented way to resolve the
        # human-readable label of a choices field. The previous
        # TYPE_OF_CLASS[int(str(self.type))-1][1] indexing silently relied
        # on choice values being contiguous 1-based tuple positions.
        return '%s\n %s\n %s семестр\n%s' % (self.curriculum, self.group,
                                             self.semester, self.get_type_display())

    def get_group(self):
        """Return the related Group instance (may be None)."""
        return self.group
class Visit(models.Model):
    """Attendance record: whether a student attended a class on a date."""
    group_curriculum = models.ForeignKey(GroupCurriculum, verbose_name='Учебный курс', on_delete=models.SET_NULL, null=True)
    student = models.ForeignKey(Student, on_delete=models.SET_NULL, null=True, verbose_name='Студент')
    date = models.DateField(verbose_name='Дата')
    visit = models.BooleanField(verbose_name="Посетил")

    def __str__(self):
        return '%s\n %s\n%s\n%s' % (self.group_curriculum, self.student,
                                    self.date, self.visit)
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import User
def index(request):
    """Landing page: logged-in users go to /success, others see the login form."""
    if 'user' not in request.session:
        return render(request, 'logApp/index.html')
    return redirect('/success')
def new(request):
    """Register a new user from POSTed form data.

    On validation errors, flash each error (tagged by field) and return to
    the form; on success, store the new user's id in the session.
    Non-POST requests are redirected to the index page (previously the
    function fell through and returned None, which is not a valid view
    response).
    """
    if request.method != "POST":
        return redirect('/')
    errors = User.objects.reg_val(request.POST)
    if errors:
        # Debug 'print errors' removed; dict.items() works on both
        # Python 2 and 3 (iteritems() was Python 2 only).
        for tag, error in errors.items():
            messages.error(request, error, extra_tags=tag)
        return redirect('/')
    request.session['user'] = User.objects.last().id
    return redirect('/success')
def success(request):
    """Show the success page for the logged-in user, or bounce to the index."""
    if 'user' in request.session:
        current = User.objects.get(id=request.session['user'])
        # NOTE(review): 'sucess.html' looks misspelled but is kept as-is --
        # it must match the actual template filename on disk.
        return render(request, 'logApp/sucess.html', {"User": current})
    return redirect('/')
def signin(request):
    """Log an existing user in from POSTed credentials.

    Flashes validation errors and returns to the form on failure; stores
    the user's id in the session on success. Non-POST requests are
    redirected to the index page (previously the function returned None
    for them, which is not a valid view response).
    """
    if request.method != "POST":
        return redirect('/')
    errors = User.objects.log_val(request.POST)
    if errors:
        # dict.items() works on both Python 2 and 3 (iteritems() was 2-only).
        for tag, error in errors.items():
            messages.error(request, error, extra_tags=tag)
        return redirect('/')
    request.session['user'] = User.objects.get(email=request.POST['email']).id
    return redirect('/success')
def logout(request):
    """Clear the session's user id (if any) and return to the index page."""
    # pop with a default so logging out twice (or with an empty session)
    # no longer raises KeyError.
    request.session.pop('user', None)
    return redirect('/')
# Create your views here.
|
#!/usr/bin/env python3
from typing import List, Tuple
import os
import nltk
from contextlib import redirect_stdout
# Download required NLTK corpora quietly: nltk.download() reports progress
# on stdout, so redirect it to os.devnull for the duration. Using a with
# statement for open() closes the devnull handle afterwards instead of
# leaking it (the previous bare open() was never closed).
with open(os.devnull, "w") as _devnull, redirect_stdout(_devnull):
    nltk.download('punkt')
    nltk.download('wordnet')
    nltk.download('averaged_perceptron_tagger')
def cached(method):
    """
    Method decorator for the Document class, caching results if enabled.

    Results are keyed on the full argument tuple (including ``self``) and
    kept for the lifetime of the process, so the cache also keeps the
    instances it has seen alive. Caching is skipped entirely when the
    instance's ``_use_cache`` flag is false.
    """
    from functools import wraps

    cache = {}

    @wraps(method)  # preserve the wrapped method's __name__ and __doc__
    def wrapper(*args):
        # args[0] is ``self``; honor the per-instance opt-out.
        if not args[0]._use_cache:
            return method(*args)
        key = tuple(args)
        if key not in cache:
            cache[key] = method(*args)
        return cache[key]

    return wrapper
class Document:
    """
    Base class for Document, a datatype that stores text and enables
    basic textwise feature extraction, such as listing words or sentences.
    """

    def __init__(self, document: str, **kwargs) -> None:
        """
        Create a new Document.

        Arguments:
            document (str): The text itself, or a path to a file to load
            use_cache (bool: True): Cache sentences and words. This is
                especially useful if you anticipate referencing these
                functions commonly. Disable this on ridiculously large
                documents to save on memory usage.
            no_cache (bool: False): Inverse alias of ``use_cache`` kept
                for backward compatibility (wins if both are supplied).
        """
        # The docstring historically promised ``use_cache`` while the code
        # only read ``no_cache``; honor both.
        self._use_cache = bool(kwargs.get('use_cache', True))
        if 'no_cache' in kwargs:
            self._use_cache = not kwargs['no_cache']
        # Treat ``document`` as a path if such a file exists; otherwise it
        # is the raw text. os.path.exists can raise ValueError/OSError for
        # strings that cannot be paths (e.g. embedded NUL bytes), which
        # simply means the argument is text.
        try:
            is_path = os.path.exists(document)
        except (ValueError, OSError):
            is_path = False
        if is_path:
            with open(document, 'r') as f:
                self._text = f.read()
        else:
            self._text = document

    def text(self) -> str:
        """
        Returns the text of the document.

        Returns:
            str
        """
        return self._text

    @cached
    def sentences(self) -> List[str]:
        """
        Compute a list of sentences.

        Uses nltk.sent_tokenize.

        Returns:
            List[str]
        """
        return [s.replace('\n', ' ') for s in nltk.sent_tokenize(self._text)]

    @cached
    def words(self) -> List[str]:
        """
        Compute a list of words from this Document.

        Uses nltk.word_tokenize.

        Returns:
            List[str]
        """
        return nltk.word_tokenize(self._text)

    @cached
    def words_with_indices(self) -> List[Tuple[str, int, int]]:
        """
        Compute a list of words, with beginning and end character indices
        into the original text.

        Returns:
            List[Tuple[str, int, int]]
        """
        offset = 0
        token_indices = []
        for word in self.words():
            # Search from the previous offset so repeated words map to
            # successive occurrences.
            # NOTE(review): str.find returns -1 when the tokenizer has
            # normalized a token (e.g. quote characters) -- confirm inputs.
            offset = self._text.find(word, offset)
            token_indices.append((word, offset, offset + len(word)))
            offset += len(word)
        return token_indices

    @cached
    def words_by_part_of_speech(self) -> dict:
        """
        Compute the parts of speech for each word in the document.

        Uses nltk.pos_tag.

        Returns:
            dict: POS tag -> list of words carrying that tag
        """
        tagged = nltk.pos_tag(self.words())
        categories = {}
        # Loop variable renamed from 'type' to avoid shadowing the builtin.
        for tag in {t[1] for t in tagged}:
            categories[tag] = [t[0] for t in tagged if t[1] == tag]
        return categories

    @cached
    def stemmed_words(self) -> List:
        """
        Compute the stems of words.

        Uses nltk.PorterStemmer.

        Returns:
            List
        """
        porter = nltk.PorterStemmer()
        return [porter.stem(w) for w in self.words()]
|
import requests
import json
# resp = requests.get("https://status.github.com/api/status.json")
# txt = resp.text
# obj = json.loads(txt)
#
# print(obj)
# print(type(obj))
# print(obj)
########################################################################
# class User(object):
# def __init__(self, name, username, *args, **kwargs):
# self.name = name
# self.username = username
#
# import json
# j = json.loads('{"name": "John Smith", "username": "jsmith"}')
# u = User(**j)
# print (u.name)
########################################################################
#
# class AutoVar(object):
# def __init__(self, data):
# self.__dict__ = data
#
# json_data = '{"a": "my data"}'
# data = json.loads(json_data)
#
# test = AutoVar(data)
# print (test.a)
class Something(object):
    """Demonstrates hydrating attributes from a parsed-JSON dict."""

    @classmethod
    def _load_from_json(cls, blob):
        """Set each key/value pair in ``blob`` as an attribute on the class.

        Note this mutates the *class*, so all instances observe the values
        (matching the original behaviour).
        """
        # dict.iteritems() is Python 2 only; items() works on both 2 and 3.
        for key, value in blob.items():
            setattr(cls, key, value)
|
#Created on July 7, 2014, adjusted Dec 11, 2015 by chahn
#@author: rspies
# Python 2.7
# This script plots a raster image of an input stream dischare txt file
# NOTE(review): Python 2 script (print statements, integer division below).
import os
import matplotlib.pyplot as plt
#Turn interactive plot mode off (don't show figures)
plt.ioff()
import matplotlib.ticker as ticker
from matplotlib import cm
from matplotlib.colors import LogNorm
import numpy as np
import pandas as pd
import datetime

# Resolve the main calibration directory relative to the repository root.
os.chdir("../..")
maindir = os.getcwd() + os.sep + 'Calibration_NWS' + os.sep
############################### User input ###################################
##############################################################################
##### IMPORTANT: Make sure to call the correct CHPS .csv output columns ######
##### and specify the calibration period in next section
add_obs_Q_plot = 'yes' # yes/no to create a subplot of the observed data for same period
sim_type = 'draft' # choices: initial (prior to calib), final (final calib), working (currently in calib process), draft
RFC = 'NWRFC_FY2016'
basin_ids = [] # run individual basin(s) instead of all basins in dir -> else leave empty []
error_types = ['bias','accum'] # choices: pbias, bias, NAE (normalized absolute error)
fig_name = '_bias_pbias_' + sim_type #' Calb Raster Analysis' or '_bias_pbias_test'
resolution = 350 #350->(for report figs) 100->for CHPS fx help tab display (E19)
################ Define the corresponding column of data in the csv file #################
yr_start = 1979; yr_end = 2011 #
#ignore_basins = ['MADO3','WDHN2','PHLO3','BUSO3','DONO3','EGCO3','TRAO3','TRSO3','SNDO3','MCZO3','ISSW1','PILW1','CALW1','SMRW1','MFPI1','BRFI1','MORI1','BTSI1','PRII1','PINI1','ASCW1']
########### find all basin QME vs SQME .csv files ############
if len(basin_ids) == 0:
    basin_ids = []
    all_files = os.listdir(maindir + RFC[:5] + os.sep + RFC + os.sep + 'Calibration_TimeSeries'+ os.sep + sim_type + os.sep + 'QME_SQME' + os.sep)
    for each in all_files:
        if each.endswith(".csv"):
#            if each[:6].rstrip('_') not in ignore_basins:
            basin_ids.append(each[:6].rstrip('_'))
#basin_ids = ['MADO3','WDHN2','PHLO3','BUSO3','DONO3','EGCO3','TRAO3','TRSO3','SNDO3','MCZO3','ISSW1','PILW1','CALW1','SMRW1','MFPI1','BRFI1','MORI1','BTSI1','PRII1','PINI1','ASCW1'] # <-- use this to run specific basins
########### loop through all desired basins and define min/max errors for plots ############
# NOTE(review): the indentation below was reconstructed from a
# whitespace-mangled source -- confirm the loop nesting against the
# original script before relying on it.
for basin_id in basin_ids:
    print basin_id
    calib_read = open(maindir + RFC[:5] + os.sep + RFC + os.sep + 'Calibration_TimeSeries' + os.sep + sim_type+ os.sep + 'QME_SQME' + os.sep + basin_id + '_QME_SQME.csv', 'r') #test!!!
    # output figure directory
    out_dir = maindir + RFC[:5] + os.sep + RFC + os.sep + 'Calibration_TimeSeries' + os.sep + sim_type + os.sep + 'raster_hydrograph_plots' + os.sep #+ 'E19' + os.sep
    # Colour-scale limits grouped by basin size category.
    low_cat = ['ASCW1','ISSW1','MFPI1','PHLO3','PILO3','PILW1','PINI1','SMRW1','WDHN2']
    mid_cat = ['BTSI1','BUSO3','DONO3','EGCO3','MADO3','PRII1','SNDO3','TRAO3']
    big_cat = ['BRFI1','MCZO3','MORI1']
    if basin_id in low_cat:
        cminb = -50; cmaxb = 50
        cmina = -1000; cmaxa =1000
    elif basin_id in mid_cat:
        cminb = -100; cmaxb = 100
        cmina = -2500; cmaxa =2500
    elif basin_id in big_cat:
        cminb = -100; cmaxb = 100
        cmina = -3000; cmaxa =3000
    else:
        cminb = -100; cmaxb = 100
        cmina = -1000; cmaxa =1000
    ############ End User input ##################################################
    ###### tab delimitted CHPS calibrated AND Observed dishcarge text file into panda arrays ###########
    ### replaces hour time stamp with zero to create a mean daily value -> match obs data
    test = pd.read_csv(calib_read,sep=',',skiprows=2,
                       usecols=[0,1,2],parse_dates=['date'],names=['date', 'QME', 'SQME'])
    ### assign column data to variables
    print 'Populating data arrays for calibrated dishcarge... and converting to daily values'
    date_calib = test['date'].tolist() # convert to list (indexible)
    Q_calib = test['SQME'].tolist()
    date_Q_calib = {}; count = 0 # read the data into a dictionary (more efficient processing)
    for each_day in date_calib:
        if yr_start <= int(each_day.year) <= yr_end:
            if each_day.replace(hour=0) in date_Q_calib:
                if float(Q_calib[count]) >= 0: # ignore data less than 0
                    date_Q_calib[each_day.replace(hour=0)].append(Q_calib[count])
            else:
                if float(Q_calib[count]) >= 0:
                    date_Q_calib[each_day.replace(hour=0)] = [Q_calib[count]]
        count += 1
    ###### tab delimitted CHPS observed data text file into panda arrays ###########
    ### replaces hour time stamp with zero to create a mean daily value -> match obs data
    date = test['date'].tolist() # convert to list (indexible)
    discharge = test['QME'].tolist()
    date_Q = {}; count = 0 # read the data into a dictionary (more efficient processing)
    for each_day in date:
        if yr_start <= int(each_day.year) <= yr_end:
            if each_day.replace(hour=0) in date_Q:
                if float(discharge[count]) >= 0:
                    date_Q[each_day.replace(hour=0)].append(float(discharge[count]))
            else:
                if float(discharge[count]) >= 0:
                    date_Q[each_day.replace(hour=0)] = [float(discharge[count])]
        count += 1
    calib_read.close()
    ################## Create matrix of observed and calibrated data #####################
    print 'Creating matrix of data...'
    print 'Ignoring leap days...'
    start=pd.datetime(yr_start,1,1); end=pd.datetime(yr_end,12,31); delta = datetime.timedelta(days=1)
    gage_Q = []
    print 'Parsing daily observed gage dishcarge data...'
    while start <= end:
        date_loop = pd.to_datetime(start)
        #ignore leap year day (maintains equal matrix dimensions)
        if date_loop.month == 2 and date_loop.day == 29:
            print 'Ignoring: ' + str(date_loop)
        else:
            if date_loop in date_Q:
                if float(date_Q[date_loop][0]) < 0.0: # replace negative Q with nan
                    gage_Q.append(np.nan)
                elif float(date_Q[date_loop][0]) <= 0.1: # replace Q values btw 0.0 and 0.1 with 0.1 (log plotting issues)
                    gage_Q.append(0.1)
                else:
                    gage_Q.append(float(date_Q[date_loop][0])) # add each day of available data to new list
            else:
                gage_Q.append(np.nan) # set missing observed to nan (ignored in plot and analysis)
        start += delta
    print 'Parsing daily calibrated dishcarge data...'
    start=pd.datetime(yr_start,1,1); chps_Q = []
    while start <= end:
        date_loop = pd.to_datetime(start)
        #ignore leap year day (maintains equal matrix dimensions)
        if date_loop.month == 2 and date_loop.day == 29:
            print 'Ignoring: ' + str(date_loop)
        else:
            if date_loop in date_Q_calib:
                if float(np.average(date_Q_calib[date_loop])) < 0.0: # replace negative Q with nan
                    chps_Q.append(np.nan)
                elif float(np.average(date_Q_calib[date_loop])) <= 0.1:
                    chps_Q.append(0.1)
                else:
                    chps_Q.append(np.average(date_Q_calib[date_loop])) # add each day of available data to new list
            else:
                chps_Q.append(np.nan) #set missing observed to nan (ignored in plot and analysis)
        start += delta
    ### flip matrix upside down to plot most recent data on top
    ediff = (np.asarray(chps_Q)-np.asarray(gage_Q))#/np.asarray(gage_Q)
    ema = np.ma.masked_invalid(ediff)
    #eadd = np.cumsum(ema)
    # NOTE(review): len(gage_Q)/365 relies on Python 2 integer division.
    error_cum = ema.reshape((len(gage_Q)/365),365)
    obs_Q = np.flipud(np.asarray(gage_Q).reshape((len(gage_Q)/365),365))
    calib_Q = np.flipud(np.asarray(chps_Q).reshape((len(chps_Q)/365),365))
    fig = plt.figure(figsize=(8,10))
    cmap =cm.seismic_r
    ############################# create plot(s) ##########################################
    #######################################################################################
    for error_type in error_types:
        ### set all nan (masked array) values to grey
        cmap.set_bad('k',0.3) # defining here seems to get applied to all plots?? (color,opacity)
        ########################## error calculations #####################################
        # Bias = (calib-obs)
        if error_type == 'bias':
            error = (calib_Q-obs_Q)
            #error = np.ma.array(error, mask=np.isnan(error))
            text = 'SQME Daily Bias ($m^3$$s^{-1}$)'
            label = 'Bias'
            #cmin = -50; cmax = 50
            cmin = cminb; cmax = cmaxb
            cmap =cm.seismic_r
            if add_obs_Q_plot == 'yes':
                ax1 = fig.add_subplot(312)
            else:
                ax1 = fig.add_subplot(212)
        # Accumulated Bias
        if error_type == 'accum':
            error = np.flipud(np.cumsum(error_cum,axis=1))
            #error = np.cumsum(((calib_Q-obs_Q)/obs_Q),axis=1).reshape((len(gage_Q)/365),365)
            text = 'SQME Annual Accumulated Bias ($m^3$$s^{-1}$)'
            label = 'Accumbias'
            cmin = cmina; cmax = cmaxa
            cmap =cm.seismic_r
            if add_obs_Q_plot == 'yes':
                ax1 = fig.add_subplot(313)
            else:
                ax1 = fig.add_subplot(213)
            print 'Creating plot...'
        # %Bias = 100 * [(calib-obs)/obs]
        if error_type == 'pbias':
            error = 100*((calib_Q-obs_Q)/obs_Q)
            text = 'SQME Daily Percent Bias (%)'
            label = 'Pbias'
            cmin = -100; cmax = 100
            cmap =cm.seismic_r
            if add_obs_Q_plot == 'yes':
                ax1 = fig.add_subplot(313)
            else:
                ax1 = fig.add_subplot(213)
            print 'Creating plot...'
        #cmap.set_bad('k',0.3)
        ### create image: aspect set to auto (creates a square plot with any data)
        ### vmin->sets values less than 0 to missing
        image = ax1.imshow(error, cmap=cmap, aspect='auto',interpolation='none')#extent=[1,365,50,-50]
        image.set_clim(cmin,cmax)
        cbar = fig.colorbar(image,format='%.1f')#,extend='min',extendrect=True,extendfrac=.1)
        #### color bar and axis properties ###
        #cbar.ax.set_ylabel(text, rotation=270,labelpad=20,fontsize=10)
        cbar.ax.tick_params(labelsize=8)
        ax1.tick_params(axis='y', which='major', labelsize=8)
        ax1.tick_params(axis='x', which='major', labelsize=10)
        #### axis adjustments ####
        if yr_end-yr_start > 30: # reduces overcrowding of y-axis tick labels (less frequent ticks with longer data)
            ax1.locator_params(axis='y',nbins=(yr_end-yr_start)/2,tight=True) #set number of y ticks
        else:
            ax1.locator_params(axis='y',nbins=(yr_end-yr_start),tight=True) #set number of y ticks
        ax1.xaxis.set_minor_locator(ticker.FixedLocator([31,59,90,120,151,181,212,243,273,304,334,365]))
        ax1.xaxis.set_major_locator(ticker.FixedLocator([15,45,74,105,135,166,196,227,258,288,319,349]))
        ax1.set_xticklabels(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'])
        ax1.xaxis.grid(b=True, which='minor', color='k', linestyle='--')
        ### set tick marks properties
        ax1.tick_params(which='minor', length=5,width=1.5)
        ax1.tick_params(axis='x',which='major', length=0,width=0)
        lala = ax1.yaxis.get_majorticklocs() # find the automatic tick mark locations
        # create a list of the years used in plot and add to the appropriate
        # tick location
        yticks = []; all_years = []; new_labels = []
        start = yr_start
        while start <= yr_end:
            all_years.append(str(start))
            start += 1
        all_years.reverse()
        for each in lala:
            yticks.append(int(each))
        for each in yticks:
            if each < 0 or each == max(yticks):
                new_labels.append('')
            else:
                new_labels.append(all_years[each])
        ax1.set_yticklabels(new_labels)
        # Derive a log-scale colour range for the observed-discharge subplot.
        obs_Q = np.ma.masked_invalid(obs_Q) # mask the observed nan data
        if np.min(obs_Q) <10.0:
            cmin = 1
        elif np.min(obs_Q) < 100.0:
            cmin = 10
        elif np.min(obs_Q) < 1000.0:
            cmin = 100
        else:
            cmin = 1000
        ticks_in = []
        tick_labels = []
        cmin_int = cmin
        while cmin_int <= np.max(obs_Q):
            ticks_in.append(float(cmin_int))
            tick_labels.append(str(int(cmin_int)))
            cmin_int = cmin_int * 2
        #if np.max(obs_Q) > 1.0 and np.max(obs_Q) < 10.0:
        #    cmax = 10
        #if np.max(obs_Q) > 10.0 and np.max(obs_Q) < 100.0:
        #    cmax = 100
        #if np.max(obs_Q)> 100.0 and np.max(obs_Q) < 1000.0:
        #    cmax = 1000
        #if np.max(obs_Q) > 1000 and np.max(obs_Q) < 10000.0:
        #    cmax = 10000
        cmax = int(np.max(obs_Q))
        #### axis and title properties ###
        #ax1.set_xlabel('Day of Year')
        ax1.set_ylabel('Calendar Year',fontsize=10)
        ax1.set_title(basin_id + ': ' + text + ' ' + str(yr_start) + '-' + str(yr_end),fontsize=12)
    ############# optional: add side-by-side plot of observed dishcarge #################
    if add_obs_Q_plot == 'yes':
        ax2 = fig.add_subplot(311)
        #cmap.set_bad('w',1)
        cmap=cm.jet_r
        cmap.set_bad('k',0.3)
        image = ax2.imshow(obs_Q, cmap=cmap, aspect='auto', norm=LogNorm(),interpolation='none')#extent=[1,365,50,-50]
        image.set_clim(cmin,cmax)
        cbar = fig.colorbar(image,shrink=0.9,format='%i',ticks = ticks_in,extendfrac=.1)#extend='min', extendrect=True
        cbar.ax.set_yticklabels(tick_labels)
        #ax1.text(398,39, 'Missing',fontsize = 9)#, style='italic')#,bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
        #ax1.text(380,24, 'Max: ' + str(int(np.max(obs_Q))), fontsize = 9)
        #### color bar properties ###
        text = 'Daily QME ($m^3$$s^{-1}$)'
        #cbar.ax.set_ylabel(text, rotation=270,labelpad=20)
        #### axis adjustments ####
        if yr_end-yr_start > 30: # reduces overcrowding of y-axis tick labels (less frequent ticks with longer data)
            ax2.locator_params(axis='y',nbins=(yr_end-yr_start)/2,tight=True) #set number of y ticks
        else:
            ax2.locator_params(axis='y',nbins=(yr_end-yr_start),tight=True) #set number of y ticks
        ax2.xaxis.set_minor_locator(ticker.FixedLocator([31,59,90,120,151,181,212,243,273,304,334,365]))
        ax2.xaxis.set_major_locator(ticker.FixedLocator([15,45,74,105,135,166,196,227,258,288,319,349]))
        ax2.set_xticklabels(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'])
        ### set tick marks properties
        ax2.tick_params(which='minor', length=5,width=1.5)
        ax2.tick_params(axis='x',which='major', length=0,width=0)
        lala = ax2.yaxis.get_majorticklocs() # find the automatic tick mark locations
        ax2.xaxis.grid(b=True, which='minor', color='k', linestyle='--')
        # create a list of the years used in plot and add to the appropriate
        # tick location
        yticks = []; all_years = []; new_labels = []
        start = yr_start
        while start <= yr_end:
            all_years.append(str(start))
            start += 1
        all_years.reverse()
        for each in lala:
            yticks.append(int(each))
        for each in yticks:
            if each < 0 or each == max(yticks):
                new_labels.append('')
            else:
                new_labels.append(all_years[each])
        ax2.set_yticklabels(new_labels)
        #### colorbar, axis and title properties ###
        cbar.ax.tick_params(labelsize=8)
        ax2.tick_params(axis='y', which='major', labelsize=8)
        ax2.tick_params(axis='x', which='major', labelsize=10)
        ax2.set_ylabel('Calendar Year',fontsize=10)
        ax2.set_title(basin_id + ': Observed ' + text + ' ' + str(yr_start) + '-' + str(yr_end),fontsize=12)
        fig.subplots_adjust(hspace=0.3)
        fig_out = out_dir + basin_id + fig_name + '.png'
    else:
        fig_out = out_dir + basin_id + fig_name + '_hydrograph.png'
    plt.savefig(fig_out, dpi=resolution, bbox_inches='tight')
    print 'Figure saved to: ' + out_dir + basin_id + '_' + label + '.png'
    plt.close()
# Report completion and the finishing timestamp.
print 'Finished!'
print datetime.datetime.now()
from bs4 import BeautifulSoup
import requests
import unicodedata

# Fetch the Python 2 built-in functions reference page.
r = requests.get("https://docs.python.org/2/library/functions.html")
data = r.text
# Specify the parser explicitly: BeautifulSoup(data) without a parser
# argument emits a warning and may pick different parsers on different
# systems, producing inconsistent results.
soup = BeautifulSoup(data, "html.parser")

# Collect the name of every documented built-in.
names = []
for name in soup.findAll("tt", {"class" : "descname"}):
    names.append(str(''.join(name.findAll(text=True))))

# Collect the description paragraphs (the redundant 'descs = []'
# pre-initialisation was dead code and has been dropped).
descs = soup.select('div dl dd p')
for desc in descs:
    print(desc)

print(names)
print(descs)
|
#All code is owned by https://github.com/QuantzLab/ with an Apache 2.0 liscence
# Read the (inclusive) upper bound for the prime listing from stdin.
n = int(input("Till how much do you want prime numbers?"))
def isPrime(n):
    """Return True if n is prime, False otherwise.

    Uses trial division up to sqrt(n): the original checked every value
    in 2..n-1 (O(n) per call); checking only odd divisors up to the
    square root gives identical answers in O(sqrt(n)).
    """
    # Corner cases: primes are integers greater than 1.
    if n <= 1:
        return False
    if n <= 3:          # 2 and 3 are prime
        return True
    if n % 2 == 0:      # even numbers > 2 are composite
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True


def printPrime(n):
    """Print every prime in [2, n], space-separated on one line."""
    for i in range(2, n + 1):
        if isPrime(i):
            print(i, end=" ")
if __name__ == "__main__" :
    # Entry point: list all primes up to the user-supplied bound n.
    printPrime(n)
|
"""将 xml 文件按照类别生成多个标注文件,每个文件都是 txt 文件,包含该类别的框信息
@Author: patrickcty (Tianyang Cheng)
@Filename: separate_classes_from_xml.py
"""
import os
import xml.etree.ElementTree as ET
from collections import defaultdict
from .make_dir_if_not_exist import make_dir_if_not_exists
def generate_txt(xml_dir, target_dir, image_path):
    """Split XML annotations into one txt label file per class.

    :param xml_dir: directory containing the XML annotation files
    :param target_dir: directory where the per-class txt files are written
    :param image_path: directory containing the images the XMLs refer to
    """
    per_class = xml_to_txt_sep(xml_dir, image_path)
    make_dir_if_not_exists(target_dir)
    for classname, rows in per_class.items():
        out_path = os.path.join(target_dir, '{}.txt'.format(classname))
        with open(out_path, 'w') as out:
            out.writelines(row + '\n' for row in rows)
        print('Generate {}.txt successfully.'.format(classname))
def xml_to_txt_sep(path, image_path):
    """Parse every XML annotation under ``path``, grouped by class.

    Returns a dict mapping class name -> list of annotation strings, one
    string per image, each formatted as
    ``<image path> xmin,ymin,xmax,ymax,class_id [...]``.
    """
    per_class = defaultdict(list)
    for entry in os.scandir(path):
        if not (entry.is_file() and entry.name.endswith('.xml')):
            continue
        root = ET.parse(os.path.join(path, entry.name)).getroot()
        image_file = root.find('filename').text
        rows = defaultdict(str)
        for obj in root.findall('object'):
            classname = obj[0].text
            # First box of this class in this file: start the row with
            # the image path.
            if rows[classname] == '':
                rows[classname] = os.path.join(image_path, image_file)
            # obj[4] is the <bndbox> element: xmin, ymin, xmax, ymax.
            box = (obj[4][0].text, obj[4][1].text, obj[4][2].text,
                   obj[4][3].text, '0')
            rows[classname] = rows[classname] + ' ' + ','.join(box)
        for classname, row in rows.items():
            per_class[classname].append(row)
    return per_class
def xml_to_txt(path, target_dir, image_path):
    """Write a single label.txt combining every XML annotation under ``path``.

    Each line is ``<image path> xmin,ymin,xmax,ymax,0 [...]``; the object
    class is ignored. Returns the list of lines written.
    """
    rows = []
    for entry in os.scandir(path):
        if not (entry.is_file() and entry.name.endswith('.xml')):
            continue
        root = ET.parse(os.path.join(path, entry.name)).getroot()
        row = os.path.join(image_path, root.find('filename').text)
        for obj in root.findall('object'):
            # obj[4] is the <bndbox> element: xmin, ymin, xmax, ymax.
            box = (obj[4][0].text, obj[4][1].text, obj[4][2].text,
                   obj[4][3].text, '0')
            row = row + ' ' + ','.join(box)
        rows.append(row)
    make_dir_if_not_exists(target_dir)
    with open(os.path.join(target_dir, 'label.txt'), 'w') as f:
        f.writelines(row + '\n' for row in rows)
    print('Generate label.txt successfully.')
    return rows
"""
This is a Python 3 script to convert the microscope XML documentation into markdown
"""
import xml.etree.ElementTree as ET
import sys
import os
import shutil
import re
import time
def namify(title):
    """Generate a sensible filename from a title: spaces become
    underscores, other non-alphanumeric characters are dropped, and the
    result is lowercased (underscores are preserved by ``\\W``)."""
    return re.sub(r'\W+', '', title.replace(" ", "_")).lower()
def html_strip(html, pad_l=True, pad_r=True, none_to_empty_string=True):
    """Collapse leading/trailing whitespace to at most a single space.

    HTML is whitespace-insensitive-ish (any run of whitespace renders as
    one space), so when converting to markdown any leading or trailing
    whitespace is replaced with a single space. Set ``pad_l`` or
    ``pad_r`` to ``False`` to suppress the corresponding pad space.
    ``None`` input becomes ``""`` when ``none_to_empty_string`` is set.
    """
    if html is None and none_to_empty_string:
        html = ""
    left = html.lstrip()
    if pad_l and len(left) != len(html):
        left = " " + left
    out = left.rstrip()
    if pad_r and len(out) != len(left):
        out += " "
    return out
def safe_html_to_markdown(element, prefix="", recursion_count=0, links_to_convert={}):
"""Take an XML Element and turn its contents into MarkDown, respecting safe HTML tags.
For this function, "safe" HTML tags are ["b","i","ul","ol","li","p","a","pre","code"]
This function is **NOT production-ready** or guaranteed in any way. It should make a
decent stab at converting, but I have not handled all edge cases!
Arguments:
prefix: string (optional, default "")
This string will be prefixed to every line output, which allows nested lists etc.
recursion_count: (integer, default 0)
The recursion count is used internally to avoid infinite loops
"""
assert recursion_count < 100, "Exceeded maximum recursion depth converting HTML to markdown"
rargs = {"recursion_count":recursion_count + 1, "links_to_convert":links_to_convert}
md = html_strip(element.text, pad_l=False) # Start with the text immediately following the opening tags
inlines = {'b':'**', 'strong':'**', 'em':'*', 'i':'*', 'code':'`', 'u':"__"}
lists = {'ul':'* ', 'ol':'1. '}
for e in element:
tag = e.tag.lower()
if tag in inlines: # Handle emphasis (b/i/em/strong/u) and code
md += inlines[tag] + safe_html_to_markdown(e, prefix, **rargs) + inlines[tag]
if tag in lists: # Lists are more complicated - we add an appropriate prefix to each <li>
for item in e:
if item.tag.lower() == "li":
md += "\n" + prefix + lists[tag] + safe_html_to_markdown(item, prefix + " ", **rargs)
md += "\n" + prefix
if tag == 'p':
md += "\n" + prefix + safe_html_to_markdown(e, prefix, **rargs) + "\n\n" + prefix
if tag == 'br':
md += "\n\n" + prefix
if tag == "pre":
#TODO: think about what happens to tags in here (though DocuBricks doesn't permit them anyway)
# Currently, we use `"".join(e.itertext())` to strip the tags out and get some text.
md += "\n"
for line in "".join(e.itertext()).split("\n"):
md += "\n" + prefix + " " + line
md += "\n\n" + prefix
if tag == "a":
href = e.attrib['href']
if href in links_to_convert:
href = links_to_convert[href]
md += "[" + safe_html_to_markdown(e, prefix, **rargs) + "](" + href + ")"
md += html_strip(e.tail) # append any text that happens after the current tag
return md
def step_by_step_instructions(element, sectionlevel="##", to_markdown=safe_html_to_markdown):
    """Render a <StepByStepInstruction> hierarchy to markdown, one section per <step>."""
    parts = []
    for step in element:
        assert step.tag.lower() == "step", "instructions sections can only contain steps"
        parts.append(sectionlevel + " Step\n")
        parts.append(to_markdown(step.find("description")))
        parts.append("\n")
        parts.append(media_section(step))
        parts.append("\n\n")
    return "".join(parts)
def media_section(element, title="Media", sectionlevel="###"):
    """Extract *element*'s <media> section and render its <file> entries as a
    markdown bullet list; returns "" when there is nothing to show.
    """
    output = ""
    # BUG FIX: was ``if element.find("media"):`` — relying on Element
    # truthiness, which is deprecated (an element with no children tests
    # False).  Test against None explicitly instead.
    media = element.find("media")
    if media is not None:
        media_files = media.findall("file")
        if len(media_files) > 0:
            output += sectionlevel + " " + title + "\n"
            for f in media_files:
                # TODO: the file reference itself is not rendered yet
                output += "* \n"
            output += "\n"
    return output
def requirements_subsections(requirements, id_to_file):
    """Render a list of <function> requirement elements as markdown bullet lists.

    Requirements with at least one implementation of type "brick" go under
    "Assemblies"; everything else under "Parts".  Implementation ids are
    resolved to file links through *id_to_file*.
    """
    bricks = []
    for req in requirements:
        for imp in req.findall("implementation"):
            if imp.attrib['type'] == "brick" and req not in bricks:
                bricks.append(req)
    others = [req for req in requirements if req not in bricks]
    output = ""
    for title, group in (("Assemblies", bricks), ("Parts", others)):
        if not group:
            continue
        output += "## " + title + "\n"
        for req in group:
            output += "* "
            quantity = req.find("quantity")
            if quantity is not None:
                output += quantity.text + " of "
            imps = req.findall("implementation")
            if not imps:
                output += req.find("description").text
            elif len(imps) == 1:
                output += "[" + req.find("description").text + "]"
                output += "(" + "./" + id_to_file[imps[0].attrib['id']] + ")"
            else:
                # several implementations: link to them all using [[]]-style links
                links = ["[./{}]".format(id_to_file[imp.attrib['id']]) for imp in imps]
                output += req.find("description").text + "(" + ", ".join(links) + ")"
            output += "\n"
        output += "\n"
    return output
if __name__ == "__main__":
    doc = ET.parse("./openflexure microscope.docubricks.xml")
    root = doc.getroot()
    output_folder = "./output"
    # Start from a clean output tree on every run.
    if os.path.exists(output_folder) and os.path.isdir(output_folder):
        shutil.rmtree(output_folder)
        time.sleep(0.1)  # without this, Windows barfs and you have to run the script twice...
    os.mkdir(output_folder)
    os.mkdir(os.path.join(output_folder, "parts"))
    # DocuBricks uses a lot of ID numbers, we convert these to filepaths/URLs.
    # This is the only mapping between filenames and IDs we should be using...
    id_to_file = {}
    for brick in root.iter("brick"):
        brick_id = brick.attrib['id']
        assert brick_id not in id_to_file, "There is a duplicate ID in the input file ({}).".format(brick_id)
        id_to_file[brick_id] = namify(brick.find("name").text)
    for e in list(root.iter("physical_part")) + list(root.iter("part")):
        part_id = e.attrib['id']
        assert part_id not in id_to_file, "There is a duplicate ID in the input file ({}).".format(part_id)
        name = e.find("name")
        if name is None:
            # older files have no <name>; fall back to the description
            name = e.find("description")
        id_to_file[part_id] = "parts/" + namify(name.text)
    # In-document anchors (#brick_N / #part_N) become relative file links.
    links_to_convert = {"#" + ("part" if v.startswith("parts/") else "brick") + "_" + k: v
                        for k, v in id_to_file.items()}

    def markdown_converter(links_to_convert, root="./"):
        """Build a ``to_markdown`` function with link conversion baked in.

        Suitable for top-level files in the hierarchy by default; for files
        nested *n* levels deep, pass ``"../" * n`` as *root*.
        """
        def to_markdown(element):
            """Convert *element* to markdown, prefixing converted links with *root*."""
            return safe_html_to_markdown(element,
                                         links_to_convert={k: root + v
                                                           for k, v in links_to_convert.items()})
        return to_markdown

    # One markdown file per brick, at the top level of the output folder.
    to_markdown = markdown_converter(links_to_convert, root="./")
    for brick in root.iter("brick"):
        title = brick.find("name").text
        fname = id_to_file[brick.attrib['id']]
        with open(os.path.join(output_folder, fname + ".md"), "w") as file:
            file.write("# " + title + "\n")
            if brick.find("abstract") is not None:
                file.write("" + to_markdown(brick.find("abstract")) + "\n\n")
            if brick.find("long_description") is not None:
                file.write("" + to_markdown(brick.find("long_description")) + "\n\n")
            requirements = brick.findall("function")
            if len(requirements) > 0:
                file.write("# Requirements\n")
                file.write(requirements_subsections(requirements, id_to_file))
            file.write(media_section(brick, sectionlevel="##"))
            if brick.find("assembly_instruction") is not None:
                file.write("# Assembly Instructions\n")
                file.write(step_by_step_instructions(brick.find("assembly_instruction"), to_markdown=to_markdown))
            if brick.find("notes") is not None:
                # BUG FIX: this previously rendered the *abstract* again
                # instead of the notes element it had just tested for.
                file.write("# Notes\n" + to_markdown(brick.find("notes")) + "\n\n")
    # One markdown file per part, written into output/parts/ (hence root="../").
    to_markdown = markdown_converter(links_to_convert, root="../")
    parts = list(root.iter("physical_part")) + list(root.iter("part"))
    for part in parts:
        # FIX: replaced a bare ``except:`` (which hid every error) with an
        # explicit check for the missing <name> of older DocuBricks files.
        name_element = part.find("name")
        if name_element is not None:
            title = name_element.text
        else:
            title = part.find("description").text
        fname = id_to_file[part.attrib['id']]
        with open(os.path.join(output_folder, fname + ".md"), "w") as file:
            file.write("# " + title + "\n")
            if part.find("description") is not None:
                file.write("" + to_markdown(part.find("description")) + "\n\n")
            file.write("## Details\n")
            # Optional metadata elements and their human-readable labels.
            metadata = {"supplier": "Supplier",
                        "supplier_part_num": "Supplier's part number",
                        "manufacturer_part_num": "Manufacturer's part number",
                        "url": "URL",
                        "material_amount": "Material used",
                        "material_unit": "Material units",
                        }
            for k, title in metadata.items():
                if part.find(k) is not None:
                    if part.find(k).text is not None:
                        file.write("* **" + title + ":** " + part.find(k).text + "\n")
            file.write("\n")
            file.write(media_section(part, sectionlevel="##"))
            if part.find("manufacturing_instruction") is not None:
                file.write("# Manufacturing Instructions\n")
                file.write(step_by_step_instructions(part.find("manufacturing_instruction"), to_markdown=to_markdown))
|
#!/usr/bin/env python2
# coding=utf-8
__author__ = 'Hanzhiyun'
# returns the factorial of the argument "number"
def factorial(number):
    """Return number! computed iteratively (1 for any number <= 1)."""
    result = 1
    for k in range(2, number + 1):
        result *= k
    return result
# Iterative alternative, kept for reference:
# def factorial(number):
#     product = 1
#     for i in range(number):
#         product *= (i + 1)
#     return product

# SECURITY(review): this is a Python 2 script, and Python 2's input()
# eval()s whatever the user types; int(raw_input(...)) would be safer.
user_input = input("Enter a non-negative integer to take the factorial of: ")
factorial_of_user_input = factorial(user_input)
print factorial_of_user_input
|
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.basemap import Basemap

# Open the MERRA-2 monthly-mean chemistry file.
# FIX: raw string — the old literal contained '\P', '\y', '\M', which are
# invalid escape sequences (DeprecationWarning in Python 3).
dataset = Dataset(r'E:\PycharmProjects\yiyue\MERRA2_400.tavgM_2d_chm_Nx.202101.nc4', mode='r', format='NETCDF4')
# Handy inspection one-liners:
# print(dataset)                              # global metadata
# print(dataset.variables['lon'])             # one variable's metadata
# print(dataset.variables['lon'].ncattrs())   # its attributes
# print(dataset.variables['lon'][:])          # its values
print('变量:', dataset.variables.keys())
lons = dataset.variables['lon'][:]
lats = dataset.variables['lat'][:]
# Total column ozone for the first (and only) time step.
TO3 = dataset.variables['TO3'][0, :, :]
print(dataset.variables['TO3'][0, :, :])

# Plot on an Eckert IV world map.
# FIX: renamed from ``map``, which shadowed the builtin; also removed the
# duplicate numpy/matplotlib/basemap imports that appeared here.
bmap = Basemap(resolution='l', projection='eck4', lat_0=0, lon_0=0)
lon, lat = np.meshgrid(lons, lats)
xi, yi = bmap(lon, lat)
# Plot data.
cs = bmap.pcolor(xi, yi, np.squeeze(TO3), vmin=np.min(TO3), vmax=np.max(TO3), cmap=cm.jet)
cs.set_edgecolor('face')
# Grid lines.
bmap.drawparallels(np.arange(-90., 90., 15.), labels=[1, 0, 0, 0], fontsize=5)
bmap.drawmeridians(np.arange(-180., 180., 30.), labels=[0, 0, 0, 1], fontsize=4)
# Coastlines, states and country boundaries.
bmap.drawcoastlines()
bmap.drawstates()
bmap.drawcountries()
# Colorbar.
cbar = bmap.colorbar(cs, location='bottom', pad="10%")
# cbar.set_label('K')
cbar.ax.tick_params(labelsize=10)
plt.title('MERRA-2 Global total_column_ozone (2021-01)')
# Save figure as PDF:
# plt.savefig('MERRA2_2m_airTemp_TEST.pdf', format='pdf', dpi=360)
plt.show()
# Tutorial this was based on:
# https://theonegis.blog.csdn.net/article/details/50805408
|
# Demonstration of list aliasing: assignment copies the *reference*, not
# the list, so a mutation through either name is visible through both.
first = [1, 2, 3, 4, 5]
second = first          # no copy is made here
second.append(6)
print(first)            # [1, 2, 3, 4, 5, 6]
print(second)           # [1, 2, 3, 4, 5, 6] — same object
|
# Compare "out_single" (expected) against "out" (actual) line by line and
# report the first mismatch.
single = []
pipeline = []
with open("out_single", "r") as f:
    for line in f:
        single.append(line.strip())
# Read at most len(single) lines so the comparison below is positional.
with open("out", "r") as f:
    for line in f:
        if len(pipeline) == len(single):
            break
        pipeline.append(line.strip())
for i, expected in enumerate(single):
    # BUG FIX: previously indexed pipeline[i] unguarded, raising IndexError
    # when "out" had fewer lines than "out_single".
    if i >= len(pipeline):
        print("single: " + expected)
        print("pipeline: <missing>")
        break
    if expected != pipeline[i]:
        print("single: " + expected)
        print("pipeline: " + pipeline[i])
        break
# GYP-style variables file: a bare Python dict literal read by the build
# system.  NOTE(review): the nested quoting ('"fromhome"') appears to be
# deliberate — the variable's value includes the double quotes; confirm
# against the consuming .gyp file before "fixing" it.
{
    'variables': {
        'foo': '"fromhome"',
    },
}
|
from setuptools import setup, find_namespace_packages
from typing import List
from pathlib import Path
import re
# Packages needed to run setup.py itself.
setup_requires = [
    'setuptools>=54.2.0',
]
# Runtime dependencies of the client library.
install_requires = [
    'aiohttp~=3.8.0',
    'aiotusclient~=0.1.4',
    'appdirs~=1.4.4',
    'async_timeout>=4.0',
    'attrs>=20.3',
    'click>=8.0.1',
    'colorama>=0.4.4',
    'humanize>=3.1.0',
    'janus>=0.6.1',
    'multidict>=5.1.0',
    'python-dateutil>=2.8.2',
    'PyYAML~=5.4.1',
    'rich~=12.2',
    'tabulate>=0.8.9',
    'tqdm>=4.61',
    'yarl>=1.6.3',
    'backend.ai-cli~=0.6.0',
]
# Tools used to build and publish releases.
build_requires = [
    'wheel>=0.37.1',
    'twine>=4.0.0',
    'towncrier~=21.9.0',
]
# Test-suite dependencies.
test_requires = [
    'pytest~=7.0.1',
    'pytest-cov',
    'pytest-mock',
    'pytest-asyncio>=0.18.2',
    'aioresponses>=0.7.3',
    'codecov',
]
# Linting and static type-checking toolchains.
lint_requires = [
    'flake8>=4.0.1',
    'flake8-commas>=2.1',
]
typecheck_requires = [
    'mypy>=0.950',
    'types-click',
    'types-python-dateutil',
    'types-tabulate',
]
# Extras for local development (currently empty on purpose).
dev_requires: List[str] = [
    # 'pytest-sugar>=0.9.1',
]
# Documentation build dependencies.
docs_requires = [
    'Sphinx~=3.4.3',
    'sphinx-intl>=2.0',
    'sphinx_rtd_theme>=0.4.3',
    'sphinxcontrib-trio>=1.1.0',
    'sphinx-autodoc-typehints~=1.11.1',
    'pygments~=2.7.4',
]
def read_src_version():
    """Read ``__version__`` from the package's __init__.py without importing it."""
    init_py = (Path(__file__).parent / 'src' /
               'ai' / 'backend' / 'client' / '__init__.py')
    match = re.search(r"^__version__ = '([^']+)'$",
                      init_py.read_text(encoding='utf-8'),
                      re.MULTILINE)
    assert match is not None, 'Could not read the version information!'
    return match.group(1)
setup(
    name='backend.ai-client',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=read_src_version(),
    description='Backend.AI Client for Python',
    long_description=Path('README.rst').read_text(encoding='utf-8'),
    url='https://github.com/lablup/backend.ai-client-py',
    author='Lablup Inc.',
    author_email='joongi@lablup.com',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Operating System :: POSIX',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Environment :: No Input/Output (Daemon)',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development',
    ],
    package_dir={'': 'src'},
    # FIX: ``include`` must be an iterable of glob patterns. Passing the
    # bare string 'ai.backend.*' splats it into per-character patterns
    # (including a lone '*'), which only worked by accident.
    packages=find_namespace_packages(where='src', include=['ai.backend.*']),
    python_requires='>=3.8',
    setup_requires=setup_requires,
    install_requires=install_requires,
    extras_require={
        'dev': dev_requires,
        'build': build_requires,
        'test': test_requires,
        'lint': lint_requires,
        'typecheck': typecheck_requires,
        'docs': docs_requires,
    },
    data_files=[],
    package_data={
        'ai.backend.client': ['py.typed'],
    },
    entry_points={
        'backendai_cli_v10': [
            '_ = ai.backend.client.cli.main:main',
        ],
    },
)
|
import random
import sys
# v, w, k, n, a - 0-9 - (1,2)x(1,2,3)
def gen_callsign(call_seed):
    """Deterministically generate a pseudo-random amateur-radio callsign.

    The RNG is re-seeded from *call_seed*, so the same seed always yields
    the same callsign: a prefix letter (V/W/K/N/A), an optional second
    letter, one digit, then one to three suffix letters.
    """
    random.seed(call_seed)
    # Draw in a fixed order so results stay reproducible per seed.
    n_letter = random.randint(0, 4)
    n_chars1 = random.randint(0, 1)
    n_chars2 = random.randint(1, 3)
    n_sec = random.randint(0, 4)
    letters = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
    parts = [['V', 'W', 'K', 'N', 'A'][n_letter]]
    if n_chars1 > 0:
        parts.append(random.choice(letters))
    parts.append(str(n_sec))
    for _ in range(n_chars2):
        parts.append(random.choice(letters))
    return ''.join(parts)
def gen_class(class_seed):
    """Deterministically generate a Field Day class like '3A' (seeded RNG)."""
    random.seed(class_seed)
    station_type = random.choice(['A', 'B', 'C', 'D', 'E', 'F'])
    # Roughly 1 in 6 stations get a large transmitter count (0-30); the
    # rest get 0-9.  (Draw order matters for reproducibility.)
    if random.randint(0, 5) == 0:
        count = random.randint(0, 30)
    else:
        count = random.randint(0, 9)
    return str(count) + station_type
def gen_section(section_seed):
    """Deterministically pick an ARRL/RAC section abbreviation (seeded RNG)."""
    sections = ['AL','AK','AB','AZ','AR','BC','CO','CT','DE','EB','EMA','ENY','EPA','EWA','GA','ID','IL','IN','IA','KS','KY','LAX','LA','ME','MB','MAR','MDC','MI','MN','MS','MO','MT','NE','NV','NH','NM','NL','NC','ND','NTX','NFL','NNJ','NNY','NT','NLI','OH','OK','ON','ORG','OR','PAC','PR','QC','RI','SV','SDG','SF','SJV','SB','SCV','SK','SC','SD','STX','SFL','SNJ','TN','UT','VT','VI','VA','WCF','WTX','WV','WMA','WNY','WPA','WWA','WI','WY']
    random.seed(section_seed)
    return random.choice(sections)
if(sys.argv[1] != "check"):
for i in range(0,300):
print gen_callsign(i) + "\t" + gen_class(i) + "\t" + gen_section(i)
else:
import MySQLdb
db = MySQLdb.connect(host="localhost", user="root")
c = db.cursor()
c.execute("use FieldDay;");
for i in range(0,300):
c.execute("SELECT ContestClass, Section.Abbr FROM Station JOIN Section ON (Station.SectionID=Section.SectionID) WHERE Station.StationCall = \'" + gen_callsign(i) + "\';")
row = c.fetchone()
if(row == None):
print "Error... station not in database: " + gen_callsign(i) + "\n"
elif(row[0] != gen_class(i)):
print "Error... class mismatch: " + gen_class(i) + " != " + row[0] + "\n"
elif(row[1] != gen_section(i)):
print "Error... section mismatch: " + gen_section(i) + " != " + row[1] + "\n"
|
class Queue:
    """A bounded FIFO queue backed by a list.

    ``front`` is always 0 (items are removed from the head of the list);
    ``rear`` tracks the number of currently queued items, so it is both
    the insertion index and the fill level.
    """
    def __init__(self, size):
        self.front = 0
        self.rear = 0
        self.items = []
        self.size = size

    def add(self, item):
        """Append *item*; raises if the queue already holds ``size`` items."""
        if self.is_full():
            raise Exception("Queue is full")
        self.items.insert(self.rear, item)
        self.rear += 1

    def remove(self):
        """Pop and return the oldest item; raises if the queue is empty."""
        if self.is_empty():
            raise Exception("Queue is empty")
        item = self.items[self.front]
        del self.items[self.front]
        # BUG FIX: rear was never decremented here, so after ``size`` total
        # add() calls the queue reported full forever, even when drained.
        self.rear -= 1
        return item

    def peek(self):
        """Return (without removing) the oldest item."""
        return self.items[self.front]

    def is_full(self):
        return self.rear == self.size

    def is_empty(self):
        return len(self.items) == 0
|
# Operators
# Arithmetic: +, -, *, /, //, %, **  (note: // is floor division)
# Assignment: =, +=, -=, *=, /=, //=, %=, **=
# Comparison: ==, !=, <>, >, >=, <, <=  (<> is Python 2 only)
# Logical: and, or, not
# Membership: in, not in
# Identity: is, is not
# Bitwise: &, |, >>, <<, ^, ~
# Precedence: arithmetic > comparison > logical > assignment
a = 3
b = 5
print(b // a)  # 1 (floor of 5/3)
a *= 3  # a is now 9
print(a > b)  # True, 9 > 5
print(3>4 and 4>2)  # False (left operand already False)
|
import os
class Config():
    """Flask-style configuration object."""
    # SECURITY(review): hard-coded credentials committed to source control —
    # these belong in a secrets store or environment variables, not in code.
    REGISTERED_USERS = {
        # variable names in all caps indicate that the variable is a constant
        'kevinb@codingtemple.com': {'name': 'Kevin', 'password': 'abc123'},
        'johnl@codingtemple.com': {'name': 'John', 'password': 'Colt45'},
        'joel@codingtemple.com': {'name': 'Joel', 'password': 'MorphinTime'}
    }
    # pip install flask forms
    # Used by Flask/Flask-WTF; falls back to a hard-coded default when the
    # SECRET_KEY environment variable is unset.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'You-will-never-guess'
import random
import copy
from game import Game
# Memoised score tables, one per player id (0 and 1).  Keys are board
# hashes (see TreeBot.hashBoard); values are per-square score lists.
tree = {
    0: {},
    1: {}
}
# Search the tree and back trace victory
class TreeBot:
    """Tic-tac-toe bot that exhaustively scores every reachable board.

    Scores for each (player, board) pair are memoised in the module-level
    ``tree`` dict, keyed by a string hash of the board.
    """
    @classmethod
    def computeTree(Class, player=None):
        """Populate the score tree for *player*, starting from an empty board."""
        board = [
            None, None, None,
            None, None, None,
            None, None, None]
        Class.scoreMoves(board, player, 0)

    def getMove(self, board, whichPlayerAmI):
        """Return the index of a best legal move for *whichPlayerAmI*."""
        if not tree[whichPlayerAmI]:
            self.computeTree(whichPlayerAmI)
        moveScores = tree[whichPlayerAmI][self.hashBoard(board)]
        legalMoveScores = [x for i, x in enumerate(moveScores) if board[i] is None]
        maxScore = max(legalMoveScores)
        bestMoves = [i for i, j in enumerate(moveScores) if j == maxScore and board[i] is None]
        # Break ties randomly so play does not look deterministic.
        return random.choice(bestMoves)

    @staticmethod
    def hashBoard(board):
        """Serialise a board to a string key; '_' marks empty squares."""
        return ''.join(map(lambda x: '_' if x is None else str(x), board))

    @classmethod
    def scoreMoves(Class, board, whichPlayerAmI, whichPlayersTurnIsIt):
        """Recursively score *board* from *whichPlayerAmI*'s point of view.

        Returns +1 for a won position, -1 for a lost one, 0 for a draw,
        otherwise the minimax value of the best continuation.
        """
        if Game.whoWon(board) == whichPlayerAmI:
            return 1
        elif Game.whoWon(board) == (0 if whichPlayerAmI == 1 else 1):
            return -1
        elif not Game.spacesAreOpen(board):
            return 0
        else:
            if Class.hashBoard(board) in tree[whichPlayerAmI]:
                nextMoves = tree[whichPlayerAmI][Class.hashBoard(board)]
            else:
                nextMoves = Class.descendTree(board, whichPlayerAmI, whichPlayersTurnIsIt)
                tree[whichPlayerAmI][Class.hashBoard(board)] = nextMoves
            if whichPlayerAmI == whichPlayersTurnIsIt:
                legalMoves = [x for i, x in enumerate(nextMoves) if board[i] is None]
                return max(legalMoves)
            else:
                # The opponent mostly plays the move worst for us, nudged by a
                # small bonus when their other replies also favour us.
                legalMoves = [x for i, x in enumerate(nextMoves) if board[i] is None]
                bestMove = min(legalMoves)
                legalMoves.remove(bestMove)
                nonPerfectPlay = 0.1 * float(sum(legalMoves)) / (len(legalMoves) or 1)
                return round(bestMove + nonPerfectPlay, 16)

    @classmethod
    def descendTree(Class, board, whichPlayerAmI, whichPlayersTurnIsIt):
        """Score each legal move from *board*; occupied squares score 0."""
        nextMoves = []
        for i, move in enumerate(board):
            if move is None:
                nextBoard = copy.deepcopy(board)
                nextBoard[i] = whichPlayersTurnIsIt
                # BUG FIX: was ``whichPlayersTurnIsIt is 1`` — identity
                # comparison with an int literal (SyntaxWarning on 3.8+,
                # and only correct due to CPython small-int caching).
                nextPlayer = 0 if whichPlayersTurnIsIt == 1 else 1
                moveValue = Class.scoreMoves(nextBoard, whichPlayerAmI, nextPlayer)
                nextMoves.append(moveValue)
            else:
                nextMoves.append(0)
        return nextMoves
|
import socket
import struct
import time
import os
import threading
def read_msg(buff):
    """Split a raw packet into (sender_id, text).

    The last two bytes carry a little-endian sender id; everything before
    them is the UTF-8 payload.
    """
    sender_id = int.from_bytes(buff[-2:], 'little')
    payload = buff[:-2]
    return sender_id, str(payload, 'utf-8')
def udp_m_receive_fun(socket):
    """Receive datagrams on *socket* forever and print them as '(id) msg'.

    NOTE: the parameter shadows the imported ``socket`` module; the name is
    kept because existing callers pass the socket positionally.
    """
    try:
        while True:
            buff, _addr = socket.recvfrom(buff_size)
            sid, msg = read_msg(buff)
            print('(', sid, ') ', msg)
    except Exception as exc:
        # FIX: was a bare ``except:`` that silently swallowed every error
        # (including KeyboardInterrupt); narrow it and report the cause.
        print("exception occurred 2:", exc)
    finally:
        socket.close()
def tcp_receive_fun():
    """Drain the TCP socket, printing messages until the server closes it.

    Calls os._exit(0) on the way out, which also terminates the blocking
    input() loop running on the main thread.
    """
    try:
        buff = socket_tcp.recv(buff_size)
        while buff:
            sid, msg = read_msg(buff)
            print('(', sid, ') ', msg)
            buff = socket_tcp.recv(buff_size)
        # recv returned b'': the server closed the connection.
        print('lost connection with server')
    except Exception as exc:
        # FIX: was a bare ``except:`` that silently swallowed every error;
        # narrow it and report the cause.
        print("exception occurred 1:", exc)
    finally:
        socket_tcp.close()
        os._exit(0)
def send_fun():
    """Read commands from stdin and forward messages until 'close' is typed.

    'U' sends the next line over UDP, 'M' over multicast; any other line is
    sent over TCP, tagged with this client's id bytes.
    """
    while True:
        msg = input()
        if msg == 'close':
            break
        if msg == 'U':
            msg = input()
            socket_udp.sendto(bytes(msg, 'utf-8'), (serverIP, serverPort))
        elif msg == 'M':
            msg = input()
            socket_m.sendto(bytes(msg, 'utf-8') + mname, (multiIP, multiPort))
        else:
            socket_tcp.send(bytes(msg, 'utf-8') + sname)
    socket_tcp.shutdown(2)
if __name__ == '__main__':
    # Endpoints: TCP/UDP chat server plus a multicast group.
    serverIP = '127.0.0.1'
    multiIP = '225.0.0.0'
    serverPort = 9009
    multiPort = 9010
    buff_size = 1024
    # Multicast receive socket: bind the group port and join the group.
    socket_m = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    socket_m.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    socket_m.bind(('', multiPort))
    group = socket.inet_aton(multiIP)
    mreq = struct.pack('4sL', group, socket.INADDR_ANY)
    socket_m.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    # Two-byte little-endian tag appended to multicast messages.
    mname = multiPort.to_bytes(2, 'little')
    # TCP connection to the server; the ephemeral local port doubles as
    # this client's id.
    socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket_tcp.connect((serverIP, serverPort))
    port = socket_tcp.getsockname()[1]
    # UDP socket bound to the same port so the server can match it to us.
    socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    socket_udp.bind(('', port))
    sname = port.to_bytes(2, 'little')  # get socket id - port number
    print('PYTHON CLIENT id: ' + str(int.from_bytes(sname, 'little')))
    # Three daemon receiver threads: TCP, UDP and multicast.
    tcp_client_thread = threading.Thread(target=tcp_receive_fun, daemon=True)  # new thread to receive msgs
    tcp_client_thread.start()
    udp_client_thread = threading.Thread(target=udp_m_receive_fun, args=(socket_udp,), daemon=True)  # new thread to receive msgs
    udp_client_thread.start()
    m_client_thread = threading.Thread(target=udp_m_receive_fun, args=(socket_m,), daemon=True)  # new thread to receive msgs
    m_client_thread.start()
msg2 = """
.-.' '.-.
.-( \ / )-.
/ '..oOOo..' |
, \.--.oOOOOOOo.--./
|\ , ( :oOOOOOOo: )
_\.\/| /'--'oOOOOOOo'--'|
'-.. ;/| \ .''oOOo''. /
.--`'. :/|'-( / \ )-'
'--. `. / //'-'.__.'-;
`'-,_';// , /|
'(( |\/./_
\\ . |\; ..-'
\\ |\: .'`--.
\\, .' .--'
))'_,-'`
jgs //-'
//
//
|/
"""
    # Run the interactive send loop on the main thread (blocks until 'close').
    send_fun()
# -*- coding: utf-8 -*-
from __future__ import print_function
import os,time
from collections import OrderedDict
import pygame, pygame.image
import OpenGL.GL as gl
import OpenGL.GLU as glu
import numpy as np
import itertools
import fractions
import copy
import sys
import shelve
import scipy.interpolate
import resources
from PIL import Image
# Global runtime settings (ordered so they display predictably).
SETTINGS = OrderedDict()
SETTINGS['debug'] = False
# Exponent applied when pre-compensating intensities for display gamma
# (see the 'neutral-gray' entry below).
inv_gamma = 0.43
# Named RGB colors as 0.0-1.0 float triples.
COLORS = {
    'black':   (0.0, 0.0, 0.0),
    'red':     (1.0, 0.0, 0.0),
    'green':   (0.0, 1.0, 0.0),
    'blue':    (0.0, 0.0, 1.0),
    'cyan':    (0.0, 1.0, 1.0),
    'magenta': (1.0, 0.0, 1.0),
    'yellow':  (1.0, 1.0, 0.0),
    'white':   (1.0, 1.0, 1.0),
    # mid-gray raised by inv_gamma so that it *appears* half-bright
    'neutral-gray': (0.5**inv_gamma, 0.5**inv_gamma, 0.5**inv_gamma)
}
# Screen corners in normalized device coordinates (x right, y up).
SCREEN_LT = np.array((-1.0, 1.0))
SCREEN_LB = np.array((-1.0, -1.0))
SCREEN_RB = np.array((1.0, -1.0))
SCREEN_RT = np.array((1.0, 1.0))
LARGE_WIDTH = 0.5  # fraction of total screen length
VSYNC_PATCH_WIDTH_DEFAULT = 0.225
VSYNC_PATCH_HEIGHT_DEFAULT = 0.225
DEFAULT_FLASH_RATE = 17  # Hz
# Default monitor whose gamma calibration file is loaded.
MONITOR_NAME = 'benq'
#-------------------------------------------------------------------------------
# utility functions
def bell(blocking=False):
    """Play the bell sample; when *blocking*, wait for playback to finish."""
    pygame.mixer.init()
    channel = pygame.mixer.Sound(resources.get_bellpath("bell.wav")).play()
    if not blocking:
        return
    while channel.get_busy():
        pygame.time.delay(100)
def sound_alarm(blocking=False):
    """Play the alarm sample; when *blocking*, wait for playback to finish."""
    pygame.mixer.init()
    channel = pygame.mixer.Sound(resources.get_bellpath("Alien_Siren-KevanGC.wav")).play()
    if not blocking:
        return
    while channel.get_busy():
        pygame.time.delay(100)
class UserEscape(Exception):
    """Raised when the user aborts a running stimulus sequence."""
    def __init__(self, msg="User stopped the sequence"):
        super().__init__(msg)
# write a png file from GL framebuffer data
def write_frame_to_png(name, frame_num, w, h, data, outdir=None):
    """Save one w-by-h RGBA framebuffer grab as <name>_<frame_num>.png.

    *outdir* defaults to *name* and is created on first use.
    """
    image = Image.frombuffer("RGBA", (w, h), data, "raw", "RGBA", 0, 0)
    fname = "%s_%05d.png" % (name, frame_num)
    if outdir is None:
        outdir = name
    if not os.path.isdir(outdir):
        # first frame of a recording: make a directory to store it
        os.mkdir(outdir)
    image.save(os.path.sep.join((outdir, fname)))
# got this straight out of stackexchange: http://stackoverflow.com/questions/17084928/how-to-enable-vsync-in-pyopengl/34768964
def enable_VBI_sync_osx():
    """Enable vsync on macOS by setting the CGL swap interval to 1.

    No-op on other platforms; any failure is printed but not fatal.
    """
    if sys.platform != 'darwin':
        return
    try:
        import ctypes
        import ctypes.util
        # Talk to the system OpenGL framework (CGL) directly via ctypes.
        ogl = ctypes.cdll.LoadLibrary(ctypes.util.find_library("OpenGL"))
        v = ctypes.c_int(1)
        # Declare prototypes so ctypes marshals the pointers correctly.
        ogl.CGLGetCurrentContext.argtypes = []
        ogl.CGLGetCurrentContext.restype = ctypes.c_void_p
        ogl.CGLSetParameter.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
        ogl.CGLSetParameter.restype = ctypes.c_int
        context = ogl.CGLGetCurrentContext()
        # 222 is presumably kCGLCPSwapInterval (per the linked answer) —
        # setting it to 1 waits for the vertical retrace on buffer swap.
        ogl.CGLSetParameter(context, 222, ctypes.pointer(v))
    except Exception as e:
        print("Unable to set vsync mode, using driver defaults: {}".format(e))
def load_gamma_calibration(monitor_name=MONITOR_NAME, interp_kind='cubic', show_plot=False):
    """Load a monitor's gamma calibration and return the inverse-gamma curve.

    Returns a scipy interp1d mapping desired intensity -> required input
    level.  Raises ValueError if no calibration shelve file exists for
    *monitor_name*.  *show_plot* renders a diagnostic plot (interactive
    checking only).
    """
    home = os.path.expanduser('~')
    dbPath = os.path.sep.join((home, '.neurodot_present', 'calibrations', monitor_name))
    # shelve.open would silently create a fresh empty db, so insist that a
    # calibration file already exists.
    if not os.path.isfile(dbPath):
        errorstring = str(dbPath) + ' does not exist: Create a calibration file with gamma_utility.py first.'
        raise ValueError(errorstring)
    db = shelve.open(dbPath)
    try:
        needed_inputs = db['input_intensities']
        desired_intensity = db['desired_intensities']
    finally:
        db.close()
    inv_gam_func = scipy.interpolate.interp1d(desired_intensity, needed_inputs, kind=interp_kind)
    if show_plot:
        # Diagnostic plot of the measured points and fitted curve.
        import matplotlib.pyplot as plt
        x_range = np.linspace(0, 1, 100)
        fig = plt.figure(1)
        axis = fig.add_subplot(111)
        axis.scatter(inv_gam_func.x, inv_gam_func.y)
        axis.plot(x_range, [inv_gam_func(x) for x in x_range])
        axis.grid(True)
        axis.set_xlabel('Desired Brightness')
        axis.set_ylabel('Input Intensity')
        axis.set_title('Inverse Gamma Function')
        axis.set_xlim(0, 1)
        axis.set_ylim(0, 1)
        plt.show()
    return inv_gam_func
def correct_gamma(input_color, monitor_name=MONITOR_NAME, interp_kind='linear', **kwargs):
    """Map a desired intensity through the monitor's inverse-gamma curve."""
    curve = load_gamma_calibration(monitor_name=monitor_name, interp_kind=interp_kind, **kwargs)
    return float(curve(input_color))
#-------------------------------------------------------------------------------
# graphics
class Quad:
    """A flat, single-colored quadrilateral drawn with immediate-mode OpenGL."""
    def __init__(self, lt, lb, rb, rt, color=COLORS['white']):
        # Corners in left-top, left-bottom, right-bottom, right-top order.
        self.vertices = np.array((lt, lb, rb, rt))
        self.color = color

    def render(self):
        """Draw the quad; lighting is disabled while drawing and restored after."""
        gl.glLoadIdentity()
        gl.glDisable(gl.GL_LIGHTING)
        try:
            gl.glBegin(gl.GL_QUADS)
            gl.glColor3f(*self.color)
            for vertex in self.vertices:
                gl.glVertex2f(*tuple(vertex))
            gl.glEnd()
        finally:
            gl.glEnable(gl.GL_LIGHTING)
#-------------------------------------------------------------------------------
# math
# functions for converting between coordinate systems
def cart2pol(x, y):
    """Convert Cartesian (x, y) to polar (r, theta); theta in radians."""
    return np.sqrt(x ** 2 + y ** 2), np.arctan2(y, x)
def pol2cart(r, theta):
    """Convert polar (r, theta in radians) to Cartesian (x, y)."""
    return r * np.cos(theta), r * np.sin(theta)
|
# -*- encoding : utf-8 -*-
from enum import Enum, unique
@unique
class Release_version(Enum):
    """Release builds mapped to their internal build-number strings.

    NOTE(review): the values look like tracker/build ids — confirm against
    the issue-tracking system before relying on their meaning.
    """
    B010 = '1335'
    B020 = '1340'
    B030 = '1352'
    B050 = '1353'
    B060 = '1354'
    B070 = '1364'
    B080 = '1365'
    B090 = '1366'
@unique
class Severity(Enum):
    """Bug severity levels; lower value = more severe."""
    Critical = 1
    Major = 2
    Minor = 3
    Tips = 4
@unique
class Task(Enum):
    """Task-category ids used by the tracker."""
    BBT = 162
    SINT = 163
    Other = 0  # Exception; needs to be reclassified as BBT or SINT
    WBIT = 164  # original note: "exinclude" — presumably excluded from stats, TODO confirm
# Mapping of personnel responsible for overdue bugs
# @unique
# class DevlopPerson(Enum):
# #测试人员列表
# PG02005 = '辜林杰'
# P00609 = '陈正伟'
# P00761 = '邓智明'
# P00571 = '张进伟'
#
#
# #开发人员列表
# P00708 = '王国庆'
# P00507 = '宋平'
# P00517 = '赵宇凤'
# P00468 = "许必成"
# P00848 = '张弛'
@unique
class DevlopPerson(Enum):
    """Employee id -> name mapping (class-name typo 'Devlop' kept for
    backward compatibility with existing references)."""
    # Testers
    PG02005 = '辜林杰'
    P00609 = '陈正伟'
    P00761 = '邓智明'
    P00571 = '张进伟'
    # Developers
    P00708 = '王国庆'
    P00507 = '宋平'
    P00756 = '梁勇'
    P00922 = "谢杰"
    P00848 = '张弛'
# print(DevlopPerson["P00848"].value)
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Post, Categories
from .forms import PostForm, EditForm
from django.urls import reverse_lazy, reverse
from django.http import HttpResponseRedirect
# Create your views here.
#CLASS BASED VIEW
class HomeView(ListView):
    """Blog landing page: lists every Post with the category menu in context."""
    model = Post
    # Rendered by fhblog/blog.html (the ListView default would be post_list.html).
    template_name = 'fhblog/blog.html'
    # NOTE: evaluated once at class definition time, like the original.
    cats = Categories.objects.all()

    def get_context_data(self, *args, **kwargs):
        context = super(HomeView, self).get_context_data(*args, **kwargs)
        context['cat_menu'] = Categories.objects.all()
        return context
def CategoryView(request, cats):
    """Show all posts in the category given by the slug *cats* ('-' stands for ' ')."""
    category_posts = Post.objects.filter(category=cats.replace('-', ' '))
    context = {
        'cats': cats.title().replace('-', ' '),
        'category_posts': category_posts,
    }
    return render(request, 'fhblog/category.html', context)
def CategoryListView(request):
    """Render the list of every category."""
    context = {'cat_list': Categories.objects.all()}
    return render(request, 'fhblog/category_list.html', context)
class PostDetailView(DetailView):
    """Single-post page, with the category menu added to the context."""
    model = Post
    template_name = 'fhblog/post_detail.html'

    def get_context_data(self, *args, **kwargs):
        context = super(PostDetailView, self).get_context_data(*args, **kwargs)
        context['cat_menu'] = Categories.objects.all()
        return context
class AddPostView(CreateView):
    """Create a new Post via PostForm."""
    model = Post
    form_class = PostForm
    template_name = 'fhblog/newpost.html'
class EditPostView(UpdateView):
    """Edit an existing Post via EditForm."""
    model = Post
    form_class = EditForm
    template_name = 'fhblog/post_update.html'
class DeletePostView(DeleteView):
    """Confirm-and-delete page for a Post; redirects to 'home' afterwards."""
    model = Post
    template_name = 'fhblog/post_delete.html'
    success_url = reverse_lazy('home')
class AddCategoryView(CreateView):
    """Create a new category; the form exposes every model field."""
    model = Categories
    template_name = 'fhblog/category_add.html'
    fields = '__all__'
import threading
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame as pg
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
FPS = 30  # frame rate of the animation loop
graphs = []  # frames to animate; installed by start_animation_thread
i = 0  # index of the frame drawn next (wraps around in draw())
screen_size = (640, 640)  # window size in pixels
graph_dimensions = (1, 1)  # logical extent of graph coordinates; updated per frame
clock = pg.time.Clock()
def scale_position(pos):
    """Map a graph-space (x, y) point to integer pixel coordinates.

    Graph coordinates are centred on the origin; the screen's y axis
    points down, so y is flipped.
    """
    dim_x, dim_y = graph_dimensions
    size_x, size_y = screen_size
    px = (pos[0] / dim_x + 0.5) * size_x
    py = (0.5 - pos[1] / dim_y) * size_y
    return (int(round(px)), int(round(py)))
def draw_node(screen, position):
    """Draw a node as a filled black circle of radius 5 pixels."""
    pg.draw.circle(screen, BLACK, scale_position(position), 5)
def draw_edge(screen, start, end):
    """Draw an edge as a black line between two graph-space points."""
    pg.draw.line(screen, BLACK, scale_position(start), scale_position(end))
def draw(screen):
    """Render the current graph frame to *screen* and advance to the next.

    Reads and mutates module globals: ``i`` (frame index), ``graphs`` and
    ``graph_dimensions``.
    """
    global i, graphs, graph_dimensions
    screen.fill(WHITE)
    if len(graphs) == 0:
        return
    graph = graphs[i]
    graph.recentre_nodes()  # this is bad — mutates the graph on every frame; TODO move out of the draw path
    graph_dimensions = graph.dimensions
    positions = graph.get_positions()
    # Nodes first, then edges (edges end up drawn on top).
    for _, pos in positions.items():
        draw_node(screen, pos)
    for start, end, _, _ in graph.edge_data:
        start_pos = positions[start]
        end_pos = positions[end]
        draw_edge(screen, start_pos, end_pos)
    # Advance to the next frame, wrapping around.
    i = (i + 1) % len(graphs)
def run():
    """Open the window and animate frames until the user quits or presses a key."""
    pg.init()
    screen = pg.display.set_mode(screen_size)
    screen.fill(WHITE)
    done = False
    while not done:
        clock.tick(FPS)
        for event in pg.event.get():
            if event.type in (pg.QUIT, pg.KEYDOWN):
                done = True
        if done:
            break
        draw(screen)
        pg.display.update()
    # BUG FIX: the loop used to exit via ``return``, which made this
    # pg.quit() unreachable, so pygame was never shut down cleanly.
    pg.quit()
def start_animation_thread(graph_array):
    """Install *graph_array* as the animation frames and start the window
    loop on a background thread."""
    global graphs
    graphs = graph_array
    worker = threading.Thread(target=run)
    worker.start()
|
#!python3
from numpy import random
from time import perf_counter
def insert_sort(a):
    """Sort *a* in place using insertion sort.

    *a* is a mutable, indexable sequence of comparable items.  Returns
    None, like list.sort() — the previous docstring wrongly claimed a
    sorted copy was returned.
    """
    for i in range(len(a) - 1):
        # Bubble a[i+1] leftwards into its place within the sorted prefix.
        for j in range(i + 1):
            if a[i + 1 - j] < a[i - j]:
                a[i - j], a[i - j + 1] = a[i - j + 1], a[i - j]
            else:
                # Already in position: the prefix is sorted, stop early.
                break
def main():
    """Sort 3,000 random ints with insert_sort and report the elapsed time."""
    data = random.randint(1000, size=3_000)
    print(data)
    t0 = perf_counter()
    insert_sort(data)
    print(data)
    print(f"\nSort time for {len(data):,} items: {(perf_counter()-t0):.2f} seconds\n")

if __name__ == "__main__":
    main()
|
import os
import shutil as s
def copy(path_in,path_out):
    # NOTE(review): despite the name, this MOVES the file (shutil.move),
    # removing it from path_in -- confirm whether a real copy was intended.
    s.move(path_in, path_out)
def all_picture(path1,path2):
    """Return the file names present in *path1* but absent from *path2*."""
    only_in_first = set(os.listdir(path1)) - set(os.listdir(path2))
    return list(only_in_first)
if __name__ == '__main__':
    path_out = r'D:\Git_project\VKR\FALSE_DETEC_CARS'
    # NOTE(review): this initial path_in (with a trailing space) is never
    # used -- it is overwritten inside the loop below.
    path_in = r'D:\Git_project\VKR\OUPUT_ANOTHER\ '
    # Files present in CARS_ANOTHER but not yet in OUPUT_ANOTHER.
    all_files = all_picture(r'D:\Git_project\VKR\CARS_ANOTHER',r'D:\Git_project\VKR\OUPUT_ANOTHER')
    for i in all_files:
        # NOTE(review): non-raw string with backslashes; works here only
        # because none of \G, \V, \C, \{ are escape sequences.
        path_in = 'D:\Git_project\VKR\CARS_ANOTHER\{0}'.format(i)
        copy(path_in,path_out)
|
import math
import os, glob
import sys
import string
from porter2stemmer import Porter2Stemmer
bow_doc_col = {}
DF = {}
TFIDF = {}
class BowDocument:
    """Bag-of-words model of one XML news document.

    Holds raw term counts plus TF / IDF / TF-IDF maps.  Relies on the
    module-level globals bow_doc_col, DF and stopWordsList.
    """
    def __init__(self, doc_ID, dict_):
        self.documentID = doc_ID
        self.dict = dict_        # term -> raw frequency
        self.wordCount = 0       # total tokens; assigned by parse_doc()
        self.tfDict = {}         # term -> term frequency (count / wordCount)
        self.idfDict = {}        # term -> inverse document frequency
        self.tfidfDict = {}      # term -> tf * idf
    def getDocId(self):
        """Return this document's id string."""
        return self.documentID
    def getDict(self):
        """Return the raw term-frequency dictionary."""
        return self.dict
    def getTFDict(self):
        """Return the computed term-frequency map."""
        return self.tfDict
    def getWordCount(self):
        """Return the total token count of the document body."""
        return self.wordCount
    def addTerm(self, term): #add term to dictionary (self.dict)
        """Stem, lower-case and count *term*; skips short terms and stop words."""
        term = Porter2Stemmer().stem(term.lower()) # Q1b
        if len(term) > 2 and term not in stopWordsList: # Q1b
            try:
                self.dict[term] += 1
            except KeyError:
                self.dict[term] = 1
    def getTermFreqMap(self): #sort and print out dictionary values
        """Print every term with its frequency, sorted by ascending count."""
        sortedList = sorted(self.dict.items(), key=lambda x: x[1]) # Q1c
        for elem in sortedList:
            print(elem[0], ":", elem[1])
    def computeTF(self, docid): #calculate TF value for Bow Document (only body)
        """Fill tfDict with count/wordCount for each term of document *docid*."""
        for item in bow_doc_col[docid].dict.keys():
            self.tfDict[item] = (bow_doc_col[docid].dict.get(item, 0) / float(self.wordCount))
    def computeIDF(self): #calculate IDF value (only body)
        """Fill idfDict with log10(N / df + 1) using the global DF counts."""
        N = len(bow_doc_col)
        for item in DF.keys():
            self.idfDict[item] = math.log10(N / float(DF.get(item, 0)) + 1)
    def computeTFIDF(self): #calculate TFIDF value (only body)
        """Return (and cache) tf * idf for every term in this document."""
        for item in self.tfDict.keys():
            self.tfidfDict[item] = self.tfDict.get(item, 0) * self.idfDict.get(item, 0)
        return self.tfidfDict
def calculateTfIdf():
    """Compute and print the TF-IDF weights for every parsed document.

    Fills the module-level TFIDF dict ({docid: {term: weight}}) from the
    documents in bow_doc_col, then prints up to the 20 highest-weighted
    terms of each document.
    """
    for doc_id in bow_doc_col.keys():
        bow_doc_col[doc_id].computeIDF()
        TFIDF[doc_id] = bow_doc_col[doc_id].computeTFIDF() #generate term:tfidf dict for each doc and fill al dictionary of docID: term:tfidf
    for doc_id, weights in TFIDF.items(): #print out top 20 terms
        print("-------Document", doc_id, "contains", len(weights), "terms-------")
        # The original duplicated the sort in an if/else on len > 20;
        # slicing with [:20] is already a no-op for shorter lists.
        sorted_terms = sorted(weights.items(), reverse=True, key=lambda x: x[1])
        for term, weight in sorted_terms[:20]:
            print(term, ":", weight)
def addDFTerm(term, docid):
    """Record that document *docid* contains *term*, for document-frequency counts.

    Stems and lower-cases the term and skips short terms / stop words.
    The global DF maps term -> set of doc ids; __main__ later collapses
    each set to its length.
    """
    term = Porter2Stemmer().stem(term.lower()) # Q1b
    if len(term) > 2 and term not in stopWordsList: # Q1b
        try:
            DF[term].add(docid)
        except KeyError:
            DF[term] = {docid}
def parse_doc(inputpath, stop_wds): #returns dictionary collection of BowDoc {docid: BowDoc}
    """Parse every .xml file in *inputpath* into BowDocument objects.

    Extracts the itemid from each <newsitem> tag, tokenises the body
    between <text> and </text>, and fills the module-level bow_doc_col
    and DF.  *stop_wds* is accepted but unused here -- addTerm/addDFTerm
    read the module-level stopWordsList instead (TODO confirm intended).
    """
    Path = inputpath
    filelist = os.listdir(Path)
    start_end = False  # True while inside a <text>...</text> section
    for i in filelist:
        if i.endswith(".xml"):
            with open(Path + '/' + i, 'r') as f:
                word_count = 0
                myfile = f.readlines()
                # print(f)
                for line in myfile:
                    line = line.strip()
                    if line.startswith("<newsitem") | line.startswith("<p>"):
                        if (start_end == False):
                            if line.startswith("<newsitem "):
                                # itemid="..." attribute carries the doc id.
                                for part in line.split():
                                    if part.startswith("itemid="):
                                        docid = part.split("=")[1].split("\"")[1]
                                        bow_doc_col[docid] = BowDocument(docid, {})
                                        break
                    if line.startswith("<text>"):
                        start_end = True
                    elif line.startswith("</text>"):
                        break
                    else:
                        # Strip tags, digits and punctuation, then count terms.
                        line = line.replace("<p>", "").replace("</p>", "").replace("quot", "")
                        line = line.translate(str.maketrans('', '', string.digits)).translate(
                            str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
                        line = line.replace("\\s+", " ")
                        for xterm in line.split():
                            word_count += 1
                            bow_doc_col[docid].addTerm(xterm)
                            addDFTerm(xterm, docid) # Q2a
                bow_doc_col[docid].wordCount = word_count
                bow_doc_col[docid].computeTF(docid) #Q2b
                start_end = False
                f.close()  # NOTE(review): redundant inside `with`
    return (bow_doc_col)
if __name__ == '__main__':
    import sys
    # Usage: script.py <collection-directory>
    if len(sys.argv) != 2:
        sys.stderr.write("USAGE: %s <coll-file>\n" % sys.argv[0])
        sys.exit()
    # Comma-separated stop-word list used by addTerm/addDFTerm.
    stopwords_f = open('common-english-words.txt', 'r')
    stopWordsList = stopwords_f.read().split(',')
    stopwords_f.close()
    x = parse_doc(sys.argv[1], stopWordsList)
    for i in DF: #changing the value of DF dict from dict to int freq value
        DF[i] = len(DF[i])
    calculateTfIdf()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'log_reg_windows.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Login_Register(object):
    """pyuic5-generated UI for the login/register dialog (do not hand-edit logic)."""
    def setupUi(self, Login_Register):
        """Build the dialog's widgets and fix their geometry."""
        Login_Register.setObjectName("Login_Register")
        Login_Register.resize(400, 300)
        # Title banner across the top.
        self.title = QtWidgets.QLabel(Login_Register)
        self.title.setGeometry(QtCore.QRect(36, 10, 321, 41))
        self.title.setObjectName("title")
        # Login / password labels and line edits.
        self.label_login = QtWidgets.QLabel(Login_Register)
        self.label_login.setGeometry(QtCore.QRect(10, 110, 47, 13))
        self.label_login.setObjectName("label_login")
        self.label_password = QtWidgets.QLabel(Login_Register)
        self.label_password.setGeometry(QtCore.QRect(10, 160, 51, 16))
        self.label_password.setObjectName("label_password")
        self.log_line_login = QtWidgets.QLineEdit(Login_Register)
        self.log_line_login.setGeometry(QtCore.QRect(70, 110, 301, 20))
        self.log_line_login.setObjectName("log_line_login")
        self.log_line_password = QtWidgets.QLineEdit(Login_Register)
        self.log_line_password.setGeometry(QtCore.QRect(70, 160, 301, 20))
        self.log_line_password.setText("")
        self.log_line_password.setEchoMode(QtWidgets.QLineEdit.Password)  # mask input
        self.log_line_password.setObjectName("log_line_password")
        # Bottom row of action buttons.
        self.button_log = QtWidgets.QPushButton(Login_Register)
        self.button_log.setGeometry(QtCore.QRect(310, 270, 75, 23))
        self.button_log.setObjectName("button_log")
        self.button_reg = QtWidgets.QPushButton(Login_Register)
        self.button_reg.setGeometry(QtCore.QRect(200, 270, 75, 23))
        self.button_reg.setObjectName("button_reg")
        self.button_settings = QtWidgets.QPushButton(Login_Register)
        self.button_settings.setGeometry(QtCore.QRect(20, 270, 75, 23))
        self.button_settings.setObjectName("button_settings")
        self.button_forgot_password = QtWidgets.QPushButton(Login_Register)
        self.button_forgot_password.setGeometry(QtCore.QRect(110, 270, 75, 23))
        self.button_forgot_password.setObjectName("button_forgot_password")
        self.retranslateUi(Login_Register)
        QtCore.QMetaObject.connectSlotsByName(Login_Register)
    def retranslateUi(self, Login_Register):
        """Set the user-visible (translatable) texts of all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Login_Register.setWindowTitle(_translate("Login_Register", "Dialog"))
        self.title.setText(_translate("Login_Register", "<html><head/><body><p align=\"center\"><span style=\" font-size:28pt; font-weight:600; color:#00ff00;\">Your Chat</span></p></body></html>"))
        self.label_login.setText(_translate("Login_Register", "login:"))
        self.label_password.setText(_translate("Login_Register", "password:"))
        self.button_reg.setText(_translate("Login_Register", "Register"))
        self.button_log.setText(_translate("Login_Register", "Log in"))
        self.button_settings.setText(_translate("Login_Register", "Settings"))
        self.button_forgot_password.setText(_translate("Login_Register", "Forgot pass"))
|
import requests
from tools import imgAutoCick,location
import pyautogui
import time
import pyperclip
import keyboard
def moveClick(x=None,y=None,duration=None,tween=pyautogui.linear):
    """Move the mouse cursor to (x, y) and perform one left click."""
    motion = dict(x=x, y=y, duration=duration, tween=tween)
    pyautogui.moveTo(**motion)
    pyautogui.click()
def trans_money(account,money):
    """Drive a wallet GUI with pyautogui to send *money* to *account*.

    All coordinates below are hard-coded screen positions recorded for one
    specific window layout -- NOTE(review): confirm resolution/layout before
    running, as any change breaks every click target.  The bare string
    statements ('1330,210', ...) are leftover coordinate notes.
    """
    # Click the avatar
    moveClick(x=1535, y=110,duration=0.1, tween=pyautogui.linear)
    time.sleep(0.5)
    # Click the search box
    moveClick(x=1330, y=210,duration=0.1, tween=pyautogui.linear)
    '1330,210'
    # Type the account name (via clipboard paste)
    pyperclip.copy(account)
    pyautogui.hotkey('ctrl', 'v')
    time.sleep(0.5)
    # Click the target account
    moveClick(x=1377, y=257,duration=0.1, tween=pyautogui.linear)
    '1377,257'
    time.sleep(0.5)
    # Click to copy the token address
    moveClick(x=956, y=170,duration=0.1, tween=pyautogui.linear)
    '956,170'
    time.sleep(0.5)
    # Click the avatar again
    moveClick(x=1535, y=110,duration=0.1, tween=pyautogui.linear)
    time.sleep(0.5)
    # Click the main account
    moveClick(x=1377, y=257,duration=0.1, tween=pyautogui.linear)
    '1377,257'
    time.sleep(0.5)
    # Click send
    moveClick(x=948, y=353,duration=0.1, tween=pyautogui.linear)
    '948,353'
    time.sleep(0.5)
    # Paste the copied address directly
    pyautogui.hotkey('ctrl', 'v')
    time.sleep(1)
    # Click the amount input box
    moveClick(x=911, y=362,duration=0.1, tween=pyautogui.linear)
    '911,362'
    # Enter the amount (e.g. 0.001)
    time.sleep(0.5)
    pyperclip.copy(money)
    pyautogui.hotkey('ctrl', 'v')
    time.sleep(0.5)
    # Click next
    moveClick(x=1050, y=899,duration=0.1, tween=pyautogui.linear)
    '1050,899'
    time.sleep(1.5)
    # Click confirm (same position, so the same coordinates work)
    moveClick(x=1050, y=899,duration=0.1, tween=pyautogui.linear)
    '1050,899'
    time.sleep(1)
# Guarded entry point: the original invoked the GUI automation at import
# time, which would start clicking as soon as this module was imported.
if __name__ == '__main__':
    trans_money('t13','0.001')
    # location()
|
# -*- coding: utf-8 -*-
import scrapy
import time
import os
class mzSpider(scrapy.Spider):
    """Spider that crawls mzitu.com galleries and saves each full-size image.

    Fixes: the original used Python 2 print statements (a SyntaxError on
    Python 3) and left the image file handle unclosed on write errors.
    """
    name = "Mzitu_Spider"
    start_urls = [
        "http://mzitu.com/all"
    ]
    def parse(self, response):
        """Follow every gallery link on the index page."""
        for ctgry_link in response.css('div.all a::attr(href)'):
            time.sleep(0.5)  # crude politeness delay between requests
            yield scrapy.Request(ctgry_link.extract(),
                                 callback = self.parse_main_page) #yield each req
    def parse_main_page(self, response):
        """Request every numbered page of one gallery."""
        # Second-to-last pagenavi span holds the page count, e.g. <span>42</span>;
        # [6:-7] strips the surrounding tag text.
        for page_num in range(int(response.css('div.pagenavi span')[-2].extract()[6:-7])): #return page num
            yield scrapy.Request(response.url + '/' + str(page_num),
                                 callback = self.parse_sub_page)
    def parse_sub_page(self, response):
        """Request the full-size image shown on a gallery page."""
        file_name = response.css('div.main-image p a img::attr(alt)').extract()
        yield scrapy.Request(response.css('div.main-image p a img::attr(src)').extract()[0],
                             callback = self.parse_img)
    def parse_img(self, response):
        """Save the downloaded image under the last 9 characters of its URL."""
        self.Downloads(response.url[-9:], response.body)
    def Downloads(self, file_name, content):
        """Write *content* bytes to *file_name* in the current directory."""
        # `with` guarantees the handle is closed even if write() raises.
        with open(file_name, 'wb') as f:
            f.write(content)
    def mkdir(self, path):
        """Create and chdir into D:\\Mzitu_Scraper\\<path>; return True if created."""
        path = path.strip()
        target = os.path.join("D:\Mzitu_Scraper", path)
        if not os.path.exists(target):
            # print() calls replace the original Python 2 print statements.
            print("Create a dictionary")
            os.makedirs(target)
            os.chdir(target)
            return True
        else:
            print("Folder has been existed")
            return False
|
# Convert a Celsius temperature read from the user to Fahrenheit.
celsius = int(input("Enter the temprature in celcious"))
fahrenheit = (9/5) * celsius + 32
print(fahrenheit)
|
# Read a (Korean reckoning) age and gender, then report adult/minor status.
old = int(input('만 나이를 입력하세요:'))
sex = input('성별을 입력하세요:')
# 19 or older counts as an adult.
template = '성인 %s입니다' if old >= 19 else '미성년자 %s입니다'
print(template % sex)
|
import math
def prime(n):
    """Return True when *n* is a prime number.

    The original looped over range(2, int(sqrt(n))), which excludes
    sqrt(n) itself, so perfect squares such as 4, 9 and 25 were reported
    prime; it also returned True for n < 2.

    Args:
        n: integer to test.

    Returns:
        bool: True for primes, False for composites and for n < 2.
    """
    if n < 2:
        return False  # 0, 1 and negatives are not prime
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
if __name__=='__main__':
    # Print every prime in [100, 1000].  Parenthesised call replaces the
    # original Python 2 `print i` statement (a SyntaxError on Python 3).
    for i in range(100,1000+1):
        if prime(i):
            print(i)
#!/usr/bin/env python3
##################################################
# Anton Rubisov 20150119 #
# University of Toronto Sports Analytics Group #
# #
# Search through the Mongo database and return #
# stats on the collection, query a particular #
# athlete, etc. #
##################################################
# For finding current largest ID in database:
# db.athleteRankings.find({},{"athlete_id":1}).sort({"athlete_id": -1}).limit(1)
# Drop a document
# db.athleteRankings.remove({"athlete_id" : {"$gt": 2563}})
# db.athleteRankings.remove({"athlete_id" : 2500})
# Required headers
import pymongo # MongoDB for Python3
host = 'localhost'        # MongoDB host (see mongo_connection())
database = 'crossfitSoup' # database name
collection = 'athleteId'  # collection holding athlete documents
def mongo_connection():
    """Return a handle to the configured MongoDB collection.

    Connects to the host named by the module-level ``host`` constant; the
    original called MongoClient() with no argument, silently ignoring
    ``host`` and always using the driver default.
    """
    client = pymongo.MongoClient(host)
    db = client[database]
    col = db[collection]
    return col
def main():
    """Print summary statistics for the athlete collection."""
    col = mongo_connection()
    # Cursor sorted by descending "id" -- first element has the max id.
    sorted_id = col.find().sort("id", pymongo.DESCENDING)
    # NOTE(review): Collection.count() / Cursor.count() were removed in
    # PyMongo 4 -- confirm the installed driver version still supports them.
    print('MongoDB {}.[{}]. {} documents.'.format(database, collection, col.count()))
    print('Max Athlete ID: {}'.format(sorted_id[0]['id']))
    print(col.find({"id": {"$gt": 97245}}).count())
# Entry point when run as a script.
if __name__ == "__main__":
    main()
|
from bootstrap3_datetime.widgets import DateTimePicker
from django import forms
from django.forms import ModelForm
from models import *
class ParteForm(ModelForm):
    """Form for the Parte model with a date-only bootstrap picker on `fecha`."""
    class Meta:
        model = Parte
        # Django >= 1.8 raises ImproperlyConfigured when a ModelForm Meta
        # declares neither `fields` nor `exclude`; '__all__' reproduces the
        # old implicit behaviour of exposing every model field.
        fields = '__all__'
        widgets = {
            'fecha' : DateTimePicker(options={"format": "YYYY-MM-DD", "pickTime": False})
        }
|
# -*- coding: utf-8 -*-
#
import pygmsh
import examples
import os
import tempfile
from importlib import import_module
import subprocess
def test_generator():
    """Nose-style generator yielding (check function, example module) pairs
    for every example listed in examples.__all__."""
    for name in examples.__all__:
        test = import_module('examples.' + name)
        yield check_output, test
def check_output(test):
    """Mesh the example's geometry; raises if pygmsh/gmsh rejects it."""
    pygmsh.generate_mesh(test.generate())
if __name__ == '__main__':
    # The original called test_io(), which is not defined anywhere in this
    # file (NameError at runtime); run the generated checks instead.
    for check, example in test_generator():
        check(example)
|
# -*- coding: utf-8 -*-
"""Tests for oauth2_provider overrides."""
from __future__ import unicode_literals
from django.test import RequestFactory
from oauth2_provider.exceptions import FatalClientError, OAuthToolkitError
from oauth2_provider.http import HttpResponseUriRedirect
from webplatformcompat.tests.base import TestCase
from .views import MyAuthorizationView
class FakeOAuthLibError(object):
    """A fake oauthlib error passed to OAuthToolkitError."""
    def __init__(self, redirect_uri='/redirect', urlencoded=None, **kwargs):
        """Store the redirect URI, urlencoded payload, and any extra attrs."""
        self.redirect_uri = redirect_uri
        self.urlencoded = urlencoded or 'fake=1'
        # Any extra keyword becomes an attribute on the fake error.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class TestMyAuthorizationView(TestCase):
    """Test our overrides of the AuthorizationView."""
    def setUp(self):
        # A view instance with a plain GET request; no OAuth session needed.
        self.view = MyAuthorizationView()
        self.view.request = RequestFactory().get('/authorize')
    def test_error_response_without_redirect(self):
        """Test that errors are rendered without a 'url' context variable."""
        base_error = FakeOAuthLibError(status_code=405)
        error = FatalClientError(error=base_error)
        response = self.view.error_response(error)
        self.assertNotIn('url', response.context_data)
    def test_error_response_with_redirect(self):
        """Test that redirectable errors produce an HttpResponseUriRedirect."""
        base_error = FakeOAuthLibError()
        error = OAuthToolkitError(error=base_error)
        response = self.view.error_response(error)
        self.assertIsInstance(response, HttpResponseUriRedirect)
|
import sqlite3
import random
import time
def LoggingOut():
    """Print an animated 'Logging out.....' message (takes about 5 seconds)."""
    print("\nLogging out", end="")
    for _ in range(5):
        print(".", end="")
        time.sleep(1)
    print("\nYou logged out.")
def AddingEmployee():
    """Interactively insert a new worker into the `workers` table and echo it.

    The phone and serial numbers are generated randomly.  Uses the
    module-level `cursor`/`db`.  Queries are parameterised: the original
    built SQL with str.format, which is vulnerable to SQL injection
    through the free-text inputs.
    """
    print("\nPlease enter employee's information\n")
    name = input("Enter worker's name: ")
    gender = input("Enter worker's gender: ")
    age = int(input("Enter worker's age: "))
    phone = random.randrange(100000000, 999999999)
    position = input("Enter worker's position: ")
    salary = int(input("Enter worker's salary: "))
    serial = random.randrange(10000, 99999)
    cursor.execute("INSERT INTO workers (name, gender, age, phone, position, salary, number) "
                   "VALUES(?, ?, ?, ?, ?, ?, ?)",
                   (name, gender, age, phone, position, salary, serial))
    db.commit()
    cursor.execute("SELECT * FROM workers WHERE number = ?", (serial,))
    print("\nOperation completed:")
    for name, gender, age, phone, position, salary, number in cursor:
        print("-" * 40)
        print("Worker's name: {}".format(name))
        print("Worker's gender: {}".format(gender))
        print("Worker's age: {}".format(age))
        print("Worker's phone: {}".format(phone))
        print("Worker's position: {}".format(position))
        print("Worker's salary: {}".format(salary))
        print("Worker's serial number: {}".format(number))
        print("-" * 40)
def EmployeeList():
    """Print every row of the `workers` table.

    Column order in the table is (name, gender, age, phone, position,
    salary, number); the original unpacked phone/position swapped, so each
    worker's phone number was printed as their position and vice versa.
    """
    cursor.execute("SELECT * FROM workers")
    for name, gender, age, phone, position, salary, number in cursor:
        print("Worker's name: {}".format(name))
        print("Worker's gender: {}".format(gender))
        print("Worker's age: {}".format(age))
        print("Worker's position: {}".format(position))
        print("Phone number: {}".format(phone))
        print("Worker's salary: {}".format(salary))
        print("Worker's serial number: {}".format(number))
        print("-" * 50)
def FireEmployee():
    """Delete a worker row chosen by its serial number.

    Parameterised query: the original interpolated the user-typed serial
    number straight into the SQL (injection risk).
    """
    print("\nYou'll fire a person.")
    serial_number = input("Please enter serial number of worker that you want to remove: ")
    cursor.execute("DELETE FROM workers WHERE number = ?", (int(serial_number),))
    db.commit()
    print("\nPlease wait", end="")
    for _ in range(3):
        print(".", end="")
        time.sleep(1)
    print("\nRemoving operation is completed.")
def CarList():
    """Print every car currently stored in the `cars` table."""
    cursor.execute("SELECT * FROM cars")
    for brand, model, year, price, number, rent in cursor:
        for label, value in (("Car's brand", brand),
                             ("Car's model", model),
                             ("Produced year", year),
                             ("Car's price", price),
                             ("Serial number", number),
                             ("For rent", rent)):
            print("{}: {}".format(label, value))
        print("-" * 50)
def AddingCar():
    """Interactively add a car to the `cars` table, marked 'Available'.

    Parameterised INSERT: the original interpolated the free-text inputs
    into the SQL with str.format (injection risk).
    """
    print("\nPlease enter car's data\n")
    brand = input("Enter brand: ")
    model = input("Enter model: ")
    year = int(input("Enter year: "))
    price = int(input("Enter price: "))
    serial = random.randrange(10000, 99999)
    rent_state = "Available"
    cursor.execute("INSERT INTO cars (brand, model, year, price, number, rent) "
                   "VALUES(?, ?, ?, ?, ?, ?)",
                   (brand, model, year, price, serial, rent_state))
    db.commit()
def RemoveCar():
    """Delete a car row chosen by its serial number (parameterised query;
    the original interpolated user input into the SQL)."""
    print("\nYou'll remove a car from system.")
    serial_number = input("Please enter serial number of car that you want to remove: ")
    cursor.execute("DELETE FROM cars WHERE number = ?", (int(serial_number),))
    db.commit()
    print("\nPlease wait", end="")
    for _ in range(3):
        print(".", end="")
        time.sleep(1)
    print("\nRemoving operation is completed.")
def AvailableCarList():
    """Print only the cars whose rent column is exactly 'Available'."""
    cursor.execute("SELECT * FROM cars WHERE rent='Available'")
    for brand, model, year, price, number, rent in cursor:
        for label, value in (("Car's brand", brand),
                             ("Car's model", model),
                             ("Produced year", year),
                             ("Car's price", price),
                             ("Serial number", number),
                             ("For rent", rent)):
            print("{}: {}".format(label, value))
        print("-" * 50)
def RentingCar():
    """Mark a car as rented until the given date.

    Parameterised UPDATE: the original interpolated both user inputs into
    the SQL (injection risk).
    """
    serial_number = input("Enter serial number of the car that you want to rent: ")
    rent_date = input("Enter the date that renting will be finish: ")
    cursor.execute("UPDATE cars SET rent=? WHERE number=?",
                   ("Not Available until {}".format(rent_date), int(serial_number)))
    db.commit()
def ReturningCar():
    """Mark a rented car as 'Available' again (parameterised query; the
    original interpolated user input into the SQL)."""
    serial_number = \
        input("Enter the serial number of car that you want to make it available for renting: ")
    cursor.execute("UPDATE cars SET rent='Available' WHERE number=?", (int(serial_number),))
    db.commit()
def SellingCar():
    """Archive a sold car: copy its row into `sold`, then delete it from `cars`.

    Parameterised queries replace the original str.format interpolation
    (injection risk through the typed serial number).
    """
    serial_number = input("\nPlease enter serial number of car that sold: ")
    number = int(serial_number)
    cursor.execute("INSERT INTO sold SELECT * FROM cars WHERE number=?", (number,))
    cursor.execute("DELETE FROM cars WHERE number=?", (number,))
    db.commit()
# --- Database setup -----------------------------------------------------
db = sqlite3.connect("MyCarShowroom.db")
cursor = db.cursor()
db.execute("CREATE TABLE IF NOT EXISTS workers "
           "(name TEXT, gender TEXT, age INTEGER, phone INTEGER, position TEXT, salary INTEGER, number INTEGER)")
db.execute("CREATE TABLE IF NOT EXISTS cars "
           "(brand TEXT, model TEXT, year INTEGER, price INTEGER,"
           " number INTEGER, rent TEXT)")
db.execute("CREATE TABLE IF NOT EXISTS sold "
           "(brand TEXT, model TEXT, year INTEGER, price INTEGER,"
           " number INTEGER, rent TEXT)")

# Answers recognised by the "another operation?" prompt.  The original
# redefined these two lists (and the whole prompt block) after every single
# menu action; the helper below removes that duplication without changing
# any prompt text or control flow.
_POSITIVE = ["Yes", "yes", "YES", "I do", "ı do"]
_NEGATIVE = ["No", "no", "NO", "I don't", "I do not", "ı dont", "ı do not"]

def _wants_logout():
    """Ask whether to continue; log out and return True when the user declines.

    Affirmative or unrecognised answers return False (session continues),
    matching the original if/elif/else-less fall-through behaviour.
    """
    answer = input("\nWould you like to do another operation? ")
    if answer in _NEGATIVE:
        LoggingOut()
        return True
    return False

print("\nSystem opened.")
while True:
    print("\n1. ADMIN LOGIN\n2. EMPLOYEE LOGIN\n3. EXIT SYSTEM")
    login_choice = input("\nWhat would you like to do: ")
    if login_choice == "1":
        # --- Admin session ---------------------------------------------
        while True:
            print("\n1.Adding employee\n2.Removing employee\n3.List of employees\n4.List of cars\n5.Logout")
            admin_choice = input("\nWhat would you like to do? ")
            if admin_choice == "1":
                AddingEmployee()
                if _wants_logout():
                    break
            elif admin_choice == "2":
                EmployeeList()
                FireEmployee()
                if _wants_logout():
                    break
            elif admin_choice == "3":
                EmployeeList()
                if _wants_logout():
                    break
            elif admin_choice == "4":
                CarList()
                if _wants_logout():
                    break
            elif admin_choice == "5":
                LoggingOut()
                break
    elif login_choice == "2":
        # --- Employee session ------------------------------------------
        while True:
            print("\n1.Adding car\n2.Removing car\n3.Renting car\n4.Selling car\n5.Logout\n")
            employee_choice = input("\nWhat would you like to do: ")
            if employee_choice == "1":
                AddingCar()
                if _wants_logout():
                    break
            elif employee_choice == "2":
                CarList()
                RemoveCar()
                if _wants_logout():
                    break
            elif employee_choice == "3":
                print("\n1.Start of a renting date\n2.Expiration of a renting date")
                while True:
                    rent_choose = input("What would you like to do:")
                    if rent_choose == "1":
                        AvailableCarList()
                        RentingCar()
                        break
                    elif rent_choose == "2":
                        CarList()
                        ReturningCar()
                        break
                if _wants_logout():
                    break
            elif employee_choice == "4":
                AvailableCarList()
                SellingCar()
                if _wants_logout():
                    break
            elif employee_choice == "5":
                LoggingOut()
                break
    elif login_choice == "3":
        print("\nClosing system", end="")
        for _ in range(5):
            print(".", end="")
            time.sleep(1)
        print("\nSystem closed.")
        break
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 09:57:08 2020
@author: sumant
"""
class Memory:
    """Storage specification (internal / secondary / RAM) of a device."""
    def __init__(self, internal, secondary, ram):
        """Record the three capacity strings."""
        self.internal = internal
        self.secondary = secondary
        self.ram = ram
    def details(self):
        """Print each capacity on its own line."""
        for label, value in (("Internal Memory", self.internal),
                             ("Secondary Memory", self.secondary),
                             ("Ram", self.ram)):
            print(f"{label}: {value}")
class Mobile:
    """Details of a mobile phone, composed with a Memory specification."""
    def __init__(self, model, brand, price, memory):
        """Store the phone's identity, price and its Memory object."""
        self.model = model
        self.brand = brand
        self.price = price
        self.memory = memory  # composition: a Memory instance
    def newFeatures(self):
        """Print the brand/model header, the memory details, then the price."""
        print(f"Features of {self.brand} Model {self.model}")
        self.memory.details()
        print("Price: ", self.price)
# Demo: build a Memory spec, wire it into a Mobile and print its features.
m = Memory('128gb','256gb','6gb')
tv = Mobile("G5s","Moto",40000,m)  # NOTE(review): variable named `tv` but holds a Mobile
tv.newFeatures()
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from .base_controller import BaseController
from ..api_helper import APIHelper
from ..configuration import Configuration
from ..http.auth.basic_auth import BasicAuth
class AccountController(BaseController):
    """A Controller to access Endpoints in the ytelapi API."""
    def create_view_account(self,
                            date):
        """Does a POST request to /accounts/viewaccount.json.
        Retrieve information regarding your Ytel account by a specific date.
        The response object will contain data such as account status, balance,
        and account usage totals.
        Args:
            date (string): Filter account information based on date.
        Returns:
            string: Response from the API.
        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        # Prepare query URL
        _query_builder = Configuration.base_uri
        _query_builder += '/accounts/viewaccount.json'
        _query_url = APIHelper.clean_url(_query_builder)
        # Prepare form parameters (sent as form-encoded body, not JSON)
        _form_parameters = {
            'Date': date
        }
        # Prepare and execute request with HTTP basic auth applied
        _request = self.http_client.post(_query_url, parameters=_form_parameters)
        BasicAuth.apply(_request)
        _context = self.execute_request(_request)
        self.validate_response(_context)
        # Return appropriate type (raw response body string)
        return _context.response.raw_body
|
import numpy as np
import pandas as pd
from flask import Flask,request,jsonify
import pickle
import requests,ssl
from flask_cors import CORS
app=Flask(__name__)
# Pre-trained random-forest regressor; model.pkl must sit next to this file.
rfregressor=pickle.load(open('model.pkl','rb'))
CORS(app)  # allow cross-origin requests from the frontend
# Placeholder for the per-item average-visibility table.
# NOTE(review): findSales() assigns a local of the same name, so this
# module-level value is never replaced -- confirm the intended scoping.
visibility_item_avg = [[]]
def impute_visibility_mean(cols):
    """Replace a zero Item_Visibility with the item's average visibility.

    Args:
        cols: two-element row slice [Item_Visibility, Item_Identifier],
            applied row-wise over those two columns.

    Returns:
        The original visibility when non-zero; otherwise the item's mean
        visibility looked up in the module-level table.
    """
    visibility = cols[0]
    item = cols[1]
    if visibility == 0:
        # NOTE(review): this indexes visibility_item_avg like a pandas
        # pivot table, but the module-level value is the list [[]] and
        # findSales() only rebinds a *local* of the same name -- as written
        # this branch would raise TypeError; confirm the intended sharing.
        return visibility_item_avg['Item_Visibility'][visibility_item_avg.index == item]
    else:
        return visibility
@app.route('/findsales',methods=['POST'])
def findSales():
    """Predict outlet sales for one JSON-described item/outlet pair.

    Expects a JSON body with the Big Mart columns (Item_Identifier,
    Item_Weight, ...).  Rebuilds the training-time feature engineering on
    the single row, then returns the model's prediction as a string.
    """
    print(request)
    # Pull and coerce every field from the request JSON.
    item_identifier=request.json['Item_Identifier']
    item_weight=float(request.json['Item_Weight'])
    item_fat_content=float(request.json['Item_Fat_Content'])
    item_visibility=float(request.json['Item_Visibility'])
    item_type=request.json['Item_Type']
    item_mrp=float(request.json['Item_MRP'])
    outlet_identifier=request.json['Outlet_Identifier']
    outlet_establishment_year=int(request.json['Outlet_Establishment_Year'])
    outlet_size=request.json['Outlet_Size']
    outlet_location_type=request.json['Outlet_Location_Type']
    outlet_type=request.json['Outlet_Type']
    datavalues=[[item_identifier,item_weight,item_fat_content,item_visibility,item_type,item_mrp,outlet_identifier,outlet_establishment_year,outlet_size,outlet_location_type,outlet_type]]
    data = pd.DataFrame(datavalues, columns = ['Item_Identifier', 'Item_Weight', 'Item_Fat_Content','Item_Visibility','Item_Type', 'Item_MRP', 'Outlet_Identifier','Outlet_Establishment_Year','Outlet_Size','Outlet_Location_Type','Outlet_Type'])
    # NOTE(review): local rebinding -- the module-level visibility_item_avg
    # used by impute_visibility_mean() is NOT updated here; confirm.
    visibility_item_avg = data.pivot_table(values='Item_Visibility',index='Item_Identifier')
    data['Item_Visibility'] = data[['Item_Visibility','Item_Identifier']].apply(impute_visibility_mean,axis=1).astype(float)
    data['Outlet_Years'] = 2013 - data['Outlet_Establishment_Year']
    data['Item_Type_Combined'] = data['Item_Identifier'].apply(lambda x: x[0:2])
    #Rename them to more intuitive categories:
    data['Item_Type_Combined'] = data['Item_Type_Combined'].map({'FD':'Food','NC':'Non-Consumable','DR':'Drinks'})
    data.loc[data['Item_Type_Combined']=="Non-Consumable",'Item_Fat_Content'] = "Non-Edible"
    Mean_Visibility=data['Item_Visibility'].mean()
    data['Item_Visibility_MeanRatio']=data.apply(lambda x:x['Item_Visibility']/Mean_Visibility,axis=1)
    #changing all nominal attributes to numerical using label encoder
    # NOTE(review): LabelEncoder is re-fit on this single row per request,
    # so the integer codes need not match those seen at training time --
    # verify against how model.pkl was trained.
    from sklearn.preprocessing import LabelEncoder
    le=LabelEncoder()
    data['outlet']=le.fit_transform(data['Outlet_Identifier'])
    var_mod = ['Item_Fat_Content','Outlet_Location_Type','Outlet_Size','Item_Type_Combined','Outlet_Type']
    for i in var_mod:
        data[i] = le.fit_transform(data[i])
    # Drop the raw columns the model was not trained on.
    data.drop(['Item_Type','Outlet_Establishment_Year'],axis=1,inplace=True)
    data=data.drop(['Outlet_Identifier','Item_Identifier'], axis=1)
    res = rfregressor.predict(data)
    return str(res[0])
# Development server only -- disable debug mode in production.
if __name__=="__main__":
    app.run(debug=True)
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.option.option_types import SkipOption
from pants.option.subsystem import Subsystem
class RustfmtSubsystem(Subsystem):
    """Pants subsystem exposing rustfmt configuration (scope: `rustfmt`)."""
    options_scope = "rustfmt"
    name = "rustfmt"
    help = "Rustfmt-specific options."
    # Lets users skip rustfmt when running the `fmt` and `lint` goals.
    skip = SkipOption("fmt", "lint")
|
# Read four people's data and report: average age, the oldest man's name,
# and how many women are under 20.
soma = 0            # running sum of all ages
velho_nome = ''     # name of the oldest man seen so far
velho_idade = 0     # age of the oldest man seen so far
mulheres_novas = 0  # count of women under 20
for c in range(0, 4):
    print('----- Pessoa {} -----'.format(c+1))
    nome = str(input('Nome: '))
    idade = int(input('Idade: '))
    sexo = str(input('Sexo [m/f]: '))
    if(sexo.lower() == 'm'):
        if (c == 0):
            velho_nome = nome
            velho_idade = idade
        if(idade > velho_idade):
            velho_nome = nome
            velho_idade = idade
    else:
        if(idade < 20):
            mulheres_novas += 1
    soma += idade
media = soma/4
print('\nA média de idade do grupo é de {} anos.'.format(media))
# NOTE(review): if no man was entered, velho_nome is still '' and this
# prints an empty name -- confirm whether that case matters.
print('O homem mais velho do grupo é o {}.'.format(velho_nome))
print('{} mulheres possuem menos de 20 anos.'.format(mulheres_novas))
from __future__ import unicode_literals
from django.db import models
class News(models.Model):
    """A news article with a summary, body text and header image."""
    title = models.CharField(max_length=500)
    resumen = models.CharField(max_length=5000)    # short summary
    content = models.CharField(max_length=10000)   # full article body
    imagen = models.FileField(upload_to='news/')   # header image file
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Newest articles first.
        ordering = ["-created_at"]
class Report(models.Model):
    """A downloadable report file with a title."""
    title = models.CharField(max_length=500)
    fileReport = models.FileField(upload_to='reports/')
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Newest reports first.
        ordering = ["-created_at"]
#!/usr/bin/env python3
import io
import csv
import utils
def download():
    """Fetch the PMID 24158091 supplementary CSV into ../data/pmid_24158091/."""
    utils.download_file('https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3932451/bin/amiajnl-2013-001612-s3.csv',
                        '../data/pmid_24158091/amiajnl-2013-001612-s3.csv')
def map_to_drugbank():
    """Map the drug-pair CSV to DrugBank ids and write the matched rows out.

    Keeps only rows with adjusted odds ratio (aor025) >= 1.1, resolves both
    drug names through utils.name_to_drugbank_id, de-duplicates pairs by an
    order-independent id key, and writes the result next to the input.

    Returns:
        [matched, duplicated, unmatched] counts.
    """
    result = []
    total = 0
    matched = set()
    duplicated = 0
    with io.open('../data/pmid_24158091/amiajnl-2013-001612-s3.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # skip header
        # 0 - event
        # 1 - drug1
        # 2 - drug2
        # 3 - a
        # 4 - b
        # 5 - c
        # 6 - d
        # 7 - pop_event_rate
        # 8 - uor025
        # 9 - label
        # 10 - aor025
        for row in reader:
            row = [x.strip() for x in row]
            if row[10] == 'NA' or float(row[10]) < 1.1:
                continue  # keep only signals with aor025 >= 1.1
            total += 1
            id1 = utils.name_to_drugbank_id(row[1])
            id2 = utils.name_to_drugbank_id(row[2])
            if id1 is not None and id2 is not None:
                # Order-independent key so (A,B) and (B,A) collapse together.
                id_key = '%s:%s' % (id1 if id1 < id2 else id2, id2 if id1 < id2 else id1)
                if id_key not in matched:
                    matched.add(id_key)
                    # 0 - drugbank1
                    # 1 - drugbank2
                    # 2 - event
                    # 3 - drug1
                    # 4 - drug2
                    # 5 - a
                    # 6 - b
                    # 7 - c
                    # 8 - d
                    # 9 - pop_event_rate
                    # 10 - uor025
                    # 11 - label
                    # 12 - aor025
                    result.append(
                        [id1, id2, row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9],
                         row[10]])
                else:
                    duplicated += 1
    with io.open('../data/pmid_24158091/amiajnl-2013-001612-s3_matched.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        writer.writerow(
            ['drugbank1', 'drugbank2', 'event', 'drug1', 'drug2', 'a', 'b', 'c', 'd', 'pop_event_rate', 'uor025',
             'label', 'aor025'])
        for row in result:
            writer.writerow(row)
    # Matched, Duplicated, Unmatched
    return [len(result), duplicated, total - duplicated - len(result)]
def process() -> list:
    """Run the DrugBank mapping step.

    Returns:
        [matched, duplicated, unmatched] counts from map_to_drugbank().
        (The original annotated the return as the literal ``[int]``, which
        is not a valid type expression; ``list`` keeps runtime behaviour
        identical.)
    """
    return map_to_drugbank()
def get_all_interaction_pairs(path='../data/pmid_24158091/amiajnl-2013-001612-s3_matched.csv') -> list:
    """Load matched interaction pairs from the mapped CSV.

    Args:
        path: CSV produced by map_to_drugbank(); defaults to the original
            hard-coded location so existing callers are unchanged.

    Returns:
        One [drugbank1, drug1, drugbank2, drug2, 12] row per interaction;
        the trailing 12 is this dataset's source identifier.
    """
    result = []
    # `with` replaces the original unclosed handle; header row is skipped.
    with io.open(path, 'r', encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)
        for row in reader:
            result.append([row[0], row[3], row[1], row[4], 12])
    return result
|
import os
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
from photutils import DAOStarFinder, IRAFStarFinder
from astropy.stats import mad_std
from photutils import aperture_photometry, CircularAperture
# Load a saliency frame, invert it, and detect blob-like sources with
# IRAFStarFinder; then measure each source with circular-aperture photometry
# and display the result.
# NOTE(review): skimage's `as_grey` keyword was renamed `as_gray` and later
# removed -- confirm the installed scikit-image version still accepts it.
img = io.imread("/home/mot/data/saliance/exp3/frame.png", as_grey=True)
img = 1-img  # invert: dark features become bright "stars"
bkg_sigma = mad_std(img)  # robust background noise estimate
daofind = IRAFStarFinder(fwhm=24., threshold=1.*bkg_sigma, )
sources = daofind.find_stars(img)
print(sources)
positions = (sources['xcentroid'], sources['ycentroid'])
apertures = CircularAperture(positions, r=8.)
phot_table = aperture_photometry(img, apertures)
print(phot_table)
plt.imshow(img, cmap="gray")
apertures.plot(color='blue', lw=1.5, alpha=0.5)
plt.show()
|
import csv
import smtplib
from email.mime.text import MIMEText
class Mailer(object):
    """Builds (but does not transmit) a plain-text email message."""
    def send(self, sender, recipients, subject, message):
        """Assemble a MIMEText message from the given parts.

        Fixes two defects in the original: the method omitted ``self`` (so
        any instance call raised TypeError) and called MIMEText() without
        its required text argument.

        Args:
            sender: From address.
            recipients: list of To addresses (joined with ', ') or a string.
            subject: subject line.
            message: plain-text body.

        Returns:
            The assembled email.mime.text.MIMEText (previously None; the
            extra return value is backward compatible).
        """
        msg = MIMEText(message)
        msg['Subject'] = subject
        msg['From'] = sender
        # Header values must be strings; a list would be set verbatim.
        msg['To'] = ', '.join(recipients) if isinstance(recipients, list) else recipients
        # NOTE(review): nothing is ever transmitted -- smtplib is imported
        # at module level but unused; confirm whether delivery was intended.
        return msg
class Logger(object):
    """Prints messages to stdout with a '[Logger]' prefix."""
    def output(self, message):
        """Print *message* prefixed with '[Logger] '.

        Fixes two defects in the original: the method omitted ``self`` (so
        the adapter's ``self.what_i_have.output(...)`` call raised
        TypeError), and ``"[Logger]".format(message)`` had no ``{}``
        placeholder, discarding the message entirely.
        """
        print("[Logger] {}".format(message))
class LoggerAdapter(object):
    """Adapter giving a logger-like object a Mailer-style ``send`` API.

    The wrapped object only needs an ``output(message)`` method; every
    other attribute access is delegated to it untouched.
    """

    def __init__(self, what_i_have):
        # Keep a reference to the adaptee.
        self.what_i_have = what_i_have

    def send(self, sender, recipients, subject, message):
        """Render the mail fields as one log line and emit it via the adaptee."""
        fields = (sender, recipients, subject, message)
        rendered = "From: {}\nTo: {}\nSubject: {}\nMessage: {}".format(*fields)
        self.what_i_have.output(rendered)

    def __getattr__(self, attr):
        # Fall through to the wrapped object for anything we don't define.
        return getattr(self.what_i_have, attr)
if __name__ == '__main__':
    # Demo: drive the Mailer directly with a sample message.
    demo_recipients = ["a@a.com", "b@b.com"]
    mailer = Mailer()
    mailer.send(
        "me@example.com",
        demo_recipients,
        "This is your message",
        "Have a good day",
    )
|
#!/usr/bin/python
import numpy as np
import pylab as py
import scipy.interpolate as interp1d
from COMMON import yr,week,nanosec
def PPTA_data(inputdir='../data/PPTA/', ifile1='LimSen4f.dat'):
    '''Return PPTA gravitational-wave upper-limit curves.

    Bug fix: the original docstring/comments said "EPTA" although the
    data directory and function name are PPTA (the ZhuEtAl2014 limit).

    Args:
        inputdir: directory containing the limit file (new parameter,
            defaults to the original hard-coded path).
        ifile1: limit file name (new parameter, original default).

    Returns:
        [ul1, ul2, ul3]: three arrays pairing column 1 (frequency) with
        columns 2, 3 and 4 of the file respectively (strain limits --
        the precise meaning of each column should be confirmed against
        the PPTA data release).
    '''
    path = inputdir + ifile1
    # Load the PPTA upper-limit data: one frequency/strain pair per array.
    ul1 = np.loadtxt(path, usecols=(1, 2))
    ul2 = np.loadtxt(path, usecols=(1, 3))
    ul3 = np.loadtxt(path, usecols=(1, 4))
    return [ul1, ul2, ul3]
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_predict, KFold
from sklearn.cluster import MiniBatchKMeans
from typing import Dict, Optional
from distil.primitives.utils import MISSING_VALUE_INDICATOR, SINGLETON_INDICATOR
from sklearn.decomposition import PCA
from distil.modeling.metrics import metrics, classification_metrics, regression_metrics
# --
# Categorical
class BinaryEncoder(BaseEstimator, TransformerMixin):
    """Encode a categorical column as fixed-width binary codes.

    Each observed level maps to an integer (permuted with a seeded RNG)
    emitted as its binary digits, giving ceil(log2(n_levels)) output
    columns instead of n_levels one-hot columns.
    """

    def __init__(self, random_seed=None):
        super().__init__()
        # level -> np.array of binary digits; populated by fit().
        self.lookup = None
        # Bug fix: the original did int(np.random.randint) -- int() applied
        # to the function object itself, which raises TypeError.  Draw an
        # actual random seed when none is supplied.
        self.random_seed = (
            random_seed
            if random_seed is not None
            else int(np.random.randint(np.iinfo(np.int32).max))
        )

    def fit(self, X):
        """Learn the level -> binary-code mapping from X (1-d after squeeze)."""
        levels = list(set(X.squeeze())) + [
            MISSING_VALUE_INDICATOR
        ]  # !! How to sort? Randomly? Alphabetically?
        # NOTE(review): set() ordering is hash-dependent, so even with a
        # fixed seed the final mapping can differ between interpreter runs.

        # Permute with a seeded RNG so ordering is at least seed-controlled.
        random_state = np.random.RandomState(self.random_seed)
        levels = random_state.permutation(levels)

        vals = range(len(levels))
        max_width = len(np.binary_repr(max(vals)))
        vals = [np.binary_repr(v, width=max_width) for v in vals]
        vals = [np.array(list(v)).astype(int) for v in vals]

        self.lookup = dict(zip(levels, vals))
        return self

    def transform(self, X, y=None):
        """Map X to learned codes; unseen levels get the missing-value code."""
        assert self.lookup is not None
        squeezed = X.squeeze()
        # single row element will become scalar after squeeze
        if not isinstance(squeezed, pd.Series):
            squeezed = [squeezed]
        return np.vstack(
            [
                self.lookup.get(xx, self.lookup[MISSING_VALUE_INDICATOR])
                for xx in squeezed
            ]
        )

    def fit_transform(self, X, y=None, **kwargs):
        """Convenience: fit on X, then transform the same X."""
        _ = self.fit(X)
        return self.transform(X)
# --
# Text
class SVMTextEncoder(BaseEstimator, TransformerMixin):
    """Encode free-text columns via linear-SVM outputs over TF-IDF features.

    For classification metrics the encoding is the SVM decision function
    (computed out-of-fold during fit_transform to limit target leakage);
    for regression metrics it is the SVM prediction.
    """

    # number of jobs to execute in parallel
    NUM_JOBS = 3

    # number of folds to apply to svm fit
    NUM_FOLDS = 3

    # !! add tuning
    def __init__(self, metric, random_seed):
        super().__init__()
        self._vect = TfidfVectorizer(ngram_range=[1, 2], max_features=30000)
        self._random_seed = random_seed
        if metric in classification_metrics:
            self._model = LinearSVC(class_weight="balanced", random_state=random_seed)
            self.mode = "classification"
        elif metric in regression_metrics:
            self._model = LinearSVR(random_state=random_seed)
            self.mode = "regression"
        else:
            raise AttributeError("metric not in classification or regression metrics")

    def fit(self, X, y):
        """Unsupported -- use fit_transform, which fits vectorizer and model together."""
        # Bug fix: `raise NotImplemented` raises TypeError in Python 3
        # (NotImplemented is a value, not an exception class).
        raise NotImplementedError

    def transform(self, X):
        """Vectorize X and return decision values / predictions from the fitted SVM."""
        X = pd.Series(X.squeeze()).fillna(MISSING_VALUE_INDICATOR).values
        Xv = self._vect.transform(X)

        if self.mode == "classification":
            out = self._model.decision_function(Xv)
        else:
            out = self._model.predict(Xv)

        # Guarantee a 2-d (n_samples, k) output for downstream stacking.
        if len(out.shape) == 1:
            out = out.reshape(-1, 1)

        return out

    def fit_transform(self, X, y=None, **kwargs):
        """Fit vectorizer + SVM on (X, y); return out-of-fold outputs for X."""
        assert y is not None, "SVMTextEncoder.fit_transform requires y"
        X = pd.Series(X.squeeze()).fillna(MISSING_VALUE_INDICATOR).values
        Xv = self._vect.fit_transform(X)
        self._model = self._model.fit(Xv, y)

        if self.mode == "classification":
            # Aim for NUM_FOLDS and stratified k-fold. If that doesn't work, fallback to uniform sampling.
            # NOTE(review): y.value_counts() assumes y is a pandas Series --
            # confirm against callers.
            num_folds = min(self.NUM_FOLDS, y.value_counts().min())
            if num_folds < 2:
                # Bug fix: modern scikit-learn rejects KFold(random_state=...)
                # unless shuffle=True (ValueError); shuffling is also what a
                # seeded CV split implies.
                cv = KFold(
                    n_splits=self.NUM_FOLDS,
                    shuffle=True,
                    random_state=self._random_seed,
                )
                out = cross_val_predict(
                    self._model,
                    Xv,
                    y,
                    method="decision_function",
                    n_jobs=self.NUM_JOBS,
                    cv=cv,
                )
            else:
                out = cross_val_predict(
                    self._model,
                    Xv,
                    y,
                    method="decision_function",
                    n_jobs=self.NUM_JOBS,
                    cv=num_folds,
                )
        else:
            out = cross_val_predict(
                self._model, Xv, y, n_jobs=self.NUM_JOBS, cv=self.NUM_FOLDS
            )

        if len(out.shape) == 1:
            out = out.reshape(-1, 1)

        return out
class TfidifEncoder(BaseEstimator, TransformerMixin):
    """Encode text as factorized integer labels (low cardinality) or
    PCA-reduced TF-IDF vectors (high cardinality).

    The class name keeps the original "Tfidif" spelling for caller
    compatibility.
    """

    # !! add tuning
    def __init__(self):
        super().__init__()
        self._vect = TfidfVectorizer(ngram_range=[1, 2], max_features=300)
        self._pca = PCA(n_components=16)
        # int code -> original label; set only when the factorize path is used.
        self.label_map: Optional[Dict[int, str]] = None

    def fit(self, X, y):
        """Unsupported -- use fit_transform."""
        # Bug fix: `raise NotImplemented` raises TypeError in Python 3
        # (NotImplemented is a value, not an exception class).
        raise NotImplementedError

    def transform(self, X):
        """Apply whichever encoding fit_transform chose to new data."""
        X = pd.Series(X.squeeze()).fillna(MISSING_VALUE_INDICATOR)
        if self.label_map:
            self.label_map_inv = {v: k for k, v in self.label_map.items()}
            # fillna is mostly needed if subset of data was trained on
            # NOTE(review): unseen labels collapse to 0 and collide with the
            # label legitimately coded 0 -- confirm this is intended.
            X = X.map(self.label_map_inv).fillna(0).values
        else:
            X = self._vect.transform(X).toarray()
            X = self._pca.transform(X)
        out = X

        # Guarantee a 2-d (n_samples, k) output for downstream stacking.
        if len(out.shape) == 1:
            out = out.reshape(-1, 1)

        return out

    def fit_transform(self, X, y=None, **kwargs):
        """Pick an encoding by cardinality, fit it, and return encoded X."""
        X = pd.Series(X.squeeze()).fillna(MISSING_VALUE_INDICATOR)
        if len(X.unique()) / len(X) < 0.5:  # TODO should be pulled from metadata
            factor = pd.factorize(X)
            X = factor[0]
            self.label_map = {k: v for k, v in enumerate(factor[1])}
        else:
            X = self._vect.fit_transform(X).toarray()
            X = self._pca.fit_transform(X)
        out = X

        if len(out.shape) == 1:
            out = out.reshape(-1, 1)

        return out
# --
# Timeseries
def run_lengths_hist(T_train, T_test):
    """Featurize sequences as histograms of their run lengths.

    A "run length" is the gap between consecutive change points of a
    series; lengths above the training 95th percentile are dropped.
    Returns (H_train, H_test) with one histogram row per sequence.
    """
    # !! Super simple -- ignores values

    def _run_lengths(series):
        # Indices where the value changes; gaps between them are run lengths.
        change_idx = np.where(np.diff(series))[0]
        return np.diff(change_idx).astype(int)

    train_rls = [_run_lengths(t) for t in T_train]
    test_rls = [_run_lengths(t) for t in T_test]

    # Cap histogram support at the training distribution's 95th percentile.
    thresh = np.percentile(np.hstack(train_rls), 95).astype(int)

    def _hist(rls):
        return np.vstack(
            [np.bincount(r[r <= thresh], minlength=thresh + 1) for r in rls]
        )

    return _hist(train_rls), _hist(test_rls)
# --
# Sets
def set2hist(
    S_train,
    S_test,
    n_clusters=64,
    n_jobs=32,
    kmeans_sample=100000,
    batch_size=1000,
    verbose=False,
):
    """Featurize variable-size sets of vectors as k-means cluster histograms.

    A MiniBatchKMeans model is fit on a random subsample of all vectors
    (train + test pooled), then each observation's vectors are assigned
    to clusters and counted into a fixed-length histogram.

    Args:
        S_train, S_test: iterables of (n_i, d) arrays, one per observation;
            all must share the same d.
        n_clusters: number of k-means clusters (histogram length).
        n_jobs: accepted for interface compatibility but UNUSED.
        kmeans_sample: max vectors used to fit k-means.
        batch_size, verbose: passed through to MiniBatchKMeans.

    Returns:
        (H_train, H_test): integer histogram matrices of shape
        (n_obs, n_clusters).

    Note: the subsample is drawn with an unseeded np.random.choice and
    MiniBatchKMeans is unseeded, so results vary run to run.
    """
    S_train, S_test = list(S_train), list(S_test)
    n_train_obs, n_test_obs = len(S_train), len(S_test)

    # Offsets delimit each observation's rows in the flattened matrices.
    train_offsets = np.cumsum([t.shape[0] for t in S_train])
    train_offsets = np.hstack([[0], train_offsets])

    test_offsets = np.cumsum([t.shape[0] for t in S_test])
    test_offsets = np.hstack([[0], test_offsets])

    # All sets must share one vector dimensionality.
    # (Cleanup: the original also bound an unused local `dim` here.)
    assert len(set([t.shape[1] for t in S_train])) == 1
    assert len(set([t.shape[1] for t in S_test])) == 1

    S_train_flat, S_test_flat = np.vstack(S_train), np.vstack(S_test)
    T_all = np.vstack([S_train_flat, S_test_flat])

    # Fit k-means on a random subsample of the pooled vectors.
    kmeans_sample = min(kmeans_sample, T_all.shape[0])
    sel = np.random.choice(T_all.shape[0], kmeans_sample, replace=False)
    km = MiniBatchKMeans(
        n_clusters=n_clusters, batch_size=batch_size, verbose=verbose
    ).fit(T_all[sel])

    # Histogram the cluster assignments per observation.
    cl_train = km.predict(S_train_flat)
    H_train = np.vstack(
        [
            np.histogram(
                cl_train[train_offsets[i] : train_offsets[i + 1]],
                bins=range(n_clusters + 1),
            )[0]
            for i in range(n_train_obs)
        ]
    )

    cl_test = km.predict(S_test_flat)
    H_test = np.vstack(
        [
            np.histogram(
                cl_test[test_offsets[i] : test_offsets[i + 1]],
                bins=range(n_clusters + 1),
            )[0]
            for i in range(n_test_obs)
        ]
    )

    return H_train, H_test
# --
# Graphs
def cf_remap_graphs(X_train, X_test):
    """Relabel a bipartite user/item edge list with dense integer ids.

    Both frames must have exactly two columns; they are renamed to
    ("user", "item") and each side is remapped to 0..n-1 using ids
    pooled across train and test.  Returns copies plus the id counts:
    (X_train, X_test, n_users, n_items).
    """
    assert X_train.shape[1] == 2
    assert X_test.shape[1] == 2

    train, test = X_train.copy(), X_test.copy()
    train.columns = ("user", "item")
    test.columns = ("user", "item")

    def _remap(col):
        # Pool ids from both splits so the mapping is consistent across them.
        uniq = np.unique(np.hstack([train[col], test[col]]))
        mapping = dict(zip(uniq, range(len(uniq))))
        train[col] = train[col].apply(mapping.get)
        test[col] = test[col].apply(mapping.get)
        return len(uniq)

    n_users = _remap("user")
    n_items = _remap("item")

    return train, test, n_users, n_items
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.