text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 10:54:45 2019
@author: murillon2003_00
"""
def call_numbers():
    """Print the integers 0 through 9, one per line."""
    for value in range(10):
        print(value)
def call_numbers_with_limits(limit):
    """Print the integers 0..9, truncated to the first `limit` values.

    Args:
        limit: slice end; values past 10 are capped by the range itself.
    """
    # Fix: the original bound the sequence to the name `list`, shadowing
    # the builtin; use a descriptive name instead.
    numbers = range(0, 10)
    for number in numbers[0:limit]:
        print(number)
def calculator(x=10, y=5):
    """Return the sum of x and y (defaults: 10 and 5)."""
    return x + y
"""
TECHX API GATEWAY
API GATEWAY FOR MERAKI
CREATED BY: FRBELLO AT CISCO DOT COM
DATE : JUL 2020
VERSION: 1.0
STATE: RC2
"""
__author__ = "Freddy Bello"
__author_email__ = "frbello@cisco.com"
__copyright__ = "Copyright (c) 2016-2020 Cisco and/or its affiliates."
__license__ = "MIT"
# === import libraries ====
import os
import json
import logging
import secrets
import string
import re
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# === Disable SSL Warnings ===
# NOTE(review): every request below uses verify=False; silencing the
# warnings hides the MITM exposure — confirm this is acceptable here.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# === Define a Logger ========
logger = logging.getLogger("apigw.MerakiWorker")
# === Class Blueprint
class APIGWMerakiWorker:
    """
    Object to wrap up all the Meraki calls.

    Environment variables read when defaults are used:
        MERAKI_ORG              - default organization id
        MERAKI_DEFAULT_NETWORK  - default network id
    (the module-level API helpers additionally read MERAKI_API_KEY)
    """

    def __init__(self, requestor_name, meraki_org="", meraki_net=""):
        """
        requestor_name: owner recorded on every job issued by this worker
        meraki_org / meraki_net: optional overrides; "" selects the
        defaults from the environment.
        """
        self.__name__ = "APIGW MERAKI Worker"
        self.job_owner = requestor_name
        if meraki_org in [""]:
            # if no meraki ORG or NET use Default Meraki Org and Net
            self.meraki_org = str(os.environ["MERAKI_ORG"])
        else:
            self.meraki_org = meraki_org
        if meraki_net in [""]:
            self.meraki_net = str(os.environ["MERAKI_DEFAULT_NETWORK"])
        else:
            self.meraki_net = meraki_net

    def show_meraki_network(self, job_req):
        """
        GET the organization's networks and describe their product types.
        params:
            job_req: Message from Dispatcher (logged only)
        return:
            Markdown message string ("" when the API returns no networks)
        """
        logger.info("Job Received: %s", job_req)
        api_uri = f"/v1/organizations/{self.meraki_org}/networks"
        data = get_meraki_api_data(api_uri)
        # Fix: initialise message so an empty network list no longer
        # raises NameError at return.
        message = ""
        # NOTE(review): message is rebuilt on every pass, so only the LAST
        # network returned is reported — confirm whether it should match
        # self.meraki_net instead.
        for network in data:
            net_info = network
            message = f"You are connecting to **{net_info['name']}**, which have this type of products \n"
            for product in net_info["productTypes"]:
                message += f"* **{product}** \n"
        return message

    def show_meraki_vlans(self, job_req):
        """
        Show all VLANs associated to the Meraki network.
        job_req: Message from Dispatcher (logged only)
        return: Markdown message string
        """
        logger.info("Job Received : %s", job_req)
        api_uri = f"/v1/networks/{self.meraki_net}/appliance/vlans"
        data = get_meraki_api_data(api_uri)
        # Parse the JSON
        check_icon = chr(0x2705)
        vlan_count = 0
        message = f"There are {len(data)} vlans in the Network. Details: \n"
        for vlan in data:
            message += f"* **{vlan['name']}** | ID: **{vlan['id']}** | Subnet: **{vlan['subnet']}** \n"
            vlan_count += 1
        message += f" {check_icon} Total: **{vlan_count}** \n"
        return message

    def show_meraki_switch(self, job_req):
        """
        Show all switches associated to the Meraki network.
        job_req: Message from Dispatcher (logged only)
        return: Markdown message string
        """
        logger.info("Job Received : %s", job_req)
        api_uri = f"/v1/networks/{self.meraki_net}/devices"
        data = get_meraki_api_data(api_uri)
        # Keep only devices whose model decodes to "switch".
        message = "Here is the detail: \n"
        device_counter = 0
        check_icon = chr(0x2705)
        for device in data:
            device_type = decode_meraki_model(device["model"])
            if "switch" in device_type:
                message += f"* **{device['name']}** | IP: **{device['lanIp']}** | Serial: **{device['serial']}** \n"
                device_counter += 1
        message += f"{check_icon} Total: **{device_counter}** \n"
        return message

    def show_meraki_mx_ports(self, job_req):
        """
        Show the ports of the network's MX appliance.
        job_req: Message from Dispatcher (logged only)
        return: Markdown message string
        API V1
        """
        logger.info("Job Received : %s", job_req)
        api_uri = f"/v1/networks/{self.meraki_net}/appliance/ports"
        data = get_meraki_api_data(api_uri)
        message = "Here is the detail: \n"
        port_counter = 0
        check_icon = chr(0x2705)
        for mx_port in data:
            message += f"* **{mx_port['number']}** | Port Mode: **{mx_port['type']}** | Vlan ID: **{mx_port['vlan']}** \n"
            port_counter += 1
        message += f"{check_icon} Total: **{port_counter}** \n"
        return message

    def show_meraki_ports(self, job_req):
        """
        Retrieve all ports of the switch identified by its IP address.
        job_req: "<command> <SWITCH_IP>"
        return: Markdown message string
        """
        logger.info("Job Received : %s", job_req)
        fails_icon = chr(0x2757) + chr(0xFE0F)
        check_icon = chr(0x2705)
        message = ""
        job_params = job_req.split()
        if len(job_params) < 2:
            # Not enough info provided
            message = f" {fails_icon} Job Request is incomplete, please provide Switch IP _show-ports <SWITCH_IP>_ \n"
        else:
            ## STEP 0-1: Assign all the parameters to job variables
            ip_addr = job_params[1]
            serial_id, switch_name = get_switch_serial(ip_addr, self.meraki_net)
            if serial_id in [""]:
                # Fix: the original referenced the undefined name `devicon`
                # here, which raised NameError on this error path.
                message = f"{fails_icon} **There is not switch with that IP**"
                logger.error("VALIDATION failed Switch serial not Found %s", ip_addr)
                return message
            logger.info("VALIDATION Succeeded Switch serial Found %s", serial_id)
            # STEP 1 - Retrieve Data
            api_uri = f"/v1/devices/{serial_id}/switch/ports/"
            data = get_meraki_api_data(api_uri)
            port_counter = 0
            message = f"Here is the detail for **{switch_name}** \n"
            for port in data:
                if port["enabled"]:
                    message += f"* {check_icon} Port **{port['portId']}** | Type : **{port['type']}** | VLAN: **{port['vlan']}** | VoiceVlan **{port['voiceVlan']}** | Status: **Enabled** \n"
                else:
                    message += f"* {fails_icon} Port **{port['portId']}** | Type : **{port['type']}** | VLAN: **{port['vlan']}** | VoiceVlan **{port['voiceVlan']}** | Status: **Disabled** \n"
                port_counter += 1
            message += f"{check_icon} Total Ports: **{port_counter}** \n"
        return message

    def show_meraki_ssid(self, job_req):
        """
        Show all enabled SSIDs plus any named-but-disabled SSIDs.
        job_req: Message from Dispatcher (logged only)
        return: Markdown message string
        API V1
        """
        logger.info("Job Received : %s", job_req)
        api_uri = f"/v1/networks/{self.meraki_net}/wireless/ssids"
        data = get_meraki_api_data(api_uri)
        message = "Here is the detail: \n"
        configured_ssid_counter = 0
        unused_ssid_counter = 0
        check_icon = chr(0x2705)
        for ssid in data:
            # First word of the SSID label; factory slots are named
            # "Unconfigured SSID <n>".
            ssid_cur_name = ssid["name"].split()
            if ssid["enabled"]:
                message += f"* Enabled SSID: ({ssid['number']}) **{ssid['name']}** Mode **{ssid['authMode']}** \n"
                configured_ssid_counter += 1
            elif ssid_cur_name and "Unconfigured" not in [ssid_cur_name[0].strip()]:
                # Named but currently disabled SSID.
                # Fix: "Disbaled" typo corrected in the user-facing text.
                message += f"* Disabled SSID ({ssid['number']}) **{ssid['name']}** Mode **{ssid['authMode']}** \n"
                configured_ssid_counter += 1
            else:
                # Factory "Unconfigured" slots (and blank names).
                unused_ssid_counter += 1
        # Fix: enabled SSIDs were previously counted as BOTH configured and
        # unused, and the total printed "unused - configured"; report the
        # two counters directly instead.
        message += f"{check_icon} Total: Configured **{configured_ssid_counter}** Unused **{unused_ssid_counter}** \n"
        return message

    def change_port_vlan(self, job_req):
        """
        Change a switch port to access mode on a given VLAN.
        params:
            job_req: "<command> <switch-ip> <port> <vlan-id>"
        return: Job Action Message (Markdown string)
        API V1
        """
        devicon = chr(0x2757) + chr(0xFE0F)
        check_icon = chr(0x2705)
        job_params = job_req.split()
        if len(job_params) < 4:
            # Not enough info provided (fix: dropped the f-prefix from a
            # literal that had no placeholders)
            message = " Job Request is incomplete, please provide Switch IP, Switch-Port, Vlan-ID ie _change-port-vlan 1.1.1.1 10 101 \n"
        else:
            ## STEP 0-1: Assign all the parameters to job variables
            ip_addr = "".join(re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', job_params[1]))  # Validate IP Addr. format
            port_id = "".join(re.findall(r'^\d{1,2}$', job_params[2]))  # Accept up to 2 digits
            vlan_id = "".join(re.findall(r'^\d{1,4}$', job_params[3]))  # Accept up to 4 digits
            # STEP 1: Validations
            ## STEP 1-1: GET Switch Serial Number
            serial_id, switch_name = get_switch_serial(ip_addr, self.meraki_net)
            if serial_id in [""]:
                message = f"{devicon} **There is not switch with that IP**"
                logger.error("VALIDATION failed Switch serial not Found %s", ip_addr)
                return message
            logger.info("VALIDATION Succeeded Switch serial Found %s", serial_id)
            ## STEP 1-2: Validate Vlan ID
            vlan_exists, vlan_name = validate_vlan(vlan_id, self.meraki_net)
            if vlan_exists:
                logger.info("VALIDATION Succeeded Vlan ID Valid for %s Name: %s", vlan_id, vlan_name)
            else:
                logger.error("VALIDATION failed Vlan ID not Found %s", vlan_id)
                message = f"{devicon} **Invalid VLAN ID**"
                return message
            ## STEP 1-3: Validate Port ID
            if validate_port(port_id, serial_id):
                logger.info("VALIDATION Succeeded Port ID Valid %s", port_id)
            else:
                logger.error("VALIDATION failed Port ID not Found %s", port_id)
                message = f"{devicon} **Invalid Port ID**"
                return message
            # STEP 2: Prepare the Payload
            port_payload = {}
            port_payload["name"] = f"Port changed by {self.job_owner} to {vlan_name.upper()} via Teams"
            port_payload["tags"] = ["Changed", "Automation", "WebexBot", "DevOps"]
            port_payload["vlan"] = vlan_id
            port_payload["type"] = "access"
            port_payload["enabled"] = True
            logger.info("JSON Data to Port Update %s ", json.dumps(port_payload))
            # STEP 3: Send The Change to API
            api_uri = f"/v1/devices/{serial_id}/switch/ports/{int(port_id)}"
            data = update_via_meraki_api(api_uri, port_payload)
            if data:
                logger.info("Port updated successfully job_owner %s : ", self.job_owner)
                # Fix: "Sucesfully" typo corrected in the user-facing text.
                message = f" {check_icon} **Port Update has been applied Successfully** \n"
                message += f"* Job Owner: **{self.job_owner}** \n"
                message += f"* Switch Name: **{switch_name}** \n"
                message += f"* PortID **{data['portId']}** \n"
                message += f"* Port Name **{data['name']}** \n"
                message += f"* Port Type **{data['type']}** \n"
                message += f"* VLAN ID **{data['vlan']}** \n"
                message += f"* Voice Vlan **{data['voiceVlan']}** \n"
            else:
                logger.error("Port update failed : ")
                message = f"{devicon} Port Update incomplete"
        return message

    def deactivate_port(self, job_req):
        """
        Administratively disable a switch port.
        params:
            job_req: "<command> <switch-ip> <port>"
        return: Job Action Message (Markdown string)
        API V1
        """
        devicon = chr(0x2757) + chr(0xFE0F)
        check_icon = chr(0x2705)
        job_params = job_req.split()
        if len(job_params) < 3:
            # Not enough info provided (fix: dropped the f-prefix).
            # NOTE(review): this usage text looks copy-pasted from
            # change_port_vlan (mentions Vlan-ID, which is not needed here).
            message = " Job Request is incomplete, please provide Switch IP, Switch-Port, Vlan-ID ie _change-port-vlan 1.1.1.1 10 101 \n"
        else:
            ## STEP 0-1: Assign all the parameters to job variables
            ip_addr = "".join(re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', job_params[1]))  # Well-formed IP address
            port_id = "".join(re.findall(r'^\d{1,2}$', job_params[2]))  # Accept up to 2 digits
            # STEP 1: Validations
            ## STEP 1-1: GET Switch Serial Number
            serial_id, switch_name = get_switch_serial(ip_addr, self.meraki_net)
            if serial_id in [""]:
                message = f"{devicon} **There is not switch with that IP**"
                logger.error("VALIDATION failed Switch serial not Found %s", ip_addr)
                return message
            logger.info("VALIDATION Succeeded Switch serial Found %s", serial_id)
            ## STEP 1-2: Validate Port ID
            if validate_port(port_id, serial_id):
                logger.info("VALIDATION Succeeded Port ID Valid %s", port_id)
            else:
                logger.error("VALIDATION failed Port ID not Found %s", port_id)
                message = f"{devicon} **Invalid Port ID**"
                return message
            # STEP 2: Prepare the Payload
            port_payload = {}
            port_payload["name"] = f"Port disabled by {self.job_owner} via Teams"
            port_payload["enabled"] = False
            logger.info("JSON Data to Port Update %s ", json.dumps(port_payload))
            # STEP 3: Send The Change to API
            api_uri = f"/v1/devices/{serial_id}/switch/ports/{int(port_id)}"
            data = update_via_meraki_api(api_uri, port_payload)
            if data:
                logger.info("Port updated successfully job_owner %s : ", self.job_owner)
                # Fix: "Sucesfully" typo corrected in the user-facing text.
                message = f" {check_icon} **Port Update has been applied Successfully** \n"
                message += f"* Job Owner: **{self.job_owner}** \n"
                message += f"* Switch Name: **{switch_name}** \n"
                message += f"* PortID **{data['portId']}** \n"
                message += f"* Port Name **{data['name']}** \n"
                message += f"* Port Type **{data['enabled']}** \n"
            else:
                logger.error("Port update failed : ")
                message = f"{devicon} Port Update incomplete"
        return message

    def activate_new_ssid(self, job_req):
        """
        Select the first unused SSID slot and activate it with WPA2-PSK.
        params: job_req - "<command> <ssid-name>"
        return: Message Confirmation (Markdown string)
        API V0
        """
        message = ""
        devicon = chr(0x2757) + chr(0xFE0F)
        check_icon = chr(0x2705)
        # STEP-0 Extract parameters from job_req
        # (split at most once so the SSID name may contain spaces)
        job_params = job_req.split(" ", 1)
        ## job_params[0] = Bot Command
        ## job_params[1] = SSID Name
        ## STEP 0-1 job_req validation
        if len(job_params) < 2:
            # Not enough info provided
            message = "Job Request is incomplete, please provide SSID Name ie. _activate-ssid Testing_ \n"
        else:
            ## STEP 0-2 Assign job_params to job variables
            ssid_name = job_params[1]
            # STEP 1 - Get the First Unused SSID
            ssid_num, ssid_state = get_unused_ssid(self.meraki_net)
            if ssid_num == -1:
                message = f"{devicon} **All SSID are in Use**"
                logger.error("VALIDATION failed Not Available SSID to Activate")
                return message
            logger.info("VALIDATION succeeded the SSID number %s is available", str(ssid_num).strip())
            # STEP 2 - Prepare Payload:
            ssid_payload = {}
            ssid_payload["name"] = ssid_name.strip()
            ssid_payload["enabled"] = True
            ssid_payload["authMode"] = "psk"
            ssid_payload["encryptionMode"] = "wpa"
            ssid_payload["wpaEncryptionMode"] = "WPA2 only"
            ssid_payload["visible"] = True
            ssid_payload["psk"] = generate_preshare_key()
            # STEP 3 - Request Activation to Meraki API:
            api_uri = f"/v0/networks/{self.meraki_net}/ssids/{ssid_num}"
            data = update_via_meraki_api(api_uri, ssid_payload)
            if data:
                logger.info("SSID Activation succeeded for job_owner %s : ", self.job_owner)
                # Fix: "sucesfully" typo corrected in the user-facing text.
                message = f" {check_icon} **SSID Activation has been applied successfully** \n"
                message += f"* Job Owner: **{self.job_owner}** \n"
                message += f"* SSID Number **{data['number']}** \n"
                message += f"* SSID Name **{data['name']}** \n"
                message += f"* Preshare Key **{data['psk']}** \n"
                message += f"* Encryption Mode **{data['encryptionMode']}** \n"
                message += f"* Visible **{data['visible']}** \n"
            else:
                logger.error("SSID Activation failed : ")
                message = f"{devicon} **SSID Activation incomplete**"
        return message

    def remove_ssid(self, job_req):
        """
        Deactivate an SSID (by number or by name) and restore its factory
        "Unconfigured" naming.
        params: job_req - "<command> <ssid-name-or-number>"
        return: Message Confirmation (Markdown string)
        API V0
        """
        message = ""
        devicon = chr(0x2757) + chr(0xFE0F)
        check_icon = chr(0x2705)
        # STEP-0 Extract parameters from job_req
        job_params = job_req.split(" ", 1)
        ## job_params[0] = Bot Command
        ## job_params[1] = SSID Name
        ## STEP 0-1 job_req validation
        if len(job_params) < 2:
            # Not enough info provided
            message = "Job Request is incomplete, please provide SSID Name ie. _remove-ssid Testing_ \n"
        else:
            ## STEP 0-2 Assign job_params to job variables
            if str(job_params[1]).isnumeric():
                # The Job Owner already submitted an SSID number
                ssid_num = int(job_params[1])
                logger.info("VALIDATION succeeded SSID number %s provided by %s", ssid_num, self.job_owner)
            else:
                ## STEP 0-3 Get SSID ID
                # The Job Owner passed a name, so resolve it to a number
                ssid_name = str(job_params[1]).strip()
                ssid_num = get_used_ssid_by_name(self.meraki_net, ssid_name)
                logger.info("VALIDATION succeeded SSID number %s for %s ", str(ssid_num), ssid_name)
            # STEP 2 - Prepare Payload:
            # Dashboard labels slots 1-based while the API numbers them
            # 0-based, hence the +1 in the restored name.
            ssid_payload = {}
            ssid_payload["name"] = "Unconfigured SSID " + str(ssid_num + 1)
            ssid_payload["enabled"] = False
            ssid_payload["authMode"] = "open"
            ssid_payload["visible"] = True
            # STEP 3 - Request Deactivation to Meraki API:
            api_uri = f"/v0/networks/{self.meraki_net}/ssids/{ssid_num}"
            data = update_via_meraki_api(api_uri, ssid_payload)
            if data:
                logger.info("SSID removal succeeded for job_owner %s : ", self.job_owner)
                # Fix: "sucesfully" typo corrected in the user-facing text.
                message = f"{check_icon} **SSID Deactivation has been completed successfully** \n"
                message += f"* Job Owner: **{self.job_owner}** \n"
                message += f"* SSID Number **{data['number']}** \n"
                message += f"* SSID Name **{data['name']}** \n"
                message += f"* Authentication **{data['authMode']}** \n"
                message += f"* Visible **{data['visible']}** \n"
            else:
                logger.error("SSID Deactivation failed : ")
                message = f"{devicon} **SSID Deactivation incomplete**"
        return message
# === Meraki API CRUD Operations =====
## === Common Paramenters
# Shared request headers for every Meraki call below.
api_headers = {}
# NOTE(review): read at import time — the module fails to import with a
# KeyError when MERAKI_API_KEY is unset (fail-fast; confirm intended).
api_headers["X-Cisco-Meraki-API-Key"] = str(os.environ["MERAKI_API_KEY"])
api_headers["Content-Type"] = "application/json"
API_URL = "https://api.meraki.com/api"
## === Functions
def get_meraki_api_data(api_uri):
    """
    Common function to send a GET request to the Meraki API.
    param:
        api_uri = resource endpoint (appended to API_URL)
    return:
        Parsed JSON data, or {} on any non-200 response
    """
    url = API_URL + api_uri
    a_response = requests.get(url, headers=api_headers, verify=False)
    if a_response.status_code == 200:
        # Response.json() parses the body directly.
        data = a_response.json()
        logger.info("Meraki GET operation succeeded : %s ", api_uri)
    else:
        data = {}
        # Fix: failures were logged at info level; use error so they
        # stand out.
        logger.error("Meraki GET Operation failed : %s", a_response.status_code)
    return data
def update_via_meraki_api(api_uri, payload):
    """
    Send a PUT request to the Meraki API.
    api_uri = resource endpoint (appended to API_URL)
    payload = data to update, conforming to the API format
    return: parsed JSON response, or {} on any non-200 response
    """
    url = API_URL + api_uri
    update_data = json.dumps(payload)
    make_update = requests.put(url, headers=api_headers, data=update_data, verify=False)
    if make_update.status_code == 200:
        data = make_update.json()
        logger.info("Meraki PUT operation succeeded : %s ", api_uri)
    else:
        data = {}
        # Fix: failures were logged at info level; use error instead.
        logger.error("Meraki PUT Operation failed : %s", make_update.status_code)
    return data
def post_via_meraki_api(api_uri, payload):
    """
    Send a POST request to the Meraki API.
    api_uri = resource endpoint (appended to API_URL)
    payload = data to create, conforming to the API format
    return: parsed JSON response, or {} on any non-2xx response
    """
    url = API_URL + api_uri
    post_data = json.dumps(payload)
    make_post = requests.post(url, headers=api_headers, data=post_data, verify=False)
    if make_post.status_code in [200, 201, 202, 203, 204]:
        # Fix: the original referenced `make_update` (copy-paste from the
        # PUT helper), which raised NameError on every call.
        data = make_post.json()
        logger.info("Meraki POST operation succeeded : %s ", api_uri)
    else:
        data = {}
        logger.error("Meraki POST Operation failed : %s", make_post.status_code)
    return data
# === Meraki Helpers Functions
def decode_meraki_model(model):
    """
    Receive a Meraki model ID and return a human-readable product label.
    Unknown models yield "".
    """
    # Ordered (family substring -> label) pairs; the LAST matching family
    # wins, mirroring the original cascade of independent `if` checks.
    families = (
        ("MX", "appliance"),
        ("MS", "switch"),
        ("MR", "wireless"),
        ("MV", "camera"),
        ("MC", "phone"),
    )
    model_label = ""
    for prefix, label in families:
        if prefix in model:
            model_label = label
    return model_label
def get_switch_serial(ip_addr, meraki_net):
    """
    Helper to retrieve a switch serial from its IP address.
    params: IP Address, Meraki Network id
    return: (serial, switch name) — both "" when no switch matches
    APIV1
    """
    serial_id = ""
    # Fix: initialise the name too; previously a miss raised NameError at
    # the return statement because switch_name was never assigned.
    switch_name = ""
    api_uri = f"/v1/networks/{meraki_net}/devices"
    data = get_meraki_api_data(api_uri)
    for device in data:
        device_type = decode_meraki_model(device["model"])
        if "switch" in device_type:
            # NOTE(review): substring match — "1.1.1.1" also matches
            # "11.1.1.10"; confirm whether equality is intended.
            if ip_addr in device["lanIp"]:
                serial_id = str(device["serial"]).strip()
                switch_name = str(device["name"]).strip()
                logger.info("Switch Found! Serial %s", serial_id)
    return serial_id, switch_name
def get_unused_ssid(meraki_net):
    """
    Find the first SSID slot still carrying the factory "Unconfigured" name.
    params: meraki_net - Meraki network id
    return: (ssid_num, ssid_state); ssid_num is -1 when every SSID is in use
    """
    ssid_num = -1
    ssid_state = False
    api_uri = f"/v1/networks/{meraki_net}/wireless/ssids"
    data = get_meraki_api_data(api_uri)
    for ssid in data:
        # First word of the SSID label; guard against an empty name,
        # which previously raised IndexError.
        label_words = str(ssid["name"]).split()
        if label_words and label_words[0] == "Unconfigured":
            ssid_num = ssid["number"]
            ssid_state = ssid["enabled"]
            # Fix: removed a stray debug print of the SSID name.
            break  # Stop at the first unused SSID
    return ssid_num, ssid_state
def get_used_ssid_by_name(meraki_net, ssid_name):
    """
    Resolve an SSID name to its slot number.
    params: meraki_net - Meraki network id; ssid_name - exact name to find
    return: SSID number of the first exact match, or -1 when not found
    """
    api_uri = f"/v1/networks/{meraki_net}/wireless/ssids"
    for ssid in get_meraki_api_data(api_uri):
        if str(ssid["name"]).strip() == ssid_name:
            return ssid["number"]
    return -1
def generate_preshare_key(size_of_psk=16):
    """
    Random preshare-key generator.
    Uses the `secrets` module for cryptographically strong choices.
    params: size_of_psk - number of characters to generate (default 16)
    return: preshare key string of letters and digits
    """
    alphabet = string.ascii_letters + string.digits
    # Draw each character independently, then shuffle for good measure
    # (as the original did).
    chars = [secrets.choice(alphabet) for _ in range(size_of_psk)]
    secrets.SystemRandom().shuffle(chars)
    return ''.join(chars)
def validate_vlan(vlan_id, meraki_net):
    """
    Helper to validate that a VLAN ID exists on the network.
    params: VLAN ID, Meraki Network ID
    returns: (exists, vlan_name) — (False, "") when the lookup fails
    API V0
    """
    api_uri = f"/v0/networks/{meraki_net}/vlans/{vlan_id}"
    data = get_meraki_api_data(api_uri)
    # An empty response means the VLAN does not exist (or the call failed).
    if not data:
        return False, ""
    return True, data["name"].strip()
def validate_port(port_id, serial_id):
    """
    Helper to validate that a port ID exists on the given switch.
    params: port ID, switch serial
    returns: True/False
    API V1
    """
    api_uri = f"/v1/devices/{serial_id}/switch/ports/{port_id}"
    # A non-empty response means the port exists.
    return bool(get_meraki_api_data(api_uri))
def meraki_api_enable():
    """
    Validate that the Meraki API token is set in the environment.
    Return True/False
    """
    # Fix: os.environ["..."] raised KeyError when the variable was absent;
    # .get treats "missing" and "empty" the same — both mean no API access.
    token = str(os.environ.get("MERAKI_API_KEY", ""))
    if token in [""]:
        logger.warning('API Key for Meraki is missing. check ENV')
        return False
    return True
import nest
from numpy.random import uniform, normal, choice
def create_populations(cell_params, scale = 1):
    """Instantiate one NEST population per entry of cell_params.

    cell_params: dict mapping population name -> {'model', 'n', 'params'}
    scale: multiplier applied to every population size
    return: dict mapping population name -> NEST node collection
    """
    populations = {}
    for name, spec in cell_params.items():
        populations[name] = nest.Create(spec['model'], scale * spec['n'], params = spec['params'])
    # Randomise the initial membrane state of the SNc cells.
    for neuron in populations['SNc']:
        nest.SetStatus([neuron], {'V_m': uniform(-100., -40.), 'U_m': uniform(-100., 100.)})
    return populations
def create_network(pop):
    """Wire the populations into the cortico-basal-ganglia-thalamic loop.

    pop: dict of NEST node collections as returned by create_populations.
    Weights/delays are hard-coded model parameters; negative weights are
    inhibitory projections. NOTE(review): connection order and exact
    parameter values are model-specific — confirm against the source model
    before changing.
    """
    # Thalamus -> cortical pyramidal cells, one-to-one relay.
    nest.Connect(
        pop['Thal'], pop['Pyr'],
        syn_spec = {'weight': 5., 'delay': 5.6},
        conn_spec = {'rule': 'one_to_one'}
    )
    # Cortical interneurons inhibit the pyramidal cells.
    nest.Connect(
        pop['Inh'], pop['Pyr'],
        syn_spec = {'weight': -18., 'delay': 0.1},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 4}
    )
    # Background Poisson drive to the pyramidal population.
    PyrInput = nest.Create('poisson_generator')
    nest.SetStatus(PyrInput, {'rate': 20.})
    nest.Connect(PyrInput, pop['Pyr'], syn_spec={'weight': 3.})
    # Pyramidal cells excite the inhibitory pool.
    nest.Connect(
        pop['Pyr'], pop['Inh'],
        syn_spec = {'weight': 3.3, 'delay': 0.1},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 4}
    )
    # Cortex -> D1 striatal neurons on their dedicated GLU receptor port.
    nest.Connect(
        pop['Pyr'], pop['MSN_D1'],
        syn_spec = {'weight': 43., 'delay': 5.1, 'receptor_type':
                    nest.GetStatus(pop['MSN_D1'])[0]['receptor_types']['SPIKESGLU']},
        conn_spec = {'rule': 'one_to_one'}
    )
    # D1 lateral GABA inhibition; no self-connections (autapses: False).
    nest.Connect(
        pop['MSN_D1'], pop['MSN_D1'],
        syn_spec = {'weight': -8., 'delay': 0.1, 'receptor_type':
                    nest.GetStatus(pop['MSN_D1'])[0]['receptor_types']['SPIKESGABA']},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 3, 'autapses': False},
    )
    # Cortex -> D2 striatal neurons (same pattern as D1).
    nest.Connect(
        pop['Pyr'], pop['MSN_D2'],
        syn_spec = {'weight': 43., 'delay': 5.1, 'receptor_type':
                    nest.GetStatus(pop['MSN_D2'])[0]['receptor_types']['SPIKESGLU']},
        conn_spec = {'rule': 'one_to_one'}
    )
    # D2 lateral GABA inhibition.
    nest.Connect(
        pop['MSN_D2'], pop['MSN_D2'],
        syn_spec = {'weight': -8., 'delay': 0.1, 'receptor_type':
                    nest.GetStatus(pop['MSN_D2'])[0]['receptor_types']['SPIKESGABA']},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 3, 'autapses': False}
    )
    # GPe inhibits STN.
    nest.Connect(
        pop['GPe'], pop['STN'],
        syn_spec = {'weight': -4., 'delay': 4.0},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 2}
    )
    # Cortex excites STN directly.
    nest.Connect(
        pop['Pyr'], pop['STN'],
        syn_spec = {'weight': 7., 'delay': 5.9},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 2}
    )
    # GPe recurrent collaterals.
    nest.Connect(
        pop['GPe'], pop['GPe'],
        syn_spec = {'weight': -2., 'delay': 0.1},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 2}
    )
    # D2 striatum -> GPe; total weight normalised by source population size.
    nest.Connect(
        pop['MSN_D2'], pop['GPe'],
        syn_spec = {'weight': -50. / len(pop['MSN_D2']), 'delay': 5.0},
        conn_spec = {'rule': 'all_to_all'}
    )
    # STN excites every second GPe cell ([::2] slice).
    nest.Connect(
        pop['STN'], pop['GPe'][::2],
        syn_spec = {'weight': 5., 'delay': 2.0},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 2}
    )
    # GPe inhibits GPi.
    nest.Connect(
        pop['GPe'], pop['GPi'],
        syn_spec = {'weight': -5., 'delay': 3.0},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 2}
    )
    # D1 striatum -> GPi; total weight normalised by source population size.
    nest.Connect(
        pop['MSN_D1'], pop['GPi'],
        syn_spec = {'weight': -50. / len(pop['MSN_D1']), 'delay': 4.0},
        conn_spec = {'rule': 'all_to_all'}
    )
    # STN excites every second GPi cell.
    nest.Connect(
        pop['STN'], pop['GPi'][::2],
        syn_spec = {'weight': 5., 'delay': 1.5},
        conn_spec = {'rule': 'fixed_indegree', 'indegree': 2}
    )
    # GPi inhibits thalamus, closing the loop.
    nest.Connect(
        pop['GPi'], pop['Thal'],
        syn_spec = {'weight': -25., 'delay': 5.0},
        conn_spec = {'rule': 'one_to_one'}
    )
def connect_SNc(pop, frac = 1., weight = 12, outdeg = 0.6):
    """Connect (a fraction of) the SNc cells to both MSN populations.

    pop:    population dict from create_populations
    frac:   fraction of SNc cells used as sources (>= 1. uses them all)
    weight: base synaptic weight; multiplied by 100 below
    outdeg: fraction of each target population every source projects to
    """
    if frac >= 1.:
        # Use the whole population
        sn = pop['SNc']
    else:
        # Pick a fractional part of the population (without replacement)
        n = int(round(len(pop['SNc']) * frac))
        idx = choice(len(pop['SNc']), n, replace=False)
        sn = [pop['SNc'][i] for i in idx]
    # Input arrives on the targets' dedicated SPIKESDOPA receptor port.
    nest.Connect(
        sn, pop['MSN_D1'],
        syn_spec = {'weight': 100.0 * weight, 'delay': 3.0, 'receptor_type':
                    nest.GetStatus(pop['MSN_D1'])[0]['receptor_types']['SPIKESDOPA']},
        conn_spec = {'rule': 'fixed_outdegree', 'outdegree': int(round(outdeg*len(pop['MSN_D1'])))}
    )
    nest.Connect(
        sn, pop['MSN_D2'],
        syn_spec = {'weight': 100.0 * weight, 'delay': 3.0, 'receptor_type':
                    nest.GetStatus(pop['MSN_D2'])[0]['receptor_types']['SPIKESDOPA']},
        conn_spec = {'rule': 'fixed_outdegree', 'outdegree': int(round(outdeg*len(pop['MSN_D2'])))}
    )
def add_stims(pop, times, amp = 100):
    '''Create a DC generator for each (tStart, tEnd) in times and connect it
    to the given population.

    NOTE(review): the original docstring said "poisson input", but the code
    creates a ``dc_generator`` with a fixed amplitude — confirm intent.
    '''
    for start, stop in times:
        stim = nest.Create('dc_generator')
        nest.SetStatus(stim, {'start': float(start), 'stop': float(stop), 'amplitude': float(amp)})
        nest.Connect(stim, pop)
if __name__ == '__main__':
    # NOTE(review): create_populations() requires a cell_params argument;
    # as written this call raises TypeError — confirm where the parameter
    # dictionary is supposed to come from.
    pop = create_populations()
    create_network(pop)
|
from torchvision.models import resnet101
import torch
from torch import nn
class Net(nn.Module):
    """ResNet-101 feature extractor.

    Reuses the pretrained torchvision resnet101 stem (conv1/bn1/relu/
    maxpool) and its four residual stages, dropping the average-pool and
    fully-connected head, so forward() returns the stage-4 feature map.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Pretrained backbone; only the listed submodules are kept alive.
        model = resnet101(pretrained=True)
        # Stem
        self.conv1 = model.conv1
        self.bn1 = model.bn1
        self.relu = model.relu
        self.maxpool = model.maxpool
        # Residual stages
        self.layer1 = model.layer1
        self.layer2 = model.layer2
        self.layer3 = model.layer3
        self.layer4 = model.layer4

    def forward(self, x):
        # Standard ResNet forward pass, truncated before pooling/fc.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
if __name__ == '__main__':
    # Smoke test: a batch of 13 three-channel 854x480 inputs
    # (downloads pretrained weights; memory-heavy on CPU).
    net = Net()
    x = torch.randn(13,3,854,480)
    y = net(x)
    print(y.shape)
|
#!/usr/bin/env python
# Python 2 script: point-update / range-minimum queries over a list.
# input_number[0] holds a 2**31 sentinel so user values occupy 1-based
# indices, matching the 1-based positions used by the queries below.
input_number = [2**31, ]
if __name__ == '__main__':
    # N values, then Q queries of the form "<op> <a> <b>".
    N = int(raw_input())
    # read input
    for i in range(N):
        input_number.append(int(raw_input()))
    # for
    Q = int(raw_input())
    for i in range(Q):
        inp = map(int, raw_input().split(" "))
        # update operation: op 1 sets position a to value b
        if int(inp[0]) == 1:
            input_number[inp[1]] = inp[2]
        # query operation: print min over inclusive range [a, b]
        else:
            # O(n) scan per query; a segment tree would be needed at scale.
            print min(input_number[inp[1]:inp[2]+1])
|
import random
import util
from app.xlib.responder import Responder
from app.xlib.states import StateType
from app.xlib.sr_strings import srs
from app.xlib.states import StateString
from ..main import music
from .. import db
from ..decorators import check_state
import json
class Handler(object):
    """Static dispatch handlers for the song-guessing chat game.

    Each handler receives:
        to:   recipient id the response is sent to
        game: game/session record (state, song, scores, difficulty, id)
        body: raw text of the incoming message
    NOTE: this module uses Python 2 print statements.
    """

    @staticmethod
    @check_state(StateType.INITIAL)
    def handle_intro(to, game, body, response=StateString.INTRO):
        # Greet and show the initial keyboard.
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.INITIAL])

    @staticmethod
    @check_state(StateType.INITIAL)
    def handle_genre(to, game, body, response=StateString.GENRE):
        # Move the session into genre selection.
        game.state = StateType.GENRE_SELECT
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.GENRE_SELECT])

    @staticmethod
    @check_state(StateType.INITIAL)
    def handle_artist(to, game, body, response=StateString.ARTIST):
        # Move the session into artist selection.
        game.state = StateType.ARTIST_SELECT
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.ARTIST_SELECT])

    @staticmethod
    @check_state(StateType.GENRE_SELECT, StateType.ARTIST_SELECT, StateType.INITIAL)
    def handle_song(to, game, body, song=None):
        # Pick a song (unless one was supplied), store it on the session,
        # and send its audio preview for the player to guess.
        if not song:
            song = music.get_song_from_playlist()
        game.state = StateType.ANSWER_TIME
        game.song = song.to_json_string()
        print("Adding song json to the db: ", game.song)
        Responder.send_wubble_response(to, game.id, song.preview_id, keyboards=srs.grouped_srs[StateType.ANSWER_TIME])

    @staticmethod
    def handle_back(to, game, body, response=StateString.BACK):
        # Reset the session to the initial state and clear the current song.
        game.state = StateType.INITIAL
        game.song = None
        db.session.commit()
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.INITIAL])

    @staticmethod
    @check_state(StateType.INITIAL)
    def handle_score(to, game, body, response=StateString.SCORE):
        # Render the scoreboard sorted by score, highest first.
        print 'game', game
        print 'scores', game.scores
        scores = json.loads(game.scores)
        sorted_scores = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        for tup in sorted_scores:
            response = response + tup[0] + ': ' + str(tup[1]) + '\n'
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.INITIAL])

    @staticmethod
    @check_state(StateType.INITIAL)
    def handle_settings(to, game, body, response=StateString.DIFFICULTY):
        # Enter the settings/difficulty menu.
        game.state = StateType.SETTINGS
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.SETTINGS])

    @staticmethod
    @check_state(StateType.SETTINGS)
    def handle_difficulty(to, game, body):
        # Map the chosen difficulty word to its numeric value.
        # NOTE(review): an unrecognised body leaves difficulty unchanged
        # but still reports it as set — confirm intended.
        game.state = StateType.INITIAL
        if body == 'easy':
            game.difficulty = 60
        elif body == 'hard':
            game.difficulty = 0
        elif body == 'medium':
            game.difficulty = 30
        response = 'Difficulty has been set to ' + body
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.INITIAL])

    @staticmethod
    def handle_fallback(to, game, body, response=None):
        # Unknown input: echo it back or pick a canned fallback line.
        if response:
            response = 'I don\'t understand what you mean by "{}"'.format(response)
        else:
            response = random.choice(StateString.FALLBACK_STRINGS)
        Responder.send_text_response(to, game.id, response,
                                     keyboards=srs.grouped_srs.get(game.state, srs.grouped_srs[StateType.INITIAL]))

    @staticmethod
    @check_state(StateType.ANSWER_TIME)
    def handle_hint(to, game, body):
        # Send the album art (with fallbacks) as a hint for the current song.
        try:
            if game:
                song = json.loads(game.song)
                print 'song: %r' % song
                album_art = song['album_art'] or 'http://i.imgur.com/DUCOwkM.jpg'
                album = song['album'] or 'Album art'
                print 'album_art: %r' % album_art
                print 'album: %r' % album
                Responder.send_image_response(to, game.id, album_art, album,
                                              keyboards=srs.grouped_srs[StateType.ANSWER_TIME])
        except Exception as e:
            print 'HANDLE_HINT ERROR: %r' % e
            Handler.handle_error(to, game)
            return

    @staticmethod
    def handle_error(to, game, response=StateString.ERROR):
        # Reset the session after an unrecoverable handler failure.
        game.state = StateType.INITIAL
        db.session.commit()
        Responder.send_text_response(to, game.id, response, keyboards=srs.grouped_srs[StateType.INITIAL])

    @staticmethod
    @check_state(StateType.ANSWER_TIME)
    def handle_answer(to, game, body):
        # Compare the guess to the stored song title; award a point on a
        # match, otherwise route to back/hint/incorrect handling.
        hidden_sr = True
        try:
            if game:
                song = json.loads(game.song)
        except Exception as e:
            print 'HANDLE_ANSWER ERROR: %r' % e
            Handler.handle_error(to, game)
            return
        # NOTE(review): if game is falsy, `song` is never bound and the
        # next line raises NameError rather than being handled above —
        # confirm whether game can be falsy here.
        if song and util.guess_matches_answer(body, song['title'].lower()):
            game.state = StateType.INITIAL
            game.song = None
            print 'scores %r', game.scores
            scores = json.loads(game.scores)
            scores[to] = scores.get(to, 0) + 1
            game.scores = json.dumps(scores)
            response = random.choice(StateString.CORRECT)
            response += ' ' + random.choice(StateString.CORRECT_EMOJI)
            response += '\nIt\'s "{song}" by {artist}'.format(song=song['title'], artist=song['artist'])
            keyboards = srs.grouped_srs[StateType.INITIAL]
            hidden_sr = False
        else:
            if body in ['back', 'skip', 'next']:
                back_message = 'Giving up? The song was "{song}" by {artist}'.format(song=song['title'], artist=song['artist'])
                Handler.handle_back(to, game, body, back_message)
                return
            elif body == 'hint':
                Handler.handle_hint(to, game, body)
                return
            else:
                response = random.choice(StateString.INCORRECT)
                response += ' ' + random.choice(StateString.INCORRECT_EMOJI)
                keyboards = srs.grouped_srs[StateType.ANSWER_TIME]
        Responder.send_text_response(to, game.id, response, keyboards, hidden_sr)
|
# i pledge my honor that i have abided by the stevens honor system
def sumoflst(lst):
    """Return the sum of the numbers in lst (0 for an empty list)."""
    # Delegate to the builtin instead of a manual index loop; this also
    # avoids shadowing the builtin name `sum`, as the original accumulator
    # variable did.
    return sum(lst)
# testing of function
def main():
    """Smoke-test sumoflst with a small fixed list."""
    sample = [1, 2, 3, 4, 5]
    print(sumoflst(sample))

main()
|
# $Id: __init__.py,v 1.14 2012/11/27 00:49:39 phil Exp $
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 5.6 (Emerald Boa)
# version 6.1 (Emerald Boa)
#
# Copyright (c) 2000 - 2013 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: __init__.py,v $
# Revision 1.14 2012/11/27 00:49:39 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.13 2012/05/06 05:49:49 phil
# Copyright Storm for Mamba
#
# Revision 1.12 2012/05/05 00:28:02 clem
# minor code refactoring: libvirt url is now in a global variable
#
# Revision 1.11 2012/03/09 01:45:23 clem
# Rocks command xen is not compatible with 5.7 and 6.2
#
# Revision 1.10 2011/07/23 02:31:43 phil
# Viper Copyright
#
# Revision 1.9 2010/09/07 23:53:32 bruno
# star power for gb
#
# Revision 1.8 2009/05/01 19:07:33 mjk
# chimi con queso
#
# Revision 1.7 2009/04/08 22:27:58 bruno
# retool the xen commands to use libvirt
#
# Revision 1.6 2008/10/18 00:56:23 mjk
# copyright 5.1
#
# Revision 1.5 2008/09/09 19:37:52 bruno
# make 'list cluster' simplier
#
# Revision 1.4 2008/09/08 21:51:40 bruno
# added optional status columns to VM listing commands
#
# Revision 1.3 2008/09/04 19:55:00 bruno
# list the FQDN for frontends
#
# Revision 1.2 2008/08/22 23:25:56 bruno
# closer
#
# Revision 1.1 2008/08/21 21:41:36 bruno
# list physical and virtual clusters
#
import rocks.vm
import rocks.commands
import sys
sys.path.append('/usr/lib64/python2.' + str(sys.version_info[1]) + '/site-packages')
sys.path.append('/usr/lib/python2.' + str(sys.version_info[1]) + '/site-packages')
import libvirt
class Command(rocks.commands.HostArgumentProcessor,
    rocks.commands.list.command):
    """
    Lists a cluster, that is, for each frontend, all nodes that are
    associated with that frontend are listed.

    <arg optional='1' type='string' name='cluster' repeat='1'>
    Zero, one or more frontend names. If no frontend names are supplied,
    information for all clusters will be listed.
    </arg>

    <param type='bool' name='status'>
    If true, then for each VM-based cluster node, output the VM's status
    (e.g., 'active', 'paused', etc.).
    </param>

    <example cmd='list cluster frontend-0-0'>
    List the cluster associated with the frontend named 'frontend-0-0'.
    </example>

    <example cmd='list cluster'>
    List all clusters.
    </example>
    """

    def getStatus(self, host):
        """Return the libvirt run state of virtual node `host`.

        Resolves the physical node hosting the VM through the database,
        connects to its hypervisor, and maps the libvirt domain state to
        one of: 'nostate', 'active', 'paused', 'shutdown', 'shutoff',
        'crashed'.  Any lookup or connection failure degrades to 'nostate'.
        """
        #
        # find the physical host for this virtual host
        #
        rows = self.db.execute("""select vn.physnode from
            vm_nodes vn, nodes n where n.name = '%s'
            and n.id = vn.node""" % (host))

        if rows == 1:
            physnodeid, = self.db.fetchone()
        else:
            return 'nostate'

        # resolve the physical node id to its hostname
        rows = self.db.execute("""select name from nodes where
            id = %s""" % (physnodeid))

        if rows == 1:
            physhost, = self.db.fetchone()
        else:
            return 'nostate'

        try:
            import rocks.vmconstant
            hipervisor = libvirt.open( rocks.vmconstant.connectionURL % physhost)
        except:
            # hypervisor unreachable: report 'nostate' rather than fail
            return 'nostate'

        # scan running domains for one whose name matches `host`
        found = 0
        for id in hipervisor.listDomainsID():
            if id == 0:
                #
                # skip dom0
                #
                continue

            domU = hipervisor.lookupByID(id)
            if domU.name() == host:
                found = 1
                break

        state = 'nostate'
        if found:
            # map libvirt's numeric domain state to a display string
            status = domU.info()[0]
            if status == libvirt.VIR_DOMAIN_NOSTATE:
                state = 'nostate'
            elif status == libvirt.VIR_DOMAIN_RUNNING or \
                status == libvirt.VIR_DOMAIN_BLOCKED:
                state = 'active'
            elif status == libvirt.VIR_DOMAIN_PAUSED:
                state = 'paused'
            elif status == libvirt.VIR_DOMAIN_SHUTDOWN:
                state = 'shutdown'
            elif status == libvirt.VIR_DOMAIN_SHUTOFF:
                state = 'shutoff'
            elif status == libvirt.VIR_DOMAIN_CRASHED:
                state = 'crashed'

        return state

    def getClientInfo(self, host, showstatus):
        """Build the output tuple for a VM client: (name, 'VM'[, status])."""
        info = (host, 'VM')
        if showstatus:
            info += (self.getStatus(host),)
        return info

    def run(self, params, args):
        """List each frontend (by public FQDN) with its client nodes."""
        (showstatus, ) = self.fillParams( [ ('status', 'n') ])
        showstatus = self.str2bool(showstatus)

        frontends = self.getHostnames( [ 'frontend' ])

        # explicit args must all name frontends; no args means all of them
        if len(args) > 0:
            hosts = self.getHostnames(args)
            for host in hosts:
                if host not in frontends:
                    self.abort('host %s is not a frontend'
                        % host)
        else:
            hosts = frontends

        vm = rocks.vm.VM(self.db)

        self.beginOutput()

        for frontend in hosts:
            #
            # get the FQDN of the frontend
            #
            rows = self.db.execute("""select net.name from
                nodes n, networks net, subnets s where
                s.name = 'public' and s.id = net.subnet
                and n.name = '%s' and n.id = net.node"""
                % (frontend))

            if rows == 1:
                fqdn, = self.db.fetchone()
            else:
                # no public interface on record; fall back to the plain name
                fqdn = frontend

            if vm.isVM(frontend):
                info = ('', 'VM')
                if showstatus:
                    info += (self.getStatus(frontend),)
                self.addOutput(fqdn, info)

                #
                # all client nodes of this VM frontend have
                # the same vlan id as this frontend
                #
                rows = self.db.execute("""select
                    net.vlanid from
                    networks net, nodes n, subnets s where
                    n.name = '%s' and net.node = n.id and
                    s.name = 'private' and
                    s.id = net.subnet""" % frontend)

                if rows > 0:
                    vlanid, = self.db.fetchone()
                else:
                    self.abort('could not find Vlan Id ' +
                        'for frontend %s' % frontend)

                # every VM node on the same vlan is a client of this frontend
                rows = self.db.execute("""select n.name from
                    networks net, nodes n where
                    net.vlanid = %s and net.node = n.id
                    """ % vlanid)

                for client, in self.db.fetchall():
                    if client != frontend and \
                        vm.isVM(client):
                        info = self.getClientInfo(
                            client, showstatus)
                        self.addOutput('', info)
            else:
                # physical frontends never have a VM status column value
                info = ('', 'physical')
                if showstatus:
                    info += (None,)
                self.addOutput(fqdn, info)

                #
                # a physical frontend. go get all the physical
                # client nodes
                #
                clients = self.getHostnames()
                for client in clients:
                    if client not in frontends and \
                        not vm.isVM(client):
                        info = (client, 'physical')
                        if showstatus:
                            info += (None,)
                        self.addOutput('', info)

        header = [ 'frontend', 'client nodes', 'type' ]
        if showstatus:
            header.append('status')
        self.endOutput(header, trimOwner = 0)
|
from PyQt4 import QtCore
from PyQt4.Qt import *
class TreeController(QTreeWidget):
    """Left-hand navigation tree: fixed-width widget with one entry per
    application section, forwarding clicks to the supplied controller."""

    def __init__(self, controller):
        super(TreeController, self).__init__()
        self.__initTree()
        self.__addBranches()
        self.__initTreeSlots(controller)

    def __initTree(self):
        """Apply the fixed sizing constraints and the header caption."""
        self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Ignored)
        self.setMinimumSize(QSize(150, 0))
        self.setMaximumSize(QSize(280, 16777215))
        self.headerItem().setText(0, "Navigation")
        self.setSortingEnabled(False)
        self.setSortingEnabled(self.isSortingEnabled())

    def __addBranches(self):
        """Create one top-level item per navigation section."""
        for label in ("Import file", "Options", "About"):
            branch = QTreeWidgetItem(self)
            branch.setText(0, label)
            self.addTopLevelItem(branch)

    def __initTreeSlots(self, controller):
        """Route item clicks to controller.update(item, column)."""
        QtCore.QObject.connect(
            self, QtCore.SIGNAL("itemClicked(QTreeWidgetItem*,int)"),
            lambda item, column: controller.update(item, column))
|
def house_numbers_sum(inp):
    """Sum the numbers in `inp` up to (but not including) the first 0.

    An empty iterable, or one starting with 0, sums to 0.
    """
    # `total` instead of the original `sum`, which shadowed the builtin.
    total = 0
    for number in inp:
        if number == 0:
            break  # 0 terminates the sequence; ignore everything after it
        total += number
    return total
from django.shortcuts import render
from chemposer.forms import *
from django.http import *
import os
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
from subprocess import call
def home(request):
    """Render the landing page."""
    return render(request, 'home.html', {})
def upload_xyz(request, uid=None):
    """Accept an uploaded .xyz molecule file, run the external `chemposer`
    converter on it, and render the resulting .sdf.

    GET renders the empty page; POST processes the upload.
    """
    context = {}
    if request.method == 'GET':
        print "Get request to upload_xyz"
        #context['name'] = '/static/xyz/tmp/C20.sdf'
        return render(request, 'renderMolecule.html', context)
    print request.FILES
    filename = request.FILES['xyzfile'].name
    data = request.FILES['xyzfile']
    # Save the upload under MEDIA_ROOT/tmp/.
    path = default_storage.save('tmp/'+filename, ContentFile(data.read()))
    # NOTE(review): tmp_file is computed but never used; the converter is
    # instead pointed at ./chemposer/static/xyz/tmp/ — confirm both refer
    # to the same location.
    tmp_file = os.path.join(settings.MEDIA_ROOT, path)
    ex = './chemposer/chemposer'
    # NOTE(review): `filename` comes straight from the client; consider
    # sanitizing before building filesystem paths / subprocess arguments.
    fname = './chemposer/static/xyz/tmp/'+filename
    call([ex,fname])
    # The converter writes <basename>.sdf next to the input file.
    arr = filename.split('.')
    context['name'] = '/static/xyz/tmp/'+ arr[0] + '.sdf'
    return render(request, 'renderMolecule.html', context)
|
from django.db import models
from django.db.models import Max
# Create your models here.
class ProjectCode(models.Model):
    # Lookup table of project codes (unmanaged: the table is owned externally).
    pcode = models.CharField(db_column='PCODE', primary_key=True, max_length=4)  # Field name made lowercase.
    pname = models.CharField(db_column='PNAME', max_length=100)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'project_code'
class TodoList(models.Model):
    # One row per todo item (unmanaged: the table is owned externally).
    no = models.AutoField(db_column='NO', primary_key=True)  # Field name made lowercase.
    pcode = models.CharField(db_column='PCODE', max_length=4)  # Field name made lowercase.
    user_id = models.CharField(db_column='USER_ID', max_length=50, blank=True, null=True)  # Field name made lowercase.
    title = models.CharField(db_column='TITLE', max_length=200, blank=True, null=True)  # Field name made lowercase.
    content = models.CharField(db_column='CONTENT', max_length=1000, blank=True, null=True)  # Field name made lowercase.
    is_complete = models.IntegerField(db_column='IS_COMPLETE', blank=True, null=True)  # Field name made lowercase.
    priority = models.IntegerField(db_column='PRIORITY', blank=True, null=True)  # Field name made lowercase.
    end_date = models.DateField(db_column='END_DATE', blank=True, null=True)  # Field name made lowercase.

    def todo_save(self):
        """Save a new item: not complete, next priority, fixed pcode/user.

        NOTE(review): the max priority is read via aggregate Max but then
        incremented from `latest('priority')` — these can disagree; confirm
        which source is intended.  `pcode = 1` assigns an int to a CharField
        — presumably meant '1'; verify against the table.
        """
        self.is_complete = 0
        if TodoList.objects.all().aggregate(Max('priority'))['priority__max'] is None : self.priority = 1
        else : self.priority = int(TodoList.objects.latest('priority').priority) + 1
        self.pcode = 1
        self.user_id = 'guest'
        self.save()

    def todo_update_is_complete(self, complete):
        """Persist a new completion flag for this item."""
        self.is_complete = complete
        self.save()

    class Meta:
        managed = False
        db_table = 'todo_list'
import os
from objects.constants import Constants
def setup_path():
    """
    Setup the initial path

    Ensures RESULT_PATH exists, then creates a fresh SAVING_FOLDER_PATH,
    moving any pre-existing one aside as 'name (N)' first.
    :return:
    """
    if not os.path.exists(Constants.RESULT_PATH):
        os.mkdir(Constants.RESULT_PATH)
    path = Constants.SAVING_FOLDER_PATH
    if os.path.exists(path):
        # keep the old results by renaming rather than overwriting
        _rename_path(path)
    os.mkdir(path)
def _rename_path(path):
path_exist = True
new_path = path
i = 0
while path_exist:
i += 1
new_path = path + ' ({})'.format(i)
path_exist = os.path.exists(new_path)
os.rename(path, new_path)
|
# -*- coding: utf-8 -*-
import re
import logging
import json
import sys
from model.laboralinsertion.languages import LanguageDAO
class Filter:
    """Base class for inscription filters.

    Subclasses implement _filter(con, inscriptions) and are instantiated
    from serialized maps via fromMapList().
    """

    @classmethod
    def fromMapList(cls, ls):
        """Instantiate filters from a list of {'filter': name, 'data': map}.

        Unknown names and maps that fail to load are silently skipped.
        """
        result = []
        for f in ls:
            for sub in cls.__subclasses__():
                if f['filter'] == sub.__name__:
                    o = sub._fromMap(f['data'])
                    if o is not None:
                        result.append(o)
        return result

    @classmethod
    def _fromMap(cls, m):
        """Build an instance whose attributes come straight from map `m`."""
        try:
            c = cls()
            c.__dict__ = m
            return c
        except Exception as e:
            logging.exception(e)
            return None

    @staticmethod
    def _groupFilters(filters=None):
        """Group filters by type (their class name).

        The default used to be a shared mutable list (``filters=[]``);
        ``None`` avoids cross-call aliasing while staying call-compatible.
        """
        groups = {}
        for f in (filters or []):
            if f.__class__.__name__ not in groups:
                groups[f.__class__.__name__] = []
            groups[f.__class__.__name__].append(f)
        return groups

    @staticmethod
    def apply(con, ls, filters=None):
        """Apply the given filters to a list of inscriptions.

        Filters of the same type are OR-ed (union of their results);
        different types are AND-ed (intersection).  Returns ``None`` when
        no filters are supplied (original behavior, preserved).
        """
        groups = Filter._groupFilters(filters or [])
        result = None
        for k in groups.keys():
            s = set()
            for f in groups[k]:
                lss = f.filter(con, ls)
                s = s.union(lss)
            if result is None:
                result = s
            else:
                result = result.intersection(s)
        return result

    def filter(self, con, ls):
        """Public entry point; delegates to the subclass _filter()."""
        return self._filter(con, ls)
class FInscriptionDate(Filter):
    """Keep inscriptions created inside the window [ffrom, to]."""

    def __init__(self):
        self.ffrom = None
        self.to = None

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if self.ffrom <= ins.created <= self.to]
class FDegree(Filter):
    """Keep inscriptions for a given degree."""

    def __init__(self):
        self.degree = 'Lic. En Economía'

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.degree == self.degree]
class FOffer(Filter):
    """Keep inscriptions whose work type matches the requested offer."""

    def __init__(self):
        self.offer = ''

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.workType == self.offer]
class FWorkExperience(Filter):
    """Keep inscriptions by declared work experience (True/False)."""

    def __init__(self):
        self.workExperience = True

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.workExperience == self.workExperience]
class FGenre(Filter):
    """Keep inscriptions whose user's genre matches."""

    def __init__(self):
        self.genre = 'Masculino'

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.getUser(con).genre == self.genre]
class FAge(Filter):
    """Keep inscriptions whose user's age lies in [beginAge, endAge]."""

    def __init__(self):
        self.beginAge = 0
        self.endAge = 0

    def _filter(self, con, inscriptions):
        # Chained comparison computes the age once per inscription
        # (the original called _age twice per item).
        return [i for i in inscriptions
                if self.beginAge <= FAge._age(i.getUser(con).birthdate) <= self.endAge]

    @staticmethod
    def _age(birthdate):
        """Approximate age in years; 0 when the birthdate is unknown.

        NOTE(review): `days / 365` is integer division on Python 2 and
        float division on Python 3 — confirm which the comparisons expect.
        """
        import datetime
        if (birthdate is None):
            return 0
        return (datetime.date.today() - birthdate).days / 365
class FResidence(Filter):
    """Keep inscriptions whose user currently resides in the given city."""

    def __init__(self):
        self.city = 'La Plata'

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.getUser(con).residence_city == self.city]
class FCity(Filter):
    """Keep inscriptions whose user's (home) city matches."""

    def __init__(self):
        self.city = 'La Plata'

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.getUser(con).city == self.city]
class FTravel(Filter):
    """Keep inscriptions by willingness to travel (True/False)."""

    def __init__(self):
        self.travel = True

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if ins.travel == self.travel]
class FLanguage(Filter):
    """Keep inscriptions that declare a language (optionally at a level)."""

    def __init__(self):
        self.language = "Inglés"
        self.level = None  # None matches any level

    def _filter(self, con, inscriptions):
        return [i for i in inscriptions
                if FLanguage._includeLanguages(con, self.language, self.level, i.getLanguages(con))]

    @staticmethod
    def _includeLanguages(con, language, level, languages):
        """True when any id in `languages` resolves to a matching language."""
        for lid in languages:
            l = LanguageDAO.findById(con, lid)
            # `is None` (identity) instead of the original `== None`.
            if l.name == language and (level is None or l.level == level):
                return True
        return False
class FCountCathedra(Filter):
    """Keep inscriptions with an approved-courses count in [begin, end]."""

    def __init__(self):
        self.begin = 0
        self.end = 0

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if self.begin <= ins.approved <= self.end]
class FAverageFails(Filter):
    """Keep inscriptions whose average-with-fails (average2) is in range."""

    def __init__(self):
        self.begin = 0
        self.end = 0

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if self.begin <= ins.average2 <= self.end]
class FAverage(Filter):
    """Keep inscriptions whose average (average1) is in [begin, end]."""

    def __init__(self):
        self.begin = 0
        self.end = 0

    def _filter(self, con, inscriptions):
        return [ins for ins in inscriptions if self.begin <= ins.average1 <= self.end]
class FPriority:
    # NOTE(review): unlike the other F* classes this one neither inherits
    # from Filter nor defines _filter(), so Filter.apply() cannot use it —
    # confirm whether that is intentional.
    def __init__(self):
        self.ffrom = 0  # lower bound of the priority range
        self.to = 0     # upper bound of the priority range
|
import time
import redis
class DataBase(object):
    """Thin helper around a Redis connection.

    Provides comma-separated-list semantics on top of hash fields plus a
    couple of plain-list helpers.
    """

    def __init__(self, logger, redis_host, redis_port, redis_db):
        """
        @type logger: Logger
        """
        self.logger = logger
        self.logger.info('Connecting to Redis at {0}:{1}:{2}'.format(redis_host, redis_port, redis_db))
        self.rc = redis.Redis(host=redis_host, port=redis_port, db=redis_db)

    def is_loading(self):
        """Return True while Redis is loading its dataset (or unreachable)."""
        try:
            info = self.rc.info(section='persistence')
            return bool(info['loading'])
        except Exception:
            # Was a bare `except:`; keep the best-effort behavior but do
            # not swallow SystemExit/KeyboardInterrupt.
            pass
        return True

    def _get_hfield_as_str_time(self, key, field):
        """Read a hash field holding a unix timestamp; format it as a UTC
        'YYYY-MM-DD HH:MM:SS' string (epoch when the field is missing)."""
        req_f = 0
        req_str = self.rc.hget(key, field)
        if req_str:
            req_f = float(req_str)
        return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(req_f))

    def _append_hfield(self, key, field, value):
        """Append `value` to a comma-separated hash field, skipping
        duplicates (substring match, as in the original)."""
        list_str = self.rc.hget(key, field)
        if not list_str:
            self.rc.hset(key, field, value)
        elif list_str.count(value) == 0:
            list_str += ',{0}'.format(value)
            self.rc.hset(key, field, list_str)

    def _delete_hfield_item(self, key, field, value):
        """Remove `value` from a comma-separated hash field; delete the
        field entirely when it becomes empty."""
        list_str = self.rc.hget(key, field)
        if not list_str or list_str.count(value) == 0:
            return
        items = [v for v in list_str.split(',') if v and v != value]
        if items:
            # Join directly instead of appending trailing commas and
            # slicing the last one off.
            self.rc.hset(key, field, ','.join(items))
        else:
            self.rc.hdel(key, field)

    def _get_hfield_list(self, key, field):
        """Return the hash field as a list of items ([] when missing)."""
        list_str = self.rc.hget(key, field)
        if not list_str:
            return []
        return list_str.split(',')

    def get_next_in_list(self, list_name):
        """Yield (at most) one value popped from the head of a Redis list.

        NOTE(review): this is a generator that yields at most once, so
        callers must iterate it — confirm a plain return was not intended.
        """
        value = self.rc.lpop(list_name)
        if value:
            yield value

    def list_push(self, list_name, value):
        """Push `value` onto the head of a Redis list."""
        self.rc.lpush(list_name, value)
from django.db import models
class _Repository(models.Model):
    # Abstract base adding lazy graph-data access to repository models.

    class Meta:
        abstract = True

    @property
    def graphs(self):
        """Fetch (or create) the limpyd GraphData for this repository.

        Cached on the instance after the first access; the import is local
        to avoid a circular dependency at module load time.
        """
        if not hasattr(self, '_graphs_limpyd_object'):
            from .limpyd_models import GraphData
            self._graphs_limpyd_object, created = GraphData.get_or_connect(repository_id=self.id)
        return self._graphs_limpyd_object
from gim.graphs.tasks import *
|
# Generated by Django 2.2.5 on 2019-09-29 18:54
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the AskAQuestion model that
    # migration 0005 introduced.

    dependencies = [
        ('accounts', '0005_askaquestion'),
    ]

    operations = [
        migrations.DeleteModel(
            name='AskAQuestion',
        ),
    ]
|
'''
Defines some basic example datasets that are used in testing.
'''
import os
import numpy as np
import mne
from mne import minimum_norm as mn
from . import load
from ._colorspaces import eeg_info
from ._data_obj import Dataset, Factor, Var, NDVar, Sensor, UTS
from ._design import permute
def get_loftus_masson_1994():
    "Dataset used for illustration purposes by Loftus and Masson (1994)"
    n_recalled = [10, 6, 11, 22, 16, 15, 1, 12, 9, 8,
                  13, 8, 14, 23, 18, 17, 1, 15, 12, 9,
                  13, 8, 14, 25, 20, 17, 4, 17, 12, 12]
    # 10 subjects x 3 exposure conditions, in subject-major order.
    ds = Dataset()
    ds['subject'] = Factor(range(1, 11), tile=3, random=True)
    ds['exposure'] = Var([1, 2, 5], repeat=10)
    ds['n_recalled'] = Var(n_recalled)
    return ds
def get_mne_evoked(ndvar=False):
    """MNE-Python Evoked

    Parameters
    ----------
    ndvar : bool
        Convert to NDVar (default False).
    """
    evoked_path = os.path.join(mne.datasets.sample.data_path(),
                               'MEG', 'sample', 'sample_audvis-ave.fif')
    evoked = mne.Evoked(evoked_path, "Left Auditory")
    if ndvar:
        return load.fiff.evoked_ndvar(evoked)
    return evoked
def get_mne_stc(ndvar=False):
    """MNE-Python SourceEstimate

    Parameters
    ----------
    ndvar : bool
        Convert to NDVar (default False; src="ico-4" is false, but it works as
        long as the source space is not accessed).
    """
    data_path = mne.datasets.testing.data_path()
    stc_path = os.path.join(data_path, 'MEG', 'sample',
                            'fsaverage_audvis_trunc-meg')
    if ndvar:
        subjects_dir = os.path.join(data_path, 'subjects')
        return load.fiff.stc_ndvar(stc_path, 'sample', 'ico-4', subjects_dir)
    return mne.read_source_estimate(stc_path, 'sample')
def _mne_source_space(subject, src_tag, subjects_dir):
    """Load mne source space

    `src_tag` is e.g. 'ico-4' or 'vol-10'; its first three characters pick
    the setup strategy when no cached -src.fif file exists yet.
    """
    src_file = os.path.join(subjects_dir, subject, 'bem',
                            '%s-%s-src.fif' % (subject, src_tag))

    src = src_tag[:3]
    if os.path.exists(src_file):
        # cached source space: read and return it
        return mne.read_source_spaces(src_file, False)
    elif src == 'ico':
        return mne.setup_source_space(subject, src_file, 'ico4',
                                      subjects_dir=subjects_dir, add_dist=True)
    elif src == 'vol':
        mri_file = os.path.join(subjects_dir, subject, 'mri', 'orig.mgz')
        bem_file = os.path.join(subjects_dir, subject, 'bem',
                                'sample-5120-5120-5120-bem-sol.fif')
        return mne.setup_volume_source_space(subject, src_file, pos=10.,
                                             mri=mri_file, bem=bem_file,
                                             mindist=0., exclude=0.,
                                             subjects_dir=subjects_dir)
    else:
        raise ValueError("src_tag=%s" % repr(src_tag))
def get_mne_sample(tmin=-0.1, tmax=0.4, baseline=(None, 0), sns=False,
                   src=None, sub="modality=='A'", fixed=False, snr=2,
                   method='dSPM', rm=False, stc=False):
    """Load events and epochs from the MNE sample data

    Parameters
    ----------
    tmin, tmax baseline :
        Epoch parameters.
    sns : bool
        Add sensor space data as NDVar as ``ds['sns']`` (default ``False``).
    src : False | 'ico' | 'vol'
        Add source space data as NDVar as ``ds['src']`` (default ``False``).
    sub : str | list | None
        Expresion for subset of events to load. For a very small dataset use e.g.
        ``[0,1]``.
    fixed : bool
        MNE inverse parameter.
    snr : scalar
        MNE inverse parameter.
    method : str
        MNE inverse parameter.
    rm : bool
        Pretend to be a repeated measures dataset (adds 'subject' variable).
    stc : bool
        Add mne SourceEstimate for source space data as ``ds['stc']`` (default
        ``False``).

    Returns
    -------
    ds : Dataset
        Dataset with epochs from the MNE sample dataset in ``ds['epochs']``.
    """
    data_dir = mne.datasets.sample.data_path()
    meg_dir = os.path.join(data_dir, 'MEG', 'sample')
    raw_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40_raw.fif')
    event_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40-eve.fif')
    subjects_dir = os.path.join(data_dir, 'subjects')
    subject = 'sample'
    label_path = os.path.join(subjects_dir, subject, 'label', '%s.label')

    # the event file is generated once from the raw data and cached
    if not os.path.exists(event_file):
        raw = mne.io.Raw(raw_file)
        events = mne.find_events(raw, stim_channel='STI 014')
        mne.write_events(event_file, events)
    ds = load.fiff.events(raw_file, events=event_file)
    ds.index()
    ds.info['subjects_dir'] = subjects_dir
    ds.info['subject'] = subject
    ds.info['label'] = label_path

    # get the trigger variable form the dataset for eaier access
    trigger = ds['trigger']
    # use trigger to add various labels to the dataset
    ds['condition'] = Factor(trigger, labels={1:'LA', 2:'RA', 3:'LV', 4:'RV',
                                              5:'smiley', 32:'button'})
    ds['side'] = Factor(trigger, labels={1: 'L', 2:'R', 3:'L', 4:'R',
                                         5:'None', 32:'None'})
    ds['modality'] = Factor(trigger, labels={1: 'A', 2:'A', 3:'V', 4:'V',
                                             5:'None', 32:'None'})

    if rm:
        # fake a repeated-measures design by enumerating cells as subjects
        ds = ds.sub('trigger < 5')
        ds = ds.equalize_counts('side % modality')
        subject_f = ds.eval('side % modality').enumerate_cells()
        ds['subject'] = subject_f.as_factor('s%r', random=True)

    if sub:
        ds = ds.sub(sub)

    load.fiff.add_mne_epochs(ds, tmin, tmax, baseline)
    if sns:
        ds['sns'] = load.fiff.epochs_ndvar(ds['epochs'], data='mag',
                                           sysname='neuromag306mag')

    if not src:
        return ds
    elif src == 'ico':
        src_tag = 'ico-4'
    elif src == 'vol':
        src_tag = 'vol-10'
    else:
        raise ValueError("src = %r" % src)

    epochs = ds['epochs']
    # get inverse operator (built and cached on first use)
    inv_file = os.path.join(meg_dir, 'sample_eelbrain_%s-inv.fif' % src_tag)
    if os.path.exists(inv_file):
        inv = mne.minimum_norm.read_inverse_operator(inv_file)
    else:
        fwd_file = os.path.join(meg_dir, 'sample-%s-fwd.fif' % src_tag)
        bem_dir = os.path.join(subjects_dir, subject, 'bem')
        bem_file = os.path.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif')
        trans_file = os.path.join(meg_dir, 'sample_audvis_raw-trans.fif')

        if os.path.exists(fwd_file):
            fwd = mne.read_forward_solution(fwd_file)
        else:
            src_ = _mne_source_space(subject, src_tag, subjects_dir)
            fwd = mne.make_forward_solution(epochs.info, trans_file, src_,
                                            bem_file, fwd_file)

        cov_file = os.path.join(meg_dir, 'sample_audvis-cov.fif')
        cov = mne.read_cov(cov_file)
        inv = mn.make_inverse_operator(epochs.info, fwd, cov, None, None,
                                       fixed)
        mne.minimum_norm.write_inverse_operator(inv_file, inv)
    ds.info['inv'] = inv

    # apply the inverse: lambda2 = 1 / snr**2 (standard MNE convention)
    stcs = mn.apply_inverse_epochs(epochs, inv, 1. / (snr ** 2), method)
    ds['src'] = load.fiff.stc_ndvar(stcs, subject, src_tag, subjects_dir,
                                    method, fixed)
    if stc:
        ds['stc'] = stcs

    return ds
def get_uts(utsnd=False, seed=0):
    """Create a sample Dataset with 60 cases and random data.

    Parameters
    ----------
    utsnd : bool
        Add a sensor by time NDVar (called 'utsnd').
    seed : None | int
        If not None, call ``numpy.random.seed(seed)`` to ensure replicability.

    Returns
    -------
    ds : Dataset
        Datasets with data from random distributions.
    """
    if seed is not None:
        np.random.seed(seed)

    ds = Dataset()

    # add a model: 2x2 design (A x B) with 15 cases per cell
    ds['A'] = Factor(['a0', 'a1'], repeat=30)
    ds['B'] = Factor(['b0', 'b1'], repeat=15, tile=2)
    # NOTE: uses Python 2 ``xrange`` (this module targets Python 2)
    ds['rm'] = Factor(('R%.2i' % i for i in xrange(15)), tile=4, random=True)
    ds['ind'] = Factor(('R%.2i' % i for i in xrange(60)), random=True)

    # add dependent variables; rm_var is a per-"subject" offset shared
    # across the 4 cells
    rm_var = np.tile(np.random.normal(size=15), 4)
    y = np.hstack((np.random.normal(size=45), np.random.normal(1, size=15)))
    y += rm_var
    ds['Y'] = Var(y)
    ybin = np.random.randint(0, 2, size=60)
    ds['YBin'] = Factor(ybin, labels={0:'c1', 1:'c2'})
    ycat = np.random.randint(0, 3, size=60)
    ds['YCat'] = Factor(ycat, labels={0:'c1', 1:'c2', 2:'c3'})

    # add a uts NDVar with Hanning-window bumps as known effects
    time = UTS(-.2, .01, 100)
    y = np.random.normal(0, .5, (60, len(time)))
    y += rm_var[:, None]
    y[:15, 20:60] += np.hanning(40) * 1  # interaction
    y[:30, 50:80] += np.hanning(30) * 1  # main effect
    ds['uts'] = NDVar(y, dims=('case', time))

    # add sensor NDVar (5 synthetic sensors on unit axes)
    if utsnd:
        locs = np.array([[-1.0, 0.0, 0.0],
                         [ 0.0, 1.0, 0.0],
                         [ 1.0, 0.0, 0.0],
                         [ 0.0, -1.0, 0.0],
                         [ 0.0, 0.0, 1.0]])
        sensor = Sensor(locs, sysname='test_sens')
        sensor.set_connectivity(connect_dist=1.75)

        y = np.random.normal(0, 1, (60, 5, len(time)))
        y += rm_var[:, None, None]
        # add interaction
        win = np.hanning(50)
        y[:15, 0, 50:] += win * 3
        y[:15, 1, 50:] += win * 2
        y[:15, 4, 50:] += win
        # add main effect
        y[30:, 2, 25:75] += win * 2.5
        y[30:, 3, 25:75] += win * 1.5
        y[30:, 4, 25:75] += win
        # add spectral effect: windowed sinusoid with a random phase shift
        freq = 15.0  # >= 2
        x = np.sin(time.times * freq * 2 * np.pi)
        for i in xrange(30):
            shift = np.random.randint(0, 100 / freq)
            y[i, 2, 25:75] += 1.1 * win * x[shift: 50+shift]
            y[i, 3, 25:75] += 1.5 * win * x[shift: 50+shift]
            y[i, 4, 25:75] += 0.5 * win * x[shift: 50+shift]

        dims = ('case', sensor, time)
        ds['utsnd'] = NDVar(y, dims, eeg_info())

    return ds
def get_uv(seed=0):
    """Dataset with random univariate data

    Builds a fully crossed A(2) x B(2) x rm(20) design (80 cases) with one
    integer and two float dependent variables carrying known effects.

    Parameters
    ----------
    seed : None | int
        Seed the numpy random state before generating random data.
    """
    if seed is not None:
        np.random.seed(seed)

    # NOTE: uses Python 2 ``xrange`` (this module targets Python 2)
    ds = permute([('A', ('a1', 'a2')),
                  ('B', ('b1', 'b2')),
                  ('rm', ['s%03i' % i for i in xrange(20)])])
    ds['rm'].random = True
    ds['intvar'] = Var(np.random.randint(5, 15, 80))
    ds['intvar'][:20] += 3  # effect in the first cell
    ds['fltvar'] = Var(np.random.normal(0, 1, 80))
    ds['fltvar'][:40] += 1.  # main effect of A
    ds['fltvar2'] = Var(np.random.normal(0, 1, 80))
    # second half of fltvar2 correlates with fltvar
    ds['fltvar2'][40:] += ds['fltvar'][40:].x
    ds['index'] = Var(np.repeat([True, False], 40))
    return ds
|
from fastapi import Depends, APIRouter, Path, Query, Body
from fastapi.exceptions import HTTPException
from fastapi.security import OAuth2PasswordRequestForm
from starlette.responses import JSONResponse
from app.schemas import user, token_sa
from app.response.users import UserCreateResponse, UserCurrentResponse
from app.services.authenticate import authenticate_user, create_access_token
from app.api.operation.users import create_user, get_user_by_username, get_user_info, get_user_by_id
from app.api.operation.question import get_question
from app.models.questions import QuestionChoices
from app.services.jwt import oauth2_scheme
from app.services.smtp import async_send_message
router = APIRouter()
@router.post("/create/", response_model=UserCreateResponse, tags=['users'])
async def create_users(iuser: user.UserCreate):
    """Register a new user; the username must not already exist."""
    existing = await get_user_by_username(iuser.username)
    if existing:
        raise HTTPException(
            status_code=400,
            detail="username has been used !!"
        )
    return await create_user(iuser)
@router.post("/login/",response_model=UserCreateResponse, tags=['users'])
async def user_login(*, username: str = Body(...), password: str = Body(...)):
    """Authenticate with username/password and return a bearer token.

    Raises 401 when the credentials do not match.
    """
    # Named `account` (not `user`) so the imported `app.schemas.user`
    # module is not shadowed inside this handler.
    account = await authenticate_user(username, password)
    if not account:
        raise HTTPException(
            status_code=401,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    # authenticate_user returns a sequence; element 0 is the user row.
    access_token = create_access_token(
        data={"email": account[0].email, "username": account[0].username})
    # "exprie" key kept as-is for client compatibility (known typo).
    return {"code": 0, "msg": "success", "data": {"Oauth-Token": access_token, "exprie": 86400*7}}
@router.get("/current/", response_model=UserCreateResponse, tags=['users'])
async def get_info(user: user.User = Depends(get_user_info)):
    """Return profile fields for the authenticated user."""
    payload = {
        "id": user.id,
        "avatar": user.avatar,
        "username": user.username,
        "nickname": user.nickname,
    }
    return JSONResponse(content={"code": 0, "msg": "success", "data": payload})
@router.get("/send/", tags=['users'])
async def send_email(user: user.UserActivated = Depends(get_user_info)):
    """Queue an activation e-mail for the current user unless the account
    is already active."""
    if user.is_active:
        return JSONResponse(content="账户已激活!")
    # Dispatched asynchronously through the task queue (.delay).
    async_send_message.delay(id=user.id, email=user.email)
    return JSONResponse(content={"msg": "邮件已发送,请尽快激活您的账户", "code": 0})
@router.get("/activated/{id}", tags=['users'])
async def activate(id: int = Path(..., gt=0, title="账户id"), q: str = Query(..., alias="code", len=6)):
    """Mark the account with the given id as active.

    NOTE(review): the `code` query parameter (`q`) is accepted but never
    checked, so anyone can activate any account — confirm whether code
    verification is missing.  Also, ``len=6`` is not a recognized Query
    constraint (min_length/max_length presumably intended).
    """
    user = await get_user_by_id(id=id)
    await user.update(is_active=1)
    return JSONResponse(content={"msg": "成功激活", "code": 0})
|
#!/usr/bin/env python
import sys
from os import path
from pkg_resources import parse_requirements
from setuptools import setup, find_packages
name = 'socker'  # PyPI name
package_name = name.replace('-', '_')  # Python module name
package_path = 'src'  # Where does the package live?

# VERSION file sits next to this setup.py
version_file = path.join(path.dirname(__file__), 'VERSION')
here = path.dirname(path.abspath(__file__))

# Add src dir to path
sys.path.append(package_path)

# Get the long description from the relevant file
long_description = None
try:
    with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
        long_description = f.read()
except Exception:
    # README is optional (e.g. building from a stripped checkout)
    pass
def get_version():
    """
    Get the version from version_file

    Lines starting with '#' are ignored; surrounding whitespace stripped.
    """
    # Context manager closes the file handle deterministically (the
    # original left the open() handle to the garbage collector).
    with open(version_file) as f:
        return ''.join(
            line for line in f
            if not line.startswith('#')).strip()
def get_requirements(filename):
    """Parse a pip requirements file into a list of requirement strings."""
    # Close the file explicitly (the original leaked the handle).
    with open(filename) as f:
        return [str(r) for r in parse_requirements(f.read())]
# Standard setuptools metadata; the console_scripts entry exposes the
# `socker` CLI, and install_requires is read from requirements.txt.
setup(
    name=name,
    version=get_version(),
    author='Joar Wandborg',
    author_email='joar@5monkeys.se',
    url='https://github.com/5monkeys/socker',
    license='MIT',
    description='redis pubsub websocket proxy',
    long_description=long_description,
    package_dir={'': package_path},
    packages=find_packages(package_path),
    entry_points={
        'console_scripts': [
            'socker = socker.cli.command:Interface'
        ]
    },
    install_requires=get_requirements('requirements.txt')
)
|
# as_image.py written by Duncan Murray 6/7/2013 (C) Acute Software
# Acute Software library of functions for image manipulation
import PIL
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageFilter
def TEST():
    # todo - make a jpg from desktop or something, use that as a test
    # Smoke test: fetch an image, then resize / contour / caption it,
    # returning the list of output files produced.
    print(" \n --- Testing Image functions--- ")
    print(" ------------------------------ ")
    fileName = GetRandomImage("sunset", 1, "")
    tempFileList = ["resized_" + fileName, "contour_" + fileName, "text_" + fileName]
    resize(fileName, 600, tempFileList[0])
    filterContour(fileName, tempFileList[1])
    addTextToImage(fileName, "This is a test", tempFileList[2])
    return tempFileList
def GetRandomImage(searchString, searchResultPosition, baseFileName):
    """Search the web for `searchString` images, save result number
    `searchResultPosition` under `baseFileName`, and return its file name.

    Currently a stub that always returns a fixed local file.
    """
    return 'sunset.jpg'  # todo - implement this
def resize(fname, basewidth, opFilename):
    """Scale the image in `fname` to `basewidth` pixels wide (300 when 0
    is passed), preserving aspect ratio, and save it to `opFilename`."""
    if basewidth == 0:
        basewidth = 300
    print("Resizing ", fname, " to ", basewidth, " pixels wide")
    img = Image.open(fname)
    scale = basewidth / float(img.size[0])
    new_height = int(float(img.size[1]) * scale)
    img = img.resize((basewidth, new_height), PIL.Image.ANTIALIAS)
    img.save(opFilename)
def addTextToImage(fname, txt, opFilename):
    """Draw *txt* in black at the top-left corner of the image in *fname*
    and save the result to *opFilename*."""
    # NOTE(review): hard-coded absolute font path makes this machine-specific;
    # consider taking the font path as a parameter or falling back to
    # ImageFont.load_default() when the .pil font is missing.
    ft = ImageFont.load("T://user//dev//src//python//aspytk//timR24.pil")  # Font - google name and download
    # Fix: removed `wh = ft.getsize(txt)` — it was computed and never used
    # (it only mattered for the commented-out centered placement below).
    print("Adding text '", txt, "' to", fname)
    im = Image.open(fname)
    draw = ImageDraw.Draw(im)
    # Alternative placements kept for reference:
    # centered: draw.text((im.size[0]/2 - wh[0]/2, im.size[1]/2 + 20), txt, fill=(255, 255, 0), font=ft)
    # top-left, yellow: draw.text((0, 0), txt, fill=(255, 255, 0), font=ft)
    draw.text((0, 0), txt, fill=(0, 0, 0), font=ft)
    del draw
    im.save(opFilename)
def addCrossHairToImage(fname, opFilename):
    """Overlay two white corner-to-corner diagonal lines on the image in
    *fname* and save it to *opFilename*."""
    image = Image.open(fname)
    pen = ImageDraw.Draw(image)
    width, height = image.size
    pen.line((0, 0, width, height), fill=(255, 255, 255))   # top-left -> bottom-right
    pen.line((0, height, width, 0), fill=(255, 255, 255))   # bottom-left -> top-right
    del pen
    image.save(opFilename)
def filterContour(imageFile, opFile):
    """Apply PIL's CONTOUR filter to *imageFile* and save the result to *opFile*."""
    print("Contouring ", imageFile, " to ", opFile)
    Image.open(imageFile).filter(ImageFilter.CONTOUR).save(opFile)
def DetectFace(fname, opFile):
    # Detect faces with an OpenCV Haar cascade and print each detection.
    # NOTE(review): this references the legacy OpenCV `cv` module, which is
    # never imported in this file — calling this function raises NameError.
    # Port to cv2.CascadeClassifier or add the missing import.
    # NOTE(review): HaarDetectObjects expects an image object, but `fname`
    # looks like a filename here — confirm before use. `opFile` is unused.
    storage = cv.CreateMemStorage()
    haar=cv.LoadHaarClassifierCascade('haarcascade_frontalface_default.xml')
    detected = cv.HaarDetectObjects(fname, haar, storage, 1.2, 2,cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
    if detected:
        for face in detected:
            print (face)
if __name__ == '__main__':
    # Run the module's self-test when executed as a script.
    TEST()
|
from django.contrib import admin
from django.urls import path
import exchangeapp.views
# from django.conf import settings
# from django.conf.urls.static import static
# Route table: Django admin plus the home page and one page per currency
# (USD, JPY, CAD), all served by exchangeapp.views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', exchangeapp.views.home, name="home"),
    path('usd/', exchangeapp.views.usd, name="usd"),
    path('jpy/', exchangeapp.views.jpy, name="jpy"),
    path('can/', exchangeapp.views.can, name="can"),
]
|
# -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# Name: findCounter.py
# Author: wdf
# Date: 2019/7/17
# IDE: PyCharm
# Parameters:
# @param:
# @param:
# Return:
#
# Description:
# Usage:
#-------------------------------------------------------------------------------
import cv2
import numpy as np
def split_rec(arr):
    """
    Split the detected bounding boxes into table cells.

    :param arr: list of 4-element bounding boxes (mutated in place)
    :return: the same list, sorted by x and with neighbouring boxes adjusted
    """
    # Sort descending by the first coordinate, then reverse — net effect is
    # ascending order by x (note: tie order differs from a direct ascending
    # sort because Python's sort is stable).
    print(arr)
    print("*"*50)
    arr.sort(key=lambda x: x[0],reverse=True)
    # Reverse the array
    arr.reverse()
    for i in range(len(arr) - 1):
        # Boxes sharing the same x inherit fields from the previous box.
        # NOTE(review): indices 1/2/3 mix the y/width/height roles here —
        # confirm the intended geometry against the caller (get_rec).
        if arr[i+1][0] == arr[i][0]:
            arr[i+1][3] = arr[i][1]
            arr[i + 1][2] = arr[i][2]
        if arr[i+1][0] > arr[i][0]:
            arr[i + 1][2] = arr[i][0]
        print(arr[i])
    return arr
def get_points(img_transverse, img_vertical):
    """
    Return the intersections of the horizontal and vertical line images.

    :param img_transverse: binary image containing only horizontal lines
    :param img_vertical: binary image containing only vertical lines
    :return: bitwise AND of the two — white pixels mark the grid crossings
    """
    return cv2.bitwise_and(img_transverse, img_vertical)
def get_vertical_line(binary):
    """Extract the vertical table lines from a binary (white-on-black) image
    by eroding then dilating with a tall, 1-pixel-wide rectangular kernel."""
    rows, cols = binary.shape
    scale = 20  # larger value -> more lines detected
    # A (1, rows//scale) kernel keeps only strokes that are tall and thin.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, rows // scale))
    survivors = cv2.erode(binary, kernel, iterations=1)
    # cv2.imshow("Dilated row", ...) kept out of the hot path; dilate restores width
    return cv2.dilate(survivors, kernel, iterations=2)
def get_transverse_line(binary):
    """Extract the horizontal table lines from a binary (white-on-black) image
    by eroding then dilating with a wide, 1-pixel-tall rectangular kernel."""
    rows, cols = binary.shape
    scale = 20  # larger value -> more lines detected
    # getStructuringElement returns a morphology kernel of the given shape;
    # (cols//scale, 1) keeps only strokes that are long and flat.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (cols // scale, 1))
    survivors = cv2.erode(binary, kernel, iterations=1)
    return cv2.dilate(survivors, kernel, iterations=2)
def bin_img(image):
    """
    Binarise *image* for table-line extraction.

    :param image: BGR image (numpy.ndarray as returned by cv2.imread)
    :return: binary image with white lines/text on a black background
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # '~' inverts the grayscale first, so the adaptive threshold yields
    # white-on-black output (important for the later morphology steps).
    return cv2.adaptiveThreshold(~gray, 255,
                                 cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, -10)
def get_rec(img):
    """
    Extract table-cell bounding boxes from the intersection image.

    :param img: binary image of grid intersections
    :return: list of bounding boxes post-processed by split_rec
    """
    # NOTE(review): draws on the module-level global `img_bak` (the original
    # colour image) — this function only works inside this script's __main__.
    contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    contours_poly = [0] * len(contours)
    boundRect = [0] * len(contours)
    rois = []
    print("*"*50)
    print("contours: \n")
    # NOTE(review): range(len(contours) - 1) skips the last contour — confirm
    # whether that is intentional.
    for i in range(len(contours) - 1):
        cnt = contours[i]
        print(i,cnt)
        contours_poly[i] = cv2.approxPolyDP(curve=cnt, epsilon=1, closed=True)
        # Approximates a polygonal curve with the specified precision.
        '''
        . @param curve Input vector of a 2D point stored in std::vector or Mat
        . @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
        . between the original curve and its approximation.
        . @param closed If true, the approximated curve is closed (its first and last vertices are
        . connected). Otherwise, it is not closed.'''
        boundRect[i] = cv2.boundingRect(contours_poly[i])
        rois.append(np.array(boundRect[i]))
        # NOTE(review): the trailing commas make pt1/pt2 one-element tuples,
        # and boundingRect returns (x, y, w, h) — so pt2 below is width/height,
        # not a corner point. Verify the intended rectangle geometry.
        pt1 = (boundRect[i][0], boundRect[i][1]),
        pt2 = (boundRect[i][2], boundRect[i][3]),
        print(img.shape)
        print("pt1:",pt1)
        print("pt2:",pt2)
        img = cv2.rectangle(img_bak,
                            pt1=(boundRect[i][0], boundRect[i][1]),
                            pt2=(boundRect[i][2], boundRect[i][3]),
                            color=(0, 0, 255),
                            thickness=2,
                            lineType=1,
                            shift=0)
        cv2.imshow("contour",img)
    rois = split_rec(rois)
    return rois
if __name__ == "__main__":
    # Demo driver: binarise a table image, isolate its horizontal and
    # vertical lines, intersect them, and display each detected cell region.
    image = "./img/table-6.png"
    image1 = "./img/9.jpg"
    img_bak = cv2.imread(image)  # colour original; also used as a global by get_rec
    img = bin_img(img_bak)
    # img_transverse = erode_img(img,(1,2),40)
    # img_vertical = erode_img(img, (2,1), 40)
    # # img = img_transverse + img_vertical
    # img_transverse = dilate_img(img_transverse,(2,2),1)
    # img_vertical = dilate_img(img_vertical,(2,2),1)
    #
    # img = get_points(img_transverse,img_vertical)
    # NOTE(review): the names are swapped relative to the helpers —
    # get_vertical_line feeds `dilatedcol` and get_transverse_line `dilatedrow`.
    dilatedcol, dilatedrow = get_vertical_line(img), get_transverse_line(img)
    img = get_points(dilatedcol, dilatedrow)
    rois = get_rec(img)
    print("*"*50)
    print(rois)
    # Show each ROI; note r is indexed as [3]:[1], [2]:[0] — TODO confirm the
    # row/column roles, this matches split_rec's rearranged box fields.
    for i, r in enumerate(rois):
        cv2.imshow(str(i), img_bak[r[3]:r[1], r[2]:r[0]])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    pass
|
def retrieveFile():
    # Read "student.txt" (lines of "<name> <grade>"), print the students
    # ordered by grade, and write the same report to "studentrank.txt".
    # NOTE(review): this is Python 2 code (print statements, `except(...),e`).
    try:
        bestStudent={}
        bestStudentStr= "The students ranks are as follows \n"
        fin = open("student.txt","r")
    except(IOError),e:
        print "File not found",e
    else:
        for line in fin:
            name,grade = line.split()
            # Keyed by grade: two students with the same grade silently
            # overwrite each other — TODO confirm grades are unique.
            bestStudent[grade] = name
        fin.close()
        # Grades are strings, so this ordering is lexicographic, not numeric.
        for i in sorted(bestStudent.keys()):
            print bestStudent[i] + " Scored a " + i
            bestStudentStr += bestStudent[i]+ " scored a "+ i + "\n"
        print "\n"
        print bestStudentStr
        fout = open("studentrank.txt","w")
        fout.write(bestStudentStr)
        fout.close()
def main():
    # Entry point: delegate to the file-processing routine.
    retrieveFile()
if __name__ == "__main__":
    main()
|
import datetime
from flask_mongoengine import MongoEngine
db = MongoEngine()
class AddressBook(db.Document):
    """MongoEngine document: one contact record in the address book."""
    name = db.StringField(max_length=60)
    address = db.StringField(max_length=60)
    city = db.StringField(max_length=60)
    state = db.StringField(max_length=60)
    country = db.StringField(max_length=60)
    # NOTE(review): an IntField drops leading zeros; consider a StringField
    # if zero-padded postal codes matter.
    pincode=db.IntField()
    # Stored as a string so '+' country prefixes fit (max 13 chars).
    phone=db.StringField(max_length=13)
    email = db.StringField(max_length=60)
|
#------------------------------------------------------------------------------
# Copyright 2008-2012 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://joinup.ec.europa.eu/system/files/EN/EUPL%20v.1.1%20-%20Licence.pdf
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
|
import random
def generator(mx:int,my:int):
maze = [[0 for x in range(mx)] for y in range(my)]
dx = [0, 1, 0, -1]; dy = [-1, 0, 1, 0] # 4 directions to move in the maze
stack = [(random.randint(0, mx - 1), random.randint(0, my - 1))]
while len(stack) > 0:
(cx, cy) = stack[-1]
maze[cy][cx] = 1
# find a new cell to add
nlst = [] # list of available neighbors
for i in range(4):
nx = cx + dx[i]; ny = cy + dy[i]
if nx >= 0 and nx < mx and ny >= 0 and ny < my:
if maze[ny][nx] == 0:
# of occupied neighbors must be 1
ctr = 0
for j in range(4):
ex = nx + dx[j]; ey = ny + dy[j]
if ex >= 0 and ex < mx and ey >= 0 and ey < my:
if maze[ey][ex] == 1: ctr += 1
if ctr == 1: nlst.append(i)
# if 1 or more neighbors available then randomly select one and move
if len(nlst) > 0:
ir = nlst[random.randint(0, len(nlst) - 1)]
cx += dx[ir]; cy += dy[ir]
stack.append((cx, cy))
else: stack.pop()
mazeInvert = []
for row in range(len(maze)):
tempRow = []
for col in range(len(maze[row])):
if maze[row][col] == 0:
tempRow.append(1)
elif maze[row][col] == 1:
tempRow.append(0)
mazeInvert.append(tempRow)
# mazeToPNG(maze,imgx,imgy,mx,my,color)
finalMaze = []
finalMaze.append([1]*(len(mazeInvert[0])+2))
for row in range(len(mazeInvert)):
tempRow = [1] + mazeInvert[row] + [1]
finalMaze.append(tempRow)
finalMaze.append([1]*(len(mazeInvert[0])+2))
return finalMaze |
from __future__ import print_function
import numpy as np
import gzip
from six.moves import cPickle
#get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
class Decoded_plot(object):
    """
    Compare an autoencoder's decoded C-alpha coordinates against the original
    trajectory and plot the reconstruction-error distribution.

    Constructor parameters describe how the data set was split when the model
    was run (`start`/`sep_train`/`sep_test`/`sep_pred` are fractions of the
    trajectory) and which split the decoded file corresponds to:
    `chose` = 0 (train), 1 (test) or 2 (prediction).
    """
    def __init__(self, residue_diff=28, coor=3, batch_size=1000, start=0, sep_train=0.8,
                 sep_test=0.9, sep_pred=1, chose=1):
        if(residue_diff < 0):
            raise Exception("Invalid input: residue_diff must be greater than 0!")
        if(coor < 0):
            raise Exception("Invalid input: coor must be greater than 0!")
        if(batch_size < 0):
            raise Exception("Invalid input: batch_size must be greater than 0!")
        if(start < 0):
            raise Exception("Invalid input: start must be greater than 0!")
        if(sep_train < 0 or sep_train > 1):
            raise Exception("Invalid input: sep_train must be between 0 and 1!")
        # Fix: sep_test was the only split fraction left unvalidated.
        if(sep_test < 0 or sep_test > 1):
            raise Exception("Invalid input: sep_test must be between 0 and 1!")
        if(sep_pred < 0 or sep_pred > 1):
            raise Exception("Invalid input: sep_pred must be between 0 and 1!")
        if(chose != 0 and chose != 1 and chose != 2):
            raise Exception("Invalid input: chose must be either 0, 1, or 2!")
        # model geometry: residues per frame and coordinates per residue
        self.residue_diff = residue_diff
        self.coor = coor
        self.batch_size = batch_size
        # how data was separated into train, test or prediction set while running the code
        self.start = start
        self.sep_train = sep_train
        self.sep_test = sep_test
        self.sep_pred = sep_pred
        # which dataset was loaded? chose '0' = train, '1' = test, '2' = pred
        self.chose = chose
    # original_path='./aligned_1FME-0_coor.pkl.gz'
    # decoded_path='./decoded_test_80.out'
    def load(self, original_path=None, decoded_path=None):
        """
        Load the original (pickled, gzipped) trajectory and the decoded
        (plain-text) coordinates, reshaping the latter to
        (frames, residue_diff, coor).

        original_path : string
            - path of the coor.pkl.gz file. Should be located in ./output_data or ./input_data
        decoded_path : string
            - path of the decoded .out. Should be located in ./output_data or ./input_data
        """
        import os  # fix: `os` was used below but never imported at module level
        if(original_path == None or decoded_path == None):
            raise ValueError("Must input original_path and decoded_path as parameters.")
        if (not os.path.exists(original_path)):
            raise Exception("Path " + str(original_path) + " does not exist!")
        if (not os.path.exists(decoded_path)):
            raise Exception("Path " + str(decoded_path) + " does not exist!")
        # load original data from the gzip-compressed pickle
        print("Loading original data:")
        with gzip.open(original_path, 'rb') as f3:
            (X) = cPickle.load(f3)
        self.CA_xyz_tot = X[0:500000]  # cap at 500k frames
        print("Shape of original data", np.shape(self.CA_xyz_tot))
        # load decoded data (plain-text matrix, one flattened frame per row)
        print("Loading decoded data:")
        self.CA_xyz_tot_decoded = np.loadtxt(decoded_path)
        print("Shape of loaded data", np.shape(self.CA_xyz_tot_decoded))
        # reshape flattened rows back to (frame, residue, coordinate)
        self.CA_xyz_tot_decoded_nf = np.reshape(self.CA_xyz_tot_decoded,
                                                (self.CA_xyz_tot_decoded.shape[0], self.residue_diff, (self.coor)))
        print("Shape of decoded reshaped data:", np.shape(self.CA_xyz_tot_decoded_nf))
    def calc_orginal_decoded_value(self):
        """Undo the normalisation of the decoded data (in place) by scaling it
        with the maximum coordinate value of the corresponding original split."""
        if self.chose == 0:
            # for training data
            xyz_max = np.amax(self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.start):
                                              int(self.CA_xyz_tot.shape[0]*self.sep_train), :, :])
        if self.chose == 1:
            # for testing data
            xyz_max = np.amax(self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.sep_train):
                                              int(self.CA_xyz_tot.shape[0]*self.sep_test), :, :])
        if self.chose == 2:
            # for prediction data
            # fix: slice bounds must be ints (the original passed floats here)
            xyz_max = np.amax(self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.sep_test):
                                              int(self.CA_xyz_tot.shape[0]*self.sep_pred), :, 0:3])
        print("Maximum value in any direction in concerned data set", xyz_max)
        print("Change to original value from normalized loaded data")
        self.CA_xyz_tot_decoded_nf[:, :, :] = self.CA_xyz_tot_decoded_nf[:, :, :]*xyz_max
    def calc_diff_value(self):
        """Compute per-coordinate differences (nominal and percentage) between
        decoded and original data; stores the flattened (N, 3) result —
        columns [index, diff, diff%] — in self.diff_1."""
        # fix: the original referenced bare names (residue_diff, coor,
        # CA_xyz_tot, sep_train, sep_pred) that only exist as attributes,
        # raising NameError at runtime.
        diff = np.zeros((self.CA_xyz_tot_decoded.shape[0], self.residue_diff, self.coor))
        diff_pc = np.zeros((self.CA_xyz_tot_decoded.shape[0], self.residue_diff, self.coor))
        if self.chose == 0:
            # for training data
            diff[:, :, :] = self.CA_xyz_tot_decoded_nf[:, :, :]-self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.start)
                                                                                :int(self.CA_xyz_tot.shape[0]*self.sep_train), :, :]
            diff_pc = np.absolute(diff[:, :, :]/self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.start)
                                                                :int(self.CA_xyz_tot.shape[0]*self.sep_train), :, :]*100)
        if self.chose == 1:
            # for testing data
            diff[:, :, :] = self.CA_xyz_tot_decoded_nf[:, :, :]-self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.sep_train)
                                                                                :int(self.CA_xyz_tot.shape[0]*self.sep_test), :, :]
            diff_pc = np.absolute(diff[:, :, :]/self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.sep_train)
                                                                :int(self.CA_xyz_tot.shape[0]*self.sep_test), :, :]*100)
        if self.chose == 2:
            # for prediction data
            diff[:, :, 0:3] = self.CA_xyz_tot_decoded_nf[:, :, 0:3]-self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.sep_test)
                                                                                    :int(self.CA_xyz_tot.shape[0]*self.sep_pred), :, 0:3]
            diff_pc = np.absolute(diff[:, :, 0:3]/self.CA_xyz_tot[int(self.CA_xyz_tot.shape[0]*self.sep_test)
                                                                  :int(self.CA_xyz_tot.shape[0]*self.sep_pred), :, 0:3]*100)
        # flatten everything into parallel 1-D arrays
        count = np.arange(diff_pc.shape[0]*diff_pc.shape[1]*diff_pc.shape[2])
        self.diff_1 = np.zeros((diff_pc.shape[0]*diff_pc.shape[1]*diff_pc.shape[2], 3))
        diff = np.reshape(diff, (diff.shape[0]*diff.shape[1]*diff.shape[2]))
        diff_pc = np.reshape(diff_pc, (diff_pc.shape[0]*diff_pc.shape[1]*diff_pc.shape[2]))
        self.diff_1[:, 0] = count
        self.diff_1[:, 1] = diff
        self.diff_1[:, 2] = diff_pc
        print("Total number of coordinate data points", len(self.diff_1))
    def plot1(self):
        """Histogram (log-y) of the absolute coordinate differences in angstrom."""
        n_bin = 180  # number of histogram bins
        #[nhist, shist] = np.histogram(np.absolute(self.diff[:, :, 0:3]), 25)
        [nhist, shist] = np.histogram(self.diff_1[ :, 1], n_bin)
        print("Number of points in respective bin (%):")
        print(nhist[81:111]/float(len(self.diff_1))*100)
        print("Bin values:")
        print(shist[81:111])
        plt.semilogy(shist[1: ], (nhist/float(len(self.diff_1))*100), marker='o', linestyle='-.', color='r')
        plt.title('x = diff. in coordinate value (in angstrom)')
        plt.xlim(-4,4)
        plt.ylim(1.5,5)
        plt.show()
    def plot2(self):
        """Histogram (log-y) of the relative (%) coordinate differences."""
        n_bin = 180  # number of histogram bins
        #[nhist, shist] = np.histogram(np.absolute(self.diff[:, :, 0:3]), 25)
        [nhist, shist] = np.histogram(self.diff_1[ :, 2], n_bin)
        print("Number of points in respective bin (%):")
        print(nhist[:14]/float(len(self.diff_1))*100)
        print("Bin values:")
        print(shist[:14])
        plt.semilogy(shist[1: ], (nhist/float(len(self.diff_1))*100), marker='o', linestyle='--', color='r')
        plt.title('x = relative "%" diff. in coordinate value')
        plt.xlim(0,100)
        plt.ylim(0.08,100)
        plt.show()
|
import sys
import pandas as pd
from CEMA import *
import pyjacob
import cantera as ct
import numpy as np
import matplotlib.pyplot as plt
import time as t
import pdb
import numpy.linalg as LA
## Plot toggle
plot_EI = True
plot_EI = False
plot_eigenvalue = False
# mode = 'selectedEig'
mode = 'n_eig'
EI_mode = 'debug'
# EI_mode = 'no debug'
def build_conservative_basis():
    """Return the orthogonal projector onto the span of the conservative
    modes for the H2 Li-2003 mechanism (rows of B are the conservative modes)."""
    B = np.array([[1,0,0,0,0,0,0,0,0,0,0,0,0],
                  [0,1,0,0,0,0,0,0,0,0,0,0,0],
                  [0,0,1,0,0,0,0,0,0,0,0,0,0],
                  [0,0,0,1,0,0,0,0,0,0,0,0,0],
                  [0,0,0,0,0,0,0,0,0,1,0,0,0],
                  [0,0,0,0,0,0,0,0,0,0,1,0,0],
                  [0,0,0,0,0,0,0,0,0,0,0,1,0],
                  [0,0,0,0,0,0,0,0,0,0,0,0,1]])
    # Projector formula B^T (B B^T)^-1 B maps any vector onto span(rows of B).
    gram_inverse = LA.inv(B.dot(B.T))
    return B.T.dot(gram_inverse).dot(B)
def defect(Bc, be):
    """Return the overlap of the (normalised) vector *be* with its best
    approximation inside the conservative-mode basis projector *Bc*."""
    unit = be/LA.norm(be)  # normalise (just in case)
    approximation = (unit.T).dot(Bc)  # projection onto the conservative basis
    return unit.dot(approximation)
def find_ind(vect, low_bound, high_bound):
    """Mark each entry of *vect*: 0 when strictly inside
    (low_bound, high_bound), 1 otherwise."""
    flags = np.zeros_like(vect)
    for pos, value in enumerate(vect):
        flags[pos] = 0 if low_bound < value < high_bound else 1
    return flags
def stats(vector,keys):
    # Report the largest explosion index in *vector* and which variable
    # (from the parallel *keys* list) it belongs to.
    # NOTE(review): Python 2 print statement — this script is Python 2 code.
    max_val = np.amax(vector)
    max_idx = np.argmax(vector)
    print "Max value: ", max_val, " at position ", max_idx, " -> ", keys[max_idx]
# Gas properties
phi = 1.0
P = 101325
T = 1100
fuel_spec = 'H2'
# Simulation conditions
npoints = 1000
timestep = 1.5e-7
CEMA_interval = 1 # only divisors of npoints
N_eig = 8
N_EI = 1
#### options
first_eigenmode = True
first_ei = True
# Create gas object
gas = ct.Solution('Li_2003.cti')
## REORDER CANTERA SPECIES AS PYJAC WOULD:
specs = gas.species()[:]
N2_ind = gas.species_index('AR')
gas = ct.Solution(thermo='IdealGas', kinetics='GasKinetics',
species=specs[:N2_ind] + specs[N2_ind + 1:] + [specs[N2_ind]],
reactions=gas.reactions())
# print gas.species_name(28) # >> should give N2
EI_keys = ['']*gas.n_species
EI_keys[0] = 'T'
for i in range(1,gas.n_species):
EI_keys[i] = gas.species_name(i-1)
print gas.species_name(i)
print EI_keys
# prova manuale : H (entry 1)
## SET EQUIVALENCE RATIO TO phi, temperature and pressure
gas.set_equivalence_ratio(phi,fuel_spec,'O2:1, N2:3.76')
gas.TP = T, P
# # Create constant pressure reactor
r = ct.IdealGasConstPressureReactor(gas)
# # Create simulation PSR object
sim = ct.ReactorNet([r])
# # Initialize time and data vectors
time = 0.0
tim = np.zeros(npoints,'d')
temp = np.zeros(npoints,'d')
press = np.zeros(npoints,'d')
enth = np.zeros(npoints,'d')
# with open('xmgrace.txt','w') as file1:
file1 = open('xmgrace.txt','w')
val_ei = []
lambda_patched1 = []
lambda_patched2 = []
eigenvalues = np.zeros((N_eig,npoints))
expl_indices = np.zeros((gas.n_species,npoints))
CEM = np.zeros(npoints)
H2_cons_eig = np.empty(npoints)
track_species = []
count=0
most_aligned_eig = []
most_aligned_ei = []
alignment = np.zeros(gas.n_species)
# df=pd.read_csv('ei_max_H2.csv', sep=',',header=None)
# EI_max = df.values.ravel()
start = t.time()
Bc = build_conservative_basis()
for n in range(npoints):
time += timestep
sim.advance(time)
tim[n] = time
temp[n]= r.T
D, L, R = solve_eig_gas(gas)
# TRY1:
# conservative_pos = [1,2,3,9,10,11,12]
# non_conservative_pos = [0,4,5,6,7,8]
# conservative_mask = np.empty(len(D))
# for spec in range(len(D)):
# ei_curr = EI(D,L,R,spec)
# for j in conservative_pos:
# if ei_curr[j] > 0.9:
# conservative_mask[spec] = 1
# non_conservative_modes = np.ma.array(D,mask=conservative_mask)
# CEM[n] = np.amax(non_conservative_modes)
# TRY2:
non_conservative_modes = np.array([[1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1]])
temperature_cons = np.array([1,0,0,0,0,0,0,0,0,0,0,0,0])
temperature_cons = np.array([0,1,0,0,0,0,0,0,0,0,0,0,0])
alignment = np.zeros(len(D))
count = 0
for i in range(len(D)):
ei_curr = EI(D,L,R,i)
alignment[i] = parallelism(ei_curr,temperature_cons)
if alignment[i] > 0.99:
count += 1
# print(np.sort(alignment))
H2_cons_eig[n] = D[np.argsort(alignment)[-1]]
# print(D[np.argsort(alignment)[-1]])
# pdb.set_trace()
print count
# if n%100 == 0:
# print(time)
# for i in range(len(D)):
# if D[i] < 1 and D[i] > -1:
# print D[i]
# ei_curr = EI(D,L,R,i)
# print ei_curr
# pdb.set_trace()
# else:
# ei_curr = EI(D,L,R,i)
# print ei_curr[-4:]
# L_curr = L[:,i]
# R_curr = R[:,i]
# print('Eigenvalue {:d} => {:.3f}'.format(i+1,D[i]))
# print(L_curr)#/LA.norm(L_curr))
# print(R_curr)
# print(ei_curr)
# for mode in range(len(non_conservative_modes[:,0])):
# print(parallelism(ei_curr,non_conservative_modes[mode,:]))
# pdb.set_trace()
# for mode in range(len(non_conservative_modes[:,0])):
# print(parallelism(ei_curr,non_conservative_modes[mode,:]))
count = 0
for i in range(len(D)):
if D[i] == 0.0:
count += 1
# if D[i]<1 and D[i]> -1:
# count +=1
if count != 4:
print('This point is different')
pdb.set_trace()
eigenvalues[:,n] = D[np.argsort(D)[-N_eig:]]
# explore conservative modes EI
# if n%20 == 0:
# mask_big = find_ind(D,-1,1)
# mask_small = np.abs(mask_big-1)
# small_eig = np.ma.array(D,mask=mask_big)
# small_idx = np.ma.array(np.arange(0,len(D)),mask=mask_big)
# big_eig = np.ma.array(D,mask=mask_small)
# big_idx = np.ma.array(np.arange(0,len(D)),mask=mask_small)
# # pdb.set_trace()
# print 'Time : ', time
# for i in big_idx.compressed():
# # print(EI(D,L,R,i))
# ei = EI(D,L,R,i)
# for j in conservative_pos:
# if ei[j]>0.5:
# pdb.set_trace()
# print(ei)
# for i in small_idx.compressed():
# ei = EI(D,L,R,i)
# for j in non_conservative_pos:
# if ei[j]>0.5:
# print('conservative mode has high EI for non conservative spec')
# pdb.set_trace()
# print(D[i])
# print(ei)
# for i in range(len(D)):
# alignment[i] = parallelism(EI(D,L,R,i), EI_max)
# CE[n] = D[np.argmax(alignment)]
## MANUALLY CHANGE WHICH EIG on which to base EI calc: -6 is eig 1
# 7 6 5 4 3 2 1 0 i
# -[1 2 3 4 5 6 7 8]
switch_time = 8.249e-5
# if tt[count] < switch_time:
# max_idx = np.argsort(D)[-1]
# lambda_patched1.append(D[max_idx])
# else:
# max_idx = np.argsort(D)[-8]
# lambda_patched2.append(D[max_idx])
# WHEN LOOKING AT WHICH EI IS WHICH (DEBUG EI FOR PLOTS)
order=-1
max_idx = np.argsort(D)[order]
expl_indices[:,n] = EI(D,L,R,max_idx)
main_EI = np.argsort(expl_indices[:,n])[-N_EI:]
# manual identification of important species indices
# print main_EI
# pdb.set_trace()
track_species = np.union1d(main_EI,track_species)
# stats(expl_indices,EI_keys)
# track species
# enth[n]= r.thermo.enthalpy_mass
press[n]= r.thermo.P
end = t.time()
pdb.set_trace()
print end-start, ' seconds'
dT=np.diff(temp)
dTdt = dT/np.diff(tim)
selected_species = []
# Time series plot of temperature, maximum eigenvalue, temperature gradient
# plt.figure(figsize=(10,5))
# plt.subplot(3,1,1)
# plt.plot(tim,temp)
# plt.title('Temp')
# plt.subplot(3,1,2)
# plt.plot(tt,np.array(val)/1e6)
# plt.title('Eig')
# plt.subplot(3,1,3)
# plt.plot(np.arange(0,len(dT)),dT)
# plt.title('Temperature gradient')
# plt.show()
# plt.figure()
# plt.plot(tt*1e6,CEM/1e6)
# plt.xlim(0,100)
# plt.ylim(-0.5,0.5)
# plt.show()
track_entries = ['T', 'H', 'O', 'OH', 'HO2', 'O2']
idx_entries = [0, 4, 5, 6, 7, 2]
idx_entries = [0, 1, 2, 4]
track_entries = ['T', 'H2', 'O2', 'H']
#### EI plot ####
# if plot_EI == True:
# fig, ax = plt.subplots()
# for i in range(len(idx_entries)):
# plt.plot(tim,expl_indices[idx_entries[i],:],linestyle='--', marker='o',label=track_entries[i])
# plt.axvline(x=switch_time, ymin=0., ymax = 1, linewidth=1, color='k')
# plt.xlim((7.5e-5,9e-5))
# plt.xlabel('Residence time')
# plt.xticks([7.5e-5, 8e-5, switch_time, 8.5e-5, 9e-5])
# plt.legend()
# plt.show()
# if EI_mode == 'debug':
# track_species=map(int,track_species)
# fig, ax = plt.subplots(2,1)
# plt.subplot(2,1,1)
# for i in range(len(track_species)):
# plt.plot(tim*1e6,expl_indices[track_species[i],:],linestyle='--', marker='o',label=EI_keys[track_species[i]])
# # plt.xlim((7.5e-5,9e-5))
# plt.xlabel('Residence time')
# plt.axvline(x=switch_time*1e6, ymin=0., ymax = 1, linewidth=1, color='k')
# # plt.xticks([7.5e-5, 8e-5, switch_time, 8.5e-5, 9e-5])
# plt.legend()
# # plt.show()
# plt.subplot(2,1,2)
# plt.plot(tim*1e6,eigenvalues[order,:]/1e6,linestyle='--', marker='.')
# plt.axvline(x=switch_time*1e6, ymin=0., ymax = 1, linewidth=1, color='k')
# titlefig = str(order) + '_provaEI' + '.pdf'
# plt.savefig(titlefig, bbox_inches='tight')
plt.figure()
plt.plot(tim,H2_cons_eig/1e6)
plt.show()
#### EIG PLOT ####
if mode == 'n_eig':
legend_entry = ['8th','7th','6th','5th','4th','3rd','2nd','1st']
plt.figure()
for i in range(N_eig):
# plt.subplot(N_eig,1,i+1) # comment to overlay instead of subplot
plt.plot(tim*1e6,eigenvalues[i,:]/1e6,linestyle='--',marker='.',label=legend_entry[i])
plt.plot(tim*1e6,CEM/1e6,'x',label='CEM')
plt.legend()
plt.show()
eigenvalues = np.flipud(eigenvalues)
df = pd.DataFrame(
{'time': tim,
'eig1': eigenvalues[0,:],
'eig2': eigenvalues[1,:],
'eig3': eigenvalues[2,:],
'eig3': eigenvalues[3,:],
'eig4': eigenvalues[4,:],
'eig5': eigenvalues[5,:],
'eig6': eigenvalues[6,:],
'eig7': eigenvalues[7,:]
})
# df.to_csv('eigenvalues_autoignition_Li2003_H2.csv')
if mode == 'selectedEig':
tt = tim
tt1 = tt[np.where(tt<switch_time)]
tt2 = tt[np.where(tt>switch_time)]
fig = plt.figure()
ax_big = fig.add_subplot(111, frameon=False) # The big subplot
plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
ax1 = fig.add_subplot(911)
ax2 = fig.add_subplot(912)
ax3 = fig.add_subplot(913)
ax4 = fig.add_subplot(914)
ax5 = fig.add_subplot(915)
ax6 = fig.add_subplot(916)
ax7 = fig.add_subplot(917)
ax8 = fig.add_subplot(918)
ax9 = fig.add_subplot(919)
# ax_big.spines['top'].set_color('none')
# ax_big.spines['bottom'].set_color('none')
# ax_big.spines['left'].set_color('none')
# ax_big.spines['right'].set_color('none')
# ax_big.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
# ax_big.set_xlabel(r'$\mathrm{Residence\;time}\quad \left[ \mu s^{-1} \right]$')
# ax_big.set_ylabel(r'$\lambda_{expl} \quad \mu s ^{-1}$')
# ax_big.set_xlabel('ciao')
# ax_big.set_ylabel(r'$\lambda_{expl} \quad \mu s ^{-1}$')
# PLOT ONLY FOR COMPARISON
i=7
plt.subplot(9,1,1)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='r',label=r'$1^{st} \, \mathrm{eig}$')
plt.ylim((-0.3,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax1.axes.get_xaxis().set_visible(False)
plt.yticks([-0.2, 0.0, 0.2])
i=0
plt.subplot(9,1,2)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='b',label=r'$8^{th} \, \mathrm{eig}$')
plt.ylim((-0.3,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax2.axes.get_xaxis().set_visible(False)
plt.yticks([-0.2, 0.0, 0.2])
plt.subplot(9,1,3)
plt.plot(tt1*1e6,np.array(lambda_patched1)/1e6,linestyle='--', marker='.', color='r', label=r'$\lambda_{explosive}$')
plt.plot(tt2*1e6,np.array(lambda_patched2)/1e6,linestyle='--', marker='.', color='b',label=r'$\lambda_{explosive}$')
plt.ylim((-0.3,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax3.axes.get_xaxis().set_visible(False)
plt.yticks([-0.2, 0.0, 0.2])
i=6
plt.subplot(9,1,4)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='k',label=r'$2^{nd} \, \mathrm{eig}$')
# plt.ylim((-0.2,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax4.axes.get_xaxis().set_visible(False)
i=5
plt.subplot(9,1,5)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='k',label=r'$3^{rd} \, \mathrm{eig}$')
# plt.ylim((-0.2,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax5.set_ylabel(r'$\lambda_{i} \, \left[ \mu s ^{-1}\right]$')
ax5.axes.get_xaxis().set_visible(False)
i=4
plt.subplot(9,1,6)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='k',label=r'$4^{th} \, \mathrm{eig}$')
# plt.ylim((-0.2,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax6.axes.get_xaxis().set_visible(False)
i=3
plt.subplot(9,1,7)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='k',label=r'$5^{th} \, \mathrm{eig}$')
# plt.ylim((-0.2,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax7.axes.get_xaxis().set_visible(False)
i=2
plt.subplot(9,1,8)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='k',label=r'$6^{th} \, \mathrm{eig}$')
# plt.ylim((-0.2,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
ax8.axes.get_xaxis().set_visible(False)
i=1
plt.subplot(9,1,9)
plt.plot(tt*1e6,eigenvalues[i,:]/1e6,linestyle='--', marker='.',color='k',label=r'$7^{th} \, \mathrm{eig}$')
# plt.ylim((-0.2,0.4))
plt.axvline(x=switch_time*1e6, linewidth=1, linestyle='--', color='k')
plt.legend(loc='right')
# po
# post_treatment of most_aligned_eig and tt
# discard values of most_aligned_eig/1e6 < 0.5
most_aligned_eig = np.array(most_aligned_eig)
# pdb.set_trace()
tt = np.delete(tt,np.where(most_aligned_eig < -8e5))
most_aligned_eig = np.delete(most_aligned_eig,np.where(most_aligned_eig < -8e5))
plt.xlabel(r'$\mathrm{Residence\;time}\quad \left[ \mu s \right]$')
# plt.plot(tt,np.array(most_aligned_eig)/1e6,'.',label='most_aligned_eig')
# plt.plot(tt,np.array(most_aligned_ei)/1e6,'.',label='most_aligned_ei')
plt.suptitle(r'Time scale of the chemical explosive mode in PSR simulation of hydrogen-air at $\phi$=1.0, 1atm, initial temperature of 1100 K', fontsize=17)
plt.show()
# print 'maximum heat release rate at time ', tim[np.argmax(np.diff(temp))]
plt.show() |
# coding:utf-8
import smtplib
from email.mime.text import MIMEText
from email.header import Header
class Mail:
    """Send a registration verification code by e-mail through QQ's SMTP service."""
    def __init__(self):
        # Third-party SMTP service settings
        self.mail_host = "smtp.qq.com"  # QQ mail SMTP server
        # SECURITY NOTE(review): the SMTP authorization code and sender
        # address are hard-coded; move them to environment variables/config.
        self.mail_pass = "fmlgcjkysuqadhfi"  # SMTP authorization code obtained earlier
        self.sender = '2675607101@qq.com'  # sender's mailbox address
        # self.receivers = receivers  # recipient address(es); QQ or any other mailbox
    def send(self,receivers,code):
        # Build a plain-text message carrying the verification code and send
        # it over SMTP-SSL (port 465). Returns "ok" on success, "error" on
        # any SMTP failure.
        content = '您的验证码为%s' % code
        message = MIMEText(content, 'plain', 'utf-8')
        message['From'] = Header("MQZ的blog", 'utf-8')
        message['To'] = Header(receivers, 'utf-8')
        subject = '注册验证码'  # message subject line
        message['Subject'] = Header(subject, 'utf-8')
        try:
            smtpObj = smtplib.SMTP_SSL(self.mail_host, 465)
            smtpObj.login(self.sender, self.mail_pass)
            smtpObj.sendmail(self.sender, receivers, message.as_string())
            smtpObj.quit()
            return "ok"
        except smtplib.SMTPException as e:
            return "error"
|
# Generated by Django 2.2.3 on 2019-08-06 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the LostItems table and its
    identically-shaped staging twin LostItemsTemp."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='LostItems',
            fields=[
                ('num', models.AutoField(primary_key=True, serialize=False)),
                ('managementID', models.CharField(blank=True, db_column='managementID', max_length=100, null=True)),
                ('findYmd', models.CharField(blank=True, db_column='findYmd', max_length=100, null=True)),
                ('productName', models.CharField(blank=True, db_column='productName', max_length=100, null=True)),
                ('keepPlace', models.CharField(blank=True, db_column='keepPlace', max_length=100, null=True)),
                ('productImg', models.CharField(blank=True, db_column='productImg', max_length=100, null=True)),
                ('productDesc', models.CharField(blank=True, db_column='productDesc', max_length=200, null=True)),
                ('productClass', models.CharField(blank=True, db_column='productClass', max_length=100, null=True)),
                ('placeAddress', models.CharField(blank=True, db_column='placeAddress', max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='LostItemsTemp',
            fields=[
                ('num', models.AutoField(primary_key=True, serialize=False)),
                ('managementID', models.CharField(blank=True, db_column='managementID', max_length=100, null=True)),
                ('findYmd', models.CharField(blank=True, db_column='findYmd', max_length=100, null=True)),
                ('productName', models.CharField(blank=True, db_column='productName', max_length=100, null=True)),
                ('keepPlace', models.CharField(blank=True, db_column='keepPlace', max_length=100, null=True)),
                ('productImg', models.CharField(blank=True, db_column='productImg', max_length=100, null=True)),
                ('productDesc', models.CharField(blank=True, db_column='productDesc', max_length=200, null=True)),
                ('productClass', models.CharField(blank=True, db_column='productClass', max_length=100, null=True)),
                ('placeAddress', models.CharField(blank=True, db_column='placeAddress', max_length=100, null=True)),
            ],
        ),
    ]
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Ensure that when debug information is not output, a pdb is not expected.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp()

  chdir = 'linker-flags'
  test.run_gyp('pdb-output.gyp', chdir=chdir)
  test.build('pdb-output.gyp', 'test_pdb_output_disabled', chdir=chdir)

  # An immediate up-to-date check proves the build graph does not expect a
  # PDB for a target that has debug information disabled.
  test.up_to_date('pdb-output.gyp', 'test_pdb_output_disabled', chdir=chdir)

  test.pass_test()
|
def solve(arr):
    """Rearrange *arr* as: max, min, 2nd max, 2nd min, ...

    Returns a new list; *arr* is not modified.  Handles odd and even
    lengths, and returns [] for empty input (the previous version indexed
    into an empty list and raised IndexError).
    """
    ordered = sorted(arr)
    result = []
    lo, hi = 0, len(ordered) - 1
    # Walk inward from both ends, taking the high element then the low one.
    while lo <= hi:
        result.append(ordered[hi])
        if lo != hi:  # middle element of an odd-length list is taken once
            result.append(ordered[lo])
        lo += 1
        hi -= 1
    return result
'''
In this Kata, you will be given an array of unique elements, and your task is to
rearrange the values so that the first max value is followed by the first minimum,
followed by second max value then second min value, etc.
For example:
solve([15,11,10,7,12]) = [15,7,12,10,11]
The first max is 15 and the first min is 7. The second max is 12 and the
second min is 10 and so on.
More examples in the test cases.
Good luck!
'''
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.rust.lint.rustfmt import skip_field
from pants.backend.rust.lint.rustfmt.skip_field import SkipRustfmtField
from pants.backend.rust.lint.rustfmt.subsystem import RustfmtSubsystem
from pants.backend.rust.target_types import RustPackageSourcesField
from pants.backend.rust.util_rules.toolchains import RustToolchainProcess
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest
from pants.core.util_rules.partitions import PartitionerType
from pants.engine.internals.selectors import Get
from pants.engine.process import ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class RustfmtFieldSet(FieldSet):
    """Field set matched by the rustfmt formatter: targets with Rust package sources."""
    required_fields = (RustPackageSourcesField,)
    sources: RustPackageSourcesField
    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets carrying `skip_rustfmt=True` are excluded from formatting.
        return tgt.get(SkipRustfmtField).value
class RustfmtRequest(FmtTargetsRequest):
    """Fmt request wiring RustfmtFieldSet targets to the rustfmt subsystem."""
    field_set_type = RustfmtFieldSet
    tool_subsystem = RustfmtSubsystem
    # All matched files are formatted in one batch (rustfmt has no per-file config).
    partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
@rule(desc="Format with rustfmt")
async def rustfmt_fmt(request: RustfmtRequest.Batch) -> FmtResult:
    """Run `rustfmt` over one batch of files and return the formatting result."""
    # The files are passed both as CLI args and as the input digest, so rustfmt
    # rewrites them in place inside the sandbox; the same paths are captured as
    # output files for the FmtResult diff.
    result = await Get(
        ProcessResult,
        RustToolchainProcess(
            binary="rustfmt",
            args=request.snapshot.files,
            input_digest=request.snapshot.digest,
            output_files=request.snapshot.files,
            description=f"Run rustfmt on {pluralize(len(request.files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return await FmtResult.create(request, result)
def rules():
    """Collect this module's rules plus the skip-field and request helper rules."""
    all_rules = list(collect_rules())
    all_rules.extend(skip_field.rules())
    all_rules.extend(RustfmtRequest.rules())
    return all_rules
|
# Copyright (c) 2013, Wesley Shields <wxs@atarininja.org>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from setuptools import setup, Extension
import os
import sys
import platform
# Project layout: this setup.py sits next to the `pepy` binding directory and
# the vendored `pe-parser-library` C++ sources.
here = os.path.dirname(__file__)
pepy = os.path.join(here, "pepy")
with open(os.path.join(pepy, "README.md")) as f:
    README = f.read()
with open(os.path.join(here, "VERSION")) as f:
    VERSION = f.read().strip()
# C++ sources compiled into the extension: the Python binding plus parser core.
SOURCE_FILES = [
    os.path.join(pepy, "pepy.cpp"),
    os.path.join(here, "pe-parser-library", "src", "parse.cpp"),
    os.path.join(here, "pe-parser-library", "src", "buffer.cpp"),
]
INCLUDE_DIRS = []
LIBRARY_DIRS = []
# Platform-specific Unicode conversion backend, search paths and compiler flags.
if platform.system() == "Windows":
    SOURCE_FILES.append(
        os.path.join(here, "pe-parser-library", "src", "unicode_winapi.cpp")
    )
    INCLUDE_DIRS += [
        os.path.abspath(os.path.join(os.path.dirname(sys.executable), "include")),
        os.path.join(here, "pe-parser-library", "include"),
        "C:\\usr\\include",
    ]
    LIBRARY_DIRS += [
        os.path.abspath(os.path.join(os.path.dirname(sys.executable), "libs")),
        "C:\\usr\\lib",
    ]
    # MSVC: enable standard C++ exception handling.
    COMPILE_ARGS = ["/EHsc"]
else:
    SOURCE_FILES.append(
        os.path.join(here, "pe-parser-library", "src", "unicode_codecvt.cpp")
    )
    INCLUDE_DIRS += [
        "/usr/local/include",
        "/opt/local/include",
        "/usr/include",
        os.path.join(here, "pe-parser-library", "include"),
    ]
    LIBRARY_DIRS += ["/usr/lib", "/usr/local/lib"]
    COMPILE_ARGS = ["-std=c++17"]
extension_mod = Extension(
    "pepy",
    # The library version is baked in as a C string macro.
    define_macros=[("PEPARSE_VERSION", f'"{VERSION}"')],
    sources=SOURCE_FILES,
    extra_compile_args=COMPILE_ARGS,
    language="c++",
    include_dirs=INCLUDE_DIRS,
    library_dirs=LIBRARY_DIRS,
)
setup(
    name="pepy",
    url="https://github.com/trailofbits/pe-parse",
    python_requires=">=3.7",
    version=VERSION,
    description="Python bindings for pe-parse",
    long_description=README,
    long_description_content_type="text/markdown",
    author="Wesley Shields",
    author_email="wxs@atarininja.org",
    license="BSD",
    ext_modules=[extension_mod],
)
|
import os
import random
# Candidate character pools.
let = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
       'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
num = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
symb = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '+', '_', '=']


def generate_password(length):
    """Return a random password of `length` characters mixing letters,
    digits and symbols.

    BUG FIX: the original branch order was `if r >= 4` (letters) then
    `elif r == 5` (digits) -- the digit branch was unreachable, so passwords
    never contained a number.  Now r in 1-3 -> letter, 4-5 -> digit,
    6 -> symbol.
    """
    pw = []
    for _ in range(length):
        r = random.randint(1, 6)
        if r <= 3:
            pw.append(random.choice(let))
        elif r <= 5:
            pw.append(random.choice(num))
        else:
            pw.append(random.choice(symb))
    return ''.join(pw)


if __name__ == '__main__':
    # NOTE: 'cls' clears the screen on Windows only.
    os.system('cls')
    length = int(input('Enter the length of your password: '))
    print('Your password is: ', generate_password(length))
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
# Squad dict: jersey number -> player name.
France = {10: "Zizou", 1: "Barthez", 9: "Henry"}

# Print every jersey number (dict iteration yields keys).
for number in France:
    print(number)

# Print every player name.
for name in France.values():
    print(name)

# Print number/name pairs together.
for number, name in France.items():
    print(number, name)
|
#### Section 1: scheduling-optimisation experiments (all commented out).
import optimization
#s = [1,4,3,2,7,3,6,3,2,4,5,3]
#domain = [(0,9)] * (len(optimization.people) * 2)
#print(domain)
#a = optimization.randomoptimize(domain, optimization.schedulecost)# random search
#b = optimization.hillclimb(domain, optimization.schedulecost)# hill climbing
#c= optimization.annealingoptimize(domain, optimization.schedulecost)# simulated annealing
#a = optimization.geneticoptimize(domain, optimization.schedulecost)# genetic algorithm
#a = optimization.whatever(domain, optimization.schedulecost)# randomised hill climbing
#a= optimization.maybetry(domain, optimization.schedulecost)
#a= optimization.difftoname(domain, optimization.schedulecost)
# Note on append usage: a=[]; a.append(bbb); print(a[i])
'''
print(a)
print(optimization.schedulecost(a))
optimization.printschedule(a)
#######22222 需要联网otz
import kayak
sid = kayak.getkayaksession()
searchid = kayak.flightsearch(sid,'BOS','LGA','11/17/2006)
f = kayak.flightsearchresults(sid, searchid)
f[0:3]
#######333333
import dorm
#dorm.printsolution([0,0,0,0,0,0,0,0,0,0])#调用遗传法,mut里最后一项可能是1,这里最后一项是1会报错,所以不能随便加减的。
#s = optimization.randomoptimize(dorm.domain, dorm.dormcost)#随机数法真是非常非常非常糟糕啊。在分寝室的问题中,1000的频数的结果还是很不理想。
#s = optimization.geneticoptimize(dorm.domain, dorm.dormcost)
s = optimization.whatever(dorm.domain, dorm.dormcost)
#s = optimization.hillclimb(dorm.domain, dorm.dormcost)
print(s)
print(dorm.dormcost(s))
dorm.printsolution(s)
##########4444444
import socialnetwork
#s = optimization.randomoptimize(socialnetwork.domain, socialnetwork.crosscount)
s = optimization.whatever(socialnetwork.domain, socialnetwork.crosscount)
print(s)
print(socialnetwork.crosscount(s))
socialnetwork.drawnetwork(s)
i =6
def sss(i):
i=i+4
print(i)
a=sss(i)
print(i)
import random
i=random.randint(0,2)
print(i)
for i in range(4):
print(i)
a=[0]
print(a[0])
print(a[-1])
aa=[]
bb=[]
cc=[]
for i in range(3):
aa+=[i,i*i]
print(aa)
bb+=[[i,i*i]]
print(bb)
cc+=[i,[i*i]]
print(cc)
a=[1,2,3,4,5,6]
b=a[:]
print(b)
#######习题室友。
import dormex
#a = [[0,1],[2,3],[4,5],[6,7],[8,9]]
#a=[0,0,0,0,0,0,0,0,0,0]
#a=[9,8,7,6,5,4,3,2,1,0]
#print(dormex.domain)
#a = optimization.randomoptimize(dormex.domain, dormex.matecost)
#a = optimization.hillclimb(dormex.domain, dormex.matecost)
a = optimization.whatever(dormex.domain, dormex.matecost)
#a = optimization.difftoname(dormex.domain, dormex.matecost)
s = dormex.printresult(a)
print(dormex.matecost(a))
#dormex.kk()
#dormex.zz()
#####习题人际关系网格。
import socialnetwork
s = optimization.whatever(socialnetwork.domain, socialnetwork.crosscount)
print(s)
print(socialnetwork.crosscount(s))
socialnetwork.drawnetwork(s)
''' |
#code
#https://practice.geeksforgeeks.org/problems/next-permutation/0
def next_permutation(nums):
    """Return the next lexicographic permutation of *nums* as a new list.

    If *nums* is already the last permutation (non-increasing), the
    ascending sort is returned, matching the GfG problem statement.
    """
    nums = list(nums)
    # Find the rightmost index j with nums[j] < nums[j+1] (the pivot).
    j = len(nums) - 2
    while j >= 0 and nums[j] >= nums[j + 1]:
        j -= 1
    if j < 0:
        # Whole sequence is non-increasing: wrap around to the smallest.
        nums.sort()
        return nums
    # Swap the pivot with the smallest element to its right that exceeds it.
    just_greater = min(x for x in nums[j + 1:] if x > nums[j])
    k = j + 1 + nums[j + 1:].index(just_greater)
    nums[j], nums[k] = nums[k], nums[j]
    # Sort the suffix to make it the minimal arrangement.
    nums[j + 1:] = sorted(nums[j + 1:])
    return nums


if __name__ == '__main__':
    T = int(input())
    for i in range(T):
        N = int(input())
        # BUG FIX: the original used `x is not ''` (identity comparison,
        # a SyntaxWarning and unreliable) instead of inequality.
        values = [int(x) for x in input().split(" ") if x != '']
        for v in next_permutation(values):
            print(v, end=" ")
        if i != T - 1:
            print()
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:config_xgb.py
# @Author: Michael.liu
# @Date:2020/7/6 16:48
# @Desc: this code is ....
# XGBoost training parameters for a binary task expressed as 2-class softmax.
params = {
    'booster': 'gbtree',           # tree-based booster
    'objective': 'multi:softmax',  # multiclass objective, outputs class labels
    'num_class':2,                 # number of classes
    'gamma': 0.1,                  # min loss reduction required to split a node
    'max_depth': 5,                # maximum tree depth
    'lambda': 3,                   # L2 regularisation weight
    'subsample': 0.7,              # row sampling ratio per tree
    'colsample_bytree': 0.7,       # column sampling ratio per tree
    'min_child_weight': 3,         # min sum of instance weight in a child
    'eta': 0.1,                    # learning rate
    'seed': 1000,                  # RNG seed for reproducibility
}
import random, math
from food_planner.tools import getMeals, getItems, updateCupboard, getPrices
POOR, MEDIUM, RICH = 2, 3, 5
class Calculator:
    """Builds randomised meal plans (and shopping lists) constrained by a
    budget and a number of days, using meals/items from food_planner storage."""
    def __init__(self, budget, time):
        # Load meal definitions and current cupboard contents from storage.
        self.meals = getMeals()
        self.cupboard = getItems()
        # Check values are in bounds
        if time < 1:
            raise Exception("Time must be above 0.")
        if budget < 1:
            raise Exception("Budget must be above 1.")
        self.budget = budget
        self.time = time
    # Run the program depending on the mode
    def run(self, mode):
        """Truthy mode: produce a shopping plan and print the shopping list.
        Falsy mode: produce a meal plan from the cupboard and persist usage."""
        if mode:
            shopping_plan = self.calculate(True)
            shopping_list = self.generateShoppingList(shopping_plan)
            print("Items needed: ")
            for item in shopping_list:
                print(f"{item[0]} : {item[1]}")
            return shopping_plan
        else:
            meal_plan = self.calculate(False)
            updateCupboard(self.cupboard)
            return meal_plan
    # Calculate a meal plan or shopping list
    def calculate(self, shopping):
        """Pick one meal per day, biased toward cheap/expensive meals depending
        on the remaining daily budget.  Raises when the budget runs out or no
        meal is cookable from the cupboard (non-shopping mode)."""
        # The final return list
        meal_plan = []
        rich_meals = []
        medium_meals = []
        poor_meals = []
        # meals that have already been selected and don't want to be selected
        # several times in a row.
        cached_meals = []
        # classify meal prices
        # NOTE(review): thresholds 5 and 3 duplicate the RICH/MEDIUM constants
        # literally -- confirm and consider using the constants.
        for meal in self.meals:
            if meal.price > 5:
                rich_meals.append(meal)
            elif meal.price >= 3:
                medium_meals.append(meal)
            else:
                poor_meals.append(meal)
        # For every block of days select some random meals and appends
        for i in range(self.time):
            # Recomputed each day from the *remaining* budget, so the bias
            # shifts cheaper as money is spent.
            dailyBudget = self.budget / self.time
            # Check what meals you can cook
            if not shopping:
                poor_meals = self.getPossibleMeals(poor_meals)
                medium_meals = self.getPossibleMeals(medium_meals)
                rich_meals = self.getPossibleMeals(rich_meals)
                self.meals = self.getPossibleMeals(self.meals)
            # Get attraction values
            poorAttraction = (RICH - dailyBudget) * 10
            richAttraction = dailyBudget * 10
            mediumAttraction = MEDIUM*10 + (richAttraction - poorAttraction)
            threshold = (100-int(poorAttraction)) + int(richAttraction)
            r = random.randint(0, threshold)
            # In the rich range
            if r > (50 + int(mediumAttraction/3)) and len(rich_meals) != 0:
                meal_plan.append(random.choice(rich_meals))
            # In the poor range
            elif r < (50-int(mediumAttraction/3)) and len(poor_meals) != 0:
                meal_plan.append(random.choice(poor_meals))
            else:
                if len(medium_meals) != 0:
                    meal_plan.append(random.choice(medium_meals))
                else:
                    if len(self.meals) == 0:
                        raise Exception("No possible meals to chose from due to lack or items.")
                    meal_plan.append(random.choice(self.meals))
            # Update the budget and the ingredient quantities
            self.budget -= meal_plan[-1].price
            if not shopping:
                # Consume ingredients from the in-memory cupboard.
                for ingredient in meal_plan[-1].ingredients:
                    self.cupboard[ingredient.name].quantity -= ingredient.quantity
            if self.budget <= 0:
                raise Exception("Budget is not large enough. Could not calculate meal plan.")
        return meal_plan
    def generateShoppingList(self, meal_plan):
        """Return [[item_name, packs_to_buy], ...] covering every ingredient the
        plan needs beyond what the cupboard already holds."""
        shoppingList = []
        shoppingDict = {}
        prices = getPrices()
        canDo = self.getPossibleMeals(meal_plan)
        # Remove all meals you can already make
        meal_plan = [x for x in meal_plan if x not in canDo]
        # Check how much of each ingredient you need
        for meal in meal_plan:
            neededList = meal.findIngredientsNeeded(self.cupboard)
            for item in neededList:
                if item[0] in shoppingDict:
                    shoppingDict[item[0]] = shoppingDict[item[0]] + item[1]
                else:
                    shoppingDict[item[0]] = item[1]
        for item in shoppingDict.keys():
            # Round up to whole purchasable units (pack size from prices).
            shoppingList.append([item, math.ceil(shoppingDict[item] / prices[item].quantity)])
        return shoppingList
    # Get a list of possible meals with items in the cupboard
    def getPossibleMeals(self, meals):
        """Filter *meals* down to those cookable from the current cupboard."""
        final = []
        for meal in meals:
            if meal.checkCanCook(self.cupboard):
                final.append(meal)
        return final
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
import os
import sys
import re
import Chandra.Time
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
def run():
    """Rewrite the leading time field of each count/history data file.

    Globs ccd*_cnt, col*_cnt, hccd*_cnt, hist_* and front_side* in the current
    directory; for every line whose first field is numeric, replaces the second
    field's `<year>:<doy>` time with Chandra time in seconds since epoch.
    Files whose first entry is not numeric are skipped (assumed converted).
    """
    # Build the file list via the shell, then remove the temp listing.
    cmd = 'ls ccd*_cnt col*_cnt hccd*_cnt hist_* front_side* > zxc'
    os.system(cmd)
    with open('zxc', 'r') as f:
        data = [line.strip() for line in f.readlines()]
    cmd = 'rm -f zxc'
    os.system(cmd)
    for dfile in data:
        print(dfile)
        with open(dfile, 'r') as f:
            out = [line.strip() for line in f.readlines()]
        save = []
        for line in out:
            atemp = re.split('<>', line)
#
#---- assume that if the first entry is not numeric, the file is
#---- either already converted or not appropriate for the treatment
#
            try:
                val = float(atemp[0])
                chk = 1
            except:
                chk = 0
            if chk == 0:
                break
            alen = len(atemp)
            # Field 1 is `<year>:<doy>`; pad doy to 3 digits and expand to a
            # full `yyyy:ddd:hh:mm:ss` stamp before converting.
            btemp = re.split(':', atemp[1])
            ltime = btemp[0] + ':' + add_leading_zero(btemp[1], dlen=3) + ':00:00:00'
            ctime = int(Chandra.Time.DateTime(ltime).secs)
            oline = str(ctime)
            for k in range(1,alen):
                oline = oline + '<>' + atemp[k]
            oline = oline + '\n'
            save.append(oline)
        # NOTE(review): if a file is empty, `chk` is never assigned and this
        # raises NameError -- confirm empty data files cannot occur.
        if chk == 0:
            continue
        with open(dfile, 'w') as fo:
            for line in save:
                fo.write(line)
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
def add_leading_zero(val, dlen=2):
    """Zero-pad *val* to at least *dlen* characters.

    Non-numeric values are returned unchanged (callers pass raw file fields).
    Uses str.zfill, which also fixes the old manual loop's handling of
    negative numbers (it produced strings like '0-5'; zfill keeps the sign
    in front of the padding).
    """
    try:
        ival = int(val)
    except (TypeError, ValueError):
        # Not an integer-like value: pass through untouched.
        return val
    return str(ival).zfill(dlen)
#--------------------------------------------------------------------------
# Script entry point: convert all matching data files in the current directory.
if __name__ == "__main__":
    run()
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
from datetime import datetime
import requests
def markdown():
    """Yield the lines of a README listing every LeetCode algorithms problem.

    Fetches the public problem-list API (network required), sorts by question
    id, and marks per problem whether a local `<id>.<slug>.py` solution file
    exists in the current directory.
    """
    session = requests.Session()
    host = 'https://leetcode.com'
    url_algorithms = '{}/problemset/algorithms/'.format(host)
    url_problems = '{}/api/problems/algorithms/'.format(host)
    # Prime cookies on the HTML page first, then hit the JSON API.
    session.get(url=url_algorithms)
    problems = session.get(url=url_problems).json()
    yield '# LeetCode Online Judge of [{}]({})'.format(problems['category_slug'].capitalize(), url_algorithms)
    yield '\n'
    yield 'Solved with Python 3.x'
    yield '\n' * 2
    header = ['#', 'Title', 'Solution', 'Discuss', 'Difficulty', 'Acceptance', 'Protected', 'Solved']
    yield '### Table of Contents'
    yield datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    yield '\n'
    yield '| {} |'.format(' | '.join(header))
    yield '| {} |'.format('|'.join(['-' * (len(item) + 2) for item in header]))
    for problem in sorted(problems['stat_status_pairs'], key=lambda x: x['stat']['question_id']):
        # Expected local solution filename, e.g. `1.two_sum.py`.
        py = '{}.{}.py'.format(
            problem['stat']['question_id'], problem['stat']['question__title_slug'].replace('-', '_')
        )
        yield '| {} |'.format(
            ' | '.join([
                str(problem['stat']['question_id']),
                '[{}]({})'.format(
                    problem['stat']['question__title'],
                    '{}/problems/{}/'.format(host, problem['stat']['question__title_slug'])
                ),
                "[Python](/{})".format(py),
                '[LeetCode]({})'.format(
                    '{}/discuss/questions/oj/{}/'.format(host, problem['stat']['question__title_slug'])
                ),
                {1: 'Easy', 2: 'Medium', 3: 'Hard'}[problem['difficulty']['level']],
                '%.1f%% = %d / %d' % (
                    100 * problem['stat']['total_acs'] / problem['stat']['total_submitted'],
                    problem['stat']['total_acs'], problem['stat']['total_submitted']
                ),
                str(problem['paid_only']),
                str(os.path.exists(py) and os.path.isfile(py))
            ])
        )
# Write the generated table to `<script name>.md` next to this script.
if __name__ == '__main__':
    with open(os.path.splitext(os.path.basename(__file__))[0] + '.md', mode='w', encoding='utf-8') as fp:
        for index, line in enumerate(markdown()):
            if index != 0:
                fp.write('\n')
            fp.write(line)
|
# coding: utf-8
from database import Database,fake_database
import os
def show_all(items):
    """Print every item as `<n>: <shape> location=<location>` between rules.

    Extracted because the original script duplicated this display loop
    verbatim before and after sorting.
    """
    print("--------")
    print("All item")
    temp = 1
    for i in items:
        print(str(temp)+ ": "+i.shape+" location="+str(i.location))
        temp += 1
    print("--------")


if __name__=="__main__":
    #connect to the fake database
    database = fake_database()
    #get data from database and show it
    all_data = database.get_all_data()
    show_all(all_data)
    #sort data by location
    print("sort shape")
    database.sort_all_data()
    #get data from database again and show it again
    all_data = database.get_all_data()
    show_all(all_data)
    # Keep the console window open (Windows only).
    os.system("pause")
|
import re
import urllib
# NOTE: Python 2 source (urllib.urlopen and print statements).
# Downloads a text file and sums every run of digits found in it.
hand = urllib.urlopen('http://python-data.dr-chuck.net/regex_sum_371732.txt')
lst = []
for line in hand:
    line= line.rstrip()
    # Every maximal run of digits on the line.
    want = re.findall('[0-9]+' ,line)
    if len(want) ==0: continue
    #print want
    for i in want:
        num = float(i)
        lst.append(num)
#print lst
#print num
print sum(lst)
|
#!/usr/bin/env python
# Requires pathlib on python 2
from __future__ import print_function, unicode_literals
import re
import argparse
from pathlib import Path
from subprocess import check_output
# Matches project-local `#include "..."` lines (headers to be inlined).
includes_local = re.compile(r"""^#include "(.*)"$""", re.MULTILINE)
# Matches system `#include <...>` lines (collected and deduplicated).
includes_system = re.compile(r"""^#include \<(.*)\>$""", re.MULTILINE)
DIR = Path(__file__).resolve().parent
BDIR = DIR.parent / 'include'
# Current git describe output, embedded into the generated header.
TAG = check_output(['git', 'describe', '--tags', '--always'], cwd=str(DIR)).decode("utf-8")
def MakeHeader(out):
    """Amalgamate CLI/CLI.hpp and its local includes into one header at *out*.

    Local includes are inlined (from their `namespace` onward, dropping their
    own include blocks); system includes are collected, deduplicated and
    emitted once at the top.
    """
    main_header = BDIR / 'CLI/CLI.hpp'
    with main_header.open() as f:
        header = f.read()
    include_files = includes_local.findall(header)
    headers = set()
    output = ''
    for inc in include_files:
        with (BDIR / inc).open() as f:
            inner = f.read()
        headers |= set(includes_system.findall(inner))
        output += '\n// From {inc}\n\n'.format(inc=inc)
        # Skip the file's preamble; keep everything from its namespace on.
        output += inner[inner.find('namespace'):]
    header_list = '\n'.join('#include <'+h+'>' for h in headers)
    output = '''\
#pragma once

// Distributed under the MIT license. See accompanying
// file LICENSE or https://github.com/henryiii/CLI11 for details.

// This file was generated using MakeSingleHeader.py in CLI11/scripts
// from: {tag}
// This has the complete CLI library in one file.

{header_list}
{output}'''.format(header_list=header_list, output=output, tag=TAG)
    with Path(out).open('w') as f:
        f.write(output)
    print("Created {out}".format(out=out))
if __name__ == '__main__':
    # Default output path is <repo>/include/CLI11.hpp.
    parser = argparse.ArgumentParser()
    parser.add_argument("output", nargs='?', default=BDIR / 'CLI11.hpp')
    args = parser.parse_args()
    MakeHeader(args.output)
|
from gpiozero import Button, LED
import requests
from signal import pause
from pathlib import Path
import os
from configparser import ConfigParser
import paho.mqtt.client as mqtt
import time
# GPIO pin assignments (BCM numbering) for the lock button and status LEDs.
LOCK_BUTTON_GPIO = 4
AVAILABLE_LED_GPIO = 17
UNAVAILABLE_LED_GPIO = 27
lock_button = Button(LOCK_BUTTON_GPIO)
available_led = LED(AVAILABLE_LED_GPIO)
unavailable_led = LED(UNAVAILABLE_LED_GPIO)
# Current availability state; flipped by toggle().
is_available = True
def toggle():
    """Flip the availability flag and light the matching status LED."""
    global is_available
    is_available = not is_available
    # Exactly one LED is lit at any time.
    lit, dark = (
        (available_led, unavailable_led)
        if is_available
        else (unavailable_led, available_led)
    )
    lit.on()
    dark.off()
def handle_mqtt_connection(client, userdata, flags, rc):
    """paho-mqtt on_connect callback: (re)subscribe once the broker accepts us."""
    # NOTE(review): washer id 8 is hardcoded here while main() builds the topic
    # from settings.ini -- confirm which is intended.
    client.subscribe('washer/8/run_cycle')
def handle_mqtt_message(client, userdata, msg):
    """paho-mqtt on_message callback; currently a placeholder that only prints."""
    print('here')
def main():
    """Read settings, connect to the MQTT broker, and listen for run_cycle messages."""
    # Get configuration settings
    current_dir = Path(os.path.abspath(os.path.dirname(__file__)))
    settings_file = current_dir / '..' / 'config' / 'settings.ini'
    config = ConfigParser()
    config.read(settings_file)

    # Establish connection to MQTT
    mqtt_client = mqtt.Client()
    # BUG FIX: the attribute was misspelled `on_messsage`, so the handler was
    # never registered and incoming messages were silently dropped.
    mqtt_client.on_message = handle_mqtt_message
    # Also wire the (previously unused) connect handler so the subscription is
    # re-established after reconnects.
    mqtt_client.on_connect = handle_mqtt_connection
    mqtt_client.connect('129.21.65.60')
    topic = 'washer/' + config['instance']['washer_id'] + '/run_cycle'
    print(topic)
    mqtt_client.subscribe('washer/8/run_cycle')
    mqtt_client.loop_forever()


if __name__ == '__main__':
    main()
|
import unittest
from katas.beta.find_jon_snow_parents import jonSnowParents
class JonSnowParentsTestCase(unittest.TestCase):
    """jonSnowParents: only ('Rhaegar Targaryen', 'Lyanna Stark') in that exact
    order earns the throne; any other pairing (or order) 'knows nothing'."""
    def test_equal_1(self):
        self.assertEqual(jonSnowParents('Robert Baratheon', 'Catelyn Stark'),
                         'Jon Snow, you know nothing')
    def test_equal_2(self):
        # Correct parents but wrong argument order still fails.
        self.assertEqual(jonSnowParents('Lyanna Stark', 'Rhaegar Targaryen'),
                         'Jon Snow, you know nothing')
    def test_equal_3(self):
        self.assertEqual(jonSnowParents('Barack Obama', 'Lady Gaga'),
                         'Jon Snow, you know nothing')
    def test_equal_4(self):
        self.assertEqual(jonSnowParents('Rhaegar Targaryen', 'Lyanna Stark'),
                         'Jon Snow you deserve the throne')
    def test_not_equal_1(self):
        self.assertNotEqual(
            jonSnowParents('Rhaegar Targaryen', 'Lyanna Stark'),
            'Jon Snow, you know nothing'
        )
|
import unittest
from katas.kyu_7.number_of_occurrences import number_of_occurrences
class NumberOfOccurrencesTestCase(unittest.TestCase):
    """number_of_occurrences(x, xs): count of x in xs, for numbers and strings."""
    def test_equals(self):
        # Empty list always yields zero.
        self.assertEqual(number_of_occurrences(4, []), 0)
    def test_equals_2(self):
        self.assertEqual(number_of_occurrences(4, [4, 0, 4]), 2)
    def test_equals_3(self):
        self.assertEqual(number_of_occurrences(
            1024, [1024, 1024, 2056, 512, 256, 4096, 1024]
        ), 3)
    def test_equals_4(self):
        self.assertEqual(number_of_occurrences(
            9, [1, 2, 3, 4, 5, 6, 7, 8, 9]
        ), 1)
    def test_equals_5(self):
        # Works on non-numeric elements too.
        self.assertEqual(number_of_occurrences(
            'abc', ['abc', '123', '123', 'abc']
        ), 2)
|
from django import forms
class Order_Upload_Form(forms.Form):
    """Single-field Django form for uploading an order file."""
    file = forms.FileField()
from threading import Timer
import pdb
class PeriodicTimer(object):
    """Repeatedly invokes a callback on a fixed interval using threading.Timer,
    optionally stopping after a maximum number of ticks."""
    def __init__(self, interval, maxticks, callback, *args, **kwargs):
        """
        @param interval : seconds between callback invocations
        @param maxticks : number of invocations before stopping; falsy means run forever
        @param callback : function executed on every tick (with *args/**kwargs)
        """
        self._interval = interval
        self._callback = callback
        self._args = args
        self._kwargs = kwargs
        if maxticks:
            self._nticks = 0
            self._maxticks = maxticks
        else:
            self._maxticks = None
    def _run(self):
        """Timer body: reschedule the next tick (unless the limit is reached),
        then invoke the callback."""
        if self._maxticks:
            self._nticks += 1
            # Only reschedule while under the tick limit; the callback below
            # still fires for this (possibly final) tick.
            if self._nticks < self._maxticks:
                self._timer = Timer(self._interval, self._run)
                self._timer.start()
        else:
            self._timer = Timer(self._interval, self._run)
            self._timer.start()
        # sends the request
        self._callback(*self._args, **self._kwargs)
    def start(self):
        """Arm the first timer tick."""
        self._timer = Timer(self._interval, self._run)
        self._timer.start()
    def stop(self):
        """Cancel the pending timer, stopping future ticks."""
        self._timer.cancel()
class Policy(object):
    """Base class for policies: a policy type plus the list of targets it
    applies to; subclasses populate `_triggers`."""
    def __init__(self, policy_type, target_list):
        self._type = policy_type
        self._targets = target_list
        # Populated by subclasses (e.g. via create_triggers).
        self._triggers = None
    @property
    def type(self):
        return self._type
    @property
    def targets(self):
        return self._targets
    @property
    def triggers(self):
        return self._triggers
class Trigger(object):
    """Immutable (event, condition, action) triple evaluated by policies."""
    def __init__(self, event, condition, action):
        self._event = event
        self._condition = condition
        self._action = action
    @property
    def event(self):
        return self._event
    @property
    def condition(self):
        return self._condition
    @property
    def action(self):
        return self._action
class SignatureVerPolicy(Policy):
    """Signature-verification policy: maps VNF names to the actions of every
    trigger whose constraint names that VNF.  (Python 2 module.)"""
    def __init__(self, policy_type, target_list, triggers_list):
        Policy.__init__(self, policy_type, target_list)
        self._triggers = self.create_triggers(triggers_list)
    def create_triggers(self, triggers_list):
        """Parse 'triggred_by <vnf>' constraints into Trigger objects.
        NOTE(review): 'triggred_by' is the literal keyword expected from the
        template (apparently misspelled) -- a mismatch only prints a warning."""
        triggers = []
        for trigger in triggers_list:
            event = trigger.get_event()
            action = trigger.get_action()
            condition = trigger.get_condition()['constraint']
            key_word, constraint = condition.split(' ')
            try:
                assert key_word == "triggred_by"
            except AssertionError:
                print "constraint assertion error"
            triggers.append(Trigger(event, constraint, action))
        return triggers
    def evaluate(self, vnf):
        """Return the actions of ALL triggers whose condition equals *vnf*."""
        actions = []
        for trigger in self.triggers:
            if trigger.condition == vnf:
                actions.append(trigger.action)
        return actions
class UpdateFirewallPolicy(Policy):
    """Firewall-update policy.  create_triggers duplicates
    SignatureVerPolicy.create_triggers verbatim; evaluate differs by returning
    only the FIRST matching action (or None)."""
    def __init__(self, policy_type, target_list, triggers_list):
        Policy.__init__(self, policy_type, target_list)
        self._triggers = self.create_triggers(triggers_list)
    def create_triggers(self, triggers_list):
        """Parse 'triggred_by <vnf>' constraints into Trigger objects
        (same literal keyword caveat as SignatureVerPolicy)."""
        triggers = []
        for trigger in triggers_list:
            event = trigger.get_event()
            action = trigger.get_action()
            condition = trigger.get_condition()['constraint']
            key_word, constraint = condition.split(' ')
            try:
                assert key_word == "triggred_by"
            except AssertionError:
                print "constraint assertion error"
            triggers.append(Trigger(event, constraint, action))
        return triggers
    def evaluate(self, vnf):
        """Return the first matching trigger's action, or None."""
        for trigger in self.triggers:
            if trigger.condition == vnf:
                return trigger.action
        return None
class PeriodicTrigger(object):
    """Trigger that periodically compares a meter value against a threshold and
    fires a callback (then stops its timer) when the condition is met.
    Starts its own PeriodicTimer immediately on construction."""
    def __init__(self, target, meter_name, callback, event, condition, action):
        self._target = target
        self._callback = callback
        self._meter_name = meter_name
        # Latest meter sample; None until set_meter_value is first called.
        self._data = None
        self._event = event
        self._condition = condition
        self._action = action
        self._timer = self.init_periodic_timer(condition)
        self._timer.start()
    @property
    def target(self):
        return self._target
    @property
    def event(self):
        return self._event
    @property
    def condition(self):
        return self._condition
    @property
    def action(self):
        return self._action
    @property
    def data(self):
        return self._data
    @property
    def meter_name(self):
        return self._meter_name
    def set_meter_value(self, value):
        """Record the newest meter sample for the next evaluation tick."""
        self._data = value
    def init_periodic_timer(self, condition):
        """Build an unbounded PeriodicTimer firing evaluate() every
        condition['period'] seconds."""
        period = condition['period']
        return PeriodicTimer(period, None, self.evaluate)
    def evaluate(self):
        """Compare the latest sample to the threshold using the configured
        comparison operator ('gt' or 'lt'); fire on the first satisfaction.
        NOTE(review): comparisons against a still-None sample rely on
        Python 2 mixed-type ordering -- confirm a sample always arrives first."""
        print 'Evaluating policy condition for targets: {0}'.format(self.target)
        if self.condition['comparison_operator'] == 'gt':
            if self.data > self.condition['threshold']:
                #print '{0} greater_than {1} : condition satisfied'.format(self.data, self.condition['threshold'])
                self.trigger()
            else:
                pass
                #print '{0} lesser_than {1} : condition NOT satisfied'.format(self.data, self.condition['threshold'])
        elif self.condition['comparison_operator'] == 'lt':
            if self.data < self.condition['threshold']:
                #print '{0} lesser_than {1} : condition satisfied'.format(self.data, self.condition['threshold'])
                self.trigger()
            else:
                pass
                #print '{0} greater_than {1} : condition NOT satisfied'.format(self.data, self.condition['threshold'])
    def trigger(self):
        """Stop the periodic evaluation and invoke the callback with the
        target and the action's instance count."""
        self._timer.stop()
        self._callback(self.target, self.action['number'])
class ScalingPolicy(Policy):
    """Scaling policy: one PeriodicTrigger per (target, trigger template) pair;
    meter samples are routed to the matching trigger via meter_in()."""
    def __init__(self, callback, policy_type, target_list, triggers_list):
        Policy.__init__(self, policy_type, target_list)
        self._callback = callback
        self._triggers = self.create_triggers(triggers_list)
    def create_triggers(self, triggers_list):
        """Instantiate (and implicitly start) a PeriodicTrigger for every
        target x trigger-template combination."""
        triggers = []
        for target in self.targets:
            for trigger in triggers_list:
                meter_name = trigger.trigger_tpl['meter_name']
                event = trigger.get_event()
                action = trigger.get_action()
                condition = trigger.get_condition()
                triggers.append(PeriodicTrigger(target,
                                                meter_name,
                                                self._callback,
                                                event,
                                                condition,
                                                action))
        return triggers
    def meter_in(self, meter_name, target, value):
        """Deliver a meter sample to the trigger watching (target, meter_name)."""
        for trigger in self.triggers:
            if trigger.target == target:
                if trigger.meter_name == meter_name:
                    trigger.set_meter_value(value)
|
from selenium import webdriver
from time import sleep
link = "http://suninjuly.github.io/huge_form.html"
def func():
    """Type a marker string into every <input> on the page and submit the form."""
    elements = browser.find_elements_by_tag_name("input")
    for element in elements:
        element.send_keys("ABRWALK")
    button = browser.find_element_by_css_selector("button.btn")
    button.click()


try:
    browser = webdriver.Chrome()
    browser.get(link)
    func()
finally:
    # Pause so the result page stays visible, then always close the browser.
    # (The old `time = sleep(30)` pointlessly bound sleep's None return value.)
    sleep(30)
    browser.quit()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable
from pants.core.goals.fix import FixFilesRequest, Partitions
from pants.core.goals.multi_tool_goal_helper import SkippableSubsystem
from pants.engine.internals.build_files import BuildFileOptions
from pants.engine.internals.native_engine import FilespecMatcher
from pants.engine.rules import collect_rules, rule
from pants.util.memo import memoized
@memoized
def _get_build_file_partitioner_rules(cls) -> Iterable:
    """Returns the BUILD file partitioner rule.

    Memoized so each fixer class gets exactly one rule instance; the inner
    @rule is specialized to the class's PartitionRequest/tool_subsystem via
    _param_type_overrides.
    """
    @rule(
        _param_type_overrides={
            "request": cls.PartitionRequest,
            "subsystem": cls.tool_subsystem,
        }
    )
    async def partition_build_files(
        request: FixFilesRequest.PartitionRequest,
        subsystem: SkippableSubsystem,
        build_file_options: BuildFileOptions,
    ) -> Partitions:
        # A skipped tool contributes no partitions at all.
        if subsystem.skip:
            return Partitions()
        # Keep only files matching the configured BUILD file patterns, minus
        # the configured ignores.
        specified_build_files = FilespecMatcher(
            includes=[os.path.join("**", p) for p in build_file_options.patterns],
            excludes=build_file_options.ignores,
        ).matches(request.files)
        return Partitions.single_partition(specified_build_files)
    return collect_rules(locals())
|
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: three levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Secret key is read from the environment / .env via python-decouple.
SECRET_KEY = config('SECRET_KEY')
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # Third-party apps.
    'webpack_loader',
    'tinymce',
    'rest_framework',
    'django_filters',
    'mptt',
    'import_export',
    'adminsortable2',
    # Project apps.
    'administrator.apps.AdministratorConfig',
    'catalog.apps.CatalogConfig',
    'shop.apps.ShopConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sushishop.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Injects the current Site object into all templates.
                'administrator.context_processors.site.site',
            ],
        },
    },
]
WSGI_APPLICATION = 'sushishop.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Localisation: Russian locale, Moscow time.
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
SITE_ID = 1
# Rich-text editor configuration for admin textareas.
TINYMCE_DEFAULT_CONFIG = {
    'selector': 'textarea',
    'theme': 'modern',
    'plugins': 'link image preview codesample contextmenu table code lists',
    'toolbar1': 'formatselect | bold italic underline | alignleft aligncenter alignright alignjustify '
                '| bullist numlist | outdent indent | table | link image | codesample | preview code',
    'contextmenu': 'formats | link image',
    'menubar': False,
    'inline': False,
    'statusbar': True,
    'width': 'auto',
    'height': 360,
}
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 30,
    'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}
|
import time
import json
import telepot
from pprint import pprint
from Text import *
from telepot.loop import MessageLoop
from telepot.delegate import per_chat_id, create_open, pave_event_space
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
from searchXXXX import search_info
from search_price_taobao_updated import search_taobao
from list_data import function_list, primary_sale_list, secondary_sale_list, search_list, yes_or_no, attribute_list, display_attribute_dict
# This class is to store item information and shop information
class add_item(object):
    """Accumulates the item, shop and user records for one conversation.

    Each record is a plain dict filled in one key/value pair at a time as the
    bot walks the user through its questions.
    """

    def __init__(self):
        # One mapping per record kind, all empty to start with.
        self.new_item, self.shop_info, self.user_info = {}, {}, {}

    def add_attribute(self, attribute_name, info):
        """Record one key/value pair on the item being listed."""
        self.new_item.update({attribute_name: info})

    def add_shop(self, attribute_name, info):
        """Record one key/value pair describing the seller's shop."""
        self.shop_info.update({attribute_name: info})

    def add_user(self, attribute_name, info):
        """Record one key/value pair describing the Telegram user."""
        self.user_info.update({attribute_name: info})
# This is our class, Aladdin
class Aladdin(telepot.helper.ChatHandler):
    """Per-chat marketplace bot handler.

    Implements a finite-state conversation machine: ``self.indicator`` names
    the current state and is advanced by ``on_chat_message`` and
    ``on_callback_query``.  Data is persisted in JSON-lines files
    (user_info.json, shop_info.json, item.json).
    """

    # This function is to initialize Aladdin
    def __init__(self, *args, **kwargs):
        # include_callback_query=True so inline-button presses are routed to
        # on_callback_query of this handler.
        super(Aladdin, self).__init__(include_callback_query=True, *args, **kwargs)
        # Conversation state; starts at the main menu.
        self.indicator = 'choose_function'
        # Scratch storage for the item/shop/user currently being built up.
        self.store = add_item()
    # This function is what Aladdin does when the user first send a message
    def open(self, initial_msg, seed):
        """Handle the first message of a new chat session.

        Registers first-time private users in user_info.json, then shows the
        main menu.  Returns True so telepot keeps this handler open.
        """
        content_type, chat_type, chat_id = telepot.glance(initial_msg)
        # In group chats, only react to the explicit summoning phrase.
        if chat_type == 'group' and initial_msg['text'] == '@aladdin_of_teletubbies_bot Answer my summoning!':
            bot.sendMessage(chat_id, text_helping_message, parse_mode='Markdown')
            self.close()
        if chat_type == "private":
            search_result = self.search_data('user_info.json', 'chat_id', chat_id)
            # Unknown chat id: first-time user — greet and persist the profile.
            if not search_result:
                bot.sendMessage(chat_id, text_helping_message, parse_mode='Markdown')
                self.store.add_user('chat_id', chat_id)
                self.store.add_user('first_name', initial_msg['chat']['first_name'])
                # last_name / username are optional Telegram profile fields.
                if 'last_name' in initial_msg['chat']:
                    self.store.add_user('last_name', initial_msg['chat']['last_name'])
                if 'username' in initial_msg['chat']:
                    self.store.add_user('username', initial_msg['chat']['username'])
                # Append one JSON object per line (JSON-lines database).
                with open('user_info.json', 'a') as handle:
                    json.dump(self.store.user_info, handle)
                    handle.write("\n")
                # Redundant: the with-block already closed the file.
                handle.close()
        self.send_list_keyboard(chat_id, text_welcome_message, function_list)
        bot.sendAudio(chat_id, gif_welcome)
        return True
# This function is to send a custom inline keyboard to the user
def send_custom_inline_keyboard(self, chat_id, text_info, send_tuple_list):
kb = []
for t in send_tuple_list:
kb.append([InlineKeyboardButton(text=t[0], callback_data=t[1])])
mark_up = InlineKeyboardMarkup(inline_keyboard=kb)
bot.sendMessage(chat_id, text=text_info, reply_markup=mark_up, parse_mode='Markdown')
    # This function is to add information to the item, send a message to the user, and change indicator
    def add_attribute_info(self, attribute_name, read_message, send_message, change_indicator):
        """Store one item attribute, reply to the current chat, and advance
        the conversation state to *change_indicator*."""
        self.store.add_attribute(attribute_name, read_message)
        # self.sender targets the chat this handler is bound to.
        self.sender.sendMessage(send_message, parse_mode='Markdown')
        self.indicator = change_indicator
# This function is to send a custom reply keyboard to the user
# (the messages displayed on the keyboards are passed to the function as strings)
def send_custom_keyboard(self, chat_id, text_info, *args):
kb = []
kb_2 = []
for arg in args:
kb.append(KeyboardButton(text=arg))
kb_2.append(kb)
mark_up = ReplyKeyboardMarkup(keyboard=kb_2, resize_keyboard = True, one_time_keyboard = True )
bot.sendMessage(chat_id, text=text_info, reply_markup=mark_up)
# This function is also to send a custom reply keyboard to the user
# (the messages displayed on the keyboards are passed to the function as a list of strings)
def send_list_keyboard(self, chat_id, text_info, lst):
kb = []
kb_2 = []
for item in lst:
kb.append(item)
kb_2.append(kb)
kb = []
mark_up = ReplyKeyboardMarkup(keyboard=kb_2, one_time_keyboard = True)
bot.sendMessage(chat_id, text=text_info, reply_markup=mark_up)
# This function is to search data (a key-value pair) from the database
def search_data(self, file, search_key, search_value):
with open(file, 'r') as handle:
user_sale_data = [json.loads(line) for line in handle]
# when the database is empty, return an empty list
if not user_sale_data:
handle.close()
return []
# when the database is not empty, search through the database and return the search result
else:
return_list = []
for data in user_sale_data:
if data[search_key] == search_value:
return_list.append(data)
handle.close()
return return_list
# This function is to search whether the keyword (search_value in the function)
# is part of the value which corresponds to the key
def search_keyword(self, search_key, search_value):
with open('item.json', 'r') as handle:
user_sale_data = [json.loads(line) for line in handle]
# when the database is empty
if not user_sale_data:
handle.close()
return []
# when the database is not empty
else:
return_list = []
for data in user_sale_data:
if search_value in data[search_key]:
return_list.append(data)
handle.close()
return return_list
    # This function is to format and send the key information of the item(s) and send to the user
    def we_have_found(self, found_list, user_id):
        """Send a one-line-per-item summary of *found_list* to *user_id*.

        Each line shows the item id (as a tappable /command), the secondary
        type and the price.  An empty list apologises, shows the main-menu
        keyboard and closes this handler.
        """
        result = text_have_found+'\n'
        result_2 = ''
        # if the item list is not empty
        if found_list:
            for data in found_list:
                for attribute in ['item_id', 'item_type_2', 'price']:
                    if attribute == 'item_id':
                        # Leading '/' makes the id a clickable bot command.
                        result_2 += '/' + str(data[attribute]) + '\t'
                    elif attribute == 'price':
                        result_2 += '$' + str(data[attribute]) + '\t'
                    else:
                        result_2 += str(data[attribute]) + ' \t'
                result_2 += '\n'
            # send the result to the user
            if result_2:
                result += result_2
                bot.sendMessage(user_id, result)
        # if the item list is empty, inform the user about this and end the service
        else:
            bot.sendMessage(user_id, text_sorry)
            self.send_list_keyboard(user_id, text_thank_you, [text_button_main])
            self.close()
    # This function is to send the detailed information of an item to the user
    def display_info(self, info, user_id, attribute_list):
        """Send the formatted record *info* to *user_id*.

        *attribute_list* selects and orders the fields shown; human-readable
        labels come from display_attribute_dict.  A falsy *info* prompts the
        user to pick a valid item id instead.
        """
        result = text_display_detail
        result_2 = ''
        # if info is not empty, generate the formatted message
        if info:
            for attribute in attribute_list:
                if attribute == 'item_id':
                    result_2 += '/' + str(info[attribute]) + '\n'
                elif attribute == 'price':
                    result_2 += 'price: $' + str(info[attribute]) + '\n'
                else:
                    result_2 += display_attribute_dict[attribute] + ': ' + str(info[attribute]) + '\n'
            # send the formatted message to the user
            if result_2:
                result += result_2
                bot.sendMessage(user_id, result)
        # if info is empty, it means that the user has input an invalid item id
        # in this case, remind the user to choose from the item list Aladdin provided
        else:
            bot.sendMessage(user_id, text_choose_from_list)
    # This funciton is to respond to the user's callback query
    def on_callback_query(self, msg):
        """Dispatch an inline-button press according to the current state.

        The branch taken depends on ``self.indicator`` (conversation state)
        and ``query_data`` (the button that was pressed).
        """
        query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
        # Close inline keyboard
        inline_message_id = msg['message']['chat']['id'], msg['message']['message_id']
        bot.editMessageReplyMarkup(inline_message_id, reply_markup=None)
        # --------------- SELL ---------------
        if self.indicator == 'choose_item_type':
            bot.sendMessage(from_id, text_chosen_type+' %s!' % query_data)
            self.store.add_attribute("item_type_1", query_data)
            self.send_custom_inline_keyboard(from_id, text_choose_type2, secondary_sale_list[query_data])
            self.indicator = 'choose_item_type_2'
        elif self.indicator == 'choose_item_type_2':
            bot.sendMessage(from_id, text_chosen_type+'%s!' % query_data)
            self.store.add_attribute("item_type_2", query_data)
            bot.sendMessage(from_id, text_input_price, parse_mode='Markdown')
            self.indicator = 'input_price'
        elif self.indicator == 'choose_input_image':
            if query_data == 'yes':
                bot.sendMessage(from_id, text_send_photo)
                self.indicator = 'input_image'
            elif query_data == 'no':
                self.store.add_attribute('photo_id', '')
                self.send_list_keyboard(from_id, text_successfully_add, [text_button_main])
                # at this point, since the user chose to not add a photo of the item, he/she has completed inputting
                # all the necessary information
                # we can now store information to the database
                # read from database
                with open('item.json', 'r') as handle:
                    user_sale_data = [json.loads(line) for line in handle]
                individual_sale_data = []
                for data in user_sale_data:
                    if data['chat_id'] == from_id:
                        individual_sale_data.append(data)
                # create item id from user id
                # the first part of item id is the user id
                # the last three digits of the item id indicate the index of the item in a user's item list
                if not individual_sale_data:
                    self.store.new_item['item_id'] = from_id * 1000 + 1
                else:
                    self.store.new_item['item_id'] = from_id * 1000 + len(individual_sale_data) + 1
                handle.close()
                # write to database
                with open('item.json', 'a') as handle:
                    json.dump(self.store.new_item, handle)
                    handle.write("\n")
                handle.close()
                self.close()
        # --------------- BUY ---------------
        elif self.indicator == 'how_to_search':
            if query_data == 'type':
                self.store.add_attribute('search_type', query_data)
                self.send_custom_inline_keyboard(from_id, text_search_type1, primary_sale_list)
                self.indicator = 'type_search'
            elif query_data == 'keyword':
                self.add_attribute_info('search_type', query_data, text_keyword, "keyword_search")
            elif query_data == 'shop name':
                self.add_attribute_info('search_type', query_data, text_shop_name, "shop_name_search")
            elif query_data == 'internet':
                bot.sendMessage(from_id, text_online_keyword)
                self.indicator = 'give_internet_search_result'
        elif self.indicator == 'type_search':
            bot.sendMessage(from_id, 'Okay, %s!' % query_data)
            self.store.add_attribute('item_type_1', query_data)
            self.send_custom_inline_keyboard(from_id, text_search_type2, secondary_sale_list[query_data])
            self.indicator = 'find'
        elif self.indicator == 'find':
            bot.sendMessage(from_id, text_searched_type+' %s!' % query_data)
            self.add_attribute_info('item_type_2', query_data, text_searching, "not decided")
            bot.sendAudio(from_id, gif_searching)
            search_result = self.search_data('item.json', 'item_type_2', self.store.new_item['item_type_2'])
            self.we_have_found(search_result, from_id)
            bot.sendMessage(from_id, text_choose_item, parse_mode='Markdown')
            self.indicator = 'choose_item'
        elif self.indicator == 'want_contact_or_not':
            if query_data == 'yes':
                # Item ids encode the owner's chat id in their upper digits.
                item_owner = int(self.store.shop_info['temp_item']/1000)
                with open('shop_info.json', 'r') as handle:
                    contact_data = [json.loads(line) for line in handle]
                # NOTE(review): if no shop record matches item_owner,
                # contact_send is never bound and sendContact below raises
                # NameError — worth guarding.
                for data in contact_data:
                    if data['contact']['contact']['user_id'] == item_owner:
                        contact_send = data
                bot.sendContact(from_id, contact_send['contact']['contact']['phone_number'], contact_send['contact']['contact']['first_name'], contact_send['contact']['contact']['last_name'])
            self.send_custom_inline_keyboard(from_id, text_check_another, yes_or_no)
            self.indicator = 'next_item'
        elif self.indicator == 'next_item':
            if query_data == 'yes':
                self.indicator = 'choose_item'
            elif query_data == 'no':
                self.send_list_keyboard(from_id, text_thank_you, [text_button_main])
                self.close()
        # --------------- CHANGE ---------------
        elif self.indicator == 'change_attribute':
            if query_data == 'delete':
                with open('item.json', 'r') as handle:
                    user_sale_data = [json.loads(line) for line in handle]
                # NOTE(review): removing from the list while iterating it can
                # skip the following element; safe only as long as at most one
                # record matches the item id.
                for data in user_sale_data:
                    if str(data['item_id']) == str(self.store.new_item['item_id']):
                        user_sale_data.remove(data)
                        self.store.new_item['original_data'] = data
                handle.close()
                with open('item.json', 'w') as handle:
                    for data in user_sale_data:
                        json.dump(data, handle)
                        handle.write("\n")
                handle.close()
                bot.sendMessage(from_id, text_successfully_delete)
                self.send_list_keyboard(from_id, text_thank_you, [text_button_main])
                self.close()
            else:
                bot.sendMessage(from_id, text_new_info)
                self.store.new_item['to_change_attribute'] = query_data
                self.indicator = 'new_info'
        elif self.indicator == 'check_another':
            if query_data == 'yes':
                self.indicator = 'choose_my_item'
            else:
                self.send_list_keyboard(from_id, text_thank_you, [text_button_main])
                self.close()
    def on_chat_message(self, msg):
        """Dispatch a plain chat message according to the current state.

        Slash-commands and the Cancel button are checked first and may reset
        or end the conversation; otherwise the branch matching
        ``self.indicator`` consumes the message.
        """
        content_type, chat_type, chat_id = telepot.glance(msg)
        # To allow the user cancel the action at any time and return to the main menu
        if ('text' in msg) and (msg['text'] == text_button_cancel):
            bot.sendMessage(chat_id, text_cancel)
            self.send_list_keyboard(chat_id, text_welcome_message, function_list)
            self.indicator = 'choose_function'
        if ('text' in msg) and (msg['text'] == '/help'):
            bot.sendMessage(chat_id, text_helping_message)
            self.close()
        # /sell and the "sell" menu button below share the same flow.
        if ('text' in msg) and (msg['text'] == '/sell'):
            self.send_custom_keyboard(chat_id, text_welcome_seller, text_button_cancel)
            search_result = self.search_data('shop_info.json', 'chat_id', chat_id)
            if not search_result:
                bot.sendMessage(chat_id, text_name_shop, parse_mode='MarkDown')
                self.store.add_shop('chat_id', chat_id)
                self.indicator = 'add_shop_name'
            else:
                bot.sendMessage(chat_id, text_own_shop + search_result[0]['shop_name'])
                self.send_custom_inline_keyboard(chat_id, text_choose_type1, primary_sale_list)
                self.indicator = 'choose_item_type'
        if ('text' in msg) and (msg['text'] == '/buy'):
            self.send_custom_keyboard(chat_id, text_welcome_buyer, text_button_cancel)
            self.send_custom_inline_keyboard(chat_id, text_choose_search_type, search_list)
            self.indicator = 'how_to_search'
        if ('text' in msg) and (msg['text'] == '/credit'):
            bot.sendMessage(chat_id, text_credit)
            bot.sendAudio(chat_id, gif_credit)
            self.close()
        # Main menu: the four buttons below are only valid in this state.
        if self.indicator == 'choose_function':
            self.store.new_item['chat_id'] = msg['chat']['id']
            # --------------- SELL ---------------
            if ('text' in msg) and (msg['text'] == text_button_sell):
                self.send_custom_keyboard(chat_id, text_welcome_seller, text_button_cancel)
                with open('shop_info.json', 'r') as handle:
                    contact_data = [json.loads(line) for line in handle]
                result_list = []
                for data in contact_data:
                    if data['contact']['contact']['user_id'] == chat_id:
                        result_list.append(data)
                handle.close()
                if not result_list:
                    # If chat id is not found in the shop_info file, the user is a new seller
                    # So Aladdin should ask the user to name the shop and store the shop data
                    bot.sendMessage(chat_id, text_name_shop, parse_mode='MarkDown')
                    self.store.add_shop('chat_id', chat_id)
                    self.indicator = 'add_shop_name'
                # If chat id is found in the shop_info file, the user is not new seller
                # So Aladdin should give the user the shop name and proceed
                else:
                    self.send_custom_inline_keyboard(chat_id, text_choose_type1, primary_sale_list)
                    self.indicator = 'choose_item_type'
                handle.close()
            # --------------- BUY ---------------
            elif ('text' in msg) and (msg['text'] == text_button_buy):
                self.send_custom_keyboard(chat_id, text_welcome_buyer, text_button_cancel)
                self.send_custom_inline_keyboard(chat_id, text_choose_search_type, search_list)
                self.indicator = 'how_to_search'
            # --------------- CHANGE ---------------
            elif ('text' in msg) and (msg['text'] == text_button_modify_items):
                self.send_custom_keyboard(chat_id, text_searching, text_button_cancel)
                search_result = self.search_data('item.json', 'chat_id', chat_id)
                # if the user does not have anything on sale
                if not search_result:
                    bot.sendMessage(chat_id, text_nothing_on_sale)
                    self.send_list_keyboard(chat_id, text_thank_you, [text_button_main])
                    self.close()
                # if the user's shop is not empty
                else:
                    bot.sendMessage(chat_id, text_all)
                    self.we_have_found(search_result, chat_id)
                    self.indicator = 'choose_item_modify'
            # --------------- CHECK ---------------
            elif ('text' in msg) and (msg['text'] == text_button_my_shop):
                self.send_custom_keyboard(chat_id, text_my, text_button_cancel)
                search_result = self.search_data('item.json', 'chat_id', chat_id)
                self.we_have_found(search_result, chat_id)
                self.indicator = 'choose_my_item'
        # --------------- SELL ---------------
        elif self.indicator == 'add_shop_name':
            try:
                self.store.add_shop('shop_name', msg['text'])
                bot.sendMessage(chat_id, text_get_contact_info, parse_mode='MarkDown', reply_markup=ReplyKeyboardMarkup(keyboard = [[KeyboardButton(text='Yes', request_contact = True)]], one_time_keyboard = True))
                self.indicator = 'add_contact'
            except KeyError:
                # Non-text message: ask again for a shop name.
                bot.sendMessage(chat_id, text_type_shop_name)
        elif self.indicator == 'add_contact':
            # The whole contact message is stored verbatim as the shop contact.
            self.store.shop_info['contact'] = msg
            self.send_custom_keyboard(chat_id, text_welcome_seller, text_button_cancel)
            try:
                with open('shop_info.json', 'a') as handle:
                    json.dump(self.store.shop_info, handle)
                    handle.write("\n")
                self.send_custom_inline_keyboard(chat_id, text_choose_type1, primary_sale_list)
                self.indicator = 'choose_item_type'
            except KeyError:
                bot.sendMessage(chat_id, text_type_contact_info)
        elif self.indicator == 'input_price':
            try:
                price_int = int(msg['text'])
                self.add_attribute_info('price', price_int, text_input_description,
                                        "input_description")
            except ValueError:
                bot.sendMessage(chat_id, text_only_number)
            except KeyError:
                bot.sendMessage(chat_id, text_only_number)
        elif self.indicator == "input_description":
            try:
                self.store.add_attribute('description', msg['text'])
                self.send_custom_inline_keyboard(chat_id, text_add_photo, yes_or_no)
                self.indicator = 'choose_input_image'
            except KeyError:
                bot.sendMessage(chat_id, text_type_description)
        elif self.indicator == 'input_image':
            try:
                self.store.add_attribute('photo_id', msg['photo'][0]['file_id'])
                self.send_list_keyboard(chat_id, text_successfully_add,
                                        [text_button_main])
            except KeyError:
                bot.sendMessage(chat_id, text_send_photo)
            # read from database
            # NOTE(review): this persistence block also runs on the KeyError
            # path above (message without a photo), saving the item without a
            # photo_id and closing the handler — confirm this is intended.
            with open('item.json', 'r') as handle:
                user_sale_data = [json.loads(line) for line in handle]
            individual_sale_data = []
            for data in user_sale_data:
                if data['chat_id'] == chat_id:
                    individual_sale_data.append(data)
            # Item id = owner chat id * 1000 + per-owner sequence number.
            if not individual_sale_data:
                self.store.new_item['item_id'] = chat_id * 1000 + 1
            else:
                self.store.new_item['item_id'] = chat_id * 1000 + len(individual_sale_data) + 1
            handle.close()
            # write to database
            with open('item.json', 'a') as handle:
                json.dump(self.store.new_item, handle)
                handle.write("\n")
            handle.close()
            self.close()
        # --------------- BUY ---------------
        # In the states below, a plain text message is unexpected: re-prompt.
        elif self.indicator == 'choose_item_type':
            bot.sendMessage(chat_id, text_search_type1)
        elif self.indicator == 'choose_item_type_2':
            bot.sendMessage(chat_id, text_search_type2)
        elif self.indicator == 'how_to_search':
            bot.sendMessage(chat_id, text_choose_search_type)
        elif self.indicator == 'type_search':
            bot.sendMessage(chat_id, text_search_type1)
        elif self.indicator == 'find':
            bot.sendMessage(chat_id, text_search_type2)
        elif self.indicator == 'keyword_search':
            try:
                search_result = self.search_keyword('description', msg['text'])
                self.we_have_found(search_result, chat_id)
                bot.sendMessage(chat_id, text_choose_item, parse_mode='MarkDown')
                self.indicator = 'choose_item'
            except KeyError:
                bot.sendMessage(chat_id, text_type_keyword)
        elif self.indicator == 'shop_name_search':
            try:
                shop_result = self.search_data('shop_info.json', 'shop_name', msg['text'])
                search_result = self.search_data('item.json', 'chat_id', shop_result[0]['chat_id'])
                self.we_have_found(search_result, chat_id)
                bot.sendMessage(chat_id, text_choose_item, parse_mode='MarkDown')
                self.indicator = 'choose_item'
            except IndexError:
                # No shop with that name.
                self.send_list_keyboard(chat_id, text_sorry_shop, [text_button_main])
                self.close()
            except KeyError:
                bot.sendMessage(chat_id, text_type_shop_name)
        elif self.indicator == 'give_internet_search_result':
            if 'text' in msg:
                reply_amazon = search_info(msg)
                reply_taobao = search_taobao(msg)
                bot.sendMessage(chat_id, reply_amazon, parse_mode='Markdown')
                bot.sendMessage(chat_id, reply_taobao)
                self.send_list_keyboard(chat_id, text_thank_you, [text_button_main])
                self.close()
            else:
                bot.sendMessage(chat_id, text_type_keyword)
        elif self.indicator == 'choose_item':
            try:
                # Item ids arrive as "/<digits>"; strip the leading slash.
                id_int = int(msg['text'][1:])
                self.store.add_shop('temp_item', id_int)
                try:
                    item_info = self.search_data('item.json', 'item_id', id_int)
                    self.display_info(item_info[0], chat_id, ['item_id', 'item_type_2', 'description', 'price'])
                    if not item_info[0]['photo_id']:
                        pass
                    else:
                        bot.sendMessage(chat_id, text_display_photo)
                        bot.sendPhoto(chat_id, item_info[0]['photo_id'])
                    self.send_custom_inline_keyboard(chat_id, text_want_contact, yes_or_no)
                    self.indicator = 'want_contact_or_not'
                except IndexError:
                    bot.sendMessage(chat_id, text_choose_from_list)
            except ValueError:
                bot.sendMessage(chat_id, text_choose_from_list)
            except KeyError:
                bot.sendMessage(chat_id, text_choose_from_list)
        # --------------- CHECK ---------------
        elif self.indicator == 'choose_my_item':
            try:
                id_int = int(msg['text'][1:])
                item_info = self.search_data('item.json', 'item_id', id_int)
                self.display_info(item_info[0], chat_id, ['item_id', 'item_type_2', 'description', 'price'])
                self.indicator = 'check_another'
                self.send_custom_inline_keyboard(chat_id, text_check_another, yes_or_no)
            except ValueError:
                bot.sendMessage(chat_id, text_choose_from_list)
            except KeyError:
                bot.sendMessage(chat_id, text_choose_from_list)
        # --------------- CHANGE ---------------
        elif self.indicator == 'choose_item_modify':
            try:
                self.store.new_item['item_id'] = msg['text'][1:]
                self.send_custom_inline_keyboard(chat_id, text_modify_method, attribute_list)
                self.indicator = 'change_attribute'
            except KeyError:
                bot.sendMessage(chat_id, text_choose_from_list)
        elif self.indicator == 'change_attribute':
            bot.sendMessage(chat_id, text_new_info)
        elif self.indicator == 'new_info':
            # extract data from the original file
            with open('item.json', 'r') as handle:
                user_sale_data = [json.loads(line) for line in handle]
            # remove the info of the item to be modified from database
            # NOTE(review): mutating the list while iterating; safe only if at
            # most one record matches the item id.
            for data in user_sale_data:
                if str(data['item_id']) == str(self.store.new_item['item_id']):
                    user_sale_data.remove(data)
                    self.store.new_item['original_data'] = data
            handle.close()
            # write the modified data to the file
            with open('item.json', 'w') as handle:
                for data in user_sale_data:
                    json.dump(data, handle)
                    handle.write("\n")
            handle.close()
            # modify the item info and append to the file
            try:
                if self.store.new_item['to_change_attribute'] != 'price':
                    self.store.new_item['original_data'][self.store.new_item['to_change_attribute']] = msg['text']
                elif self.store.new_item['to_change_attribute'] == 'price':
                    try:
                        price_int = int(msg['text'])
                        self.store.new_item['original_data'][self.store.new_item['to_change_attribute']] = price_int
                    except ValueError:
                        bot.sendMessage(chat_id, text_only_number)
                        self.send_list_keyboard(chat_id, text_thank_you, [''])
                        self.close()
                with open('item.json', 'a') as handle:
                    json.dump(self.store.new_item['original_data'], handle)
                    handle.write("\n")
                handle.close()
                bot.sendMessage(chat_id, text_successfully_change)
            except KeyError:
                bot.sendMessage(chat_id, text_choose_from_list)
            self.close()
# SECURITY NOTE(review): the bot token is committed in source control.  Move
# it to an environment variable / secrets store and revoke this exposed token
# via BotFather.
TOKEN = '471830109:AAEIixxauDJp7dY9AgeMZ7Mpg6TZ3tLKucw'
# One Aladdin handler per chat id, kept alive for up to an hour of inactivity.
bot = telepot.DelegatorBot(TOKEN, [
    pave_event_space()(
        per_chat_id(), create_open, Aladdin, timeout=3600000),
])
MessageLoop(bot).run_as_thread()
print('Listening ...')
# Keep the main thread alive while the message loop runs in the background.
while 1:
    time.sleep(200)
|
# Generated by Django 3.2.5 on 2021-07-28 08:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add PatientHealthHistory.date_recorded (auto_now_add timestamp)."""

    dependencies = [
        ('clinic_app', '0011_feedback'),
    ]
    operations = [
        migrations.AddField(
            model_name='patienthealthhistory',
            name='date_recorded',
            # The one-off default back-fills existing rows when the migration
            # runs forward; preserve_default=False drops it from the model
            # state afterwards (auto_now_add covers new rows).
            field=models.DateTimeField(auto_now_add=True, default='2021-07-28 00:10:25.142087+03'),
            preserve_default=False,
        ),
    ]
|
# Double leading underscores do NOT trigger name mangling at module level —
# this is just a module global (and is skipped by "from module import *").
__module_attr = "module1.__attr"
class test1(object):
    """Demonstrates private-name mangling on class and instance attributes."""

    # Mangled: stored on the class as _test1__var1.
    __var1 = "test1.__var1"
    # Single-word name, no leading double underscore: stored as-is.
    nvar1 = "test1.nvar1"

    def test(self):
        # Mangled inside the class body: becomes self._test1__var2.
        self.__var2 = "test1(self).__var2"
        # Trailing double underscore makes it dunder-style: NOT mangled.
        self.__var3__ = "test1(self).__var3__"
        self.nvar2 = "test1(self).nvar2"
        # Both lookups below are rewritten to _test1__var1 by the compiler.
        print("test1.__var1:{0}".format(test1.__var1))
        print("test1(self).__var1: {0}".format(self.__var1))
        print("test1(self).__dict__: {0}".format(self.__dict__))
class test2(object):
    """Shows that mangling uses the class where the code APPEARS, not the
    class of the object being mutated."""

    def test(self, o):
        # Mangled with THIS class's name: sets o._test2__var4.
        o.__var4 = "test2.o.__var4"
        # Dunder-style name: left unmangled.
        o.__var5__ = "test2.o.__var5__"
        o.nvar3 = "test2.o.nvar3"
        print("test2.o.__dict__:{0}".format(o.__dict__))
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import os
import sys
from collections import defaultdict
from six.moves import range
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.java_tests import JavaTests as junit_tests
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.coverage.base import Coverage
from pants.backend.jvm.tasks.coverage.cobertura import Cobertura, CoberturaTaskSettings
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError, TestFailedTaskError
from pants.base.workunit import WorkUnitLabel
from pants.binaries import binary_util
from pants.build_graph.target_scopes import Scopes
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.task.testrunner_task_mixin import TestRunnerTaskMixin
from pants.util.argutil import ensure_arg, remove_arg
from pants.util.contextutil import environment_as
from pants.util.strutil import pluralize
from pants.util.xml_parser import XmlParser
# TODO(ji): Add unit tests.
def _classfile_to_classname(cls):
  """Translate a relative .class file path into a class name, delegating to
  ClasspathUtil."""
  return ClasspathUtil.classname_for_rel_classfile(cls)
def interpret_test_spec(test_spec):
  """Parses a test spec string.

  Returns either a (sourcefile,method) on the left, or a (classname,method)
  on the right.  The method part, when present, keeps its leading '#'.
  """
  parts = test_spec.split('#', 2)
  target = parts[0]
  method = '#' + parts[1] if len(parts) == 2 else ''
  # A spec naming an existing file is a source file; anything else is
  # treated as a class name.
  if os.path.exists(target):
    return ((target, method), None)
  return (None, (target, method))
class JUnitRun(TestRunnerTaskMixin, JvmToolTaskMixin, JvmTask):
  """
  :API: public
  """
  # Java entry point of the bundled junit-runner tool.
  _MAIN = 'org.pantsbuild.tools.junit.ConsoleRunner'

  @classmethod
  def register_options(cls, register):
    """Register junit task options and the bundled junit-runner JVM tool."""
    super(JUnitRun, cls).register_options(register)
    # sys.maxint: this module targets Python 2 (see the __future__ imports).
    register('--batch-size', advanced=True, type=int, default=sys.maxint,
             help='Run at most this many tests in a single test process.')
    register('--test', type=list,
             help='Force running of just these tests. Tests can be specified using any of: '
                  '[classname], [classname]#[methodname], [filename] or [filename]#[methodname]')
    register('--per-test-timer', type=bool, help='Show progress and timer for each test.')
    register('--default-concurrency', advanced=True,
             choices=junit_tests.VALID_CONCURRENCY_OPTS, default=junit_tests.CONCURRENCY_SERIAL,
             help='Set the default concurrency mode for running tests not annotated with'
                  ' @TestParallel or @TestSerial.')
    register('--default-parallel', advanced=True, type=bool,
             removal_hint='Use --concurrency instead.', removal_version='1.1.0',
             help='Run classes without @TestParallel or @TestSerial annotations in parallel.')
    register('--parallel-threads', advanced=True, type=int, default=0,
             help='Number of threads to run tests in parallel. 0 for autoset.')
    register('--test-shard', advanced=True,
             help='Subset of tests to run, in the form M/N, 0 <= M < N. '
                  'For example, 1/3 means run tests number 2, 5, 8, 11, ...')
    register('--output-mode', choices=['ALL', 'FAILURE_ONLY', 'NONE'], default='NONE',
             help='Specify what part of output should be passed to stdout. '
                  'In case of FAILURE_ONLY and parallel tests execution '
                  'output can be partial or even wrong. '
                  'All tests output also redirected to files in .pants.d/test/junit.')
    register('--cwd', advanced=True,
             help='Set the working directory. If no argument is passed, use the build root. '
                  'If cwd is set on a target, it will supersede this argument.')
    register('--strict-jvm-version', type=bool, advanced=True,
             help='If true, will strictly require running junits with the same version of java as '
                  'the platform -target level. Otherwise, the platform -target level will be '
                  'treated as the minimum jvm to run.')
    register('--failure-summary', type=bool, default=True,
             help='If true, includes a summary of which test-cases failed at the end of a failed '
                  'junit run.')
    register('--allow-empty-sources', type=bool, advanced=True,
             help='Allows a junit_tests() target to be defined with no sources. Otherwise,'
                  'such a target will raise an error during the test run.')
    register('--use-experimental-runner', type=bool, advanced=True,
             help='Use experimental junit-runner logic for more options for parallelism.')
    cls.register_jvm_tool(register,
                          'junit',
                          classpath=[
                            JarDependency(org='org.pantsbuild', name='junit-runner', rev='1.0.7'),
                          ],
                          main=JUnitRun._MAIN,
                          # TODO(John Sirois): Investigate how much less we can get away with.
                          # Clearly both tests and the runner need access to the same @Test,
                          # @Before, as well as other annotations, but there is also the Assert
                          # class and some subset of the @Rules, @Theories and @RunWith APIs.
                          custom_rules=[
                            Shader.exclude_package('junit.framework', recursive=True),
                            Shader.exclude_package('org.junit', recursive=True),
                            Shader.exclude_package('org.hamcrest', recursive=True),
                            Shader.exclude_package('org.pantsbuild.junit.annotations', recursive=True),
                          ])
    # TODO: Yuck, but will improve once coverage steps are in their own tasks.
    for c in [Coverage, Cobertura]:
      c.register_options(register, cls.register_jvm_tool)
  @classmethod
  def subsystem_dependencies(cls):
    """Extend the inherited subsystem deps with the JVM distribution locator."""
    return super(JUnitRun, cls).subsystem_dependencies() + (DistributionLocator,)
@classmethod
def request_classes_by_source(cls, test_specs):
"""Returns true if the given test specs require the `classes_by_source` product to satisfy."""
for test_spec in test_specs:
src_spec, _ = interpret_test_spec(test_spec)
if src_spec:
return True
return False
  @classmethod
  def prepare(cls, options, round_manager):
    """Declare the upstream products this task consumes."""
    super(JUnitRun, cls).prepare(options, round_manager)
    # Compilation and resource preparation must have completed.
    round_manager.require_data('runtime_classpath')
    # If the given test specs require the classes_by_source product, request it.
    if cls.request_classes_by_source(options.test or []):
      round_manager.require_data('classes_by_source')
  def __init__(self, *args, **kwargs):
    """Translate the parsed task options into ConsoleRunner CLI arguments."""
    super(JUnitRun, self).__init__(*args, **kwargs)
    options = self.get_options()
    self._coverage = None
    # Cobertura is the only coverage processor supported here.
    if options.coverage or options.is_flagged('coverage_open'):
      coverage_processor = options.coverage_processor
      if coverage_processor == 'cobertura':
        settings = CoberturaTaskSettings.from_task(self)
        self._coverage = Cobertura(settings)
      else:
        raise TaskError('unknown coverage processor {0}'.format(coverage_processor))
    self._tests_to_run = options.test
    self._batch_size = options.batch_size
    self._fail_fast = options.fail_fast
    self._working_dir = options.cwd or get_buildroot()
    self._strict_jvm_version = options.strict_jvm_version
    self._args = copy.copy(self.args)
    self._failure_summary = options.failure_summary
    # --output-mode maps straight onto the runner's -output-mode flag.
    if options.output_mode == 'ALL':
      self._args.append('-output-mode=ALL')
    elif options.output_mode == 'FAILURE_ONLY':
      self._args.append('-output-mode=FAILURE_ONLY')
    else:
      self._args.append('-output-mode=NONE')
    if self._fail_fast:
      self._args.append('-fail-fast')
    self._args.append('-outdir')
    self._args.append(self.workdir)
    if options.per_test_timer:
      self._args.append('-per-test-timer')
    # TODO(zundel): Simply remove when --default_parallel finishes deprecation
    if options.default_parallel:
      self._args.append('-default-parallel')
    if options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_BOTH:
      self.context.log.warn('--default-concurrency=PARALLEL_BOTH is experimental.')
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_BOTH')
    elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_CLASSES')
    elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
      self.context.log.warn('--default-concurrency=PARALLEL_METHODS is experimental.')
      self._args.append('-default-concurrency')
      self._args.append('PARALLEL_METHODS')
    elif options.default_concurrency == junit_tests.CONCURRENCY_SERIAL:
      # TODO(zundel): we can't do anything here yet while the --default-parallel
      # option is in deprecation mode.
      pass
    self._args.append('-parallel-threads')
    self._args.append(str(options.parallel_threads))
    if options.test_shard:
      self._args.append('-test-shard')
      self._args.append(options.test_shard)
    if options.use_experimental_runner:
      self._args.append('-use-experimental-runner')
def classpath(self, targets, classpath_product=None):
  """Return the classpath for *targets*, restricted to JVM test scopes.

  Delegates to the superclass implementation but pins ``include_scopes`` to
  ``Scopes.JVM_TEST_SCOPES`` so only test-scoped entries are included.
  """
  return super(JUnitRun, self).classpath(targets, classpath_product=classpath_product,
                                         include_scopes=Scopes.JVM_TEST_SCOPES)
def preferred_jvm_distribution_for_targets(self, targets):
  """Pick a JVM distribution compatible with the platforms of all JvmTarget entries.

  Non-JvmTarget entries in *targets* are ignored.  Version-match strictness
  follows the --strict-jvm-version option captured at construction time.
  """
  return JvmPlatform.preferred_jvm_distribution([target.platform for target in targets
                                                 if isinstance(target, JvmTarget)],
                                                self._strict_jvm_version)
def _spawn(self, distribution, executor=None, *args, **kwargs):
  """Returns a processhandler to a process executing java.

  :param Executor executor: the java subprocess executor to use. If not specified, construct
    using the distribution.
  :param Distribution distribution: The JDK or JRE installed.
  :rtype: ProcessHandler
  """
  actual_executor = executor or SubprocessExecutor(distribution)
  # Async execution: the caller is responsible for waiting on the returned handler.
  return distribution.execute_java_async(*args,
                                         executor=actual_executor,
                                         **kwargs)
def execute_java_for_targets(self, targets, *args, **kwargs):
  """Execute java for targets using the test mixin spawn and wait.

  Activates timeouts and other common functionality shared among tests.
  """
  distribution = self.preferred_jvm_distribution_for_targets(targets)
  # Honor an explicitly passed executor; otherwise build one for the chosen JVM.
  actual_executor = kwargs.get('executor') or SubprocessExecutor(distribution)
  return self._spawn_and_wait(*args, executor=actual_executor, distribution=distribution, **kwargs)
def execute_java_for_coverage(self, targets, executor=None, *args, **kwargs):
  """Execute java for targets directly and don't use the test mixin.

  This execution won't be wrapped with timeouts and other testmixin code common
  across test targets. Used for coverage instrumentation.
  """
  distribution = self.preferred_jvm_distribution_for_targets(targets)
  actual_executor = executor or SubprocessExecutor(distribution)
  # Synchronous execution (contrast with _spawn / execute_java_for_targets).
  return distribution.execute_java(*args, executor=actual_executor, **kwargs)
def _collect_test_targets(self, targets):
  """Returns a mapping from test names to target objects for all tests that are included in targets.

  If self._tests_to_run is set, return {test: None} for these tests instead.
  """
  tests_from_targets = dict(list(self._calculate_tests_from_targets(targets)))
  if targets and self._tests_to_run:
    # If there are some junit_test targets in the graph, find ones that match the requested
    # test(s).
    tests_with_targets = {}
    unknown_tests = []
    for test in self._get_tests_to_run():
      # A test might contain #specific_method, which is not needed to find a target.
      test_class_name = test.partition('#')[0]
      target = tests_from_targets.get(test_class_name)
      if target is None:
        # Collect all misses so the user sees every bad specifier in one error.
        unknown_tests.append(test)
      else:
        tests_with_targets[test] = target
    if len(unknown_tests) > 0:
      raise TaskError("No target found for test specifier(s):\n\n '{}'\n\nPlease change " \
                      "specifier or bring in the proper target(s)."
                      .format("'\n '".join(unknown_tests)))
    return tests_with_targets
  else:
    return tests_from_targets
def _get_failed_targets(self, tests_and_targets):
  """Return a mapping of target -> set of individual test cases that failed.

  Targets with no failed tests are omitted.
  Analyzes JUnit XML files to figure out which test had failed.
  The individual test cases are formatted strings of the form org.foo.bar.classname#methodName.

  :tests_and_targets: {test: target} mapping.
  """
  def get_test_filename(test_class_name):
    # JUnit XML report files replace '$' (inner classes) with '-'.
    return os.path.join(self.workdir, 'TEST-{0}.xml'.format(test_class_name.replace('$', '-')))

  # Plain dict: the original used a bare defaultdict() with no factory, which
  # behaves like a dict but raises a confusing KeyError if ever read-missed.
  xml_filenames_to_targets = {}
  for test, target in tests_and_targets.items():
    if target is None:
      # Fixed log format: the original used 'test %{0}', mixing %-style and
      # str.format placeholders, which printed a stray literal '%'.
      self.context.log.warn('Unknown target for test {0}'.format(test))
    # Look for a TEST-*.xml file that matches the classname or a containing classname,
    # peeling off one '$'-suffixed inner-class component per miss.
    test_class_name = test
    for _part in test.split('$'):
      filename = get_test_filename(test_class_name)
      if os.path.exists(filename):
        xml_filenames_to_targets[filename] = target
        break
      else:
        test_class_name = test_class_name.rsplit('$', 1)[0]

  failed_targets = defaultdict(set)
  for xml_filename, target in xml_filenames_to_targets.items():
    try:
      xml = XmlParser.from_file(xml_filename)
      failures = int(xml.get_attribute('testsuite', 'failures'))
      errors = int(xml.get_attribute('testsuite', 'errors'))
      if target and (failures or errors):
        for testcase in xml.parsed.getElementsByTagName('testcase'):
          test_failed = testcase.getElementsByTagName('failure')
          test_errored = testcase.getElementsByTagName('error')
          if test_failed or test_errored:
            failed_targets[target].add('{testclass}#{testname}'.format(
              testclass=testcase.getAttribute('classname'),
              testname=testcase.getAttribute('name'),
            ))
    except (XmlParser.XmlError, ValueError) as e:
      # A malformed report shouldn't abort failure analysis of the other files.
      self.context.log.error('Error parsing test result file {0}: {1}'.format(xml_filename, e))

  return dict(failed_targets)
def _run_tests(self, tests_to_targets):
  """Run the given tests in batches grouped by shared runtime properties.

  Tests are partitioned by (workdir, platform, jvm options, env vars,
  concurrency, threads), then further split into batches of at most
  self._batch_size.  Raises TestFailedTaskError if any batch exits non-zero.
  """
  if self._coverage:
    extra_jvm_options = self._coverage.extra_jvm_options
    classpath_prepend = self._coverage.classpath_prepend
    classpath_append = self._coverage.classpath_append
  else:
    extra_jvm_options = []
    classpath_prepend = ()
    classpath_append = ()

  # Group tests so that every test in a group can share one JVM invocation.
  tests_by_properties = self._tests_by_properties(
    tests_to_targets,
    self._infer_workdir,
    lambda target: target.test_platform,
    lambda target: target.payload.extra_jvm_options,
    lambda target: target.payload.extra_env_vars,
    lambda target: target.concurrency,
    lambda target: target.threads
  )

  # the below will be None if not set, and we'll default back to runtime_classpath
  classpath_product = self.context.products.get_data('instrument_classpath')

  result = 0
  for properties, tests in tests_by_properties.items():
    (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties
    for batch in self._partition(tests):
      # Batches of test classes will likely exist within the same targets: dedupe them.
      relevant_targets = set(map(tests_to_targets.get, batch))
      complete_classpath = OrderedSet()
      complete_classpath.update(classpath_prepend)
      complete_classpath.update(self.tool_classpath('junit'))
      complete_classpath.update(self.classpath(relevant_targets,
                                               classpath_product=classpath_product))
      complete_classpath.update(classpath_append)
      distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)
      # Override cmdline args with values from junit_test() target that specify concurrency:
      args = self._args + [u'-xmlreport']
      # TODO(zundel): Combine these together into a single -concurrency choices style argument
      if concurrency == junit_tests.CONCURRENCY_SERIAL:
        args = remove_arg(args, '-default-parallel')
      elif concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
        args = ensure_arg(args, '-default-parallel')
      elif concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
        self.context.log.warn('Not implemented: parallel_methods')
      elif concurrency == junit_tests.CONCURRENCY_PARALLEL_BOTH:
        self.context.log.warn('specifying {} is experimental.'.format(concurrency))
        args = ensure_arg(args, '-default-parallel')
        args = ensure_arg(args, '-parallel-methods')
      if threads is not None:
        # A per-target thread count overrides the task-level -parallel-threads.
        args = remove_arg(args, '-parallel-threads', has_param=True)
        args += ['-parallel-threads', str(threads)]

      with binary_util.safe_args(batch, self.get_options()) as batch_tests:
        self.context.log.debug('CWD = {}'.format(workdir))
        self.context.log.debug('platform = {}'.format(platform))
        with environment_as(**dict(target_env_vars)):
          # Accumulate exit codes; any non-zero batch marks the whole run failed.
          result += abs(self._spawn_and_wait(
            executor=SubprocessExecutor(distribution),
            distribution=distribution,
            classpath=complete_classpath,
            main=JUnitRun._MAIN,
            jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
            args=args + batch_tests,
            workunit_factory=self.context.new_workunit,
            workunit_name='run',
            workunit_labels=[WorkUnitLabel.TEST],
            cwd=workdir,
            synthetic_jar_dir=self.workdir,
            create_synthetic_jar=self.synthetic_classpath,
          ))
        if result != 0 and self._fail_fast:
          break

  if result != 0:
    failed_targets_and_tests = self._get_failed_targets(tests_to_targets)
    failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
    error_message_lines = []
    if self._failure_summary:
      for target in failed_targets:
        error_message_lines.append('\n{0}{1}'.format(' '*4, target.address.spec))
        for test in sorted(failed_targets_and_tests[target]):
          error_message_lines.append('{0}{1}'.format(' '*8, test))
    error_message_lines.append(
      '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
      .format(main=JUnitRun._MAIN, code=result, failed=len(failed_targets),
              targets=pluralize(len(failed_targets), 'target'))
    )
    raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
def _infer_workdir(self, target):
if target.cwd is not None:
return target.cwd
return self._working_dir
def _tests_by_property(self, tests_to_targets, get_property):
  """Group test names by ``get_property(target)``.

  :returns: {property value: [test names]}, each group preserving insertion order.
  """
  grouped = defaultdict(OrderedSet)
  for test_name, target in tests_to_targets.items():
    grouped[get_property(target)].add(test_name)
  return {prop: list(test_names) for prop, test_names in grouped.items()}
def _tests_by_properties(self, tests_to_targets, *properties):
def combined_property(target):
return tuple(prop(target) for prop in properties)
return self._tests_by_property(tests_to_targets, combined_property)
def _partition(self, tests):
stride = min(self._batch_size, len(tests))
for i in range(0, len(tests), stride):
yield tests[i:i + stride]
def _get_tests_to_run(self):
  """Expand the user-supplied test specs into fully-qualified class (+method) names."""
  for test_spec in self._tests_to_run:
    src_spec, cls_spec = interpret_test_spec(test_spec)
    if src_spec:
      sourcefile, methodname = src_spec
      for classname in self._classnames_from_source_file(sourcefile):
        # Tack the methodname onto all classes in the source file, as we
        # can't know which method the user intended.
        yield classname + methodname
    else:
      classname, methodname = cls_spec
      yield classname + methodname
def _calculate_tests_from_targets(self, targets):
  """
  :param list targets: list of targets to calculate test classes for.
  generates tuples (class_name, target).
  """
  classpath_products = self.context.products.get_data('runtime_classpath')
  for target in targets:
    contents = ClasspathUtil.classpath_contents((target,), classpath_products, confs=self.confs)
    for f in contents:
      # Entries that do not map to a class name come back falsy and are skipped.
      classname = ClasspathUtil.classname_for_rel_classfile(f)
      if classname:
        yield (classname, target)
def _classnames_from_source_file(self, srcfile):
  """Yield the class names produced from *srcfile*, per the classes_by_source product."""
  relsrc = os.path.relpath(srcfile, get_buildroot())
  source_products = self.context.products.get_data('classes_by_source').get(relsrc)
  if not source_products:
    # It's valid - if questionable - to have a source file with no classes when, for
    # example, the source file has all its code commented out.
    self.context.log.warn('Source file {0} generated no classes'.format(srcfile))
  else:
    for _, classes in source_products.rel_paths():
      for cls in classes:
        yield _classfile_to_classname(cls)
def _test_target_filter(self):
  """Return a predicate selecting junit_tests targets."""
  def is_junit_test_target(candidate):
    return isinstance(candidate, junit_tests)
  return is_junit_test_target
def _validate_target(self, target):
# TODO: move this check to an optional phase in goal_runner, so
# that missing sources can be detected early.
if not target.payload.sources.source_paths and not self.get_options().allow_empty_sources:
msg = 'JavaTests target must include a non-empty set of sources.'
raise TargetDefinitionException(target, msg)
def _execute(self, targets):
  """Implements the primary junit test execution.

  This method is called by the TestRunnerTaskMixin, which contains the primary Task.execute function
  and wraps this method in timeouts.
  """
  # We only run tests within java_tests/junit_tests targets.
  #
  # But if coverage options are specified, we want to instrument
  # and report on all the original targets, not just the test targets.
  #
  # We've already filtered out the non-test targets in the
  # TestRunnerTaskMixin, so the mixin passes to us both the test
  # targets and the unfiltered list of targets
  tests_and_targets = self._collect_test_targets(self._get_test_targets())
  if not tests_and_targets:
    return

  def compute_complete_classpath():
    return self.classpath(targets)

  # Let other tasks proceed while the (potentially long) test run executes.
  self.context.release_lock()
  if self._coverage:
    self._coverage.instrument(
      targets, tests_and_targets.keys(), compute_complete_classpath, self.execute_java_for_coverage)

  def _do_report(exception=None):
    if self._coverage:
      self._coverage.report(
        targets, tests_and_targets.keys(), self.execute_java_for_coverage, tests_failed_exception=exception)

  try:
    self._run_tests(tests_and_targets)
    _do_report(exception=None)
  except TaskError as e:
    # Coverage is still reported for failed runs before the failure propagates.
    _do_report(exception=e)
    raise
|
#!/usr/bin/env python2
import os
import ConfigParser
import time
import subprocess
import readline
class color:
    # ANSI terminal escape sequences used to colorize menu output.
    HEADER = '\033[95m'
    IMPORTANT = '\33[35m'
    NOTICE = '\033[33m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    RED = '\033[91m'
    WHITE = '\033[37m'
    END = '\033[0m'  # reset all attributes
    UNDERLINE = '\033[4m'
    LOGGING = '\33[34m'
def clearScr():
    """Clear the terminal by shelling out to `clear`."""
    os.system('clear')
def yesOrNo():
    """Prompt the user; return True iff the answer is one of the configured `yes` words.

    Uses raw_input: this is a Python 2 script (see the shebang and ConfigParser
    import), where the original input() call eval()s whatever the user types -
    a crash on a plain 'Y' and an arbitrary-code-execution risk.
    """
    return raw_input("Continue Y / N: ") in yes
def return_fw():
    """Hand control back to the surrounding bash framework by sourcing it and calling main.

    The original bound the exit status to an unused local; the return code is
    not inspected, so the assignment is dropped.
    """
    bashCommand = ". ./cyanide-framework.sh && main"
    subprocess.call(['bash', '-c', bashCommand])
def logo():
    """Print the red/white ANSI-art banner (the strings below are exact art - do not reflow)."""
    print(color.RED+" ..,;:ccccccc:;...")
    print(color.WHITE+" ..,clllc:;;;;;;:cllc,.")
    print(color.RED+" .,cllc,..............';;'.")
    print(color.WHITE+" .;lol;......"+color.WHITE+"_______"+color.RED+"....;lol;.")
    print(color.RED+" .,lol;......"+color.WHITE+"/ _____/"+color.RED+".....;lol;.. ")
    print(color.WHITE+" .coo......."+color.WHITE+"/ /"+color.RED+".............coo")
    print(color.RED+".'lol,....."+color.WHITE+"/ /"+color.RED+"............'lol,.")
    print(color.WHITE+".,lol,...."+color.WHITE+"/ /_____"+color.RED+"........,lol,.")
    print(color.RED+".,lol,...."+color.WHITE+"\______/"+color.RED+".......,lol,.")
    print(color.WHITE+" .:ooc'.................:ooc'")
    print(color.RED+" .'cllc'.............cllc.")
# Module-level setup: resolve paths relative to this script and load cyanide.cfg.
installDir = os.path.dirname(os.path.abspath(__file__)) + '/'
configFile = installDir + "/cyanide.cfg"
print(installDir)
config = ConfigParser.RawConfigParser()
config.read(configFile)
toolDir = installDir + config.get('cyanide', 'toolDir') # makes folder
logDir = installDir + config.get('cyanide', 'logDir') # make logs
# Words accepted as an affirmative answer by yesOrNo().
yes = config.get('cyanide', 'yes').split()
color_random=[color.HEADER,color.IMPORTANT,color.NOTICE,color.OKBLUE,color.OKGREEN,color.WARNING,color.RED,color.END,color.UNDERLINE,color.LOGGING]
# random.shuffle(color_random)
continuePrompt = "\nClick [Return] to continue"
alreadyInstalled = "Already Installed"
class fluxion:
    """Menu wrapper for the Fluxion wifi tool: clone on first use, then launch it."""

    def __init__(self):
        self.installDir = toolDir + "fluxion"
        self.gitRepo = "https://github.com/FluxionNetwork/fluxion"
        if not self.installed():
            self.install()
            self.run()
        else:
            self.run()

    def installed(self):
        # NOTE(review): checks under installDir + "tools/...", not self.installDir
        # (toolDir + "fluxion") - confirm the two agree when toolDir is customised.
        return (os.path.isfile(installDir+ "tools/fluxion/fluxion.sh"))

    def install(self):
        # NOTE(review): install() launches fluxion ('-i' then 'bash fluxion.sh')
        # and run() launches it again afterwards - confirm the double launch is intended.
        os.system("git clone --depth=1 %s %s" %(self.gitRepo, self.installDir))
        os.system("cd %s && chmod u+x fluxion.sh && ./fluxion -i" %self.installDir)
        os.system("cd %s && bash fluxion.sh" % self.installDir)

    def run(self):
        clearScr()
        self.menu()
        os.system("cd %s && bash fluxion.sh" % self.installDir)

    def menu(self):
        clearScr()
        logo()
        print(color.RED + "\tEC-Council " + color.WHITE + "Methodology\n")
        print(color.RED + " 1) Start Fluxion")
        print(color.WHITE + " 99) Return to Main-Menu \n")
        # NOTE(review): int(input(...)) sits outside the try block, so a
        # non-numeric answer raises an uncaught ValueError.
        response = int(input(color.RED + " Select an Option : "))
        time.sleep(1.5)
        try:
            if response == 1:
                clearScr()
                logo()
                print(color.WHITE + "Starting Fluxion")
                time.sleep(2)
                clearScr()
                os.system("cd tools/fluxion &&./fluxion.sh")
            elif response == 99:
                main()
            else:
                # Unknown choice: redisplay this menu.
                fluxion.menu(self)
        except KeyboardInterrupt:
            fluxion.menu(self)
class airgeddon:
    """Menu wrapper for airgeddon: apt-install on first use, then launch it."""

    def __init__(self):
        if not self.installed():
            self.install()
            self.run()
        else:
            self.run()

    def installed(self):
        # Presence of the system binary is the install marker.
        return (os.path.isfile("/usr/bin/airgeddon"))

    def install(self):
        os.system("sudo apt-get install airgeddon -y")

    def run(self):
        clearScr()
        self.menu()

    def menu(self):
        clearScr()
        logo()
        print(color.RED + "\tEC-Council " + color.WHITE + "Methodology\n")
        print(color.RED + " 1) Start Airgeddon")
        print(color.WHITE + " 99) Return to Main-Menu \n")
        # NOTE(review): int(input(...)) is outside the try block, so a
        # non-numeric answer raises an uncaught ValueError.
        response = int(input(color.RED + " Select an Option : "))
        time.sleep(1.5)
        try:
            if response == 1:
                clearScr()
                logo()
                print(color.WHITE+"Starting Airgeddon")
                time.sleep(2)
                clearScr()
                os.system("airgeddon")
            elif response == 99:
                main()
            else:
                airgeddon.menu(self)
        except KeyboardInterrupt:
            main()
def main():
    """Top-level menu: dispatch to the tools this script actually defines.

    The original dispatched to undefined names (hiddeneye, blackeye,
    socialphish, AdvPhishing, zphisher, nexphisher - copy-pasted from a
    phishing menu) and fell back to `self.menu(target)` inside a module-level
    function: every path except option 99 raised NameError.  Dispatch now
    matches the printed menu (2 -> fluxion, 4 -> airgeddon), with
    not-yet-implemented options looping back to the menu.
    """
    clearScr()
    logo()
    print(color.WHITE + " [ 1 ] Brute-Force Wi-Fi(Comming Soon) ")
    print(color.RED + " [ 2 ] Fluxion")
    print(color.WHITE + " [ 3 ] Bluetooth Scanner")
    print(color.RED + " [ 4 ] Airgeddon")
    print(color.WHITE + "\n [ 99 ] Back To Framework")
    response = raw_input(color.RED +"\n Select your option : ")
    try:
        if response == "1":
            # Not implemented yet; redisplay the menu instead of crashing.
            time.sleep(1.5)
            main()
        elif response == "2":
            clearScr()
            fluxion()
            main()
        elif response == "3":
            # Not implemented yet; redisplay the menu instead of crashing.
            time.sleep(1.5)
            main()
        elif response == "4":
            clearScr()
            airgeddon()
            main()
        elif response == "99":
            print("\n\t Returning to main-menu")
            return_fw()
            # bashCommand = "clear;. ./cyanide-framework.sh;main"
            # output = subprocess.call(['bash','-c', bashCommand])
        else:
            main()
    except KeyboardInterrupt:
        main()
main()
|
def unique(integers):
    """Return the elements of *integers* with duplicates dropped, keeping first-seen order."""
    # dict preserves insertion order (Python 3.7+), so its keys are exactly
    # the first occurrences in order.
    return list(dict.fromkeys(integers))
|
import location_functions as loc_fns
import numpy as np
import pickle
# TIME VARIABLES
totalYears = 25        # projection horizon for cumulative savings
monthsInAYear = 12
hoursInADay = 24
daysInAMonth = 30      # simplifying assumption: every month has 30 days
# COST VARIABLES
elecTariff = 0.20 # CHF/kWh
capacityPerPanel = 255 # Wp
costPerPanel = 500 # CHF
panelArea = 1.62 # m2
# STATIONS MONTHLY INFORMATION
# Loaded eagerly at import time.  NOTE(review): the path is relative, so this
# assumes the process CWD is this package directory, and that the pickle file
# is trusted - confirm both.
means = pickle.load(open('../learning/groupedStations.p', 'rb'))
def get_average_power_potential(stations, weights, means):
    """
    Retrieve monthly average power potential for home according to weighted sum of potentials from closest stations.

    :param stations: a pd.DataFrame with columns=[name, lat, lng]
    :param weights: a sorted np.Array of distance weights associated to the k closest stations to house
    :param means: a pd.DataFrame of average montly potential for each station
    :return: monthly average power potential for home address
    """
    potential = 0
    for i in range(len(stations)):
        # Hoist the per-station series: the original fetched
        # means.power[...].get_values() twice per iteration.
        # NOTE(review): Series.get_values() is removed in pandas >= 1.0;
        # .to_numpy() is the modern equivalent - confirm the pinned version.
        station_power = means.power[stations[i]].get_values()
        # np.sum(x) / len(x) is the station's mean monthly potential.
        potential += np.sum(station_power) * weights[i] / len(station_power)
    return potential
def get_new_bill(bill, power, elec_tariff):
    """
    Compute monthly electricity bill when using installation giving passed solar power.

    :param bill: monthly electricity bill of user
    :param power: monthly solar power produced with a given installation
    :param elec_tariff: electricity tariff at user's house location
    :return: new monthly average electricity bill and new monthly usage pattern for the produced solar power
    """
    monthly_energy_used = bill / elec_tariff
    # The model splits consumption evenly between daytime and night-time.
    daytime_need = monthly_energy_used / 2
    night_need = monthly_energy_used / 2
    produced = power * hoursInADay * daysInAMonth
    # Solar only offsets the daytime half; production beyond that need is
    # clamped to zero benefit (the max(0, ...) of the original loop).
    unmet_daytime = np.maximum(daytime_need - produced, 0)
    newUsageFromUtility = unmet_daytime + night_need
    newBill = newUsageFromUtility * elec_tariff
    return newBill, newUsageFromUtility
def get_cumulative_savings(oldBill, newBill, cost):
    """
    Compute savings over 25 years earned by going solar.

    :param oldBill: average monthly electricity bill of the user before installing solar panels
    :param newBill: monthly electricity bill when installing a given number of solar panels
    :param cost: initial investment for solar installation
    :return: (len(newBill), totalYears*monthsInAYear) array of cumulative savings
    """
    # np.arange instead of range: the original relied on numpy silently
    # coercing a range object, and `float * range` raises TypeError whenever
    # oldBill - newBill[i] is a plain scalar rather than an ndarray.
    months = np.arange(1, totalYears*monthsInAYear + 1)
    savings = np.empty((len(newBill), len(months)))
    for i in range(len(newBill)):
        monthly_gain = oldBill - newBill[i]
        # Cumulative gain after m months, minus the upfront cost of plan i.
        savings[i,:] = monthly_gain * months - cost[i]
    return savings
def get_break_even_time(savings):
"""
Compute number of years until payback for each possible installation plan.
:param savings:
:return: array of number of years until payback
"""
nInstalls = savings.shape[0]
breakEven = np.empty(nInstalls)
for i in range(nInstalls):
breakEven[i] = np.argmax(savings[i,:] > 0)
return breakEven
def get_results(coordinates, stations, k, bill, roof_area):
    """
    Retrieve break even, capacity, solar power, cost and savings for all possible installation options.

    :param coordinates: (lat,lng) coordinates of home address
    :param stations: a pd.DataFrame with columns=[name, lat, lng]
    :param k: number of nearest weather stations
    :param bill: average electricity bill of the user (CHF)
    :param roof_area: approximated roof area of the user (m2)
    :return: dictionary of dictionaries containing the results to be fed into JS.
    """
    neighbourNames = loc_fns.get_k_nearest_neighbours(coordinates, k, stations)
    neighbourWeights = loc_fns.get_weights_for_k_nearest(coordinates, k, stations)
    potential = get_average_power_potential(neighbourNames.get_values(), neighbourWeights, means) # Wp
    # Get the user values
    averageElectricityBill = np.array([bill]) # CHF
    # Compute electric power and solar power for all installation options
    avgPower = averageElectricityBill / (elecTariff * hoursInADay * daysInAMonth) # kW
    # 20 plans: cover 5%, 10%, ..., 100% of the roof's panel capacity.
    installation = np.array(range(5,101,5)) / 100
    numPanels = np.floor(roof_area / panelArea)
    capacity = installation * numPanels * capacityPerPanel / 1000 # kWp
    cost = installation * numPanels * costPerPanel # CHF
    solarPower = capacity * potential # kW
    # Take only installation plans with less solarPower than avgPower
    indxx = solarPower <= avgPower
    newBill, newUsage = get_new_bill(averageElectricityBill, solarPower[indxx], elecTariff)
    savings = get_cumulative_savings(averageElectricityBill, newBill, cost[indxx])
    breakEven = get_break_even_time(savings) / monthsInAYear
    finalSavings = savings[:,-1]
    results = dict()
    # NOTE(review): breakEven/finalSavings are indexed by the *filtered* plan
    # set while i runs over all 20 plans.  This only lines up because
    # solarPower grows monotonically with installation, so indxx is True on a
    # prefix and the guard below keeps i inside that prefix - confirm.
    for i in range(0, 20):
        if(solarPower[i] <= avgPower):
            indexName = 'result_' + str((i + 1) * 5)
            results[indexName] = {'type': 'result', 'percentage': 5 * (i + 1),
                                  'breakEven': round(breakEven[i]*2)/2, 'cost': int(cost[i]),
                                  'capacity': capacity[i], 'power': solarPower[i], 'savings': int(finalSavings[i])}
    return results
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Iterable, Mapping, Optional, Sequence, Tuple, Union
# Note: several of these types are re-exported as the public API of `engine/fs.py`.
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior as GlobMatchErrorBehavior
from pants.engine.collection import Collection
from pants.engine.engine_aware import SideEffecting
from pants.engine.internals.native_engine import EMPTY_DIGEST as EMPTY_DIGEST # noqa: F401
from pants.engine.internals.native_engine import ( # noqa: F401
EMPTY_FILE_DIGEST as EMPTY_FILE_DIGEST,
)
from pants.engine.internals.native_engine import EMPTY_SNAPSHOT as EMPTY_SNAPSHOT # noqa: F401
from pants.engine.internals.native_engine import AddPrefix as AddPrefix
from pants.engine.internals.native_engine import Digest as Digest
from pants.engine.internals.native_engine import FileDigest as FileDigest
from pants.engine.internals.native_engine import MergeDigests as MergeDigests
from pants.engine.internals.native_engine import RemovePrefix as RemovePrefix
from pants.engine.internals.native_engine import Snapshot as Snapshot
from pants.engine.rules import QueryRule
from pants.util.frozendict import FrozenDict
if TYPE_CHECKING:
from pants.engine.internals.scheduler import SchedulerSession
@dataclass(frozen=True)
class Paths:
    """A Paths object is a collection of sorted file paths and dir paths.

    Paths is like a Snapshot, but has the performance optimization that it does not digest the files
    or save them to the LMDB store.
    """

    files: Tuple[str, ...]  # sorted file paths
    dirs: Tuple[str, ...]  # sorted directory paths
@dataclass(frozen=True)
class FileContent:
    """The content of a file.

    This can be used to create a new Digest with `Get(Digest, CreateDigest)`. You can also get back
    a list of `FileContent` objects by using `Get(DigestContents, Digest)`.
    """

    path: str
    content: bytes
    is_executable: bool = False

    def __repr__(self) -> str:
        # Elide the raw bytes (which may be huge): show only their length.
        return (
            f"FileContent(path={self.path}, content=(len:{len(self.content)}), "
            f"is_executable={self.is_executable})"
        )
@dataclass(frozen=True)
class FileEntry:
    """An indirect reference to the content of a file by digest.

    This can be used to create a new Digest with `Get(Digest, CreateDigest)`. You can also get back
    a list of `FileEntry` objects by using `Get(DigestEntries, Digest)`.
    """

    path: str
    file_digest: FileDigest  # digest of the file's bytes (not a directory digest)
    is_executable: bool = False
@dataclass(frozen=True)
class SymlinkEntry:
    """A symlink pointing to a target path.

    For the symlink target:

    - uses a forward slash `/` path separator.
    - can be relative to the parent directory of the symlink or can be an absolute path starting with `/`.
    - Allows `..` components anywhere in the path (as logical canonicalization may lead to
      different behavior in the presence of directory symlinks).

    See also the REAPI for a SymlinkNode:
    https://github.com/bazelbuild/remote-apis/blob/aa29b91f336b9be2c5370297210b67a6654c0b72/build/bazel/remote/execution/v2/remote_execution.proto#L882
    """

    path: str  # location of the symlink itself
    target: str  # path the symlink points at (see rules above)
@dataclass(frozen=True)
class Directory:
    """The path to a directory.

    This can be used to create empty directories with `Get(Digest, CreateDigest)`.
    """

    path: str

    def __repr__(self) -> str:
        # !r conversion is equivalent to calling repr() on the path.
        return f"Directory({self.path!r})"
class DigestContents(Collection[FileContent]):
    """The file contents of a Digest.

    Although the contents of the Digest are not memoized across `@rules` or across runs (each
    request for `DigestContents` will load the file content from disk), this API should still
    generally only be used for small inputs, since concurrency might mean that very many `@rule`s
    are holding `DigestContents` simultaneously.
    """

    # No body: this is purely a typed collection of FileContent values.
class DigestEntries(Collection[Union[FileEntry, SymlinkEntry, Directory]]):
    """The indirect file contents of a Digest.

    DigestEntries is a collection of FileEntry/SymlinkEntry/Directory instances representing,
    respectively, actual files, actual symlinks, and empty directories present in the Digest.
    Unlike DigestContents, entries reference file content by digest rather than carrying bytes.
    """
class CreateDigest(Collection[Union[FileContent, FileEntry, SymlinkEntry, Directory]]):
    """A request to create a Digest with the input FileContent/FileEntry/SymlinkEntry/Directory
    values.

    The engine will create any parent directories necessary, e.g. `FileContent('a/b/c.txt')` will
    result in `a/`, `a/b`, and `a/b/c.txt` being created. You only need to use `Directory` to
    create an empty directory.

    This does _not_ actually materialize the digest to the build root. You must use
    `engine.fs.Workspace` in a `@goal_rule` to save the resulting digest to disk.
    """
class GlobExpansionConjunction(Enum):
    """Describe whether to require that only some or all glob strings match in a target's sources.

    NB: this object is interpreted from within Snapshot::lift_path_globs() -- that method will need to
    be aware of any changes to this object's definition.
    """

    any_match = "any_match"  # at least one glob must match
    all_match = "all_match"  # every glob must match
@dataclass(frozen=True)
class PathGlobs:
    globs: Tuple[str, ...]  # stored sorted (see __init__)
    glob_match_error_behavior: GlobMatchErrorBehavior
    conjunction: GlobExpansionConjunction
    description_of_origin: str | None

    def __init__(
        self,
        globs: Iterable[str],
        glob_match_error_behavior: GlobMatchErrorBehavior = GlobMatchErrorBehavior.ignore,
        conjunction: GlobExpansionConjunction = GlobExpansionConjunction.any_match,
        description_of_origin: str | None = None,
    ) -> None:
        """A request to find files given a set of globs.

        The syntax supported is roughly Git's glob syntax. Use `*` for globs, `**` for recursive
        globs, and `!` for ignores.

        :param globs: globs to match, e.g. `foo.txt` or `**/*.txt`. To exclude something, prefix it
            with `!`, e.g. `!ignore.py`.
        :param glob_match_error_behavior: whether to warn or error upon match failures
        :param conjunction: whether all `globs` must match or only at least one must match
        :param description_of_origin: a human-friendly description of where this PathGlobs request
            is coming from, used to improve the error message for unmatched globs. For example,
            this might be the text string "the option `--isort-config`".
        """
        # NB: this object is interpreted from within Snapshot::lift_path_globs() -- that method
        # will need to be aware of any changes to this object's definition.
        # object.__setattr__ is required to assign fields on a frozen dataclass
        # from a custom __init__; globs are canonicalized by sorting.
        object.__setattr__(self, "globs", tuple(sorted(globs)))
        object.__setattr__(self, "glob_match_error_behavior", glob_match_error_behavior)
        object.__setattr__(self, "conjunction", conjunction)
        object.__setattr__(self, "description_of_origin", description_of_origin)
        self.__post_init__()

    def __post_init__(self) -> None:
        # Validate that description_of_origin is supplied exactly when it can be used.
        if self.glob_match_error_behavior == GlobMatchErrorBehavior.ignore:
            if self.description_of_origin:
                raise ValueError(
                    "You provided a `description_of_origin` value when `glob_match_error_behavior` "
                    "is set to `ignore`. The `ignore` value means that the engine will never "
                    "generate an error when the globs are generated, so `description_of_origin` "
                    "won't end up ever being used. Please either change "
                    "`glob_match_error_behavior` to `warn` or `error`, or remove "
                    "`description_of_origin`."
                )
        else:
            if not self.description_of_origin:
                raise ValueError(
                    "Please provide a `description_of_origin` so that the error message is more "
                    "helpful to users when their globs fail to match."
                )
@dataclass(frozen=True)
class PathGlobsAndRoot:
    """A set of PathGlobs to capture relative to some root (which may exist outside of the
    buildroot).

    If the `digest_hint` is set, it must be the Digest that we would expect to get if we were to
    expand and Digest the globs. The hint is an optimization that allows for bypassing filesystem
    operations in cases where the expected Digest is known, and the content for the Digest is
    already stored.
    """

    path_globs: PathGlobs
    root: str  # directory the globs are expanded relative to
    digest_hint: Optional[Digest] = None  # optional precomputed Digest (see docstring)
@dataclass(frozen=True)
class DigestSubset:
    """A request to get a subset of a digest.

    The digest will be traversed symlink-oblivious to match the provided globs. If you require a
    symlink-aware subset, you can access the digest's entries `Get(DigestEntries, Digest, digest)`,
    filter them out, and create a new digest: `Get(Digest, CreateDigest(...))`.

    Example:

        result = await Get(Digest, DigestSubset(original_digest, PathGlobs(["subdir1", "f.txt"])))
    """

    digest: Digest
    globs: PathGlobs  # globs selecting which paths of `digest` to keep
@dataclass(frozen=True)
class DownloadFile:
    """Retrieve the contents of a file via an HTTP GET request or directly for local file:// URLs.

    To compute the `expected_digest`, manually download the file, then run `shasum -a 256` to
    compute the fingerprint and `wc -c` to compute the expected length of the downloaded file in
    bytes.
    """

    url: str
    expected_digest: FileDigest  # fingerprint + length the download must match
@dataclass(frozen=True)
class NativeDownloadFile:
    """Retrieve the contents of a file via an HTTP GET request or directly for local file:// URLs.

    This request is handled directly by the native engine without any additional coercion by plugins,
    and therefore should only be used in cases where the URL is known to be publicly accessible.
    Otherwise, callers should use `DownloadFile`.

    The auth_headers are part of this nodes' cache key for memoization (changing a header invalidates
    prior results) but are not part of the underlying cache key for the local/remote cache (changing
    a header won't re-download a file if the file was previously downloaded).
    """

    url: str
    expected_digest: FileDigest
    # NB: This mapping can be of any arbitrary headers, but should be limited to those required for
    # authorization.
    auth_headers: FrozenDict[str, str]

    def __init__(
        self, url: str, expected_digest: FileDigest, auth_headers: Mapping[str, str] | None = None
    ) -> None:
        # Custom __init__ exists to coerce any Mapping (or None) into a hashable
        # FrozenDict; object.__setattr__ is required on a frozen dataclass.
        object.__setattr__(self, "url", url)
        object.__setattr__(self, "expected_digest", expected_digest)
        object.__setattr__(self, "auth_headers", FrozenDict(auth_headers or {}))
@dataclass(frozen=True)
class Workspace(SideEffecting):
    """A handle for operations that mutate the local filesystem."""

    _scheduler: "SchedulerSession"
    _enforce_effects: bool = True  # NOTE(review): consumed by the SideEffecting base - confirm semantics there

    def write_digest(
        self,
        digest: Digest,
        *,
        path_prefix: Optional[str] = None,
        clear_paths: Sequence[str] = (),
        side_effecting: bool = True,
    ) -> None:
        """Write a digest to disk, relative to the build root.

        You should not use this in a `for` loop due to slow performance. Instead, call `await
        Get(Digest, MergeDigests)` beforehand.

        As an advanced usecase, if the digest is known to be written to a temporary or idempotent
        location, side_effecting=False may be passed to avoid tracking this write as a side effect.
        """
        if side_effecting:
            self.side_effected()
        self._scheduler.write_digest(digest, path_prefix=path_prefix, clear_paths=clear_paths)
@dataclass(frozen=True)
class SpecsPaths(Paths):
    """The set of file paths matched by the command-line specs.

    `@goal_rule`s that only need source files (no target information) may
    request this, which also lets them operate on files with no owning targets.
    """
@dataclass(frozen=True)
class SnapshotDiff:
    """The difference between two Snapshots, expressed as tuples of path strings."""
    our_unique_files: tuple[str, ...] = ()
    our_unique_dirs: tuple[str, ...] = ()
    their_unique_files: tuple[str, ...] = ()
    their_unique_dirs: tuple[str, ...] = ()
    changed_files: tuple[str, ...] = ()
    @classmethod
    def from_snapshots(cls, ours: Snapshot, theirs: Snapshot) -> "SnapshotDiff":
        """Build a diff via the engine intrinsic; relies on `Snapshot._diff`
        yielding its values in this dataclass's field declaration order."""
        return cls(*ours._diff(theirs))
def rules():
    """Expose the filesystem intrinsics as QueryRules."""
    # All request types that resolve to a Digest share the same shape.
    digest_request_types = (
        CreateDigest,
        PathGlobs,
        AddPrefix,
        RemovePrefix,
        NativeDownloadFile,
        MergeDigests,
        DigestSubset,
    )
    return (
        *(QueryRule(Digest, (request_type,)) for request_type in digest_request_types),
        QueryRule(DigestContents, (Digest,)),
        QueryRule(Snapshot, (Digest,)),
        QueryRule(Paths, (PathGlobs,)),
    )
|
import ast
import datetime
import os
# returns a list with [child selection, game result, total time of game, child_selection,...]
def get_headers():
    """Build the output header row: a subject id column followed by, for each
    game phase ('pre'/'post') and each of the 16 trials, a selection, a result
    and a time column."""
    column_kinds = ['selection', 'result', 'time']
    headers = ['subject_id']
    for game in ['pre', 'post']:
        for trial in range(0, 16):
            for kind in column_kinds:
                headers.append(game + '_' + kind + '_' + str(trial))
    return headers
def analyze_result(filename, pathname='./processed_data/'):
    """Parse one subject's spatial-test log file into a flat result row.

    Returns [subject_id, pre_selection_0, pre_result_0, pre_time_0, ...,
    post_time_15], matching the columns produced by get_headers().
    Trials that never occur stay as the string 'Null'.
    """
    # Expected click sequence per game phase; a selection scores 1 when it
    # matches the entry at the same trial index, else 0.
    correct_sequence = {'pre': {}, 'post': {}}
    correct_sequence['pre'] = ["1_A","3_B","5_C","7_A","9_D","11_A","13_B","15_A","17_B","19_B","21_C","23_D","25_D","27_D","29_B","31_C"]
    correct_sequence['post'] = ["2_D","4_C","6_D","8_A","10_C","12_D","14_C","16_C","18_A","20_D","22_A","24_B","26_C","28_B","30_A","32_B"]
    result_list = []
    data = {'pre': {}, 'post': {}}
    current_game = 'pre'
    # init an empty dictionary: every trial slot starts as 'Null'
    for game in ["pre", "post"]:
        for x in range(0, 16):
            data[game]['selection' + str(x)] = 'Null'
            data[game]['result' + str(x)] = 'Null'
            data[game]['time' + str(x)] = 'Null'
    with open(os.path.join(pathname,filename), 'r') as fp:
        i=0
        for line in fp:
            # Each line has a 6-character prefix before a Python dict literal
            # (presumably a log tag -- TODO confirm the log format).
            raw_dic = ast.literal_eval(line[6:])
            action = raw_dic['action']
            comment = raw_dic['comment']
            obj = raw_dic['obj']
            time = raw_dic['time']
            if (action == 'down'):
                if (obj == 'start_button_pre'):
                    start_time = datetime.datetime.strptime(raw_dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                    current_game="pre"
                elif (obj == 'start_button_post'):
                    start_time = datetime.datetime.strptime(raw_dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                    current_game = "post"
                    i=0
                else:
                    # NOTE(review): start_time is only bound after a start-button
                    # event; a selection arriving first would raise NameError.
                    end_time = datetime.datetime.strptime(raw_dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                    # Per-trial duration; the clock then restarts at this event.
                    total_time = (end_time - start_time).total_seconds()
                    start_time = end_time
                    data[current_game]['selection' + str(i)] = obj
                    if (obj == correct_sequence[current_game][i]):
                        data[current_game]['result' + str(i)] = 1
                    else:
                        data[current_game]['result' + str(i)] = 0
                    data[current_game]['time' + str(i)] = total_time
                    i = i + 1
    # Subject id is whatever is left of the filename after stripping the
    # expected prefix and extension.
    subject_id = filename.replace('bag_spatial_test','')
    subject_id = subject_id.replace('.txt','')
    result_list.append(subject_id)
    for game in ["pre","post"]:
        for x in range (0,16):
            result_list.append (data[game]['selection'+str(x)])
            result_list.append (data[game]['result'+str(x)])
            result_list.append(data[game]['time' + str(x)])
    return result_list
#result = analyze_result('bag_spatial_test17.txt', pathname='./processed_data/txt/')
result = analyze_result('bag_spatial_p017.txt', pathname='./results/txt/')
# Fix: the original bare `print result` statement is a SyntaxError on Python 3.
# A parenthesized single-argument print behaves identically on Python 2 and 3.
print(result)
|
# Generated by Django 2.2.20 on 2021-09-28 15:09
import django.utils.timezone
import django_extensions.db.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Add django-extensions created/modified timestamp fields to the
    Organisation and OrganisationDivision models."""
    dependencies = [
        ("organisations", "0060_delete_duplicate_org_type"),
    ]
    operations = [
        # `default=timezone.now` backfills existing rows during the migration;
        # preserve_default=False then drops the default from the field definition.
        migrations.AddField(
            model_name="organisation",
            name="created",
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True,
                default=django.utils.timezone.now,
                verbose_name="created",
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="organisation",
            name="modified",
            field=django_extensions.db.fields.ModificationDateTimeField(
                auto_now=True, verbose_name="modified"
            ),
        ),
        migrations.AddField(
            model_name="organisationdivision",
            name="created",
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True,
                default=django.utils.timezone.now,
                verbose_name="created",
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="organisationdivision",
            name="modified",
            field=django_extensions.db.fields.ModificationDateTimeField(
                auto_now=True, verbose_name="modified"
            ),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 20:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rework Poke: rename `counter` to `pokecount` and replace the single
    `poker`/`pokedby` foreign-key style fields with a many-to-many `poker`."""
    dependencies = [
        ('firstapp', '0007_auto_20170628_1203'),
    ]
    operations = [
        migrations.RenameField(
            model_name='poke',
            old_name='counter',
            new_name='pokecount',
        ),
        migrations.RemoveField(
            model_name='poke',
            name='pokedby',
        ),
        # `poker` is removed and re-added because its field type changes
        # (it becomes a ManyToManyField below).
        migrations.RemoveField(
            model_name='poke',
            name='poker',
        ),
        migrations.AddField(
            model_name='poke',
            name='poker',
            field=models.ManyToManyField(related_name='pokerpokes', to='firstapp.User'),
        ),
    ]
|
import unittest
import os
from wordCounter import count_words
class MyTest(unittest.TestCase):
    """Exercise count_words against the files in the current working directory."""

    def test_count_v1(self):
        """The word 'тест' occurs 3 times."""
        target_dir = os.getcwd()
        self.assertEqual(3, count_words(target_dir, 'тест'))

    def test_count_v2(self):
        """The word 'привет' occurs 4 times."""
        target_dir = os.getcwd()
        self.assertEqual(4, count_words(target_dir, 'привет'))

    def test_count_v3(self):
        """The word 'письмо' occurs 3 times."""
        target_dir = os.getcwd()
        self.assertEqual(3, count_words(target_dir, 'письмо'))
import os
import re
import pandas as pd
#
#Get row data from raw text file using regular expression
def getData(data):
    """Parse raw EMG text lines of the form '<timestamp> [v1, v2, ...]' into
    lists of ints, one list per input line (timestamp first)."""
    pattern = re.compile('([0-9]*?) \[(.*?)\]')
    parsed = []
    for line in data:
        match = pattern.search(line)
        fields = (match.group(1) + ', ' + match.group(2)).split(', ')
        parsed.append([int(field) for field in fields])
    return parsed
if __name__ == "__main__":
    #Sensor data path (raw input)
    path = "data/mydata/sensor data"
    #Path to write the processed file, Create this manually
    writepath = "data/processed"
    filename = []
    #
    #Read all files from raw sensor data folder
    for files in os.listdir(path):
        filename.append(files)
    filetype = []
    #
    # Extract the type token from names shaped like "<digits>_<type>.<ext>";
    # re.I makes [a-z]* match 'IMU'/'EMG' as well.
    for files in filename:
        matchObj = re.match( r'([0-9]*)_([a-z]*).[a-z]*', files, re.M|re.I)
        filetype.append(matchObj.group(2))
    filetype = pd.Series(filetype)
    filename = pd.Series(filename)
    #get files where type = IMU
    ImuIndex = filetype[filetype == 'IMU'].index.tolist()
    #IMU files
    ImuFiles = filename[ImuIndex].tolist()
    #Features for IMU files
    features=['timestamp', 'acc x', 'acc y','acc z', 'gyro x', 'gyro y', 'gyro z', 'ori w', 'ori x', 'ori y', 'ori z']
    #Read data from IMU.txt file and store them as a csv
    for files in ImuFiles:
        file = files.split('.')[0]
        filepath = os.path.join(path, files)
        writefile= os.path.join(writepath, file+'.csv')
        # NOTE(review): the inline open() handle is never closed explicitly;
        # it relies on garbage collection.
        lines = [line.rstrip('\n').split(' ')[:-1] for line in open(filepath, 'r')]
        IMUFrame = pd.DataFrame(lines, columns= features)
        IMUFrame.to_csv(writefile, index = False)
    #Features for EMG data
    features = ['timestamp', 'emg1', 'emg2', 'emg3', 'emg4', 'emg5', 'emg6', 'emg7', 'emg8']
    #Get files whose type is EMG
    EMGIndex = filetype[filetype == 'EMG'].index.tolist()
    EMGFiles = filename[EMGIndex].tolist()
    # EMG lines need regex parsing (see getData) before framing as CSV.
    for i in range(len(EMGFiles)):
        files = EMGFiles[i]
        file = files.split('.')[0]
        filepath = os.path.join(path, files)
        writefile= os.path.join(writepath, file+'.csv')
        lines = getData([line.rstrip('\n') for line in open(filepath, 'r')])
        lineframe = pd.DataFrame(lines, columns=features)
        lineframe.to_csv(writefile, index = False)
|
# import turtle
# angle=1
# turtle.speed(5000)
# for i in range(360):
# turtle.forward(300)
# turtle.right(60)
# turtle.forward(100)
# turtle.right(90)
# turtle.forward(70)
# turtle.home()
# turtle.right(angle)
# angle=angle+1
# turtle.mainloop() |
# coding: utf-8
import sys,shutil,os
import numpy as np
# Open the two PDB files named on the command line.
# NOTE(review): f1 and f2 are never read or closed; the coordinate reader
# below re-opens the files itself, so these two lines appear to be dead code.
f1 = open(sys.argv[1])
f2 = open(sys.argv[2])
# Shorthand alias for converting fixed-width column text to numbers.
f =float
def get_coord_from_pdb(col=1): # read coordinates from a PDB file; col = 1 or 2 selects the sys.argv entry
    """Return [x, y, z] float triples for every ATOM/HETATM record in the PDB
    file named by sys.argv[col].

    Coordinates are taken from the fixed column ranges 31:38 / 39:46 / 47:54.
    Fix: the file is now opened with `with`, so the handle is closed even if a
    malformed line raises during parsing.
    """
    coord = []
    with open(sys.argv[col]) as pdb_file:
        for line in pdb_file:
            if line.startswith("ATOM") or line.startswith("HETATM"):
                coord.append([float(line[31:38]), float(line[39:46]), float(line[47:54])])
    return coord
#print get_coord_from_pdb(1)  # smoke test: check that coordinates can be read (Python 2 print)
coord_A = get_coord_from_pdb(1)
coord_B = get_coord_from_pdb(2)
def center_of_mass(coord): # compute the centroid of a coordinate list
    """Return the centroid [x, y, z] of a non-empty list of [x, y, z] rows.

    Values are floats regardless of the input element type (matching the
    original's float() conversions, without calling it per element).
    """
    # zip(*coord) transposes the row list into the three axis columns.
    total_x, total_y, total_z = (float(sum(axis)) for axis in zip(*coord))
    n = len(coord)
    return [total_x / n, total_y / n, total_z / n]
com_A = center_of_mass(coord_A)
com_B = center_of_mass(coord_B)
# The disabled block below is a smoke test for the center-of-mass code
# (the bare string says: "test that the centroid calculation works").
"""重心の計算ができてるかのテスト"""
#coord_test = get_coord_from_pdb(1)
#print coord_test
#print center_of_mass(coord_test)
""""""
def trans(coord, com): # shift coordinates so the center of mass sits at the origin
    """Return `coord` translated by -`com`, every component as a float.

    Fix: the original applied the float conversion inconsistently (only to
    some components); applying it uniformly yields identical values for the
    float inputs this script produces.
    """
    shifted = []
    for point in coord:
        shifted.append([float(point[axis] - com[axis]) for axis in range(3)])
    return shifted
# Center both structures at the origin before computing the rotation.
trans_A = trans(coord_A,com_A)
trans_B = trans(coord_B,com_B)
def make_sym_matrix(coord1,coord2): # build the symmetric 4x4 matrix
    """Build the symmetric 4x4 matrix whose smallest-eigenvalue eigenvector is
    the quaternion of the best-fit rotation between the two centered
    coordinate sets (a quaternion superposition method -- presumably
    Kearsley-style; verify against the reference if it matters).
    """
    mat1 = np.array(coord1) # matrix from coord1 (rows are coordinate vectors)
    mat2 = np.array(coord2)
    a = np.add(mat1,mat2) # element-wise sum of the two matrices
    b = np.subtract(mat1,mat2) # element-wise difference of the two matrices
    # Accumulate the upper triangle of the symmetric matrix.
    B11 = 0
    B12 = 0
    B13 = 0
    B14 = 0
    B22 = 0
    B23 = 0
    B24 = 0
    B33 = 0
    B34 = 0
    B44 = 0
    for i in range(len(coord1)):
        B11 += b[i][0] * b[i][0] + b[i][1] * b[i][1] + b[i][2] * b[i][2]
        B12 += a[i][2] * b[i][1] - b[i][2] * a[i][1]
        B13 += -a[i][2] * b[i][0] + a[i][0] * b[i][2]
        B14 += a[i][1] * b[i][0] - a[i][0] * b[i][1]
        B22 += b[i][0] * b[i][0] + a[i][1] * a[i][1] + a[i][2] * a[i][2]
        B23 += b[i][0] * b[i][1] - a[i][0] * a[i][1]
        B24 += b[i][0] * b[i][2] - a[i][2] * a[i][0]
        B33 += a[i][0] * a[i][0] + b[i][1] * b[i][1] + a[i][2] * a[i][2]
        B34 += b[i][1] * b[i][2] - a[i][1] * a[i][2]
        B44 += a[i][0] * a[i][0] + a[i][1] * a[i][1] + b[i][2] * b[i][2]
    # Mirror the upper triangle into the lower triangle.
    B21 = B12
    B31 = B13
    B41 = B14
    B32 = B23
    B42 = B24
    B43 = B34
    sym_matrix = np.array([[B11,B12,B13,B14],[B21,B22,B23,B24],[B31,B32,B33,B34],[B41,B42,B43,B44]],dtype=float)
    # Normalize by the number of atoms.
    return sym_matrix/len(coord1)
# Fix: the original used Python 2 print statements, which are SyntaxErrors on
# Python 3; they are converted to print() calls below. All runtime strings
# (including the Japanese output labels and the PDB format strings) are
# unchanged.
sym_matrix = make_sym_matrix(trans_A,trans_B)
#print(sym_matrix)
# Eigenvalues and eigenvectors of the symmetric matrix.
la,v = np.linalg.eig(sym_matrix)
#print(la)
#print(v)
# Eigenvector belonging to the smallest eigenvalue.
index = np.where(la==min(la))[0][0]
# q0..q3 are the quaternion components. numpy returns eigenvectors as the
# COLUMNS of v, so v[row][index] walks down column `index`.
q0 = v[0][index]
q1 = v[1][index]
q2 = v[2][index]
q3 = v[3][index]
# Rotation matrix built from the quaternion.
q11 = 2*q0*q0 + 2*q1*q1 - 1
q12 = 2*q1*q2 - 2*q0*q3
q13 = 2*q1*q3 + 2*q0*q2
q21 = 2*q1*q2 + 2*q0*q3
q22 = 2*q0*q0 + 2*q2*q2 - 1
q23 = 2*q2*q3 - 2*q0*q1
q31 = 2*q1*q3 - 2*q0*q2
q32 = 2*q2*q3 + 2*q0*q1
q33 = 2*q0*q0 + 2*q3*q3 - 1
rot = np.array([[q11,q12,q13],[q21,q22,q23],[q31,q32,q33]])
print('rot', rot)
coord_freeze = np.array(trans_A)
coord_rotation = np.array(trans_B).T  # transposed back again after rotating
print('固定する方の原子座標')
print(coord_freeze)
print('まわす前の原子座標')
print(coord_rotation)  # before rotation
coord_rotated = np.dot(rot,coord_rotation).T
print("まわしたあとの原子座標")
print(coord_rotated)
# Mean squared deviation between the fixed and rotated structures.
msd = 0
for i in range(len(coord_freeze)):
    msd += np.dot((coord_freeze[i] - coord_rotated[i]),(coord_freeze[i] - coord_rotated[i]))
rmsd = np.sqrt(msd/len(coord_freeze))
print("RMSD")
print(rmsd)
list_rotated = coord_rotated.tolist()
list_freeze = coord_freeze.tolist()
# Write both structures as PDB-style ATOM records to a temp file, then copy
# it to the output path given as sys.argv[3].
output = open('tmp.txt','w')
norot = []
rota = []
for i in range(len(list_freeze)):
    norot.append("%4s%2s%5d%1s%-4s%8s%6s%8.3f%8.3f%8.3f%22s%-2s\n" %("ATOM"," ",i+1," ","C","MOL F 1"," ",list_freeze[i][0],list_freeze[i][1],list_freeze[i][2]," ","C"))
    rota.append("%4s%2s%5d%1s%-4s%8s%6s%8.3f%8.3f%8.3f%22s%-2s\n" %("ATOM"," ",len(list_freeze)+i+1," ","C","MOL R 1"," ",list_rotated[i][0],list_rotated[i][1],list_rotated[i][2]," ","C"))
for i in range(len(list_freeze)):
    output.write(norot[i])
for i in range(len(list_freeze)):
    output.write(rota[i])
output.close()
shutil.copyfile("tmp.txt",(str(sys.argv[3])))
os.remove("./tmp.txt")
|
#-*-coding:utf8-*-
import sys
import urllib
import urllib2
import urlparse
import re
from lxml import etree
class my_urlopener(urllib.FancyURLopener):
    """URL opener with a desktop-browser User-Agent (Python 2 urllib API).
    FancyURLopener sends its `version` attribute as the User-Agent header.
    """
    version = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
def process():
    """Fetch the Douban movie tag-cloud page and print the tag names.

    Python 2 code: relies on the py2-only opener above and print statements.
    """
    url = 'http://movie.douban.com/tag/?view=cloud'
    print 'process: ' + url
    opener = my_urlopener()
    page = opener.open(url)
    text = page.read()
    page.close()
    tags = []
    selector = etree.HTML(text)
    # Each tag sits in a <span> under the "indent tag_cloud" block; the tag
    # text is the anchor's text node.
    tagspans = selector.xpath('//*[@id="content"]/div/div[@class="article"]/div[@class="indent tag_cloud"]/span')
    for span in tagspans:
        tag = span.xpath('a/text()')[0]
        tags.append(tag)
    print tags
def main():
    """Script entry point: scrape and print the Douban tag cloud."""
    process()

if __name__ == "__main__":
    main()
|
"""Construct word map graph booklet and assemble section gutter information"""
import re
import os
from operator import itemgetter
import xmlStaticOperators
import xmlPageData
import xmlPageOperator
import xmlColumnChart
import xmlWordOperators
class xmlSheetSearch(object):
    """Drive the per-year XML sheet analysis: build per-page gutter-count
    dictionaries, chart page-structure graphs, and locate firm names and
    lines on each in-bounds page."""
    def __init__(self, file_path, xml_data):
        # xml_data is a (page-data dict, length) pair.
        self.page_data_dictionary = xml_data[0]
        self.dictionary_length = xml_data[1]
        self.file_path = file_path
        self.year = self.identify_year()
        # (center, thirds) gutter-count dictionaries keyed by page index.
        self.section_dictionaries = self.define_page_section_dictionaries()
        self.section_dictionary_center = self.section_dictionaries[0]
        self.section_dictionary_thirds = self.section_dictionaries[1]
        # Column charts derived from the two section dictionaries.
        self.page_graphs = self.chart_page_structure_graphs()
        self.xml_column_chart_center = self.page_graphs[0]
        self.xml_column_chart_thirds = self.page_graphs[1]
        # (firm location dict, page line dict) gathered from the page scan.
        self.firm_line_data = self.identify_firms_lines_locations_on_page()
        self.location_data = self.firm_line_data[0]
        self.line_data = self.firm_line_data[1]
    def identify_year(self):
        """Extract year from file path."""
        year = re.search(r'.*Industrials_(\d{4}).*', self.file_path).group(1)
        return year
    @staticmethod
    def manual_begin_end(year, page_index):
        """define beginning and endpoints of manual in terms of areas to search.

        Returns the flag of the closest boundary at or before page_index:
        True while inside the searchable span, False outside it.
        NOTE(review): only the years listed below are supported; any other
        year raises KeyError.
        """
        manual_begin_end_dict = {
            '1920': [[-1, False], [133, True], [1513, False]],
            '1921': [[-1, False], [158, True], [1751, False]],
            '1922': [[-1, False], [193, True], [2077, False]],
            '1923': [[-1, False], [159, True], [2414, False]],
            '1924': [[-1, False], [310, True], [2878, False]],
            '1925': [[-1, False], [225, True], [2405, False]],
            '1926': [[-1, False], [270, True], [2665, False]],
            '1927': [[-1, False], [306, True], [3057, False]],
            '1928': [[-1, False], [348, True], [3425, False]],
            '1929': [[-1, False], [391, True], [3485, False]],
            '1940': [[-1, False], [386, True], [3581, False]],
            '1941': [[-1, False], [329, True], [3466, False]]
        }
        # Keep boundaries strictly below page_index, sorted by distance; the
        # nearest one determines whether this page is in the searchable span.
        difference_list = sorted([[item, page_index - item[0]] for item in
                                  manual_begin_end_dict[year] if page_index - item[0] > 0],
                                 key=itemgetter(1))
        begin_end_value = difference_list[0][0][1]
        return begin_end_value
    def define_page_section_dictionaries(self):
        """Initialise class containing page data.

        Wraps each raw page entry in an xmlPageData object (replacing the raw
        entry in-place) and collects its center/thirds gutter counts.
        """
        section_dictionary_center = {}
        section_dictionary_thirds = {}
        for i, (page, data) in enumerate(self.page_data_dictionary.items()):
            page_data = xmlPageData.xmlPageData(page, data)
            self.page_data_dictionary[page] = page_data
            section_dictionary_center.update({i: [page, page_data.gutter_count_center]})
            section_dictionary_thirds.update({i: [page, page_data.gutter_count_thirds]})
        return (section_dictionary_center, section_dictionary_thirds)
    def chart_page_structure_graphs(self):
        """Build the center and thirds column charts for this year."""
        xml_column_chart_center = xmlColumnChart.xmlColumnChart(self.section_dictionary_center, 'center', self.year)
        xml_column_chart_thirds = xmlColumnChart.xmlColumnChart(self.section_dictionary_thirds, 'thirds', self.year)
        return (xml_column_chart_center, xml_column_chart_thirds)
    def identify_firms_lines_locations_on_page(self):
        """Scan the in-bounds pages, writing company names to a per-year text
        file and returning (firm location data, page line data) dicts."""
        file_path = '../../text_output/xml_firm_search_output/{}_company_names.txt'.format(self.year)
        # Truncate/remove any previous output for this year before appending.
        xmlStaticOperators.clear_destination(file_path)
        firm_location_data = {}
        page_lines_data = {}
        for i, (page, data) in enumerate(self.page_data_dictionary.items()):
            manual_operate_key = xmlSheetSearch.manual_begin_end(self.year, i)
            # Only pages inside the manual's searchable span and with valid
            # bounds are processed.
            if data.page_bounds[2] and manual_operate_key:
                page_operator_data = xmlPageOperator.xmlPageOperator(i, self.year, data, file_path,
                                                                     self.xml_column_chart_center,
                                                                     self.xml_column_chart_thirds)
                if len(page_operator_data.line_data_dict) > 0:
                    for key, value in page_operator_data.line_data_dict.items():
                        page_lines_data.update({key: value})
                if len(page_operator_data.page_break_dictionary_insgesamt) > 0:
                    for key, value in page_operator_data.page_break_dictionary_insgesamt.items():
                        firm_location_data.update({key: value})
        return (firm_location_data, page_lines_data)
|
import os
from imageai.Detection import ObjectDetection
print('===>Iniciando.')
# Tiny YOLOv3 weights plus input/output folders (expected to exist already).
model_path = "./models/yolo-tiny.h5"
input_path = "./input/"
output_path = "./output/"
file_paths = [os.path.join(input_path, file) for file in os.listdir(input_path)]
# Keep only regular files with a .jpg extension (case-insensitive).
files = [file for file in file_paths if os.path.isfile(file) and file.lower().endswith(".jpg")]
# Load model
detector = ObjectDetection()
detector.setModelTypeAsTinyYOLOv3()
detector.setModelPath(model_path)
detector.loadModel()
print('===> Modelo Yolo carregado')
# Object detection: annotate each input image and print what was found.
for i, file in enumerate(files):
    detection = detector.detectObjectsFromImage(input_image=file, output_image_path=f"{output_path}image{i}.jpg")
    print(f"\n\n=====> ITEM {i} = {file} <======")
    for eachItem in detection:
print("===> ",eachItem["name"] , " : ", eachItem["percentage_probability"]) |
# -*- python -*-
# Assignment: Names
# Write the following function.
# Part I
# Given the following list:
# Roster used by the Part I exercise below.
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'},
    {'first_name' : 'Mark', 'last_name' : 'Guillen'},
    {'first_name' : 'KB', 'last_name' : 'Tonel'}
]
# Create a program that outputs:
#
# Michael Jordan
# John Rosales
# Mark Guillen
# KB Tonel
def print_students( l ):
    """Print 'First Last' for each student dict in l.

    Fix: the bare Python 2 print statement is a SyntaxError on Python 3;
    a parenthesized single-argument print behaves identically on both.
    """
    for student in l:
        print("{} {}".format(student['first_name'], student['last_name']))
# Fix: converted Python 2 print statements to parenthesized single-argument
# prints, which are valid on both Python 2 and 3.
print("Testing print_students ...")
print_students( students )
print("End testing print_students")
# Part II
# Now, given the following dictionary:
# Grouped roster used by the Part II exercise below.
users = {
    'Students': [
        {'first_name': 'Michael', 'last_name' : 'Jordan'},
        {'first_name' : 'John', 'last_name' : 'Rosales'},
        {'first_name' : 'Mark', 'last_name' : 'Guillen'},
        {'first_name' : 'KB', 'last_name' : 'Tonel'}
    ],
    'Instructors': [
        {'first_name' : 'Michael', 'last_name' : 'Choi'},
        {'first_name' : 'Martin', 'last_name' : 'Puryear'}
    ]
}
# Create a program that prints the following format
# (including number of characters in each combined name):
#
# Students
# 1 - MICHAEL JORDAN - 13
# 2 - JOHN ROSALES - 11
# 3 - MARK GUILLEN - 11
# 4 - KB TONEL - 7
# Instructors
# 1 - MICHAEL CHOI - 11
# 2 - MARTIN PURYEAR - 13
def print_users( d ):
    """Print each group name followed by numbered 'FIRST LAST - length'
    lines, where length counts the characters of both name parts.

    Fix: Python 2 print statements converted to parenthesized single-argument
    prints (valid on both 2 and 3); index loop replaced with enumerate.
    """
    for user_t in ["Students", "Instructors"]:
        print(user_t)
        for i, user in enumerate(d[user_t]):
            user_name_length = len(user['first_name']) + len(user['last_name'])
            print("{} - {} {} - {}".format(i + 1, user['first_name'].upper(), user['last_name'].upper(), user_name_length))
# Fix: converted Python 2 print statements to parenthesized single-argument
# prints, which are valid on both Python 2 and 3.
print("Testing print_users ...")
print_users( users )
print("End testing print_users")
# Note: The majority of data we will manipulate as web developers will be hashed
# in a dictionary using key-value pairs. Repeat this assignment a few times to
# really get the hang of unpacking dictionaries, as it's a very common requirement
# of any web application.
|
# Create a calculator application that has use in your life.
# It might be an interest calculator, or it might be something that you can use in the classroom.
# For example, if you were in physics class, you might want to make a F = M * A calc.
import sys
class FMACalc:
    """Interactive F = M * A calculator.

    Type 'force', 'mass' or 'acceleration' (or the first letter) to solve for
    that quantity from the other two; 'help' and 'quit' also work.

    Fix: the original used Python-2-only `raw_input` and print statements,
    which fail on Python 3; both are converted (prompts and messages
    unchanged). The three identical input loops are folded into one helper.
    """
    COMMANDS = ('force', 'f', 'mass', 'm', 'acceleration', 'a', 'help', 'quit')

    def __init__(self):
        # Single-letter aliases resolve to the same bound methods, so the
        # getattr() dispatch in run() accepts both spellings.
        self.f = self.force
        self.m = self.mass
        self.a = self.acceleration

    def quit(self):
        """Exit the program with a farewell message."""
        sys.exit("Goodbye")

    def help(self):
        """Print the available commands."""
        print('Available functions: Force, Mass, Acceleration.')
        print('Type name or first letter to perform calculation')
        print('Type quit to exit program')

    def _read_number(self, prompt):
        """Prompt repeatedly until the user enters a valid float."""
        while 1:
            try:
                return float(input(prompt))
            except ValueError:
                print("Must be a number")

    def get_force(self):
        return self._read_number('Enter force: ')

    def get_mass(self):
        return self._read_number('Enter mass: ')

    def get_accel(self):
        return self._read_number('Enter acceleration: ')

    def force(self):
        """F = M * A"""
        print('>> %f' % ( self.get_mass() * self.get_accel() ))

    def mass(self):
        """M = F / A"""
        print('>> %f' % ( self.get_force() / self.get_accel() ))

    def acceleration(self):
        """A = F / M"""
        print('>> %f' % ( self.get_force() / self.get_mass() ))

    def run(self):
        """Read commands until 'quit', dispatching via getattr."""
        while 1:
            command = input("\nEnter a calculation to perform >> ").lower()
            if command in self.COMMANDS:
                getattr(self, command)()
            else:
                print("Function not found ('help' for options)")
# Fix: guard the interactive loop so importing this module does not start
# the REPL; running the file as a script behaves exactly as before.
if __name__ == '__main__':
    app = FMACalc()
    app.run()
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
import json
import dateutil.parser
import babel
from flask import (
Flask,
render_template,
request, Response,
flash, redirect,
url_for
)
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from flask_wtf import Form
from forms import *
from datetime import datetime
from flask_migrate import Migrate
import sys
from models import db, Venue, Artist, Show
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
moment = Moment(app)
app.config.from_object('config')
# NOTE(review): models.py already exposes a `db` (imported above); rebinding
# `db = SQLAlchemy(app)` here creates a second instance and shadows the
# imported one -- confirm the models are bound to this app (db.init_app
# pattern) before relying on create_all().
db = SQLAlchemy(app)
# connect to a local postgresql database
migrate = Migrate(app, db)
db.create_all()
#----------------------------------------------------------------------------#
# Filters.
#----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
    """Jinja filter: render a datetime-ish value using a babel pattern.

    'full' and 'medium' map to preset patterns; any other value is passed
    through to babel unchanged.
    """
    parsed = dateutil.parser.parse(str(value))
    patterns = {
        'full': "EEEE MMMM, d, y 'at' h:mma",
        'medium': "EE MM, dd, y h:mma",
    }
    return babel.dates.format_datetime(parsed, patterns.get(format, format))
app.jinja_env.filters['datetime'] = format_datetime
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def index():
    """Render the landing page."""
    home_page = render_template('pages/home.html')
    return home_page
# Venues
# ----------------------------------------------------------------
@app.route('/venues')
def venues():
    """Venue listing grouped by distinct (city, state) pairs, each venue
    annotated with its count of upcoming shows."""
    areas = []
    for area in Venue.query.distinct(Venue.city, Venue.state):
        local_venues = Venue.query.filter_by(city=area.city).filter_by(state=area.state).all()
        venue_entries = [
            {
                "id": local_venue.id,
                "name": local_venue.name,
                "num_upcoming_shows": len(
                    Show.query.filter(Show.venue_id == local_venue.id).filter(Show.start_time > datetime.now()).all()
                ),
            }
            for local_venue in local_venues
        ]
        areas.append({"city": area.city, "state": area.state, "venues": venue_entries})
    return render_template('pages/venues.html', areas=areas)
@app.route('/venues/search', methods=['POST'])
def search_venues():
    """Case-insensitive substring search over venue names."""
    search_term = request.form.get('search_term', '')
    # (Renamed the local that shadowed this view function's own name.)
    matched_venues = Venue.query.filter(Venue.name.ilike('%' + search_term + '%')).all()
    data = [
        {
            "id": matched.id,
            "name": matched.name,
            "num_upcoming_shows": len(
                Show.query.filter(Show.venue_id == matched.id).filter(Show.start_time > datetime.now()).all()
            ),
        }
        for matched in matched_venues
    ]
    response = {"count": len(matched_venues), "data": data}
    return render_template('pages/search_venues.html', results=response, search_term=search_term)
@app.route('/venues/<int:venue_id>')
def show_venue(venue_id):
    """Venue detail page, with its shows split into past and upcoming and
    each show decorated with artist display fields."""
    venue = Venue.query.get(venue_id)
    if not venue:
        return render_template('error/404.html')
    upcoming_shows = []
    past_shows = []
    for show in venue.shows:
        show.artist_name = show.artist.name
        show.artist_image_link = show.artist.image_link
        destination = upcoming_shows if show.start_time > datetime.now() else past_shows
        destination.append(show)
    venue.upcoming_shows = upcoming_shows
    venue.upcoming_shows_count = len(upcoming_shows)
    venue.past_shows = past_shows
    venue.past_shows_count = len(past_shows)
    return render_template('pages/show_venue.html', venue=venue)
# Create Venue
# ----------------------------------------------------------------
@app.route('/venues/create', methods=['GET'])
def create_venue_form():
    """Show the blank new-venue form."""
    return render_template('forms/new_venue.html', form=VenueForm())
@app.route('/venues/create', methods=['POST'])
def create_venue_submission():
    """Create a Venue from the submitted form; flash success or failure."""
    error = False
    try:
        venue = Venue(
            name = request.form.get('name'),
            city = request.form.get('city'),
            state = request.form.get('state'),
            address = request.form.get('address'),
            phone = request.form.get('phone'),
            genres = request.form.getlist('genres'),
            image_link = request.form.get('image_link'),
            facebook_link = request.form.get('facebook_link'),
            website = request.form.get('website'),
            seeking_talent = True if request.form.get('seeking_talent') == 'Yes' else False,
            seeking_description = request.form.get('seeking_description')
        )
        db.session.add(venue)
        db.session.commit()
    except:
        # NOTE(review): bare except hides the failure type; the traceback info
        # is only printed to stdout below.
        error = True
        db.session.rollback()
        print(sys.exc_info())
    finally:
        db.session.close()
    if error:
        flash('An error occurred. Venue ' + request.form['name'] + ' could not be added.')
    else:
        flash('Venue ' + request.form['name'] + ' was successfully added!')
    return render_template('pages/home.html')
@app.route('/venues/<venue_id>', methods=['DELETE'])
def delete_venue(venue_id):
    """Delete a venue by id, flash the outcome, and redirect home."""
    try:
        venue = Venue.query.get(venue_id)
        db.session.delete(venue)
        db.session.commit()
        flash('Venue was successfully deleted!')
    except:
        # Rolls back on any failure (including venue_id not found -> None).
        db.session.rollback()
        flash('An error occurred. Venue could not be deleted.')
    finally:
        db.session.close()
    return redirect(url_for('index'))
# Artists
# ----------------------------------------------------------------
@app.route('/artists')
def artists():
    """List every artist."""
    all_artists = Artist.query.all()
    return render_template('pages/artists.html', artists=all_artists)
@app.route('/artists/search', methods=['POST'])
def search_artists():
    """Case-insensitive substring search over artist names.

    Fix: removed the dead local `num_upcoming_shows = 0`, which was assigned
    but never read (the per-artist count is computed inline below).
    """
    search_term = request.form.get('search_term', '')
    matched_artists = Artist.query.filter(Artist.name.ilike('%' + search_term + '%')).all()
    data = []
    for artist in matched_artists:
        data.append({
            "id": artist.id,
            "name": artist.name,
            "num_upcoming_shows": len(Show.query.filter(Show.artist_id==artist.id).filter(Show.start_time>datetime.now()).all())
        })
    response = {
        "count": len(matched_artists),
        "data": data
    }
    return render_template('pages/search_artists.html', results=response, search_term=search_term)
@app.route('/artists/<int:artist_id>')
def show_artist(artist_id):
    """Artist detail page, with shows split into past and upcoming and each
    show decorated with venue display fields."""
    artist = Artist.query.get(artist_id)
    if not artist:
        return render_template('error/404.html')
    upcoming_shows = []
    past_shows = []
    for show in artist.shows:
        show.venue_name = show.venue.name
        show.venue_image_link = show.venue.image_link
        destination = upcoming_shows if show.start_time > datetime.now() else past_shows
        destination.append(show)
    artist.upcoming_shows = upcoming_shows
    artist.upcoming_shows_count = len(upcoming_shows)
    artist.past_shows = past_shows
    artist.past_shows_count = len(past_shows)
    return render_template('pages/show_artist.html', artist=artist)
# Update
# ----------------------------------------------------------------
@app.route('/artists/<int:artist_id>/edit', methods=['GET'])
def edit_artist(artist_id):
    """Show the edit form for one artist (a fresh, unpopulated form plus the
    artist record for the template to display)."""
    target_artist = Artist.query.get(artist_id)
    return render_template('forms/edit_artist.html', form=ArtistForm(), artist=target_artist)
@app.route('/artists/<int:artist_id>/edit', methods=['POST'])
def edit_artist_submission(artist_id):
    """Apply the edit-artist form to the Artist record and redirect to it.

    Fix: the website field was read from the misspelled form key 'webstie',
    so every edit silently cleared the artist's website.
    """
    error = False
    try:
        artist = Artist.query.get(artist_id)
        artist.name = request.form.get('name')
        artist.city = request.form.get('city')
        artist.state = request.form.get('state')
        artist.phone = request.form.get('phone')
        artist.genres = request.form.getlist('genres')
        artist.image_link = request.form.get('image_link')
        artist.facebook_link = request.form.get('facebook_link')
        artist.website = request.form.get('website')
        artist.seeking_venue = True if request.form.get('seeking_venue')=='Yes' else False
        artist.seeking_description = request.form.get('seeking_description')
        db.session.commit()
    except:
        error = True
        db.session.rollback()
        print(sys.exc_info())
    finally:
        db.session.close()
    if error:
        flash('An error occurred. Artist could not be updated.')
    else:
        flash('Artist was successfully updated!')
    return redirect(url_for('show_artist', artist_id=artist_id))
@app.route('/venues/<int:venue_id>/edit', methods=['GET'])
def edit_venue(venue_id):
    """Show the edit form for one venue (a fresh, unpopulated form plus the
    venue record for the template to display)."""
    target_venue = Venue.query.get(venue_id)
    return render_template('forms/edit_venue.html', form=VenueForm(), venue=target_venue)
@app.route('/venues/<int:venue_id>/edit', methods=['POST'])
def edit_venue_submission(venue_id):
    """Apply the edit-venue form to the Venue record and redirect to it.

    Fixes: the website field was read from the misspelled key 'webstie', and
    seeking_talent was read from the artist-form key 'seeking_venue' instead
    of 'seeking_talent' (the key used by create_venue_submission).
    """
    error = False
    try:
        venue = Venue.query.get(venue_id)
        venue.name = request.form.get('name')
        venue.city = request.form.get('city')
        venue.state = request.form.get('state')
        venue.address = request.form.get('address')
        venue.phone = request.form.get('phone')
        venue.genres = request.form.getlist('genres')
        venue.image_link = request.form.get('image_link')
        venue.facebook_link = request.form.get('facebook_link')
        venue.website = request.form.get('website')
        venue.seeking_talent = True if request.form.get('seeking_talent')=='Yes' else False
        venue.seeking_description = request.form.get('seeking_description')
        db.session.commit()
    except:
        error = True
        db.session.rollback()
        print(sys.exc_info())
    finally:
        db.session.close()
    if error:
        flash('An error occurred. Venue could not be updated.')
    else:
        flash('Venue was successfully updated!')
    return redirect(url_for('show_venue', venue_id=venue_id))
# Create Artist
# ----------------------------------------------------------------
@app.route('/artists/create', methods=['GET'])
def create_artist_form():
    """Show the blank new-artist form."""
    return render_template('forms/new_artist.html', form=ArtistForm())
@app.route('/artists/create', methods=['POST'])
def create_artist_submission():
    """Create an Artist from the submitted form; flash success or failure."""
    error = False
    try:
        artist = Artist(
            name = request.form.get('name'),
            city = request.form.get('city'),
            state = request.form.get('state'),
            phone = request.form.get('phone'),
            genres = request.form.getlist('genres'),
            image_link = request.form.get('image_link'),
            facebook_link = request.form.get('facebook_link'),
            website = request.form.get('website'),
            seeking_venue = True if request.form.get('seeking_venue')=='Yes' else False,
            seeking_description = request.form.get('seeking_description')
        )
        db.session.add(artist)
        db.session.commit()
    except:
        # NOTE(review): bare except hides the failure type; the traceback info
        # is only printed to stdout below.
        error = True
        db.session.rollback()
        print(sys.exc_info())
    finally:
        db.session.close()
    if error:
        flash('An error occurred. Artist ' + request.form['name'] + ' could not be added.')
    else:
        flash('Artist ' + request.form['name'] + ' was successfully added!')
    return render_template('pages/home.html')
# Shows
# ----------------------------------------------------------------
@app.route('/shows')
def shows():
    """List all shows, decorating each with venue/artist display fields."""
    all_shows = Show.query.all()
    for show in all_shows:
        show.venue_name = show.venue.name
        show.artist_name = show.artist.name
        show.artist_image_link = show.artist.image_link
    return render_template('pages/shows.html', shows=all_shows)
@app.route('/shows/create')
def create_shows():
    """Render the blank new-show form."""
    # renders form. do not touch.
    form = ShowForm()
    return render_template('forms/new_show.html', form=form)
@app.route('/shows/create', methods=['POST'])
def create_show_submission():
    """Create a Show from the submitted form; flash success or failure.

    Fix: removed the stray debug `print(show)` left in the request path.
    """
    try:
        show = Show(
            artist_id = request.form.get('artist_id'),
            venue_id = request.form.get('venue_id'),
            start_time = request.form.get('start_time')
        )
        db.session.add(show)
        db.session.commit()
        flash('Show was successfully added!')
    except:
        db.session.rollback()
        print(sys.exc_info())
        flash('An error occurred. Show could not be added.')
    finally:
        db.session.close()
    return render_template('pages/home.html')
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page."""
    page = render_template('errors/404.html')
    return page, 404
@app.errorhandler(500)
def server_error(error):
    """Render the custom 500 page."""
    page = render_template('errors/500.html')
    return page, 500
# Attach a file handler in production so errors are persisted to error.log.
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
    app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
'''
|
# Exploratory comparison of Python container types (list / tuple / str / dict / set)
# built from the characters of "P6-UPMC Universite Paris VI".
# NOTE(review): most conversion calls below (tuple(...), str(...), len(...), ...)
# discard their result -- they only demonstrate that the conversion is possible.
liste=['P','6','-','U', 'P', 'M', 'C', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'e', ' ', 'P', 'a', 'r', 'i', 's', ' ', 'V','I']
print(len((liste)))
tuple(liste)
str(liste)
dict((x,0) for x in liste)
len(liste)
for i in liste:
    print(i, " ", end= " ")
print("********")
chaine='P6-UPMC Universite Paris VI'
list(chaine)
tuple(chaine)
str(chaine)
dict((x,0) for x in chaine)
set(chaine)
for i in chaine:
    print(i, " ", end=" ")
len(chaine)
# A set literal: duplicate characters collapse, order is undefined.
ensemble={'P','6','-','U', 'P', 'M', 'C', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'e', ' ', 'P', 'a', 'r', 'i', 's', ' ', 'V','I'}
print()
print(ensemble)
for i in liste:
    print(i, " ", end= " ")
print()
list(ensemble)
tuple(ensemble)
str(ensemble)
D = dict((x,0) for x in ensemble)
set(ensemble)
len(ensemble)
nuplet=('P','6','-','U', 'P', 'M', 'C', ' ', 'U', 'n', 'i', 'v', 'e', 'r', 's', 'i', 't', 'e', ' ', 'P', 'a', 'r', 'i', 's', ' ', 'V','I')
list(nuplet)
tuple(nuplet)
str(nuplet)
dict((x,0) for x in nuplet)
set(nuplet)
# Lists are mutable (this works); tuples are not.
liste[0] = 't'
len(nuplet)
|
#!/usr/bin/env python
#
# Copyright (c) 2007 Academic Superstore
# Copyright (c) 2007 Mike "Fuzzy" Partin <fuzzy@academicsuperstore.com>
#
# Program code
import sys, os, popen2
from THWAP.core import config
class thGlsa:
def __init__(self, user=None, host=None):
self.host = host
self.user = user
self.garg = ''
self.glsa = '/usr/bin/glsa-check'
self.applied = {}
self.unaffct = {}
self.affectd = {}
def addUnaffected(self, st=''):
if st != '':
tmp = st.strip().split()
name = tmp[0]
self.unaffct[name] = st.strip().split(st.strip().split()[1])[1].strip()
self.printStatus()
else:
return False
def addAffected(self, st=''):
if st != '':
tmp = st.strip().split()
name = tmp[0]
self.affectd[name] = st.strip().split(st.strip().split()[1])[1].strip()
self.printStatus()
else:
return False
def addApplied(self, st=''):
if st != '':
tmp = st.strip().split()
name = tmp[0]
self.applied[name] = st.strip().split(st.strip().split()[1])[1].strip()
self.printStatus()
else:
return False
def printStatus(self):
sys.stdout.write('Applied:( %5d ) Unaffected:( %5d ) Affected:( %5d )\r' % (len(self.applied.keys()),len(self.unaffct.keys()),len(self.affectd.keys())))
sys.stdout.flush()
def check(self):
if self.host == None:
sys.stdout.write('Checking system security status ... \n')
else:
sys.stdout.write('Checking system security status on %s ... \n' % self.host)
sys.stdout.write('Fetching current GLSA list...\r')
sys.stdout.flush()
self.slurp = config.thSlurp()
self.slurp.registerTrigger('^[0-9]+.*\[U\]', self.addUnaffected)
self.slurp.register_trigger('^[0-9]+.*\[A\]', self.addApplied)
self.slurp.register_trigger('^[0-9]+.*\[N\]', self.addAffected)
if self.host != None and self.user == None:
obj = popen2.Popen4('ssh %s@%s "%s -ln"' % (os.getenv('USER'),self.host,self.glsa))
elif self.host != None and self.user != None:
obj = popen2.Popen4('ssh %s@%s "%s -nl"' % (self.user,self.host,self.glsa))
else:
obj = popen2.Popen4('%s -ln' % self.glsa)
self.slurp.run(obj.fromchild)
print ''
for i in self.affectd.keys():
sys.stdout.write('INFO: %s : %s\n' % (i, self.affectd[i]))
if __name__ == '__main__':
    # Fix: the class is named thGlsa; "GLSA" was never defined, so this
    # entry point raised NameError before doing anything.
    o = thGlsa()
    o.check()
|
from django.conf import settings
from django.conf.urls import include, url
from django.urls import path
from django.contrib import admin
from .views import (
    CandidateListView,
    CandidateDetailView,
    CandidateCreateView,
    CandidateUpdateView,
    CandidateDeleteView,
    ClientListView,
    #CandidateSearch,
    SearchResultsView,
)
from . import views
from clients import views as user_views
# URL routes for the candidates app.
urlpatterns = [
    path('user/', CandidateListView.as_view(), name='candidates-home'),
    path('candidate/<int:pk>/', CandidateDetailView.as_view(), name='candidates-detail'),
    # NOTE(review): 'user/' is registered twice (also as 'candidates-home'
    # above). Requests always match the first entry; this one only serves
    # reverse('candidates-home-overview') -- confirm the duplication is wanted.
    path('user/', CandidateListView.as_view(), name='candidates-home-overview'),
    #path('clientprofile/', user_views.clientprofile, name='clientprofile'),
    path('user/<str:username>', ClientListView.as_view(), name='client-name'),
    #path('candidate/<int:pk>/', CandidateSearch.as_view(), name='candidates-search'),
    path('candidate/new/', CandidateCreateView.as_view(), name='candidates-create'),
    path('candidate/<int:pk>/update/', CandidateUpdateView.as_view(), name='candidates-update'),
    path('candidate/<int:pk>/delete/', CandidateDeleteView.as_view(), name='candidates-delete'),
    path('about/', views.about, name='candidates-about'),
    path('search/', views.search, name='candidates-search'),
    path('admin/', admin.site.urls, name='admin'),
    path('search/results/', SearchResultsView.as_view(), name='search-results'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns |
"""
Experimental getNgramFrequencies implemented in C for more speed.
Turned out to be even slower due to so many simple function calls.
"""
from ctypes import *
from ctypes.util import find_library
class dict_struct(Structure):
    # Mirrors the C-side struct: an ngram substring pointer and its count.
    _fields_ = [("sub", c_char_p), ("c", c_int)]
# libc malloc, used to allocate buffers the C code copies substrings into.
c_lib = CDLL(find_library("c"))
malloc = c_lib.malloc
malloc.argtypes = [c_long]
malloc.restype = c_void_p
# Windows DLL holding the C implementation of getngramfrequencies.
libtest = cdll.LoadLibrary('dll_test/test_dll.dll')
libtest.getngramfrequencies.restype = c_int
def getNgramFrequencies(text, length):
    """Count ngram frequencies of `text` via the C helper.

    Returns a dict mapping each ngram (bytes) to its occurrence count.
    """
    slots = len(text) + 1 - length
    arr = (dict_struct * slots)()
    for entry in arr:
        # Allocate a buffer for the C code to copy each ngram into.
        entry.sub = c_char_p(malloc(length))
    used = libtest.getngramfrequencies(arr, text, length, len(text))
    return {entry.sub: entry.c for entry in arr[:used]}
"""Alias related views."""
from reversion import revisions as reversion
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import login_required, permission_required
from django.db import IntegrityError
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import gettext as _, ngettext
from django.views import generic
from django.views.decorators.http import require_http_methods
from modoboa.core import signals as core_signals
from modoboa.lib.exceptions import Conflict, PermDeniedException
from modoboa.lib.web_utils import render_to_json_response
from ..forms import AliasForm
from ..models import Alias
def _validate_alias(request, form, successmsg, callback=None):
    """Alias validation

    Shared by the creation and modification views: saves the form,
    surfaces a duplicate name as a Conflict, and returns a JSON response.
    """
    if not form.is_valid():
        return render_to_json_response({"form_errors": form.errors}, status=400)
    try:
        alias = form.save()
    except IntegrityError:
        raise Conflict(_("Alias with this name already exists"))
    if callback:
        callback(request.user, alias)
    return render_to_json_response(successmsg)
def _new_alias(request, title, action, successmsg,
               tplname="admin/aliasform.html"):
    """Render (GET) or process (POST) the alias-creation form."""
    core_signals.can_create_object.send(
        "new_alias", context=request.user, klass=Alias)
    if request.method == "POST":
        def callback(user, alias):
            # Run post-creation hooks for the freshly saved alias.
            alias.post_create(user)
        return _validate_alias(
            request, AliasForm(request.user, request.POST), successmsg,
            callback)
    context = {
        "title": title,
        "action": action,
        "formid": "aliasform",
        "action_label": _("Create"),
        "action_classes": "submit",
        "form": AliasForm(request.user),
    }
    return render(request, tplname, context)
@login_required
@permission_required("admin.add_alias")
@reversion.create_revision()
def newalias(request):
    """Alias creation view."""
    return _new_alias(request, _("New alias"), reverse("admin:alias_add"),
                      _("Alias created"))
@login_required
@permission_required("admin.change_alias")
@reversion.create_revision()
def editalias(request, alid, tplname="admin/aliasform.html"):
    """Alias modification view: render (GET) or validate (POST)."""
    alias = Alias.objects.get(pk=alid)
    # Object-level check on top of the model-level permission.
    if not request.user.can_access(alias):
        raise PermDeniedException
    if request.method == "POST":
        return _validate_alias(
            request, AliasForm(request.user, request.POST, instance=alias),
            _("Alias modified"))
    context = {
        "action": reverse("admin:alias_change", args=[alias.id]),
        "formid": "aliasform",
        "title": alias.address,
        "action_label": _("Update"),
        "action_classes": "submit",
        "form": AliasForm(request.user, instance=alias),
    }
    return render(request, tplname, context)
@login_required
@permission_required("admin.delete_alias")
@require_http_methods(["DELETE"])
def delalias(request):
    """Delete every alias listed in the comma-separated ``selection`` GET parameter."""
    selection = request.GET["selection"].split(",")
    for pk in selection:
        alias = Alias.objects.get(pk=pk)
        # Object-level check on top of the model-level permission.
        if not request.user.can_access(alias):
            raise PermDeniedException
        alias.delete()
    return render_to_json_response(
        ngettext("Alias deleted", "Aliases deleted", len(selection)))
class AliasDetailView(
        auth_mixins.PermissionRequiredMixin, generic.DetailView):
    """DetailView for Alias."""

    model = Alias
    permission_required = "admin.add_alias"

    def has_permission(self):
        """Check object-level access."""
        result = super(AliasDetailView, self).has_permission()
        if not result:
            return result
        # Model-level permission alone is not enough: the user must also be
        # allowed to access this specific alias object.
        return self.request.user.can_access(self.get_object())

    def get_context_data(self, **kwargs):
        """Add information to context."""
        context = super(AliasDetailView, self).get_context_data(**kwargs)
        # Used by the template to highlight the "identities" section.
        context["selection"] = "identities"
        return context
|
## Santosh Khadka
## Python - Warmup Card Game Project
import random
import pdb
# NOTE(review): suits and ranks are sets, so deck-building order is
# arbitrary; harmless here because the deck is shuffled before play.
suits = {"Hearts", "Diamonds", "Spades", "Clubs"}
ranks = {"Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Jack", "Queen", "King", "Ace"}
# Dictionary for deck: rank name -> numeric strength (Ace high).
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':11, 'Queen':12, 'King':13, 'Ace':14}
## CLASSES
class Card:
    """A single playing card with a suit, a rank and a numeric value."""

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank
        # Numeric strength comes from the module-level `values` table.
        self.value = values[rank]

    def __str__(self):
        return f"{self.rank} of {self.suit}"
class Deck():
    """A full 52-card deck.

    Builds one Card object per (suit, rank) pair, can shuffle itself in
    place (random.shuffle), and deals by popping the last card -- dealing
    returns Card instances, not plain Python values.
    """

    def __init__(self):  # every fresh deck is identical, so no arguments
        self.all_cards = [Card(suit, rank)
                          for suit in suits
                          for rank in ranks]

    def shuffle(self):
        # In-place shuffle of the card list.
        random.shuffle(self.all_cards)

    def deal_one(self):
        # Dealing removes and returns the top (last) card; pop() with no
        # argument defaults to index -1.
        return self.all_cards.pop()
class Player:
    """A named player holding a pile of Card instances.

    Cards are removed one at a time from the top and added to the bottom,
    either singly or as a whole list.
    """

    def __init__(self, name):
        self.name = name
        self.all_cards = []  # a fresh player starts with an empty hand

    def remove_one(self):
        # Play the top (last) card.
        return self.all_cards.pop()

    def add_cards(self, new_cards):
        # Accept either a list of cards (extend) or a single card (append).
        if type(new_cards) is list:
            self.all_cards.extend(new_cards)
        else:
            self.all_cards.append(new_cards)

    def __str__(self):
        return f"Player {self.name} has {len(self.all_cards)} card(s)."
# globals
# Module-level players; rebound inside main() at the start of each game.
player_one = Player("One")
player_two = Player("Two")
# ## LOGIC
# def logic_new_game():
# # New players
# global player_one
# global player_two
# player_one = Player("One")
# player_two = Player("Two")
# # New deck
# new_deck = Deck()
# new_deck.shuffle()
# # Splitting deck between 2 players
# deck_half = int(len(new_deck.all_cards)/2)
# for x in range(deck_half):
# player_one.add_cards(new_deck.deal_one())
# player_two.add_cards(new_deck.deal_one())
# #print(len(player_one.all_cards))
# #print(len(player_two.all_cards))
def main():
    """Play one full game of War between two players.

    Deals half of a shuffled deck to each player, then plays rounds until
    one player runs out of cards (or cannot pay the 5-card cost of a war).
    """
    global player_one
    global player_two
    player_one = Player("One")
    player_two = Player("Two")

    new_deck = Deck()
    new_deck.shuffle()

    # Split the deck evenly between the two players.
    deck_half = int(len(new_deck.all_cards) / 2)
    for _ in range(deck_half):
        player_one.add_cards(new_deck.deal_one())
        player_two.add_cards(new_deck.deal_one())

    game_on = True
    round_num = 0
    while game_on:
        round_num += 1
        print("Round: ", round_num)

        # A player with no cards left has lost.
        if len(player_one.all_cards) == 0:
            print("Player One out of cards! Game Over!")
            print("Player Two Wins!")
            game_on = False
            break
        if len(player_two.all_cards) == 0:
            print("Player Two is out of cards! Game over!")
            print("Player One Wins!")
            game_on = False
            break

        # NEW ROUND: each player plays one card.
        # Fix: the original appended the bound method (player.remove_one)
        # instead of calling it, so no card was ever actually played.
        player_one_cards = [player_one.remove_one()]
        player_two_cards = [player_two.remove_one()]

        at_war = True
        while at_war:
            if player_one_cards[-1].value > player_two_cards[-1].value:
                # Fix: the round winner takes BOTH piles (the original handed
                # player_one_cards to both players, duplicating/losing cards).
                player_one.add_cards(player_one_cards)
                player_one.add_cards(player_two_cards)
                # Fix: assignment, not '==' comparison -- the comparison was
                # a no-op and left the inner loop running forever.
                at_war = False
            elif player_one_cards[-1].value < player_two_cards[-1].value:
                player_two.add_cards(player_one_cards)
                player_two.add_cards(player_two_cards)
                at_war = False
            else:
                # Cards are equal: war.
                print("WAR TIME!!")
                if len(player_one.all_cards) < 5:
                    print("Player ONE is unable to play war! Game over!")
                    print("Player TWO wins!")
                    game_on = False
                    break
                elif len(player_two.all_cards) < 5:
                    print("Player TWO is unable to play war! Game over!")
                    print("Player ONE wins!")
                    game_on = False
                    break
                else:
                    # Each player commits 5 more cards, then compare again.
                    for _ in range(5):
                        player_one_cards.append(player_one.remove_one())
                        player_two_cards.append(player_two.remove_one())

if __name__ == "__main__":
    main()
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from time import strftime
import pytest
from clsCommon import Common
import clsTestService
import enums
from localSettings import *
import localSettings
from utilityTestFunc import *
class Test:
    """KMS UI test 740: enable/disable comments in a channel (see flow notes below)."""
    #================================================================================================================================
    # @Author: Oded.berihon @Test name : Enable/Disable comments in channel
    # Test description:
    # Upload entry publish it to channel edit channel and enable comment go to entry in the channel and add comment
    # go back to edit channel and disable comment go to entry and try to add comment.
    #
    #================================================================================================================================
    testNum = "740"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    driver = None
    common = None
    # Test variables
    entryName1 = None
    entryDescription = "description"
    entryTags = "tag,"
    channelName = "9437BD9A_this Is My New Channel"
    channelDescription = "description"
    channelTags = "tag,"
    privacyType = ""
    comment = "Comment 1"
    filePath1 = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\videos\QR30SecMidRight.mp4'
    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        return request.param

    def test_01(self,driverFix,env):
        #write to log we started the test
        logStartTest(self,driverFix)
        try:
            ############################# TEST SETUP ###############################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            self,self.driver = clsTestService.initialize(self, driverFix)
            self.common = Common(self.driver)
            ########################################################################
            # Unique names per run so reruns do not collide with old entities.
            self.entryName1 = clsTestService.addGuidToString('Video', self.testNum)
            self.channelName = clsTestService.addGuidToString('Enable/Disable comments in channel', self.testNum)
            ########################## TEST STEPS - MAIN FLOW #######################
            writeToLog("INFO","Step 1: Going to perform login to KMS site as user")
            if self.common.loginAsUser() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 1: FAILED to login as user")
                return
            writeToLog("INFO","Step 2: Going to upload Video type entry")
            if self.common.upload.uploadEntry(self.filePath1, self.entryName1, self.entryDescription, self.entryTags) == None:
                self.status = "Fail"
                writeToLog("INFO","Step 2: FAILED failed to upload entry Video")
                return
            writeToLog("INFO","Step 3: Going to create new channel")
            if self.common.channel.createChannel(self.channelName, self.channelDescription, self.channelTags, enums.ChannelPrivacyType.OPEN, False, True, True) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 3: FAILED to create Channel#1")
                return
            writeToLog("INFO","Step 4: Going to publish entry1")
            # NOTE(review): unlike every other step, this failure path does not
            # set self.status = "Fail" -- confirm whether that is intentional.
            if self.common.myMedia.publishSingleEntry(self.entryName1, [], [self.channelName], publishFrom = enums.Location.MY_MEDIA) == False:
                writeToLog("INFO","Step 4: FAILED - could not publish Video to channel")
                return
            writeToLog("INFO","Step 5: Going to add comment to entry")
            if self.common.channel.addCommentToEntryFromChannel(self.channelName, self.entryName1, self.comment) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 5: FAILED to add comment to entry")
                return
            writeToLog("INFO","Step 6: Going navigate to edit channel page")
            if self.common.channel.navigateToEditChannelPage(self.channelName) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED to navigate to edit channel page")
                return
            writeToLog("INFO","Step 7: Going to enable and disable comments in channel")
            if self.common.channel.enableDisableCommentsInChannel(self.channelName, False) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 7: FAILED to add and disable comments in channel")
                return
            writeToLog("INFO","Step 8: Going to navigate to entry from channel page")
            if self.common.channel.navigateToEntryFromChannel(self.channelName, self.entryName1) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 8: FAILED to navigate to edit channel page")
                return False
            writeToLog("INFO","Step 9: Going to verify user can't add comment")
            if self.common.entryPage.checkEntryCommentsSection(self.comment, True, False) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9:FAILED to verify user can't add comment")
                return False
            sleep(3)
            #########################################################################
            writeToLog("INFO","TEST PASSED: 'Enable/Disable comments in channel' was done successfully")
        # If an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)

    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        # Best-effort cleanup of the entities created by the test.
        try:
            self.common.handleTestFail(self.status, leavePageExpected=True)
            writeToLog("INFO","**************** Starting: teardown_method ****************")
            self.common.myMedia.deleteEntriesFromMyMedia([self.entryName1])
            self.common.channel.deleteChannel(self.channelName)
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")
pytest.main('test_' + testNum + '.py --tb=line') |
default_app_config = 'StrategyBacktest.apps.StrategybacktestConfig' |
from django.shortcuts import render
from app.models import Student
def home(request):
    """Render the home page with the full student list."""
    context = {'students': Student.objects.all()}
    return render(request, 'home.html', context)
def view_student(request, pk):
    """Render the detail page for a single student.

    NOTE(review): the single Student is exposed under the plural key
    'students' -- confirm the template expects that name.
    """
    context = {'students': Student.objects.get(pk=pk)}
    return render(request, 'view_student.html', context)
"""
This script evaluates the *.log files produced by the solver of the Differential Evolution problem for any of the
algorithms.
Note that this script evaluates all files placed in the {LOGS_DIR} directory, where the {LOGS_DIR} represents the path
to a directory containing *.log files produced by any of AgE3-DifferentialEvolution algorithms, as well as copies of
configuration files used for customization of this algorithm. The {LOGS_DIR} path is provided by the first script
parameter.
"""
# TODO: Implement evaluation of log entries for a particular workplace in a tick (i.e. the log entry starting with the
# '[W]' prefix) and a log entry with the best solution (i.e. the log entry staring with the '[B]' prefix).
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
def main():
    """Evaluate AgE3 DE *.log files from sys.argv[1]: parse the common config
    and per-run logs, then plot best-fitness boxplots and mean evaluation
    counts over simulation time."""
    # PARSING THE CONFIGURATION FILE.
    config = dict()
    with open(sys.argv[1] + '/common-config.properties') as properties_file:
        for entry in properties_file:
            # Skip blank lines and '#' comments; split on the first '=' only.
            if len(entry.strip()) > 0 and (not entry.startswith('#')):
                key, value = entry.split('=', 1)
                config[key.strip()] = value.strip()
    logging_interval_seconds = int(config['de.logging.interval-in-milliseconds']) / 1000
    simulation_time = int(config['de.stop-condition.time-in-seconds'])
    ticks_number = int(simulation_time / logging_interval_seconds)
    # PARSING *.LOG FILES.
    logs_directory_path = sys.argv[1]
    best_solutions = list()
    evaluations_count = list()
    for log_file_path in os.listdir(logs_directory_path):
        if not log_file_path.endswith('.log'):
            continue
        with open(os.path.join(logs_directory_path, log_file_path)) as log_file:
            best_solutions_by_file = list()
            evaluations_count_by_file = list()
            for entry in log_file:
                # '[S]' entries look like: [S];<tick>;<best fitness>;<evaluations>
                if entry.startswith('[S]'):
                    splitted_entry = entry.split(';', 3)
                    best_solutions_by_file.append(float(splitted_entry[2]))
                    evaluations_count_by_file.append(int(splitted_entry[3]))
            # Every log must cover the whole simulation, one entry per tick.
            assert len(best_solutions_by_file) == ticks_number
            assert len(evaluations_count_by_file) == ticks_number
            best_solutions.append(best_solutions_by_file)
            evaluations_count.append(evaluations_count_by_file)
    # PROCESSING COLLECTED DATA.
    # Transpose: one array per tick across all runs.
    best_solutions_by_tick = np.array(
        [np.array([solution[tick] for solution in best_solutions]) for tick in range(ticks_number)])
    mean_evaluations_count_by_tick = np.array(
        [np.array([evaluations[tick] for evaluations in evaluations_count]).mean() for tick in range(ticks_number)])
    # Slice bounds for which ticks get plotted.
    start, end, step = 0, ticks_number, 1
    # GENERAL PLOT SETTINGS.
    fig, solutions_plot = plt.subplots()
    plt.xticks(fontsize=10, rotation=90)
    plt.xlabel('Simulation time [s]', fontsize=10)
    # SOLUTIONS PLOT.
    solutions_plot.boxplot(best_solutions_by_tick[start:end:step].T, notch=False, showmeans=True, meanline=True,
                           flierprops=dict(markerfacecolor='white', markeredgecolor='black', markersize=7, marker='.'),
                           meanprops=dict(color='green', linestyle='-'),
                           whiskerprops=dict(color='purple', linestyle='-'),
                           capprops=dict(color='purple'))
    plt.yscale('log')
    plt.yticks(fontsize=10)
    plt.ylabel('Best fitness value', fontsize=10)
    # EVALUATIONS PLOT (second y-axis sharing the same x-axis).
    evaluations_plot = solutions_plot.twinx()
    evaluations_plot.plot(np.arange(1, (ticks_number - start)/step + 1), mean_evaluations_count_by_tick[start:end:step], 'y-')
    y_tick_size = 1e5
    y_ticks_range = np.arange(plt.gca().get_ylim()[0], plt.gca().get_ylim()[1] + y_tick_size, y_tick_size)
    plt.yticks(y_ticks_range, ['%.1E' % tick if tick != 0 else '0.0' for tick in y_ticks_range], fontsize=10)
    plt.ylabel('Evaluations count', fontsize=10)
    # OX TICKS PROPERTIES.
    plt.xlim((0, np.ceil((ticks_number - start)/step) + 1))
    x_ticks_range = np.arange(logging_interval_seconds, simulation_time + logging_interval_seconds, logging_interval_seconds)[start:end:step]
    plt.xticks(np.arange(1, (ticks_number - start)/step + 1), ['%.1f' % tick for tick in x_ticks_range])
    # GENERAL PLOT SETTINGS.
    plt.title('Best fitness and evaluations count in the time domain')
    plt.subplots_adjust(left=0.04, bottom=0.08, right=0.94, top=0.96, wspace=0.0, hspace=0.0)
    plt.show()

if __name__ == '__main__':
    main()
|
import os, time
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import inception_v3
import matplotlib.pyplot as plt
from utils import preprocess_image, deprocess_image
from model import get_feature_extractor
import argparse
print("Libraries Loaded!")
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# You can tweak these setting to obtain new visual effects.
# Maps InceptionV3 layer name -> weight of that layer in the DeepDream loss.
layer_settings = {
    "mixed4": 0.0,
    "mixed5": 1.5,
    "mixed6": 2.0,
    "mixed7": 0.5,
}
# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.01  # Gradient ascent step size
num_octave = 3  # Number of scales at which to run gradient ascent
octave_scale = 1.4  # Size ratio between scales
iterations = 10  # Number of ascent steps per scale
max_loss = 15.0  # Ascent stops early once the loss exceeds this value
#----------------------------------------------------------------------
def compute_loss(input_image, feature_extractor):
    """DeepDream loss: weighted sum of scaled layer activations."""
    features = feature_extractor(input_image)
    loss = tf.zeros(shape=())
    for name in features.keys():
        activation = features[name]
        # Exclude a 2-pixel border to avoid border artifacts, and normalize
        # by the total activation tensor size.
        scaling = tf.reduce_prod(tf.cast(tf.shape(activation), "float32"))
        inner = activation[:, 2:-2, 2:-2, :]
        loss = loss + layer_settings[name] * tf.reduce_sum(tf.square(inner)) / scaling
    return loss
@tf.function
def gradient_ascent_step(img, feature_extractor, learning_rate):
    """One gradient-ascent step: nudge img to increase the DeepDream loss.

    Returns the (loss, updated image) pair.
    """
    with tf.GradientTape() as tape:
        tape.watch(img)
        loss = compute_loss(img, feature_extractor)
    # Compute gradients.
    grads = tape.gradient(loss, img)
    # Normalize gradients (1e-6 floor guards against division by zero).
    grads /= tf.maximum(tf.reduce_mean(tf.abs(grads)), 1e-6)
    img += learning_rate * grads
    return loss, img
def gradient_ascent_loop(img, feature_extractor, iterations, learning_rate, max_loss=max_loss):
    """Run up to `iterations` ascent steps, stopping early above max_loss."""
    for step_idx in range(iterations):
        loss, img = gradient_ascent_step(img, feature_extractor, learning_rate)
        # Stop before printing once the loss limit is exceeded.
        if max_loss is not None and loss > max_loss:
            break
        print("... Loss value at step %d: %.2f" % (step_idx, loss))
    return img
#----------------------------------------------------------------------
def dream_on(original_img, feature_extractor, output_name="result.jpg"):
    """Run multi-octave DeepDream on a preprocessed image and save the result.

    Gradient ascent runs from the smallest octave up to the original size;
    after each octave the detail lost by downscaling is re-injected.
    """
    #original_img = preprocess_image(base_image_path)
    original_shape = original_img.shape[1:3]
    # Build the list of octave shapes, largest first.
    successive_shapes = [original_shape]
    for i in range(1, num_octave):
        shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
        print(shape)
        successive_shapes.append(shape)
    # Process octaves smallest to largest.
    successive_shapes = successive_shapes[::-1]
    shrunk_original_img = tf.image.resize(original_img, successive_shapes[0])
    img = tf.identity(original_img)  # Make a copy
    for i, shape in enumerate(successive_shapes):
        print("Processing octave %d with shape %s" % (i, shape))
        img = tf.image.resize(img, shape)
        img = gradient_ascent_loop(
            img, feature_extractor=feature_extractor,
            iterations=iterations, learning_rate=step, max_loss=max_loss
        )
        # Re-inject the detail lost when the original was shrunk to this octave.
        upscaled_shrunk_original_img = tf.image.resize(shrunk_original_img, shape)
        same_size_original = tf.image.resize(original_img, shape)
        lost_detail = same_size_original - upscaled_shrunk_original_img
        img += lost_detail
        shrunk_original_img = tf.image.resize(original_img, shape)
    keras.preprocessing.image.save_img(output_name, deprocess_image(img.numpy()))
#----------------------------------------------------------------------
def main():
    """CLI entry point: parse args, load and optionally downscale the source
    image, run DeepDream, and save the result."""
    parser = argparse.ArgumentParser(description="Deep Dream tutorial")
    parser.add_argument("--src_img", default="sky.jpg", required=True, type=str, help="Source image to perform deep dram on")
    parser.add_argument("--result_img", default="results/dream_result.jpg", type=str, help="Result image to perform deep dram on")
    parser.add_argument("--downscale_factor", default=1, type=float, help="Downscale Factor")
    args = parser.parse_args()

    proc = preprocess_image(args.src_img)
    if args.downscale_factor > 1:
        print(proc.shape)
        new_shape = [int(proc.shape[1] // args.downscale_factor),
                     int(proc.shape[2] // args.downscale_factor)]
        proc = tf.image.resize(proc, new_shape)
        print(proc.shape)

    model = get_feature_extractor(layer_settings)
    print("model loaded\nDreaming")

    # Ensure the output directory exists. Fix: use os.path.dirname instead of
    # split("/")[0] (which broke for nested or Windows paths and bare
    # filenames), and makedirs(exist_ok=True) instead of mkdir under a bare
    # except that swallowed every error.
    out_dir = os.path.dirname(args.result_img)
    if out_dir:
        try:
            os.makedirs(out_dir, exist_ok=True)
        except OSError as exc:
            print(f"couldn't create directory: {exc}")

    dream_on(proc, model, args.result_img)
if __name__ == "__main__":
    # Time the whole run (model load + dreaming) for the summary line.
    st = time.time()
    main()
    print(f"Total time: {time.time()-st} s")
|
#palindrome
# Python 2 script: input() evaluates the typed text (so n is an int) and
# n/10 is integer (floor) division.
n=input("Enter number:")
m=n
r=0
# Reverse the digits of n into r, consuming n digit by digit.
while n!=0:
    digit=n%10
    r=r*10+digit
    n=n/10
# A number equal to its own digit-reversal is a palindrome.
if m==r:
    print"yes"
else:
    print"no"
#Primenumber checker
print""
n=input("Enter prime number")
prime=True
if n==1:
print"Neither prime nor composite"
else :
for i in range(2,n):
n%2==0
prime=False
if prime==False:
print"Not prime"
else:
print"Prime"
#fibonacci
# Python 2 script: prints the first n Fibonacci terms on one line.
# NOTE(review): the first two terms are always printed, so n < 2 still
# prints "0 1" -- confirm that is the intended behaviour.
n=input("Enter numbr of terms")
first=0
second=1
print first,second,
count=3
while count<=n:
    third=first+second
    print third,
    first=second
    second=third
    count=count+1
#sumofdigits
print""
# Python 2 script: sum the digits of n (n/10 is floor division).
n=input("Enter number:")
sod=0
m=n
while(n>0):
    d=n%10
    sod=sod+d
    n=n/10
print sod
|
# Package metadata for ImageTools.
name = "ImageTools"
major = 0  # major version number
minor = 1  # minor version number
status = "dev"  # release status
|
import ais.stream
from tqdm import tqdm
import os
import json
def parse_raw(path):
    """Yield decoded AIS messages from the raw NMEA log at `path`."""
    with open(path, 'r') as stream:
        yield from ais.stream.decode(stream)
def check_msg(msg: dict) -> bool:
    """Return True if the decoded message carries a timestamp ('hour' key).

    Simplified from an if/return-True/return-False chain to a single
    membership expression.
    """
    return 'hour' in msg
def write_json(raw_file, json_file):
    """Convert a raw AIS log to newline-delimited JSON.

    Keeps only messages accepted by check_msg; returns how many were written.
    """
    # Start from a clean output file (we open in append mode below).
    if os.path.exists(json_file):
        os.remove(json_file)
    written = 0
    with open(json_file, 'a') as out:
        for msg in tqdm(parse_raw(raw_file)):
            if not check_msg(msg):
                continue
            out.write(json.dumps(msg) + '\n')
            written += 1
    return written
if __name__ == "__main__":
    # NOTE(review): despite the name, raw_dir points at a single CSV log file.
    raw_dir = "../../data/raw_all/CCG_AIS_Log_2018-05-02.csv"
    count = write_json(raw_dir,'out_hour.json')
    print(count)
|
import os.path as osp
from six.moves import cPickle
from smqtk.representation.classification_element import ClassificationElement
from smqtk.exceptions import NoClassificationError
from smqtk.utils import file_utils
from smqtk.utils.string_utils import partition_string
__author__ = "paul.tunison@kitware.com"
class FileClassificationElement (ClassificationElement):
    """Classification element that persists its label->confidence map as a
    pickle file on disk."""

    @classmethod
    def is_usable(cls):
        # No optional dependencies; always available.
        return True

    def __init__(self, type_name, uuid, save_dir, subdir_split=None,
                 pickle_protocol=-1):
        """
        Initialize a file-base descriptor element.

        :param type_name: Type of classification. This is usually the name of
            the classifier that generated this result.
        :type type_name: str

        :param uuid: uuid for this classification
        :type uuid: collections.Hashable

        :param save_dir: Directory to save this element's contents. If this path
            is relative, we interpret as relative to the current working
            directory.
        :type save_dir: str | unicode

        :param subdir_split: If a positive integer, this will cause us to store
            the vector file in a subdirectory under the ``save_dir`` that was
            specified. The integer value specifies the number of splits that we
            will make in the stringification of this descriptor's UUID. If there
            happen to be dashes in this stringification, we will remove them
            (as would happen if given an uuid.UUID instance as the uuid
            element).
        :type subdir_split: None | int

        :param pickle_protocol: Pickling protocol to use. We will use -1 by
            default (latest version, probably binary).
        :type pickle_protocol: int
        """
        super(FileClassificationElement, self).__init__(type_name, uuid)
        self.save_dir = osp.abspath(osp.expanduser(save_dir))
        self.pickle_protocol = pickle_protocol

        # Saving components
        self.subdir_split = subdir_split
        if subdir_split and int(subdir_split) > 0:
            self.subdir_split = subdir_split = int(subdir_split)
            # Using all but the last split segment. This is so we don't create
            # a whole bunch of directories with a single element in them.
            save_dir = osp.join(self.save_dir,
                                *partition_string(str(uuid).replace('-', ''),
                                                  subdir_split)[:subdir_split-1]
                                )
        else:
            save_dir = self.save_dir

        self.filepath = osp.join(save_dir,
                                 "%s.%s.classification.pickle"
                                 % (self.type_name, str(self.uuid)))

    def get_config(self):
        # Configuration needed to reconstruct this element (sans identity).
        return {
            "save_dir": self.save_dir,
            'subdir_split': self.subdir_split,
            "pickle_protocol": self.pickle_protocol,
        }

    def has_classifications(self):
        """
        :return: If this element has classification information set.
        :rtype: bool
        """
        return osp.isfile(self.filepath)

    def get_classification(self):
        """
        Get classification result map, returning a label-to-confidence dict.

        We do no place any guarantees on label value types as they may be
        represented in various forms (integers, strings, etc.).

        Confidence values are in the [0,1] range.

        :raises NoClassificationError: No classification labels/confidences yet
            set.

        :return: Label-to-confidence dictionary.
        :rtype: dict[collections.Hashable, float]
        """
        if not self.has_classifications():
            raise NoClassificationError("No classification values.")
        # Fix: pickle data must be read in binary mode. The default protocol
        # here is -1 (binary), so text-mode reads broke on Python 3 and on
        # Windows Python 2.
        with open(self.filepath, 'rb') as f:
            return cPickle.load(f)

    def set_classification(self, m=None, **kwds):
        """
        Set the whole classification map for this element. This will strictly
        overwrite the entire label-confidence mapping (vs. updating it)

        Label/confidence values may either be provided via keyword arguments or
        by providing a dictionary mapping labels to confidence values.

        :param m: New labels-to-confidence mapping to set.
        :type m: dict[collections.Hashable, float]

        :raises ValueError: The given label-confidence map was empty.
        """
        m = super(FileClassificationElement, self)\
            .set_classification(m, **kwds)
        file_utils.safe_create_dir(osp.dirname(self.filepath))
        # Fix: binary mode to match the binary pickle protocol (see above).
        with open(self.filepath, 'wb') as f:
            cPickle.dump(m, f, self.pickle_protocol)
|
# type: ignore
import argparse
import collections.abc
import contextlib
import inspect
import itertools
import os
import os.path
import pathlib
import shutil
import sys
import tempfile
import time
import unittest.mock
import warnings
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataloader_experimental import DataLoader2
from torchvision import datasets as legacy_datasets
from torchvision.datasets.utils import extract_archive
from torchvision.prototype import datasets as new_datasets
from torchvision.transforms import PILToTensor
def main(
    name,
    *,
    variant=None,
    legacy=True,
    new=True,
    start=True,
    iteration=True,
    num_starts=3,
    num_samples=10_000,
    temp_root=None,
    num_workers=0,
):
    """Run the selected start/iteration benchmarks for dataset ``name``.

    Filters DATASET_BENCHMARKS by name (and optionally ``variant``) and
    prints one section per matching benchmark. NOTE(review): only a cold
    start is measured for the "new" datasets — there is no new warm-start
    branch; presumably intentional, but worth confirming.
    """
    benchmarks = [
        benchmark
        for benchmark in DATASET_BENCHMARKS
        if benchmark.name == name and (variant is None or benchmark.variant == variant)
    ]
    if not benchmarks:
        msg = f"No DatasetBenchmark available for dataset '{name}'"
        if variant is not None:
            msg += f" and variant '{variant}'"
        raise ValueError(msg)
    for benchmark in benchmarks:
        # Section header for this benchmark (name plus optional variant).
        print("#" * 80)
        print(f"{benchmark.name}" + (f" ({benchmark.variant})" if benchmark.variant is not None else ""))
        if legacy and start:
            print(
                "legacy",
                "cold_start",
                Measurement.time(benchmark.legacy_cold_start(temp_root, num_workers=num_workers), number=num_starts),
            )
            print(
                "legacy",
                "warm_start",
                Measurement.time(benchmark.legacy_warm_start(temp_root, num_workers=num_workers), number=num_starts),
            )
        if legacy and iteration:
            print(
                "legacy",
                "iteration",
                Measurement.iterations_per_time(
                    benchmark.legacy_iteration(temp_root, num_workers=num_workers, num_samples=num_samples)
                ),
            )
        if new and start:
            print(
                "new",
                "cold_start",
                Measurement.time(benchmark.new_cold_start(num_workers=num_workers), number=num_starts),
            )
        if new and iteration:
            print(
                "new",
                "iteration",
                Measurement.iterations_per_time(
                    benchmark.new_iteration(num_workers=num_workers, num_samples=num_samples)
                ),
            )
class DatasetBenchmark:
    """Pairs a prototype ("new") torchvision dataset with its legacy
    counterpart and builds the timed callables used by ``main``.

    NOTE(review): relies on torchvision prototype internals
    (``new_datasets._api``, ``info._valid_options``); their semantics are
    assumed from usage here — confirm against the torchvision version in use.
    """

    def __init__(
        self,
        name: str,
        *,
        variant=None,
        legacy_cls=None,
        new_config=None,
        legacy_config_map=None,
        legacy_special_options_map=None,
        prepare_legacy_root=None,
    ):
        self.name = name
        self.variant = variant
        # Prototype dataset entry; the legacy class is auto-detected from the
        # lower-cased legacy class name unless given explicitly.
        self.new_raw_dataset = new_datasets._api.find(name)
        self.legacy_cls = legacy_cls or self._find_legacy_cls()
        if new_config is None:
            new_config = self.new_raw_dataset.default_config
        elif isinstance(new_config, dict):
            new_config = self.new_raw_dataset.info.make_config(**new_config)
        self.new_config = new_config
        self.legacy_config_map = legacy_config_map
        self.legacy_special_options_map = legacy_special_options_map or self._legacy_special_options_map
        self.prepare_legacy_root = prepare_legacy_root

    def new_dataset(self, *, num_workers=0):
        # Prototype datasets are iterated through DataLoader2.
        return DataLoader2(new_datasets.load(self.name, **self.new_config), num_workers=num_workers)

    def new_cold_start(self, *, num_workers):
        # Cold start: dataset construction AND first sample are timed.
        def fn(timer):
            with timer:
                dataset = self.new_dataset(num_workers=num_workers)
                next(iter(dataset))
            return fn
        return fn

    def new_iteration(self, *, num_samples, num_workers):
        # Iterate up to ``num_samples`` samples; only the loop is timed.
        def fn(timer):
            dataset = self.new_dataset(num_workers=num_workers)
            num_sample = 0
            with timer:
                for _ in dataset:
                    num_sample += 1
                    if num_sample == num_samples:
                        break
            return num_sample
        return fn

    def suppress_output(self):
        """Context manager that silences stdout/stderr (legacy datasets print)."""
        @contextlib.contextmanager
        def context_manager():
            with open(os.devnull, "w") as devnull:
                with contextlib.redirect_stdout(devnull), contextlib.redirect_stderr(devnull):
                    yield
        return context_manager()

    def legacy_dataset(self, root, *, num_workers=0, download=None):
        # legacy_config_map is called with (benchmark, root); it may override
        # the root via a "root" key in the returned config.
        legacy_config = self.legacy_config_map(self, root) if self.legacy_config_map else dict()
        special_options = self.legacy_special_options_map(self)
        if "download" in special_options and download is not None:
            special_options["download"] = download
        with self.suppress_output():
            return DataLoader(
                self.legacy_cls(legacy_config.pop("root", str(root)), **legacy_config, **special_options),
                shuffle=True,
                num_workers=num_workers,
            )

    @contextlib.contextmanager
    def patch_download_and_integrity_checks(self):
        """Disable downloads and reduce integrity checks to existence tests.

        Patches are applied on the dataset's own module when it re-exported
        the helper, otherwise on torchvision.datasets.utils.
        """
        patches = [
            ("download_url", dict()),
            ("download_file_from_google_drive", dict()),
            ("check_integrity", dict(new=lambda path, md5=None: os.path.isfile(path))),
        ]
        dataset_module = sys.modules[self.legacy_cls.__module__]
        utils_module = legacy_datasets.utils
        with contextlib.ExitStack() as stack:
            for name, patch_kwargs in patches:
                patch_module = dataset_module if name in dir(dataset_module) else utils_module
                stack.enter_context(unittest.mock.patch(f"{patch_module.__name__}.{name}", **patch_kwargs))
            yield stack

    def _find_resource_file_names(self):
        # Union of resource file names over every valid option combination.
        info = self.new_raw_dataset.info
        valid_options = info._valid_options
        file_names = set()
        for options in (
            dict(zip(valid_options.keys(), values)) for values in itertools.product(*valid_options.values())
        ):
            resources = self.new_raw_dataset.resources(info.make_config(**options))
            file_names.update([resource.file_name for resource in resources])
        return file_names

    @contextlib.contextmanager
    def legacy_root(self, temp_root):
        """Yield a temp directory with the raw archives symlinked in.

        The directory is removed afterwards; archives stay untouched because
        only symlinks are deleted.
        """
        new_root = pathlib.Path(new_datasets.home()) / self.name
        legacy_root = pathlib.Path(tempfile.mkdtemp(dir=temp_root))
        # Different devices would skew I/O timings — warn, don't fail.
        if os.stat(new_root).st_dev != os.stat(legacy_root).st_dev:
            warnings.warn(
                "The temporary root directory for the legacy dataset was created on a different storage device than "
                "the raw data that is used by the new dataset. If the devices have different I/O stats, this will "
                "distort the benchmark. You can use the '--temp-root' flag to relocate the root directory of the "
                "temporary directories.",
                RuntimeWarning,
            )
        try:
            for file_name in self._find_resource_file_names():
                (legacy_root / file_name).symlink_to(new_root / file_name)
            if self.prepare_legacy_root:
                self.prepare_legacy_root(self, legacy_root)
            with self.patch_download_and_integrity_checks():
                yield legacy_root
        finally:
            shutil.rmtree(legacy_root)

    def legacy_cold_start(self, temp_root, *, num_workers):
        # Cold start: construction + first sample timed, no prior warm-up.
        def fn(timer):
            with self.legacy_root(temp_root) as root:
                with timer:
                    dataset = self.legacy_dataset(root, num_workers=num_workers)
                    next(iter(dataset))
            return fn
        return fn

    def legacy_warm_start(self, temp_root, *, num_workers):
        # Warm start: build once untimed (triggers extraction/processing),
        # then time a second construction with downloads forced off.
        def fn(timer):
            with self.legacy_root(temp_root) as root:
                self.legacy_dataset(root, num_workers=num_workers)
                with timer:
                    dataset = self.legacy_dataset(root, num_workers=num_workers, download=False)
                    next(iter(dataset))
            return fn
        return fn

    def legacy_iteration(self, temp_root, *, num_samples, num_workers):
        # Iterate up to ``num_samples`` samples of the legacy DataLoader.
        def fn(timer):
            with self.legacy_root(temp_root) as root:
                dataset = self.legacy_dataset(root, num_workers=num_workers)
                with timer:
                    for num_sample, _ in enumerate(dataset, 1):
                        if num_sample == num_samples:
                            break
            return num_sample
        return fn

    def _find_legacy_cls(self):
        # Map lower-cased legacy class names to classes and look ourselves up.
        legacy_clss = {
            name.lower(): dataset_class
            for name, dataset_class in legacy_datasets.__dict__.items()
            if isinstance(dataset_class, type) and issubclass(dataset_class, legacy_datasets.VisionDataset)
        }
        try:
            return legacy_clss[self.name]
        except KeyError as error:
            raise RuntimeError(
                f"Can't determine the legacy dataset class for '{self.name}' automatically. "
                f"Please set the 'legacy_cls' keyword argument manually."
            ) from error

    # Constructor keywords that are handled specially (not dataset config).
    _SPECIAL_KWARGS = {
        "transform",
        "target_transform",
        "transforms",
        "download",
    }

    @staticmethod
    def _legacy_special_options_map(benchmark):
        # Collect parameters accepted anywhere in the legacy class's MRO below
        # VisionDataset, then fill in only the special kwargs it supports.
        available_parameters = set()
        for cls in benchmark.legacy_cls.__mro__:
            if cls is legacy_datasets.VisionDataset:
                break
            available_parameters.update(inspect.signature(cls.__init__).parameters)
        available_special_kwargs = benchmark._SPECIAL_KWARGS.intersection(available_parameters)
        special_options = dict()
        if "download" in available_special_kwargs:
            special_options["download"] = True
        if "transform" in available_special_kwargs:
            special_options["transform"] = PILToTensor()
            if "target_transform" in available_special_kwargs:
                special_options["target_transform"] = torch.tensor
        elif "transforms" in available_special_kwargs:
            special_options["transforms"] = JointTransform(PILToTensor(), PILToTensor())
        return special_options
class Measurement:
    """Runs timed callables and renders results as human-readable strings."""

    @classmethod
    def time(cls, fn, *, number):
        """Time ``fn`` ``number`` times; return a formatted seconds string."""
        results = Measurement._timeit(fn, number=number)
        # Each result is (output, seconds); keep only the seconds column.
        times = torch.tensor(tuple(zip(*results))[1])
        return cls._format(times, unit="s")

    @classmethod
    def iterations_per_time(cls, fn):
        """Run ``fn`` once (it returns a sample count); report throughput."""
        num_samples, time = Measurement._timeit(fn, number=1)[0]
        iterations_per_second = torch.tensor(num_samples) / torch.tensor(time)
        return cls._format(iterations_per_second, unit="it/s")

    class Timer:
        # Minimal stopwatch context manager over time.perf_counter().
        def __init__(self):
            self._start = None
            self._stop = None

        def __enter__(self):
            self._start = time.perf_counter()

        def __exit__(self, exc_type, exc_val, exc_tb):
            self._stop = time.perf_counter()

        @property
        def delta(self):
            # Raises if the timer was never entered / never exited.
            if self._start is None:
                raise RuntimeError()
            elif self._stop is None:
                raise RuntimeError()
            return self._stop - self._start

    @classmethod
    def _timeit(cls, fn, number):
        # fn(timer) decides which portion of its work runs inside the timer;
        # collect (output, elapsed-seconds) pairs.
        results = []
        for _ in range(number):
            timer = cls.Timer()
            output = fn(timer)
            results.append((output, timer.delta))
        return results

    @classmethod
    def _format(cls, measurements, *, unit):
        measurements = torch.as_tensor(measurements).to(torch.float64).flatten()
        if measurements.numel() == 1:
            # TODO format that into engineering format
            return f"{float(measurements):.3f} {unit}"
        mean, std = Measurement._compute_mean_and_std(measurements)
        # TODO format that into engineering format
        return f"{mean:.3f} ± {std:.3f} {unit}"

    @classmethod
    def _compute_mean_and_std(cls, t):
        # Unbiased std only with more than one sample — presumably to avoid
        # NaN for a single measurement; confirm intent.
        mean = float(t.mean())
        std = float(t.std(0, unbiased=t.numel() > 1))
        return mean, std
def no_split(benchmark, root):
    """Return the benchmark's new-style config without the ``split`` entry.

    For legacy datasets whose constructor takes no split parameter.
    Raises ``KeyError`` if the config has no ``split`` key.
    """
    config = dict(benchmark.new_config)
    config.pop("split")
    return config
def bool_split(name="train"):
    """Build a ``legacy_config_map`` that turns the new ``split`` option into
    the boolean parameter *name* used by many legacy datasets."""
    def legacy_config_map(benchmark, root):
        config = dict(benchmark.new_config)
        is_train = config.pop("split") == "train"
        config[name] = is_train
        return config
    return legacy_config_map
def base_folder(rel_folder=None):
    """Create a ``prepare_legacy_root`` hook that moves everything under the
    root into a sub-folder.

    ``rel_folder`` may be omitted (use the benchmark's name), a fixed string,
    or a callable ``benchmark -> path``.
    """
    # Normalize the argument into a resolver: benchmark -> folder name.
    if rel_folder is None:
        def resolve(benchmark):
            return benchmark.name
    elif callable(rel_folder):
        resolve = rel_folder
    else:
        fixed_name = rel_folder

        def resolve(_benchmark):
            return fixed_name

    def prepare_legacy_root(benchmark, root):
        # Snapshot the contents first so the freshly created target folder
        # is not moved into itself.
        contents = list(root.glob("*"))
        target = root / resolve(benchmark)
        target.mkdir(parents=True)
        for entry in contents:
            shutil.move(str(entry), str(target))
        return target

    return prepare_legacy_root
class JointTransform:
    """Apply the i-th transform to the i-th input and return a tuple.

    Accepts either separate positional inputs or a single sequence of inputs
    (``jt(img, target)`` or ``jt((img, target))``).
    """

    def __init__(self, *transforms):
        self.transforms = transforms

    def __call__(self, *inputs):
        # Unwrap a single sequence argument. BUG FIX: the original tested
        # ``isinstance(inputs, Sequence)``, which is always true because
        # ``inputs`` is the ``*args`` tuple itself — the first ELEMENT must
        # be inspected, otherwise single non-sequence inputs crash.
        if len(inputs) == 1 and isinstance(inputs[0], collections.abc.Sequence):
            inputs = inputs[0]
        if len(inputs) != len(self.transforms):
            raise RuntimeError(
                f"The number of inputs and transforms mismatches: {len(inputs)} != {len(self.transforms)}."
            )
        return tuple(transform(input) for transform, input in zip(self.transforms, inputs))
def caltech101_legacy_config_map(benchmark, root):
    """Legacy caltech101 kwargs: drop ``split`` and request both targets."""
    config = no_split(benchmark, root)
    # The new dataset always returns the category and annotation
    config["target_type"] = ("category", "annotation")
    return config
# Legacy MNIST-family datasets expect their archives under "<ClassName>/raw".
mnist_base_folder = base_folder(lambda benchmark: pathlib.Path(benchmark.legacy_cls.__name__) / "raw")
def mnist_legacy_config_map(benchmark, root):
    """Map the new ``split`` option onto the legacy boolean ``train`` flag."""
    is_train = benchmark.new_config.split == "train"
    return dict(train=is_train)
def emnist_prepare_legacy_root(benchmark, root):
    """Stage EMNIST like MNIST, renaming the archive the legacy class expects."""
    folder = mnist_base_folder(benchmark, root)
    source = folder / "emnist-gzip.zip"
    destination = folder / "gzip.zip"
    shutil.move(str(source), str(destination))
    return folder
def emnist_legacy_config_map(benchmark, root):
    """Extend the MNIST mapping with EMNIST's legacy ``split`` naming."""
    config = mnist_legacy_config_map(benchmark, root)
    image_set = benchmark.new_config.image_set
    config["split"] = image_set.replace("_", "").lower()
    return config
def qmnist_legacy_config_map(benchmark, root):
    """Extend the MNIST mapping with QMNIST-specific options."""
    config = mnist_legacy_config_map(benchmark, root)
    config["what"] = benchmark.new_config.split
    # The new dataset always returns the full label
    config["compat"] = False
    return config
def coco_legacy_config_map(benchmark, root):
    """Build the legacy COCO kwargs: image root and annotation file path."""
    images, _ = benchmark.new_raw_dataset.resources(benchmark.new_config)
    image_root = root / pathlib.Path(images.file_name).stem
    annotation_file = (
        root
        / "annotations"
        / f"{benchmark.variant}_{benchmark.new_config.split}{benchmark.new_config.year}.json"
    )
    return dict(root=str(image_root), annFile=str(annotation_file))
def coco_prepare_legacy_root(benchmark, root):
    """Unpack the image and annotation archives for the legacy COCO classes."""
    images, annotations = benchmark.new_raw_dataset.resources(benchmark.new_config)
    for resource in (images, annotations):
        extract_archive(str(root / resource.file_name))
# Registry of all benchmarkable datasets.
# FIXES: ``legacy_config_map`` is invoked as ``self.legacy_config_map(self, root)``
# (see DatasetBenchmark.legacy_dataset), so the celeba and sbd lambdas must take
# two parameters — previously they took only ``benchmark`` and raised TypeError
# at benchmark time. The qmnist entry now uses the purpose-built
# ``qmnist_legacy_config_map`` (it sets ``what``/``compat``), which was defined
# but never referenced. The caltech101 special-options lambda's parameter is
# renamed ``benchmark`` to reflect what is actually passed in.
DATASET_BENCHMARKS = [
    DatasetBenchmark(
        "caltech101",
        legacy_config_map=caltech101_legacy_config_map,
        prepare_legacy_root=base_folder(),
        legacy_special_options_map=lambda benchmark: dict(
            download=True,
            transform=PILToTensor(),
            target_transform=JointTransform(torch.tensor, torch.tensor),
        ),
    ),
    DatasetBenchmark(
        "caltech256",
        legacy_config_map=no_split,
        prepare_legacy_root=base_folder(),
    ),
    DatasetBenchmark(
        "celeba",
        prepare_legacy_root=base_folder(),
        legacy_config_map=lambda benchmark, root: dict(
            split="valid" if benchmark.new_config.split == "val" else benchmark.new_config.split,
            # The new dataset always returns all annotations
            target_type=("attr", "identity", "bbox", "landmarks"),
        ),
    ),
    DatasetBenchmark(
        "cifar10",
        legacy_config_map=bool_split(),
    ),
    DatasetBenchmark(
        "cifar100",
        legacy_config_map=bool_split(),
    ),
    DatasetBenchmark(
        "emnist",
        prepare_legacy_root=emnist_prepare_legacy_root,
        legacy_config_map=emnist_legacy_config_map,
    ),
    DatasetBenchmark(
        "fashionmnist",
        prepare_legacy_root=mnist_base_folder,
        legacy_config_map=mnist_legacy_config_map,
    ),
    DatasetBenchmark(
        "kmnist",
        prepare_legacy_root=mnist_base_folder,
        legacy_config_map=mnist_legacy_config_map,
    ),
    DatasetBenchmark(
        "mnist",
        prepare_legacy_root=mnist_base_folder,
        legacy_config_map=mnist_legacy_config_map,
    ),
    DatasetBenchmark(
        "qmnist",
        prepare_legacy_root=mnist_base_folder,
        legacy_config_map=qmnist_legacy_config_map,
    ),
    DatasetBenchmark(
        "sbd",
        legacy_cls=legacy_datasets.SBDataset,
        legacy_config_map=lambda benchmark, root: dict(
            image_set=benchmark.new_config.split,
            mode="boundaries" if benchmark.new_config.boundaries else "segmentation",
        ),
        legacy_special_options_map=lambda benchmark: dict(
            download=True,
            transforms=JointTransform(
                PILToTensor(), torch.tensor if benchmark.new_config.boundaries else PILToTensor()
            ),
        ),
    ),
    DatasetBenchmark("voc", legacy_cls=legacy_datasets.VOCDetection),
    DatasetBenchmark("imagenet", legacy_cls=legacy_datasets.ImageNet),
    DatasetBenchmark(
        "coco",
        variant="instances",
        legacy_cls=legacy_datasets.CocoDetection,
        new_config=dict(split="train", annotations="instances"),
        legacy_config_map=coco_legacy_config_map,
        prepare_legacy_root=coco_prepare_legacy_root,
        legacy_special_options_map=lambda benchmark: dict(transform=PILToTensor(), target_transform=None),
    ),
    DatasetBenchmark(
        "coco",
        variant="captions",
        legacy_cls=legacy_datasets.CocoCaptions,
        new_config=dict(split="train", annotations="captions"),
        legacy_config_map=coco_legacy_config_map,
        prepare_legacy_root=coco_prepare_legacy_root,
        legacy_special_options_map=lambda benchmark: dict(transform=PILToTensor(), target_transform=None),
    ),
]
def parse_args(argv=None):
    """Parse command-line options for the benchmark driver.

    :param argv: Optional argument list; ``None`` means use ``sys.argv[1:]``.
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        prog="torchvision.prototype.datasets.benchmark.py",
        description="Utility to benchmark new datasets against their legacy variants.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("name", help="Name of the dataset to benchmark.")
    parser.add_argument(
        "--variant", help="Variant of the dataset. If omitted all available variants will be benchmarked."
    )
    parser.add_argument(
        "-n",
        "--num-starts",
        type=int,
        default=3,
        help="Number of warm and cold starts of each benchmark. Default to 3.",
    )
    parser.add_argument(
        "-N",
        "--num-samples",
        type=int,
        default=10_000,
        help="Maximum number of samples to draw during iteration benchmarks. Defaults to 10_000.",
    )
    parser.add_argument(
        "--nl",
        "--no-legacy",
        dest="legacy",
        action="store_false",
        help="Skip legacy benchmarks.",
    )
    parser.add_argument(
        "--nn",
        "--no-new",
        dest="new",
        action="store_false",
        help="Skip new benchmarks.",
    )
    parser.add_argument(
        "--ns",
        "--no-start",
        dest="start",
        action="store_false",
        help="Skip start benchmarks.",
    )
    parser.add_argument(
        "--ni",
        "--no-iteration",
        dest="iteration",
        action="store_false",
        help="Skip iteration benchmarks.",
    )
    parser.add_argument(
        "-t",
        "--temp-root",
        type=pathlib.Path,
        help=(
            "Root of the temporary legacy root directories. Use this if your system default temporary directory is on "
            "another storage device as the raw data to avoid distortions due to differing I/O stats."
        ),
    )
    parser.add_argument(
        "-j",
        "--num-workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses used to load the data. Setting this to 0 (default) will load all data in the main "
            "process and thus disable multi-processing."
        ),
    )
    # BUG FIX: the original ``argv or sys.argv[1:]`` silently replaced an
    # explicitly-passed empty list with the process arguments. argparse
    # already falls back to sys.argv[1:] when given None.
    return parser.parse_args(argv)
if __name__ == "__main__":
    args = parse_args()
    try:
        main(
            args.name,
            variant=args.variant,
            legacy=args.legacy,
            new=args.new,
            start=args.start,
            iteration=args.iteration,
            num_starts=args.num_starts,
            num_samples=args.num_samples,
            temp_root=args.temp_root,
            num_workers=args.num_workers,
        )
    except Exception as error:
        # Print a concise message instead of a traceback and signal failure
        # via the exit code.
        msg = str(error)
        print(msg or f"Unspecified {type(error)} was raised during execution.", file=sys.stderr)
        sys.exit(1)
|
import time
def insertion_sort(vetor, tempo):
    """In-place insertion sort over ``vetor.lista``.

    Clears ``vetor.ordenado`` while sorting and sets it back when done;
    sleeps ``tempo`` seconds after each insertion (visualization pacing).

    :param vetor: Object with a ``lista`` (mutable list) and ``ordenado`` flag.
    :param tempo: Delay in seconds between insertions.
    :return: The sorted ``vetor.lista``.
    """
    vetor.ordenado = False
    for position in range(1, len(vetor.lista)):
        current = vetor.lista[position]
        scan = position
        # Shift larger elements right until the slot for ``current`` opens.
        while scan > 0 and vetor.lista[scan - 1] > current:
            vetor.lista[scan] = vetor.lista[scan - 1]
            scan -= 1
        vetor.lista[scan] = current
        time.sleep(tempo)
    vetor.ordenado = True
    return vetor.lista
# MIME content types used when selecting a response serialization format.
HTML_RESPONSE = 'text/html'
JSON_RESPONSE = 'application/json'
CSV_RESPONSE = 'text/csv'
import pickle
import os
import tarfile
def get_pickle_file_content(full_path_pickle_file):
    """Load and return the object stored in a pickle file.

    The latin1 encoding keeps pickles written by Python 2 readable.

    :param full_path_pickle_file: Path to the pickle file.
    :return: The unpickled object.
    """
    # ``with`` guarantees the handle is closed even if unpickling raises
    # (the original leaked the handle on error).
    with open(full_path_pickle_file, 'rb') as pickle_file:
        return pickle.load(pickle_file, encoding='latin1')
def main():
    """Inspect the int-sequence pickle: report length statistics and write the
    largest sequence length to a sidecar file.

    NOTE(review): paths are hard-coded under /tmp/embstoredir; the pickle is
    presumably an iterable of (disassembly-int-seq, return-value) pair lists —
    confirm against the producer of the file. Nesting below was reconstructed
    from context (the source view lost its indentation).
    """
    tar_file_dir = "/tmp/embstoredir"
    # NOTE(review): ``file`` shadows the (py2) builtin name.
    file = "full_dataset_att_int_seq.pickle"
    content = get_pickle_file_content(tar_file_dir + '/' + file)
    biggest_length = 0
    len_list = list()
    c = 0
    for i in content:
        for dis, ret in i:
            len_list.append(len(dis))
            # Track the longest sequence within the (50, 30000) window.
            if len(dis) > biggest_length:
                if len(dis) > 50 and len(dis) < 30000:
                    print(f'New length of int_seq: {len(dis)}')
                    biggest_length = len(dis)
            ###debug
            # Dump up to ~10 example entries of length exactly 8.
            if len(dis) == 8:
                print(f'len >{len(dis)} ret >{ret}< disas >{dis}<')
                c += 1
                if c > 10:
                    break
    print(f'Biggest length of int_seq is: {biggest_length}')
    # Histogram of lengths; relies on the sorted order keeping equal
    # lengths adjacent so the running counter is correct.
    len_list_dict = dict()
    c = 0
    counter = 1
    for b in sorted(len_list):
        #print(f'One item in list: >{b}<')
        if b in len_list_dict:
            counter += 1
        else:
            counter = 1
        len_list_dict[b] = counter
        #c += 1
        #if c > 10:
            #break
    print(f'dict: >{len_list_dict}<')
    # Persist the maximum length next to the dataset.
    size_file = open(tar_file_dir + '/full_dataset_att_int_seq_biggest_int_seq_nr.txt','w+')
    size_file.write(str(biggest_length))
    size_file.close()


if __name__ == "__main__":
    main()
|
# Print every three-digit number i with i % k == k - 1 for all k in 2..6
# (equivalently i ≡ 59 (mod 60)) that is also divisible by 7.
# (The original's i % 1 == 0 test is vacuously true and dropped.)
for candidate in range(100, 1000):
    matches_remainders = all(candidate % k == k - 1 for k in range(2, 7))
    if matches_remainders and candidate % 7 == 0:
        print(candidate)
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from torchvision import models
import argparse
import h5py
import json
import os
import progressbar
import torch
import torch.nn as nn
import torchvision.transforms as transforms
# Command-line options: dataset locations, model choice, and extraction sizes.
parser = argparse.ArgumentParser(description="Extract all the features for images in coco")
parser.add_argument("--images", type=str, default='/data/ranjaykrishna/coco')
parser.add_argument("--data", type=str, default='/data/ranjaykrishna/coco/annotations')
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--output", type=str, default='/data/ranjaykrishna/coco/gen')
parser.add_argument("--model", type=str, default='resnet50')
parser.add_argument("--image-size", type=int, default=224)
# 2048 matches the pooled output width of resnet50's final stage.
parser.add_argument("--feature-size", type=int, default=2048)
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
class COCO(Dataset):
    """Dataset of per-annotation image crops from COCO.

    Each item is one annotation: its bounding box, image id, category id,
    and the normalized crop tensor.
    """

    def __init__(self, image_folder, annotations_file):
        self.image_folder = image_folder
        self.image_names = []
        self.categories = []
        self.image_ids = []
        self.boxes = []
        # Standard ImageNet normalization for torchvision backbones.
        self.transform = transforms.Compose([
            transforms.CenterCrop(args.image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ],
                                 std = [ 0.229, 0.224, 0.225 ]),
        ])
        # Now let's parse all the annotations
        annotations = json.load(open(annotations_file))
        id2name = {}
        for image in annotations['images']:
            id2name[image['id']] = image['file_name']
        # One dataset entry per annotation (not per image).
        for annotation in annotations['annotations']:
            self.image_ids.append(annotation['image_id'])
            self.image_names.append(id2name[annotation['image_id']])
            self.boxes.append(annotation['bbox'])
            self.categories.append(annotation['category_id'])

    def __getitem__(self, index):
        """Return (box[4], image_id[1], category[1], crop) for one annotation."""
        image = Image.open(os.path.join(self.image_folder, self.image_names[index])).convert('RGB')
        # COCO bbox is [x, y, width, height]; crop() wants (l, t, r, b).
        box = [int(b) for b in self.boxes[index]]
        crop = image.crop((box[0], box[1], box[0]+box[2], box[1]+box[3]))
        crop = self.transform(crop)
        return torch.IntTensor(box).view(4), torch.IntTensor([self.image_ids[index]]).view(1), torch.IntTensor([self.categories[index]]).view(1), crop

    def __len__(self):
        return len(self.categories)
class Features(nn.Module):
    """Wrap a torchvision ResNet and expose its pooled convolutional
    features (everything up to, but excluding, the final fc layer)."""

    def __init__(self, original_model):
        super(Features, self).__init__()
        self.model = original_model

    def forward(self, x):
        backbone = self.model
        out = backbone.conv1(x)
        out = backbone.bn1(out)
        out = backbone.relu(out)
        out = backbone.maxpool(out)
        for stage in (backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4):
            out = stage(out)
        out = backbone.avgpool(out)
        # Flatten (N, C, 1, 1) -> (N, C).
        return out.view(out.size(0), -1)
# NOTE(review): this script is Python 2 (print statement below) and uses the
# pre-0.4 torch API (Variable, volatile=True); run it under a matching
# environment or port it before reuse.
# Let's get the model
original_model = getattr(models, args.model)(pretrained=True)
model = Features(original_model)
if args.cuda:
    model.cuda()
# Let's iterate over all the images
for split in ['val', 'train']:
    print "Extracting features in %s set" % split
    dataset = COCO(os.path.join(args.images, split + '2014'), os.path.join(args.data, 'instances_' + split + '2014.json'))
    dataloader = DataLoader(dataset, shuffle=False, batch_size=args.batch_size)
    total = len(dataset)
    with h5py.File(os.path.join(args.output, split + '_object_features.hdf5'), 'w') as of:
        # Create the 3 datasets
        dids = of.create_dataset('image_ids', (total, 1), dtype='int64')
        dboxes = of.create_dataset('boxes', (total, 4), dtype='int64')
        dcategories = of.create_dataset('categories', (total, 1), dtype='int64')
        dfeatures = of.create_dataset('features', (total, args.feature_size), dtype='f')
        bar = progressbar.ProgressBar(maxval=len(dataloader)).start()
        # iterate over the batches and extract features
        for progress, (boxes, image_ids, categories, images) in enumerate(dataloader):
            # Rows [begin, finish) of the HDF5 datasets belong to this batch
            # (shuffle=False keeps the order aligned with the dataset).
            N = boxes.size(0)
            begin = progress*args.batch_size
            finish = begin + N
            # Forward the model and get features
            if args.cuda:
                images = images.cuda()
            images = Variable(images, volatile=True)
            features = model(images)
            # Store the values in hdf5
            dfeatures[begin:finish, :] = features.data.type(torch.FloatTensor).numpy()
            dids[begin:finish] = image_ids.numpy()
            dboxes[begin:finish, :] = boxes.numpy()
            dcategories[begin:finish] = categories.numpy()
            bar.update(progress)
        bar.finish()
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by John DiBaggio on 2017-05-25

Rosalind "SUBS": read DNA strings s (line 1) and t (line 2) from the file
named by argv[1] and write all 1-based positions where t occurs as a
substring of s to the file named by argv[2].
"""
__author__ = 'johndibaggio'
import sys
# argv[1] = input data file, argv[2] = output file for match positions.
argv = list(sys.argv)
output_file = open(argv[2], 'w+')
dna_strand_s = ""
dna_strand_t = ""
with open(argv[1]) as f:
    lines = f.readlines()
    if len(lines) >= 2:
        dna_strand_s = lines[0].replace('\n', '')
        dna_strand_t = lines[1].replace('\n', '')
    else:
        print("There was an error with the data file")
def get_next_index(indices, strand_s, strand_t, start):
    """Append to ``indices`` the 1-based position of every occurrence of
    ``strand_t`` in ``strand_s`` at or after ``start`` (overlaps included).

    BUG FIX: the original searched the module-level globals
    ``dna_strand_s``/``dna_strand_t`` instead of its own parameters. The
    recursion is also replaced with a loop to avoid recursion-depth limits
    on strands with many matches.

    :param indices: List mutated in place; positions are appended in order.
    :param strand_s: String to search in.
    :param strand_t: Substring to search for.
    :param start: 0-based index to begin searching from.
    """
    index = strand_s.find(strand_t, start)
    while index > -1:
        indices.append(index + 1)  # Rosalind positions are 1-based
        # Restart one past the match so overlapping occurrences are found.
        index = strand_s.find(strand_t, index + 1)
def get_indices(strand_s, strand_t):
    """Return all 1-based positions of ``strand_t`` within ``strand_s``."""
    found = []
    get_next_index(found, strand_s, strand_t, 0)
    return found
def indices_as_string(indices):
    """Render ``indices`` as one space-separated string (empty for no hits).

    Uses ``str.join`` instead of the original quadratic concatenation plus
    manual trailing-separator trimming.
    """
    return " ".join(str(i) for i in indices)
# Compute, report, and persist the match locations.
locations = indices_as_string(get_indices(dna_strand_s, dna_strand_t))
print("All locations of t as a substring of s:\n" + locations)
output_file.write(locations)
output_file.close()
"""
s1,s2 = open('rosalind_subs.txt').read().split('\r\n')
for i in range(len(s1)):
if s1[i:].startswith(s2):
print i+1,
"""
"""
import re
def find_substrings(t, s):
return [m.start() for m in re.finditer('(?=%s)' % s, t)]
with open('rosalind4.txt') as f:
s, t = f.read().split()
# diff. order since usually we search for string in text
locs = find_substrings(s, t)
print ' '.join(str(x+1) for x in locs)
"""
"""
input_file = 'rosalind_SUBS.txt'
with open(input_file) as file:
dna1 = file.readline().strip()
dna2 = file.readline().strip()
i = dna1.find(dna2)
while i != -1:
print i + 1,
i = dna1.find(dna2, i + 1)
"""
|
__author__ = 'AmmiNi'
import unittest
import HazeInfoParser
class HazeInfoParserTest(unittest.TestCase):
    """Smoke test for HazeInfoParser.

    NOTE(review): asserts against live haze readings fetched at parser
    construction, so the expected values go stale — inherently flaky.
    """

    def test_parser(self):
        """Test may fail due to change in haze reading
        """
        # Constructing the parser presumably fetches current readings.
        parser = HazeInfoParser.HazeInfoParser()
        self.assertEqual(parser.north_data, 62)
        self.assertEqual(parser.south_data, 61)
        self.assertEqual(parser.east_data, 59)
        self.assertEqual(parser.west_data, 61)
        self.assertEqual(parser.central_data, 60)


if __name__ == '__main__':
    unittest.main()
"""
剑指 Offer 42. 连续子数组的最大和
输入一个整型数组,数组中的一个或连续多个整数组成一个子数组。求所有子数组的和的最大值。
要求时间复杂度为O(n)。
"""
"""
思路分析,这个应该就是比较基本的动态规划法,比方说这个例子(空字串应该算几呢,还是说不允许有空字串,我们先假定不允许空字串):
numList = [-2,1,-3,4,-1,2,1,-5,4]
令f(n)表示以第n个数字结尾的子字符串获得最大值字符串起始的位置和结束的位置.
f(1) = 1,1
f(2) = 2,2
f(3) = 2,2
f(4) = 2,4
... ...
"""
def maxSubArray(nums: list) -> int:
    """Return the largest sum of any non-empty contiguous subarray of ``nums``.

    Kadane's algorithm: ``best_ending_here`` is the maximum subarray sum
    ending at the current element; a negative running sum can only hurt, so
    it is discarded. O(n) time and O(1) extra space (the original kept a
    full dp list only to take its max).

    :param nums: Non-empty list of numbers.
    :return: Maximum contiguous-subarray sum.
    :raises ValueError: If ``nums`` is empty (the original raised a less
        informative IndexError).
    """
    if not nums:
        raise ValueError("maxSubArray() requires a non-empty list")
    best = best_ending_here = nums[0]
    for value in nums[1:]:
        # Extend the running subarray only if its sum is still positive.
        best_ending_here = value if best_ending_here < 0 else best_ending_here + value
        best = max(best, best_ending_here)
    return best
if __name__ == '__main__':
    # Expected: 5 + (-4) + 20 = 21
    res = maxSubArray([8,-19,5,-4,20])
    print(res)
# Contains the Board class, which makes heavy use of the Store abstraction.
# Get the journal store
from store import Store
# Double-ended queue for the flooding mechanism
from collections import deque
# Our events declarations
import events
# The way we will handle representing the game board is in two ways -
# a graph of piece spaces which are linked to the adjacent spaces, and a
# two dimensional list which allows direct numerical
# indexing (hopefully shortcuts)
# Note that the convention is for the edge of the board to have left/up/right/down
# values of None (as appropriate, a corner will only have two Nones, for example)
# and the board will be initialized for all self.player values to be Empty but with
# the adjacent spaces linked up correctly.
# A class (using it like a type) to represent a space where no piece has gone
# Could be a string, but this is a little nicer
class Empty:
    """Sentinel type marking a board space no player occupies."""
    pass


class Node:
    """One board intersection: its owner plus its adjacency list."""

    def __init__(self, player=Empty):
        # Owning player, or the Empty sentinel for an unoccupied space.
        self.player = player
        # Filled in by the board builder with (row, col) tuples of neighbors.
        self.adjacent = []
# A class to represent a whole board
# Defining it with a field for size to allow for alteration of the board size later
class Board:
# Initializes a board with Empty nodes
# First dimension is the row, second is the column
def __init__ (self, size):
    # Length of one board edge; the board is size x size.
    self.size = size
    # Journal of game events; orderMoves (defined elsewhere — TODO confirm)
    # resolves the ordering of logged moves.
    self.store = Store(orderMoves)
    # Materialize the board state (self.shortcut) from the journal's log.
    self.buildGame(self.store.log())
# Clears a board
# Reallocates a Store and rebuilds the game
def clear (self):
    # Dropping the old Store discards the journal; rebuilding from the fresh
    # (empty) log yields an all-Empty board.
    self.store = Store(orderMoves)
    self.buildGame(self.store.log())
# Scores a board
# Returns a dict with all of the player's scores
# For now, just counts the stones, and apparently empty classes can act an an index. Weird, but okay
def score (self):
    """Score the board by counting stones per owner.

    :return: dict mapping each owner found on the board (including the
        Empty sentinel for unowned spaces) to its space count.
    """
    scores = {}
    # BUG FIXES: the original iterated ``range(size)`` over an undefined
    # global (must be self.size) and did ``scores[...] += 1`` on missing
    # keys, raising KeyError on the first stone of each player.
    for i in range(self.size):
        for j in range(self.size):
            player = self.shortcut[i][j].player
            scores[player] = scores.get(player, 0) + 1
    return scores
# Sets a space to be owned by a player
def set (self, player, row, col):
    """Assign ownership of the space at (row, col); out-of-range is a no-op."""
    in_bounds = 0 <= row < self.size and 0 <= col < self.size
    if in_bounds:
        self.shortcut[row][col].player = player
    return
# Gets the owner of a space
def get (self, row, col):
    """Return the owner of the space at (row, col), or None if out of range."""
    if not (0 <= row < self.size and 0 <= col < self.size):
        return None
    return self.shortcut[row][col].player
# Returns if a space can be flooded for a given player
def can_flood (self, player, row, col):
    """Return True iff the non-``player`` group containing (row, col) is
    completely enclosed by ``player``'s stones and/or the board edge (i.e.
    it has no Empty liberty), so it could be captured ("flooded").

    Out-of-range coordinates, or a start space already owned by ``player``,
    return False.
    """
    if row >= 0 and row < self.size and col >= 0 and col < self.size:
        # Checks to see if an area can be flooded (is surrounded by nothing but None and the given player)
        # Flooding means removing the pieces from the board, as per the rules of Go
        # Approach is to iteratively flood out from our starting point, using
        # a deque to track items to check and a set to track visited items
        # This is breadth-first.
        # Check on the first entry
        if self.shortcut[row][col].player == player:
            return False
        # Keep track of the Nodes we have yet to visit
        nexts = deque()
        # Keep track of the Nodes we've visited so as not to revisit them
        visited = set()
        # Add ourself to start
        nexts.appendleft((row,col))
        # Also append None to the visited list, which is a really nice way to
        # just pass on the None values
        # NOTE(review): adjacency lists now hold (row, col) tuples, never
        # None, so this None entry looks vestigial — confirm before removal.
        visited.add(None)
        # Note that len(Set) has constant time complexity, as it does with a
        # list
        while len(nexts) != 0:
            (ci, cj) = nexts.pop()
            cur = self.shortcut[ci][cj]
            if cur not in visited:
                visited.add(cur)
                if cur.player != player and cur.player != Empty:
                    # Opposing stone: keep expanding through its neighbors.
                    nexts.extendleft(cur.adjacent)
                elif cur.player == Empty:
                    # Found a liberty — the group cannot be captured.
                    return False
                else: # cur.player == player
                    pass
            else:
                pass
        # If we make it all the way, then we didn't encounter an empty space in
        # a region surrounded by the given player's pieces and the edge
        return True
    else:
        return False
# Floods a space for a player
def flood (self, player, row, col):
    """Capture ("flood") for ``player``: clear every opposing stone in the
    group reachable from (row, col), setting those spaces to Empty.

    NOTE(review): does not itself verify can_flood(); callers are expected
    to check first (see the move-application logic).
    """
    if row >= 0 and row < self.size and col >= 0 and col < self.size:
        # Keep track of the Nodes we have yet to visit
        nexts = deque()
        # Keep track of the Nodes we've visited so as not to revisit them
        visited = set()
        # Add ourself to start
        nexts.appendleft((row,col))
        # Also append None to the visited list, which is a really nice way to
        # just pass on the None values
        visited.add(None)
        # Note that len(Set) has constant time complexity, as it does with a
        # list
        while len(nexts) != 0:
            (ci, cj) = nexts.pop()
            cur = self.shortcut[ci][cj]
            if cur not in visited:
                visited.add(cur)
                if cur.player != player and cur.player != Empty:
                    # Opposing stone: expand through it, then remove it.
                    nexts.extendleft(cur.adjacent)
                    cur.player = Empty
# The buildGame function which builds the game out of an ordered list of
# Events. Updates the shortcut array in place.
# Also returns a value that indicates if the last move (of this log)
# is a duplicate move.
def buildGame (self, evts):
    """Rebuild self.shortcut from an ordered list of (date, evt, args) events.

    Undo events drop the most recent valid move and replay the remainder
    onto a fresh board; move events are validated (in bounds, target cell
    Empty) before being applied. Returns False when the final event in the
    log is invalid/unsupported (meaning no board image should be sent),
    True otherwise.

    Design note: we rebuild from scratch on every undo rather than keeping
    board snapshots. Undoes are common but are a minority of events, and
    snapshotting would allocate a whole board per move — dramatically more
    memory for unclear speed benefit.
    """
    # Reset self.shortcut to an all-Empty board of interlinked Nodes.
    def empty ():
        self.shortcut = [[Node() for _ in range(self.size)]
                         for _ in range(self.size)]
        for i in range(self.size):
            for j in range(self.size):
                adj = self.shortcut[i][j].adjacent
                if j > 0:
                    adj.append((i, j - 1))
                if i > 0:
                    adj.append((i - 1, j))
                if j < self.size - 1:
                    adj.append((i, j + 1))
                if i < self.size - 1:
                    adj.append((i + 1, j))
    # Apply one pre-validated move, then capture any zones it surrounds.
    def makeMove (move):
        name, row, col = move[0], move[1], move[2]
        # The space was already checked to be empty, so just apply it
        self.set(name, row, col)
        # can_flood handles out-of-bounds indices nicely, so the
        # neighbourhood needs no filtering here
        for roff in range(-1, 2):
            for coff in range(-1, 2):
                if self.can_flood(name, row + roff, col + coff):
                    self.flood(name, row + roff, col + coff)
    # Replay a list of already-validated moves (3-tuples) in order.
    def build (evtList):
        for move in evtList:
            makeMove(move)
    empty()
    evtList = []
    sendImage = True
    for i, (_date, evt, args) in enumerate(evts):
        if evt == events.undo and len(evtList) > 0:
            evtList.pop()
            empty()
            build(evtList)
        elif evt == events.move:
            row, col = args[1], args[2]
            # Bounds must be checked BEFORE indexing: previously
            # row/col >= size raised IndexError and negative values
            # silently wrapped to the far edge of the board.
            if (0 <= row < self.size and 0 <= col < self.size and
                    self.shortcut[row][col].player == Empty):
                evtList.append(args)
                makeMove(args)
            elif i == len(evts) - 1:
                # If the last event is invalid, say so
                sendImage = False
            else:
                # Invalid mid-log moves are just ignored
                pass
        elif len(evtList) == 0:
            # e.g. an undo with nothing left to undo
            pass
        else:
            # Something went wrong
            print("Unsupported event in buildGame")
            sendImage = False
    # Report if we should respond
    return sendImage
# Adds an event to the journal and rebuilds the shortcut array
# This is the method that should be used to add events, not get/set
# Returns whether or not a board image should be sent
def addEvent (self, evt):
    """Persist *evt* into the journal, then replay the full log.

    This is the intended entry point for adding events (not get/set).
    Returns buildGame's verdict: whether a board image should be sent.
    """
    self.store.insert(evt)
    full_log = self.store.log()
    return self.buildGame(full_log)
# Orders moves by Telegram-ordered ID
# Arguments are like (date1, evt1, args1), (date2, evt2, args2)
def orderMoves (m1, m2):
    """Return True when event m1 arrived strictly before event m2.

    Each argument is a (date, evt, args) triple; only the leading
    date/ID field participates in the comparison.
    """
    earlier, later = m1[0], m2[0]
    return earlier < later
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.