text stringlengths 8 6.05M |
|---|
from cvs import login
from cvs import create_driver
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
def test_items_shopping_basket():
    """Log in, open the shopping basket, and verify its contents.

    Checks that the basket page renders, that it lists the expected item
    ('Flexitol Heel Balm, 3 OZ'), and that the item quantity can be set to 5.
    """
    driver = create_driver()
    login(driver)
    # Open the basket via the header icon.
    # NOTE: the legacy find_element_by_* helpers were removed in Selenium 4;
    # use the By-locator form instead.
    driver.find_element(By.CLASS_NAME, 'head-basket').click()
    # Wait for the basket page to render before inspecting it (the original
    # queried the header before waiting, which could race the page load).
    wait = WebDriverWait(driver, 10)
    wait.until(ec.presence_of_element_located((By.CLASS_NAME, 'cart__header')))
    # Raises NoSuchElementException if the basket header is missing.
    driver.find_element(By.CSS_SELECTOR, 'h1.cart__header')
    item = driver.find_element(By.CLASS_NAME, 'item__name-heading')
    assert item.text == 'Flexitol Heel Balm, 3 OZ'
    select = Select(driver.find_element(By.CLASS_NAME, 'item__quantity-dropdown'))
    select.select_by_value('5')
|
"""Write a program that prompts for an integer—let’s call it X—and then finds the
sum of X consecutive integers starting at 1."""
value_str = input("Enter a value ")
sum = 0
count = 0
try:
#convert input to int
value_int = int(value_str)
print("Consecutive sums: ")
for i in range(1, value_int + 1):
# for j in range(1, i + 1):
sum += i
print(sum)
# sum = 0
if sum % len(range(1, value_int + 1)) == 0:
print("Total sum: ",sum, end=" ")
else:
print("Sum is not divisible by number")
# print(i)
except ValueError:
print("Invalid input")
|
"""
This file is part of pysofar: A client for interfacing with Sofar Ocean's Spotter API
Contents: Classes used to connect to the Sofar API and return data
Copyright 2019-2022
Sofar Ocean Technologies
Authors: Mike Sosa et al.
"""
from datetime import datetime
from itertools import chain
from multiprocessing.pool import ThreadPool
from typing import Dict, Iterator, List, Tuple
import warnings

from pysofar import SofarConnection
from pysofar.tools import parse_date
from pysofar.wavefleet_exceptions import QueryError, CouldNotRetrieveFile
class SofarApi(SofarConnection):
    """
    Class for interfacing with the Sofar Wavefleet API
    """
    def __init__(self, custom_token=None):
        # Authenticate with either the caller-supplied token or the default
        # resolved by SofarConnection, then cache this account's device list.
        if custom_token is not None:
            super().__init__(custom_token)
        else:
            super().__init__()
        self.devices = []      # raw device dicts from the /devices endpoint
        self.device_ids = []   # 'spotterId' values extracted from self.devices
        self._sync()

    # ---------------------------------- Simple Device Endpoints -------------------------------------- #
    def get_device_location_data(self):
        """
        :return: The most recent locations of all Spotters belonging to this account
        """
        return self._device_radius()

    # ---------------------------------- Single Spotter Endpoints -------------------------------------- #
    def grab_datafile(self, spotter_id: str, start_date: str, end_date: str):
        """
        Request generation of a data file for one Spotter and download it into
        the current working directory.

        :param spotter_id: The string id of the Spotter
        :param start_date: ISO8601 formatted start date of the data
        :param end_date: ISO8601 formatted end date of the data
        :return: None if not completed, else the status of the file download
        :raises QueryError: if the file-generation request is rejected
        :raises CouldNotRetrieveFile: if the file is not ready for download yet
        """
        # TODO: If the generation of the file isn't instantaneous, will fail
        # TODO : Look into async.io for potential solution?
        import urllib.request
        import shutil
        # QUERY to request the file
        body = {
            "spotterId": spotter_id,
            "startDate": start_date,
            "endDate": end_date
        }
        scode, response = self._post("history", body)
        if scode != 200:
            raise QueryError(f"{response['message']}")
        file_id = response['data']['fileId']
        # QUERY to download the requested file
        scode, response = self._get(f"datafile/{file_id}")
        status = response['fileStatus']
        file_url = response['fileUrl']
        if status != "complete":
            raise CouldNotRetrieveFile(f"File creation not yet complete. Try {file_url} in a little bit")
        # downloading the file
        file_name = f"{spotter_id}_{start_date}_{end_date}"
        with urllib.request.urlopen(file_url) as response, open(file_name, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
        return f"{file_name} downloaded successfully"

    def get_latest_data(self, spotter_id: str,
                        include_wind_data: bool = False,
                        include_directional_moments: bool = False,
                        include_barometer_data: bool = False,
                        include_partition_data: bool = False,
                        include_surface_temp_data: bool = False
                        ):
        """
        :param spotter_id: The string id of the Spotter
        :param include_wind_data: Defaults to False. Set to True if you want the latest data to include wind data
        :param include_directional_moments: Defaults to False. Only applies if the Spotter is in 'full_waves' mode.
                                            Set to True if you want the latest data to include directional moments
        :param include_barometer_data: Defaults to False. Only applies to barometer-equipped Spotters.
        :param include_partition_data: Defaults to False. Only applies to Spotters in Waves:Partition mode.
        :param include_surface_temp_data: Defaults to False. Only applies to SST sensor-equipped Spotters.
        :return: The latest data values based on the given parameters from the requested Spotter
        :raises QueryError: if the request does not return status 200
        """
        # The API expects each optional flag as the literal string 'true'.
        params = {'spotterId': spotter_id}
        if include_directional_moments:
            params['includeDirectionalMoments'] = 'true'
        if include_wind_data:
            params['includeWindData'] = 'true'
        if include_barometer_data:
            params['includeBarometerData'] = 'true'
        if include_partition_data:
            params['includePartitionData'] = 'true'
        if include_surface_temp_data:
            params['includeSurfaceTempData'] = 'true'
        scode, results = self._get('/latest-data', params=params)
        if scode != 200:
            raise QueryError(results['message'])
        data = results['data']
        return data

    def get_sensor_data(self, spotter_id: str, start_date: str, end_date: str):
        """
        :param spotter_id: The string id of the Spotter
        :param start_date: ISO8601 formatted start date of the data
        :param end_date: ISO8601 formatted end date of the data
        :return: Data as a json from the requested Spotter
        :raises QueryError: if the request does not return status 200
        """
        params = {
            "spotterId": spotter_id,
            "startDate": start_date,
            "endDate": end_date
        }
        scode, results = self._get('/sensor-data', params=params)
        if scode != 200:
            raise QueryError(results['message'])
        data = results['data']
        return data

    def update_spotter_name(self, spotter_id, new_spotter_name):
        """
        Update the name of a Spotter
        :param spotter_id: The string id of the Spotter whose name you want to change
        :param new_spotter_name: The new name to give to the requested Spotter
        :return: The new name if the query succeeds else throws an error
        :raises QueryError: if the rename request does not return status 200
        """
        body = {
            "spotterId": spotter_id,
            "name": new_spotter_name
        }
        # request name update
        scode, response = self._post("change-name", body)
        message = response['message']
        if scode != 200:
            raise QueryError(f"{message}")
        print(f"{spotter_id} updated with name: {response['data']['name']}")
        return new_spotter_name

    # ---------------------------------- Multi Spotter Endpoints -------------------------------------- #
    def get_wave_data(self, start_date: str = None, end_date: str = None, params: dict = None):
        """
        Get all wave data for related Spotters
        :param start_date: ISO8601 start date of data period
        :param end_date: ISO8601 end date of data period
        :param params: dict of additional query parameters to write beyond default values
        :return: Wave data as a list
        """
        return self._get_all_data(['waves'], start_date, end_date, params)

    def get_wind_data(self, start_date: str = None, end_date: str = None, params: dict = None):
        """
        Get all wind data for related Spotters
        :param start_date: ISO8601 start date of data period
        :param end_date: ISO8601 end date of data period
        :param params: dict of additional query parameters to write beyond default values
        :return: Wind data as a list
        """
        return self._get_all_data(['wind'], start_date, end_date, params)

    def get_frequency_data(self, start_date: str = None, end_date: str = None, params: dict = None):
        """
        Get all Frequency data for related Spotters
        :param start_date: ISO8601 start date of data period
        :param end_date: ISO8601 end date of data period
        :param params: dict of additional query parameters to write beyond default values
        :return: Frequency data as a list
        """
        return self._get_all_data(['frequency'], start_date, end_date, params)

    def get_track_data(self, start_date: str = None, end_date: str = None, params: dict = None):
        """
        Get all track data for related Spotters
        :param start_date: ISO8601 start date of data period
        :param end_date: ISO8601 end date of data period
        :param params: dict of additional query parameters to write beyond default values
        :return: track data as a list
        """
        return self._get_all_data(['track'], start_date, end_date, params)

    def get_all_data(self, start_date: str = None, end_date: str = None, params: dict = None):
        """
        Get all data for related Spotters
        :param start_date: ISO8601 start date of data period
        :param end_date: ISO8601 end date of data period
        :param params: dict of additional query parameters to write beyond default values
        :return: Data as a list
        """
        return self._get_all_data(['waves', 'wind', 'frequency', 'track'], start_date, end_date, params)

    # Convenience wrapper around the module-level helper, reusing this
    # already-authenticated instance.
    def get_spotters(self): return get_and_update_spotters(_api=self)

    def search(self, shape:str, shape_params:List[Tuple], start_date:str, end_date:str,
               radius=None, page_size=100,return_generator=False):
        """
        Spatio-temporal search of the wavefleet.

        :param shape: 'circle' or 'envelope'
        :param shape_params: list of coordinate tuples; the circle centre, or
            the envelope corner points (NOTE(review): coordinate ordering is
            not visible from this file -- confirm against the API docs)
        :param start_date: ISO8601 start of the search window
        :param end_date: ISO8601 end of the search window
        :param radius: circle radius; required when shape == 'circle'
        :param page_size: results per page, capped at 500
        :param return_generator: if True, return a lazy generator over the
            matching items; otherwise return a fully materialized list
        :raises TypeError: if shape is not 'circle' or 'envelope'
        :raises ValueError: if shape is 'circle' and radius is omitted
        """
        if shape not in ('circle','envelope'):
            raise TypeError('Shape needs to be one of type Circle or Envelope')
        if page_size > 500:
            warnings.warn('Maximum page size is 500')
            page_size=500
        if shape == 'circle' and radius is None:
            raise ValueError('Radius needs to be set when shape is circle')
        # flatten the coordinate tuples into one flat list of values
        if shape == 'envelope':
            vertices = []
            for point in shape_params:
                vertices += point
        elif shape == 'circle':
            vertices = shape_params
        params = {
            'shape':shape,
            # convert list to a comma seperated string of values. Requests does not
            # like iterators as argument.
            'shapeParams':','.join([str(x) for x in vertices]),
            'startDate':start_date,
            'endDate':end_date,
            'pageSize':page_size,
            'radius':radius
        }
        def get_function(endpoint_suffix,params ):
            # Thin wrapper so unpaginate can re-issue GETs page by page.
            scode, data = self._get(endpoint_suffix, params=params)
            if scode != 200:
                raise QueryError(data['message'])
            return data
        if return_generator:
            return unpaginate(get_function,'search',params)
        else:
            return list(unpaginate(get_function,'search',params))

    # ---------------------------------- Helper Functions -------------------------------------- #
    @property
    def token(self):
        # The API token currently used for authentication.
        return self._token

    @token.setter
    def token(self, value):
        # Try the new token by re-syncing the device list; on auth failure,
        # report and fall back to the previous token.
        temp = self.token
        self.set_token(value)
        try:
            self._sync()
        except QueryError:
            print('Authentication failed. Please check the key')
            print('Reverting to old key')
            self.set_token(temp)

    def _sync(self):
        # Refresh the cached device list and derived id list from the API.
        self.devices = self._devices()
        self.device_ids = [device['spotterId'] for device in self.devices]

    def _devices(self):
        # Helper function to access the devices endpoint
        scode, data = self._get('/devices')
        if scode != 200:
            raise QueryError(data['message'])
        _spotters = data['data']['devices']
        return _spotters

    def _device_radius(self):
        # helper function to access the device radius endpoint
        status_code, data = self._get('device-radius')
        if status_code != 200:
            raise QueryError(data['message'])
        spot_data = data['data']['devices']
        return spot_data

    def _get_all_data(self, worker_names: list, start_date: str = None, end_date: str = None, params: dict = None):
        # helper function to return another function used for grabbing all data from Spotters in a period
        def helper(_name):
            _ids = self.device_ids
            # default to bound values if not included
            st = start_date or '2000-01-01T00:00:00.000Z'
            # NOTE(review): the default end is a datetime while st stays a
            # string; both appear to be normalized later via parse_date in
            # WaveDataQuery -- confirm they remain comparable.
            end = end_date or datetime.utcnow()
            _wrker = worker_wrapper((_name, _ids, st, end, params))
            return _wrker
        # processing the data_types in parallel
        pool = ThreadPool(processes=len(worker_names))
        all_data = pool.map(helper, worker_names)
        pool.close()
        # map each requested data type to the list its worker returned
        all_data = {name: l for name, l in zip(worker_names, all_data)}
        # if len(all_data) > 0:
        #     all_data.sort(key=lambda x: x['timestamp'])
        return all_data
class WaveDataQuery(SofarConnection):
    """
    General Query class

    Accumulates the parameter set for the 'wave-data' endpoint. The API
    expects every include_* flag as the literal string 'true' or 'false',
    which is why booleans are lowercased on the way in.
    """
    # Sentinel distinguishing "argument omitted" from an explicit None.
    _MISSING = object()

    def __init__(self, spotter_id: str, limit: int = 20, start_date=_MISSING, end_date=_MISSING, params=None):
        """
        Query the Sofar API for Spotter data

        :param spotter_id: String id of the Spotter to query for
        :param limit: The limit of data to query. Defaults to 20, max of 100 for frequency data, max of 500 otherwise
        :param start_date: ISO8601 formatted string for start date, otherwise if not included, defaults to
                           a date arbitrarily far back to include all Spotter data
        :param end_date: ISO8601 formatted string for end date, otherwise if not included defaults to present
        :param params: Defaults to None. Parameters to overwrite/add to the default query parameter set
        """
        super().__init__()
        self.spotter_id = spotter_id
        self._limit = limit
        if start_date is self._MISSING or start_date is None:
            self.start_date = None
        else:
            self.start_date = parse_date(start_date)
        if end_date is self._MISSING or end_date is None:
            self.end_date = None
        else:
            self.end_date = parse_date(end_date)
        # Default flag set: only wave data is requested until toggled.
        self._params = {
            'spotterId': spotter_id,
            'limit': limit,
            'includeWaves': 'true',
            'includeWindData': 'false',
            'includeTrack': 'false',
            'includeFrequencyData': 'false',
            'includeDirectionalMoments': 'false',
            'includeSurfaceTempData': 'false',
            'includeSpikes': 'false',
            'includeNonObs': 'false',
            'includeMicrophoneData': 'false',
            'includeBarometerData': 'false'
        }
        if params is not None:
            self._params.update(params)
        if self.start_date is not None:
            self._params.update({'startDate': self.start_date})
        if self.end_date is not None:
            self._params.update({'endDate': self.end_date})

    def execute(self):
        """
        Calls the api wave-data endpoint and if successful returns the queried data with the set query parameters
        :return: Data as a dictionary
        :raises QueryError: if the request does not return status 200
        """
        scode, data = self._get('wave-data', params=self._params)
        if scode != 200:
            raise QueryError(data['message'])
        return data['data']

    def limit(self, value: int):
        """
        Sets the limit on how many query results to return
        Defaults to 20
        Max of 500 if tracking or waves-standard
        Max of 100 if frequency data is included
        """
        self._limit = value
        self._params.update({'limit': value})

    def barometer(self, include: bool):
        """
        :param include: True if you want the query to include barometer data
        """
        self._params.update({'includeBarometerData': str(include).lower()})

    def microphone(self, include: bool):
        """
        :param include: True if you want the query to include microphone data
        """
        self._params.update({'includeMicrophoneData': str(include).lower()})

    def waves(self, include: bool):
        """
        :param include: True if you want the query to include waves
        """
        self._params.update({'includeWaves': str(include).lower()})

    def wind(self, include: bool):
        """
        :param include: True if you want the query to include wind data
        """
        self._params.update({'includeWindData': str(include).lower()})

    def track(self, include: bool):
        """
        :param include: True if you want the query to include tracking data
        """
        self._params.update({'includeTrack': str(include).lower()})

    def frequency(self, include: bool):
        """
        :param include: True if you want the query to include frequency data
        """
        self._params.update({'includeFrequencyData': str(include).lower()})

    def directional_moments(self, include: bool):
        """
        :param include: True if you want the query to include directional moment data
        """
        # BUG FIX: the flag is stored as the string 'true'/'false'; both are
        # truthy, so the original `not self._params['includeFrequencyData']`
        # never fired. Compare against the literal 'true' instead.
        if include and self._params['includeFrequencyData'] != 'true':
            print("""Warning: You have currently selected the query to include directional moment data however
                  frequency data is not currently included. \n
                  Directional moment data only applies if the Spotter is in full waves/waves spectrum mode. \n
                  Since the query does not include frequency data (of which directional moments are a subset)
                  the data you have requested will not be included. \n
                  Please set includeFrequencyData to true with .frequency(True) if desired. \n""")
        self._params.update({'includeDirectionalMoments': str(include).lower()})

    def surface_temp(self, include: bool):
        """
        :param include: True if you want the query to include surface temp data
        """
        self._params.update({'includeSurfaceTempData': str(include).lower()})

    def spikes(self, include: bool):
        """
        :param include: True if you want the query to include data points exceeding our spike filter
        """
        self._params.update({'includeSpikes': str(include).lower()})

    def smooth_wave_data(self, include: bool):
        """
        :param include: True if you want the query to smooth wave data
        """
        self._params.update({'smoothWaveData': str(include).lower()})

    def smooth_sg_window(self, value: int):
        """
        :param value: Window size of the SG smoothing filter. Must be odd positive int.
        """
        self._params.update({'smoothSGWindow': value})

    def smooth_sg_order(self, value: int):
        """
        :param value: Polynomial order of SG smoothing filter. Positive int > 0.
        """
        self._params.update({'smoothSGOrder': value})

    def interpolate_utc(self, include: bool):
        """
        :param include: True if you want the query to interpolate data to UTC hours time base.
        """
        self._params.update({'interpolateUTC': str(include).lower()})

    def interpolate_period_seconds(self, value: int):
        """
        :param value: Period in seconds of samples after smoothing and/or interpolation.
        """
        self._params.update({'interpolatePeriodSeconds': value})

    def set_start_date(self, new_date: str):
        """Set/replace the query's start date (parsed via parse_date)."""
        self.start_date = parse_date(new_date)
        self._params.update({'startDate': self.start_date})

    def clear_start_date(self):
        """Remove any start-date bound from the query."""
        self.start_date = None
        if 'startDate' in self._params:
            del self._params['startDate']

    def set_end_date(self, new_date: str):
        """Set/replace the query's end date (parsed via parse_date)."""
        self.end_date = parse_date(new_date)
        self._params.update({'endDate': self.end_date})

    def clear_end_date(self):
        """Remove any end-date bound from the query."""
        # BUG FIX: also reset the attribute, mirroring clear_start_date;
        # previously only the request parameter was removed, leaving
        # self.end_date stale.
        self.end_date = None
        if 'endDate' in self._params:
            del self._params['endDate']

    def __str__(self):
        s = f"Query for {self.spotter_id} \n" + \
            f"  Start: {self.start_date or 'From Beginning'} \n" + \
            f"  End: {self.end_date or 'Til Present'} \n" + \
            "  Params:\n" + \
            f"    id: {self._params['spotterId']}\n" + \
            f"    limit: {self._params['limit']} \n" + \
            f"    waves: {self._params['includeWaves']} \n" + \
            f"    wind: {self._params['includeWindData']} \n" + \
            f"    barometer: {self._params['includeBarometerData']} \n" + \
            f"    sst: {self._params['includeSurfaceTempData']} \n" + \
            f"    microphone: {self._params['includeMicrophoneData']} \n" + \
            f"    track: {self._params['includeTrack']} \n" + \
            f"    frequency: {self._params['includeFrequencyData']} \n" + \
            f"    directional_moments: {self._params['includeDirectionalMoments']} \n"
        return s
# ---------------------------------- Util Functions -------------------------------------- #
def get_and_update_spotters(_api=None):
    """
    Build the list of Spotter objects for this account.

    :param _api: optional pre-authenticated SofarApi instance; a fresh one is
        created (and synced) when omitted
    :return: A list of the Spotter objects associated with this account
    """
    api = _api or SofarApi()
    # Update every Spotter concurrently; each worker shares the api handle.
    pool = ThreadPool(processes=16)
    spotters = pool.map(lambda device: _spot_worker(device, api), api.devices)
    pool.close()
    return spotters
# ---------------------------------- Workers -------------------------------------- #
def _spot_worker(device: dict, api: SofarApi):
    """
    Worker to grab Spotter data
    :param device: Dictionary containing the Spotter id and name
    :param api: shared SofarApi handle used by every worker
    :return: Spotter object updated from the Sofar api with its latest data values
    """
    # Imported here rather than at module level (presumably to avoid a
    # circular import with pysofar.spotter).
    from pysofar.spotter import Spotter
    spotter = Spotter(device['spotterId'], device['name'], api)
    spotter.update()
    return spotter
def worker_wrapper(args):
    """
    Wrapper for creating workers to grab lots of data.

    :param args: Tuple of (worker_type, _ids, st_date, end_date, params) where
        worker_type: str (ex. 'wind', 'waves', 'frequency', 'track')
        _ids: list of str, which are the Spotter ids
        st_date: str, iso 8601 formatted start date of period to query
        end_date: str, iso 8601 formatted end date of period to query
        params: dict, query parameters to set
    :return: All data for that type for all Spotters in the queried period
    """
    worker_type, spotter_ids, window_start, window_end, params = args
    queries = [
        WaveDataQuery(sid, limit=500, start_date=window_start,
                      end_date=window_end, params=params)
        for sid in spotter_ids
    ]
    # Run one query pipeline per Spotter concurrently.
    pool = ThreadPool(processes=16)
    per_spotter = pool.map(_worker(worker_type), queries)
    pool.close()
    # Flatten the per-Spotter result lists and sort chronologically.
    flattened = list(chain.from_iterable(per_spotter))
    if flattened:
        flattened.sort(key=lambda record: record['timestamp'])
    return flattened
def _worker(data_type):
    """
    Worker to grab data from certain data type for a specific query
    :param data_type: The desired data type
    :return: A helper function able to process a query for that specific data type
    """
    def _helper(data_query):
        # Query window. Both ends were normalized by parse_date in
        # WaveDataQuery; assumed mutually comparable here -- confirm.
        st = data_query.start_date
        end = data_query.end_date
        # setup the query: drop the default waves flag, then enable only the
        # flag matching data_type (e.g. .wind(True); for 'waves' this simply
        # re-enables it).
        data_query.waves(False)
        getattr(data_query, data_type)(True)
        # Map data_type to the response key its records appear under and
        # switch on any extra flags that type requires.
        if data_type == 'frequency':
            dkey = 'frequencyData'
            data_query.directional_moments(True)
        elif data_type == 'surface_temp':
            dkey = 'surfaceTemp'
            data_query.surface_temp(True)
        elif data_type == 'barometer':
            dkey = 'barometerData'
            data_query.barometer(True)
        elif data_type == 'microphone':
            dkey = 'microphoneData'
            data_query.microphone(True)
        else:
            # 'waves', 'wind' and 'track' use the data_type itself as the key
            dkey = data_type
        query_data = []
        # Page through the window: each pass fetches up to `limit` records,
        # then advances the start date to the last returned timestamp.
        while st < end:
            _query = data_query.execute()
            results = _query[dkey]
            # tag each record with the Spotter it came from
            for dt in results:
                dt.update({'spotterId': _query['spotterId']})
            query_data.extend(results)
            # break if no results are returned
            if len(results) == 0:
                break
            st = results[-1]['timestamp']
            data_query.set_start_date(st)
            # break if start and end dates are the same to avoid potential infinite loop for samples
            # at end time
            if st == end:
                break
        # here query data is a list of dictionaries
        return query_data
    return _helper
def unpaginate(get_function, endpoint_suffix, params) -> Iterator[dict]:
    """
    Generator that yields every item from a paginated endpoint.

    Note:
        It is a little ugly now with the removing of the endpoint prefix so that
        the _get function can append it again. Right now it looks like the paginated
        server returns http instead of https in the url so this may actually be a
        good thing.

    :param get_function: the _get function that takes an endpoint suffix and params as arguments
    :param endpoint_suffix: endpoint to hit from the Sofar Api
    :param params: dict of additional query parameters to write beyond default values
    :return: generator over the individual items of every page
        (the original annotation claimed Dict, but this is a generator)
    """
    suffix = endpoint_suffix
    while True:
        page = get_function(suffix, params)
        yield from page['data']
        if not page['metadata']['page']['hasMoreData']:
            break
        url = page['metadata']['page']['nextPage']
        # here we remove the prefix, but keep everything else in the url
        # returned by wavefleet.
        suffix = endpoint_suffix + url.split(endpoint_suffix)[1]
        # parameters are no longer needed as these are already encoded
        # in the given url.
        params = None
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-14 17:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter SnoopedElection: make 'date' nullable, widen 'detail_url' to 800 chars."""

    dependencies = [("election_snooper", "0001_initial")]

    operations = [
        # Allow rows without a known election date.
        migrations.AlterField(
            model_name="snoopedelection",
            name="date",
            field=models.DateField(null=True),
        ),
        # URLField with blank allowed and a generous max_length for long
        # detail-page URLs.
        migrations.AlterField(
            model_name="snoopedelection",
            name="detail_url",
            field=models.URLField(blank=True, max_length=800),
        ),
    ]
|
import pandas as pd
import pymysql as db
import logging
import sys
import os
import numpy as np
import sqlalchemy
from datetime import datetime
import json
import streamlit as st
import pandas as pd
def ssh_auto(for_accounts,cred_path):
    """Open an SSH tunnel for every distinct (server port, local port) pair.

    :param for_accounts: DataFrame with 'port' (remote) and 'local_port' columns
    :param cred_path: path to a CSV credentials file

    NOTE(review): authentication relies on ``ldap_user`` and ``ldap_pass``,
    which are not defined in this function or visible in this file -- they
    must exist as module globals before calling; confirm.
    """
    server_port = []
    localhost_port = []
    cred = pd.read_csv(cred_path)  # NOTE(review): loaded but not used below -- confirm intent
    #print(cred)
    # Collect the remote/local port pair requested by each account row.
    for index, row in for_accounts.iterrows():
        server_port.append(row['port'])
        localhost_port.append(row['local_port'])
    server_localhost = {"Server Port": server_port,
                        "LocalHost Port": localhost_port}
    server_localhost_df = pd.DataFrame(server_localhost)
    # Only one tunnel per unique port pair.
    server_localhost_df.drop_duplicates(keep="first", inplace=True)
    server_localhost_df = server_localhost_df.astype({"Server Port": int, "LocalHost Port": int})
    from sshtunnel import SSHTunnelForwarder
    for index, row in server_localhost_df.iterrows():
        # Forward 0.0.0.0:<local port> -> 127.0.0.1:<server port> on the remote host.
        server = SSHTunnelForwarder('172.27.128.59', ssh_username=ldap_user, ssh_password=ldap_pass,
                                    remote_bind_address=('127.0.0.1', int(row["Server Port"])),
                                    local_bind_address=('0.0.0.0', int(row["LocalHost Port"])))
        print(f"Destination Server Port {row['Server Port']} and Source Port {row['LocalHost Port']} in execution")
        print(f" Establishing Connection with Destination Server Port {row['Server Port']} and Source Port {row['LocalHost Port']} in execution")
        # Tunnels are started and left running; handles are not retained, so
        # they cannot be stopped individually later.
        server.start()
        print("*********Putty SSH Connection Done************")
    st.success("Connection Extablished Successfully")
|
import pytest
from spectree.response import Response
from spectree.spec import SpecTree
from spectree.utils import (
has_model,
parse_code,
parse_comments,
parse_name,
parse_params,
parse_request,
parse_resp,
)
from .common import DemoModel, DemoQuery, get_model_path_key
# Shared SpecTree instance used to decorate the fixture endpoints below.
api = SpecTree()


# NOTE: the docstrings of these fixtures are test data -- the parse_comments
# tests below assert on their exact summary/description content.
def undecorated_func():
    """summary
    description"""


@api.validate(json=DemoModel, resp=Response(HTTP_200=DemoModel))
def demo_func():
    """
    summary
    description"""


@api.validate(query=DemoQuery)
def demo_func_with_query():
    """
    a summary
    a description
    """


class DemoClass:
    @api.validate(query=DemoModel)
    def demo_method(self):
        """summary
        description
        """


# Instance used by the method-based test cases below.
demo_class = DemoClass()
@pytest.mark.parametrize(
    "docstring, expected_summary, expected_description",
    [
        pytest.param(None, None, None, id="no-docstring"),
        pytest.param("", "", None, id="empty-docstring"),
        pytest.param(" ", "", None, id="all-whitespace-docstring"),
        pytest.param("summary", "summary", None, id="single-line-docstring"),
        pytest.param(
            " summary ", "summary", None, id="single-line-docstring-with-whitespace"
        ),
        pytest.param(
            "summary first line\nsummary second line",
            "summary first line summary second line",
            None,
            id="multi-line-docstring-without-empty-line",
        ),
        pytest.param(
            " summary first line \n summary second line ",
            "summary first line summary second line",
            None,
            id="multi-line-docstring-without-empty-line-whitespace",
        ),
        pytest.param(
            "summary\n\ndescription",
            "summary",
            "description",
            id="multi-line-docstring-with-empty-line",
        ),
        pytest.param(
            " summary \n\n description ",
            "summary",
            "description",
            id="multi-line-docstring-with-empty-line-whitespace",
        ),
        pytest.param(
            "summary\n\t \ndescription",
            "summary",
            "description",
            id="multi-line-docstring-with-whitespace-line",
        ),
        pytest.param(
            "summary\n \n \n \n \n \ndescription",
            "summary",
            "description",
            id="multi-line-docstring-with-multiple-whitespace-lines",
        ),
        pytest.param(
            "summary first line\nsummary second line\nsummary third line"
            "\n\t \n"
            "description first line\ndescription second line\ndescription third line",
            "summary first line summary second line summary third line",
            "description first line description second line description third line",
            id="large-multi-line-docstring-with-whitespace-line",
        ),
        pytest.param(
            "summary first line\nsummary second line\ftruncated part",
            "summary first line summary second line",
            None,
            id="multi-line-docstring-without-empty-line-and-truncation-char",
        ),
        pytest.param(
            "summary first line\nsummary second line\nsummary third line"
            "\n\t \n"
            "description first line\ndescription second line\ndescription third line"
            "\ftruncated part",
            "summary first line summary second line summary third line",
            "description first line description second line description third line",
            id="large-multi-line-docstring-with-whitespace-line-and-truncation-char",
        ),
        pytest.param(
            "summary first line\nsummary second line\n"
            "\t \n"
            "description first line \ndescription second line\n"
            "\t \n"
            "description second paragraph \n"
            "\n \n \n"
            "description third paragraph\ndescription third paragraph second line",
            "summary first line summary second line",
            "description first line description second line"
            "\n\n"
            "description second paragraph"
            "\n\n"
            "description third paragraph description third paragraph second line",
            id="large-multi-line-docstring-with-multiple-paragraphs",
        ),
        pytest.param(
            "\tcode block while indented\n"
            "\t\n"
            "\tdescription first paragraph\n"
            "\t\n"
            "\t\tcode block\n"
            "\t\n"
            "\tdescription third paragraph\n",
            "code block while indented",
            "description first paragraph"
            "\n\n"
            " code block"
            "\n\n"
            "description third paragraph",
            id="multi-line-docstring-with-code-block",
        ),
    ],
)
def test_parse_comments(docstring, expected_summary, expected_description):
    """parse_comments splits a docstring into a (summary, description) pair."""
    # Attach the parametrized docstring to a fresh function so no shared
    # fixture is mutated between cases.
    def func():
        pass

    func.__doc__ = docstring
    assert parse_comments(func) == (expected_summary, expected_description)
@pytest.mark.parametrize(
    "func, expected_summary, expected_description",
    [
        pytest.param(lambda x: x, None, None, id="lambda"),
        pytest.param(
            undecorated_func, "summary", "description", id="undecorated-function"
        ),
        pytest.param(demo_func, "summary", "description", id="decorated-function"),
        pytest.param(
            demo_class.demo_method, "summary", "description", id="class-method"
        ),
    ],
)
def test_parse_comments_with_different_callable_types(
    func, expected_summary, expected_description
):
    """parse_comments works for lambdas, plain/decorated functions, and bound methods."""
    assert parse_comments(func) == (expected_summary, expected_description)
def test_parse_code():
    """parse_code extracts the numeric status code from 'HTTP_xxx' strings."""
    # Non-string input is rejected outright.
    with pytest.raises(TypeError):
        assert parse_code(200) == 200
    for raw, expected in [("200", ""), ("HTTP_404", "404")]:
        assert parse_code(raw) == expected
def test_parse_name():
    """parse_name returns the __name__ of every supported callable kind."""
    cases = [
        (lambda x: x, "<lambda>"),
        (undecorated_func, "undecorated_func"),
        (demo_func, "demo_func"),
        (demo_class.demo_method, "demo_method"),
    ]
    for fn, expected in cases:
        assert parse_name(fn) == expected
def test_has_model():
    """has_model is true only for endpoints decorated with validation models."""
    assert has_model(demo_func)
    assert has_model(demo_class.demo_method)
    assert not has_model(undecorated_func)
def test_parse_resp():
    """parse_resp exposes the 422 validation-error and 200 response schemas."""
    # An undecorated endpoint declares no responses at all.
    assert parse_resp(undecorated_func) == {}

    spec = parse_resp(demo_func)
    validation_ref = get_model_path_key("spectree.models.ValidationError")
    model_ref = get_model_path_key("tests.common.DemoModel")

    entity = spec["422"]
    assert entity["description"] == "Unprocessable Entity"
    assert (
        entity["content"]["application/json"]["schema"]["$ref"]
        == f"#/components/schemas/{validation_ref}"
    )
    assert (
        spec["200"]["content"]["application/json"]["schema"]["$ref"]
        == f"#/components/schemas/{model_ref}"
    )
def test_parse_request():
    """parse_request exposes the JSON body schema of a decorated endpoint."""
    expected_ref = f"#/components/schemas/{get_model_path_key('tests.common.DemoModel')}"
    body = parse_request(demo_func)
    assert body["content"]["application/json"]["schema"]["$ref"] == expected_ref
    # demo_method validates a query model, not a JSON body -> empty request.
    assert parse_request(demo_class.demo_method) == {}
def test_parse_params():
    """parse_params turns a query model into OpenAPI parameter objects."""
    models = {
        get_model_path_key("tests.common.DemoModel"): DemoModel.schema(
            ref_template="#/components/schemas/{model}"
        )
    }
    # demo_func validates a JSON body only -- no query parameters.
    assert parse_params(demo_func, [], models) == []

    params = parse_params(demo_class.demo_method, [], models)
    assert len(params) == 3
    expected_first = {
        "name": "uid",
        "in": "query",
        "required": True,
        "description": "",
        "schema": {"title": "Uid", "type": "integer"},
    }
    assert params[0] == expected_first
    assert params[2]["description"] == "user name"
def test_parse_params_with_route_param_keywords():
    """parse_params keeps OpenAPI keywords (style/explode) from the query model
    while passing unknown keys through inside the schema."""
    models = {
        get_model_path_key("tests.common.DemoQuery"): DemoQuery.schema(
            ref_template="#/components/schemas/{model}"
        )
    }
    params = parse_params(demo_func_with_query, [], models)
    assert params == [
        {
            "name": "names1",
            "in": "query",
            "required": True,
            "description": "",
            "schema": {"title": "Names1", "type": "array", "items": {"type": "string"}},
        },
        {
            "name": "names2",
            "in": "query",
            "required": True,
            "description": "",
            "schema": {
                "title": "Names2",
                "type": "array",
                "items": {"type": "string"},
                # non-keyword entries stay inside the schema untouched
                "non_keyword": "dummy",
            },
            # recognized OpenAPI keywords are lifted to the parameter level
            "style": "matrix",
            "explode": True,
        },
    ]
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# __author__ = caicaizhang
# 迭代器
class yield_im(object):
    """Iterator yielding the integers start, start+1, ..., stop-1.

    A hand-rolled demonstration of the iterator protocol
    (``__iter__``/``__next__``).
    """

    def __init__(self, start=0, stop=0):
        # ref blog:
        # http://www.codebelief.com/article/2017/02/python-advanced-programming-generator/
        # A generator works like an iterator and can be viewed as one kind of
        # iterator. Note that a generator supports only a single pass: once
        # exhausted it produces nothing more, and a fresh generator must be
        # created to iterate again.
        self.start = start
        self.stop = stop

    def __iter__(self):
        # The object is its own iterator (single-pass, like a generator).
        return self

    def __next__(self):
        if self.start >= self.stop:
            raise StopIteration
        current = self.start
        self.start += 1
        return current
# generator version of the same counter
def zrange(start=0, stop=0):
    """Yield the integers start, start+1, ..., stop-1 (generator form)."""
    current = start
    while current < stop:
        yield current
        current += 1
    # no explicit StopIteration needed: returning ends the generator
# Demonstrate the generator: prints 'yield num is:0' through 'yield num is:8'.
for num in zrange(0, 9):
    print('yield num is:{}'.format(num))
# y = yield_im(1,9)
# print('y.__doc__ is.{}'.format(y.__doc__()))
#
# for i in range(8):
#     print('{} time next(y) is:{}'.format(i, next(y)))
# ============================================
# NOTE(review): this CRAB configuration expects datasetName, datasetPath and
# prodTag to be defined before this point; the sample assignments below are
# commented out, so running the file as-is raises NameError. Confirm how the
# values are injected (e.g. by a driver script that prepends them).
#datasetName = "RelValTTbar"
#datasetPath = "/RelValTTbar_13/CMSSW_7_3_0-MCRUN2_73_V7-v1/MINIAODSIM"
#prodTag = "crabTest"
import os
# Outputs are collected under the submitting user's /store area.
userName=os.getlogin()
outputPath="/store/user/"+userName+"/FlatTrees/"+prodTag+"/"
# ============================================
from WMCore.Configuration import Configuration as crabTemplate
config = crabTemplate()
config.section_('General')
# =====================
config.General.transferOutputs = True
config.General.workArea = prodTag
config.General.requestName = datasetName
config.section_('JobType')
# =====================
config.JobType.psetName = './common/flatTreeProducer_cfg.py'
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['./common/conf.xml']
config.JobType.outputFiles = ['FlatTree.root']
#config.JobType.pyCfgParams = ['isData=0']
config.section_('Data')
# =====================
config.Data.totalUnits = 10000
config.Data.unitsPerJob = 2
config.Data.splitting = 'FileBased'
config.Data.inputDataset = datasetPath
config.Data.outLFNDirBase = outputPath
config.Data.publication = False
config.Data.publishDataName = ''
config.Data.publishDBS = 'https://cmsweb.cern.ch/dbs/prod/phys03/DBSWriter'
config.section_('User')
# =====================
config.section_('Site')
# =====================
config.Site.storageSite = 'T2_FR_IPHC'
# Count the decimal digits of a non-negative integer read from stdin
# (prints 0 for input 0, matching the original behaviour).
a = int(input())
cnt = 0
while a > 0:
    cnt = cnt + 1
    # Bug fix: the original used int(a/10), which routes through a float
    # and loses precision for very large ints; floor division is exact.
    a = a // 10
print(cnt)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Prepare an r.txt file with the words to index, one line per page.
# Each line holds two or more words separated by '|', e.g. ab|ac|ad.
# Run `python pyprint.py`; the result is written to index.txt.
# Builds an Apple Dictionary Service XML file (one <d:entry> per word,
# cross-linked to previous/next pages and a page image).
#for mac osx
# NOTE: this is a Python 2 script (print statements / `print >> f`).
f = open('index.txt','w')
r = open('r.txt')
count = 1
# Number of pages = number of lines in r.txt.
line = len(open('r.txt').readlines())
print >> f,'<?xml version="1.0" encoding="UTF-8"?>\n<d:dictionary xmlns="http://www.w3.org/1999/xhtml" xmlns:d="http://www.apple.com/DTDs/DictionaryService-1.0.rng">'
# Templates: entry header, word span, previous/next navigation, page image, entry footer.
hw = '<d:entry id="%s" d:title="%s">\n <d:index d:value="%s"/>\n<html><body><link href="DefaultStyle.css" rel="stylesheet" type="text/css"/>'
word = '<span class="word"><a class="swd" href="x-dictionary:r:%s"</a></span>'
left = '<span class="left"><a class="slf" href="x-dictionary:r:%s">previous</a></span>'
right = '<span class="right"><a class="srt" href="x-dictionary:r:%s">next</a></span>'
img = '<img class="img" src="images/%d.jpg"/>'
ew = '</body></html>\n</d:entry>'
while (count <= line):
    rd = r.readline()
    rw = rd.strip('\n')
    # One entry per word on the page; every word shares the page image.
    for i in rw.split('|'):
        print i.strip('\n')
        print >> f,hw % (i,i,i), word % (count),left % (count-1),right % (i), img % (count),ew
    count = count + 1
print >> f,'</d:dictionary>'
f.close()
import pickle
# Paths to the pickled ground-truth label/note dictionaries and samples.
TRUE_LABEL_DICT_dir = '../SYN-IG/new-SYN10wLabelDict.pickle'
TRUE_NOTE_DICT_dir = '../SYN-IG/new-SYN10wNoteDict.pickle'
SAMPLES_dir = '../SYN-IG/new-SYN10w.pickle'
# Score-range boundaries for the initial stratification (half-open bins).
#INITIAL_STRATIFICATION = [(0, 0.20), (0.20, 0.40), (0.40, 0.60), (0.60, 0.80), (0.80, 1.01)]
INITIAL_STRATIFICATION = [(0, 0.10), (0.10, 0.20), (0.20, 0.30), (0.30, 0.40), (0.40, 0.50), (0.50, 0.60), (0.60, 0.70), (0.70, 0.80), (0.80, 0.90), (0.90, 1.01)]
#INITIAL_STRATIFICATION = [(0, 0.20), (0.20, 0.40), (0.40, 0.60), (0.60, 0.80), (0.80, 1.01)]
#INITIAL_STRATIFICATION = [(0.90, 0.95), (0.95, 0.96), (0.96, 0.97), (0.97, 0.98), (0.98, 0.99), (0.99, 0.995), (0.995, 1.01)]
# 20 strata
#INITIAL_STRATIFICATION = [(0.90, 0.95), (0.95, 0.955), (0.955, 0.96), (0.96, 0.965), (0.965, 0.97), (0.97, 0.975), (0.975, 0.98), (0.98, 0.985), (0.985, 0.99), (0.99, 0.991), (0.991, 0.992), (0.992, 0.993), (0.993, 0.994), (0.994, 0.995), (0.995, 0.996), (0.996, 0.997), (0.997, 0.998), (0.998, 0.999), (0.999, 1), (1, 1.01)]
P1 = 0.5
P2 = 0.5
#TRUE_LABEL_DICT = {}
#with open('../NELL/label_dict.pickle', 'rb') as f:
# Load the pickled ground truth and samples at import time.
with open(TRUE_LABEL_DICT_dir, 'rb') as f:
    TRUE_LABEL_DICT = pickle.load(f)
with open(TRUE_NOTE_DICT_dir, 'rb') as f:
    TRUE_NOTE_DICT = pickle.load(f)
with open(SAMPLES_dir, 'rb') as f:
    samples = pickle.load(f)
# Algorithm hyper-parameters (semantics defined by the consuming module).
A1, A2, A3, A4 = 5.7, 11, 10, 1.2
EPSILON1, EPSILON2 = 0.05, 0.05
ALPHA1, ALPHA2 = 0.05, 0.05
# Type of MCTS termination condition ('time' here).
THRE_TYPE = 'time'
# Upper bound on total MCTS wall-clock time.
TIME_THRE = 1
# Upper bound on total number of MCTS simulations.
SIMU_THRE = 2000
# The exploration constant C used in UCB.
C = 6
# Bug fix: the original passed 8 as a second argument to print(), which
# printed the literal "%d" followed by "8"; %-formatting needs the %
# operator to substitute the value.
print("The number of students involved in are %d" % 8)
print("This Github is very nice")
|
# Copy the contents of input.txt into output.txt.
# Bug fix: the original had `f.close` without parentheses — a no-op that
# never ran; it was also redundant, since `with` already closes the file.
with open("input.txt", "r") as f:
    s = f.read()
with open("output.txt", "w") as f:
    f.write(s)
|
# Schedule a Windows shutdown at a user-supplied HH:MM time.
import os, time
t=input(u'请输入几点几分关机(格式 05:59) : ')
h_off, m_off = t.strip().split(":")
h_off, m_off = int(h_off), int(m_off)
t = time.localtime()
h, m = t.tm_hour, t.tm_min
print(f'当前时间:{h}:{m}')
# Seconds until the target time today; if that moment has already
# passed, roll over to the same time tomorrow.
# Bug fix: the original only added a day when the target HOUR was
# earlier than now, so a target in the current hour but already past
# (h == h_off, m_off < m) produced a negative delay.
time_shut = (h_off * 3600 + m_off * 60) - (h * 3600 + m * 60)
if time_shut < 0:
    time_shut += 24 * 3600
print(time_shut)
os.system(f'shutdown -s -t {time_shut}')
print(f'关机计划完成,系统将在{time_shut}秒后关机....')
|
# Compute and print the average of the ages.
# Bug fix: the original did `sum += x` without initialising a local,
# which hits the BUILTIN `sum` function and raises TypeError; using a
# dedicated accumulator also avoids shadowing the builtin.
ages = [19, 20, 18]
total = 0
for x in ages:
    total += x
average = total / len(ages)
print(average)
|
import os
from flask import Flask
from database import db
import config
from dotenv import load_dotenv
from authentication import authentication_bp
from views.algorithms import algorithms_bp
from views.general import general_bp
# Pull environment variables from a local .env file before reading them.
load_dotenv()
# creating app
app = Flask(__name__)
# APP_SETTINGS names the config object to load (e.g. a class in config.py).
app.config.from_object(os.environ['APP_SETTINGS'])
# Disable the SQLAlchemy event system to save memory.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
# registering blueprints
app.register_blueprint(authentication_bp)
app.register_blueprint(algorithms_bp)
app.register_blueprint(general_bp)
# Home route
@app.route('/')
def hello():
    return "hello"
# Main
if __name__ == '__main__':
    app.run()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore
from datetime import datetime, date, time, timedelta
import calendar
class App(QtGui.QWidget):
    """Small PyQt4 demo window: three labels, and a button that prints
    the number of days until Mexican Independence Day eve."""
    def __init__(self):
        super(App, self).__init__()
        self.initUI()
    def initUI(self):
        # Add the labels with the historical figures' names
        p1 = QtGui.QLabel("Miguel Hidalgo y Costilla (1753-1811)", self)
        p1.move(15,10)
        p2 = QtGui.QLabel("Ignacio Allende (1769-1811)", self)
        p2.move(15,40)
        p3 = QtGui.QLabel("Jose Maria Morelos (1765-1815)", self)
        p3.move(15,70)
        # Create the button (old-style signal/slot connection)
        boton = QtGui.QPushButton("Aprietame", self)
        boton.move(150,150)
        self.connect(boton, QtCore.SIGNAL("clicked()"), self.fecha)
        # Configure the main window attributes
        self.setGeometry(300, 300, 350, 250)
        self.setWindowTitle('Viva Mexico')
        self.setWindowIcon(QtGui.QIcon('bandera.jpg'))
        self.show()
    # Compute the number of days remaining until the target date
    def fecha(self):
        hoy = date.today()
        # NOTE(review): the target date is hard-coded to 2017-09-15, which
        # is in the past — faltan.days goes negative after that date.
        prox = date(2017, 9, 15)
        faltan = prox - hoy
        print("Faltan", faltan.days, "dias")
def main():
    """Create the Qt application, show the window, and enter the event loop."""
    qt_app = QtGui.QApplication(sys.argv)
    window = App()  # keep a reference so the widget is not garbage collected
    sys.exit(qt_app.exec_())


if __name__ == '__main__':
    main()
import os
from config import DEBUG, LOCAL_DATABASE_URL, DATABASE_URL, USE_LOCAL_VARIABLES
# Absolute path of the directory containing this file.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Base Flask configuration, populated from the project config module."""
    # Echo the project-wide DEBUG flag.
    DEBUG = DEBUG
    TESTING = False
    CSRF_ENABLED = True
    # Choose the local or remote database depending on USE_LOCAL_VARIABLES.
    SQLALCHEMY_DATABASE_URI = LOCAL_DATABASE_URL if USE_LOCAL_VARIABLES else DATABASE_URL
    # Disable the SQLAlchemy event system to save memory.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
|
#!/usr/local/bin/python
# coding: UTF-8
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream, API
from hyphen import Hyphenator, dict_info
from threading import Thread
from time import sleep
import os
import csv
import sys
import re
import string
import json
import random
# Global buffer: the stream listener appends raw tweet JSON strings here.
TWEETS = []
########
# Enter your twitter credentials here
CONS_KEY = os.environ.get('AH_CONS_KEY') # consumer key
CONS_SECRET = os.environ.get('AH_CONS_SECRET') # consumer secret
AXS_TKN = os.environ.get('AH_AXS_TKN') # access token
AXS_TKN_SECRET = os.environ.get('AH_AXS_TKN_SECRET') # access token secret
# NOTE(review): TWEET_FILE is defined but not referenced in this module.
TWEET_FILE = 'data.txt'
#########
class listenr(StreamListener):
    """ A basic tweepy stream listener: it buffers each raw JSON payload
    received from the stream into the module-level TWEETS list and
    prints any error status. (Python 2 module.)
    """
    def on_data(self, data):
        # Buffer the raw payload for later processing; returning True
        # keeps the stream open.
        # print data
        TWEETS.append(data)
        return True
    def on_error(self, status):
        print status
class HyphenatorDictionary(object):
    """
    This class wraps and builds our dictionary
    of words. It provides simple methods for us
    to retrieve relevant information from the dictionary
    (the hyphenated version if it exists)
    """
    def __init__(self):
        # word -> syllable count
        self._syl_dict = {}
    def load_dict(self, filename):
        """
        Loads a syllable dictionary that is separated by "-" or by "bullets"
        """
        with open(filename) as f: #open our hyphenator dictionary
            ls = csv.reader(f)
            for _string in ls:
                # NOTE(review): .decode() assumes Python 2 byte strings here
                # (the rest of this module is Python 2 as well).
                word, syllables = self._split_word(_string[0].decode('UTF-8'))
                self.add_word(word, syllables)
    def add_word(self, word, syllables):
        """
        adds a word to the in memory dictionary, this is not permanent
        """
        # First definition wins; later duplicates are ignored.
        if word not in self._syl_dict:
            self._syl_dict[word] = syllables
    def _split_word(self, unsplit_word):
        """This will take an unsplit word and
        return the word and number of syllables
        from our hyphenation dictionary"""
        if u"•" in unsplit_word and "-" not in unsplit_word:
            splitted_word = unsplit_word.split(u"•")
            return u"".join(splitted_word), len(splitted_word)
        elif u"•" not in unsplit_word and "-" in unsplit_word:
            splitted_word = unsplit_word.split(u"-")
            return u"".join(splitted_word), len(splitted_word)
        else:
            # Mixed or absent separators: treat as a single syllable.
            # (Bug fix: the original had an unreachable
            # `unsplit_word.split()` statement after this return;
            # removed as dead code.)
            return unsplit_word, 1
    def syllables(self, word):
        """
        Searches for the word in our dictionary, if it's there returns number
        of syllables, if not returns 0
        """
        if word in self._syl_dict:
            return self._syl_dict[word]
        else:
            return 0
class HyphenatorAlgorithm(object):
    """
    This is a small wrapper on the Hyphenator method from our Hyphen import.
    Conforms to the same return type as the HyphenatorDictionary class
    """
    def __init__(self):
        """
        Initialize the class with an English (US) hyphenator.
        """
        self._hyphenator = Hyphenator('en_US')
    def syllables(self, word):
        """
        Calculates the number of syllables, if it tries to return 0 it returns 1.
        All words should count as a syllable
        """
        # NOTE: `unicode` is the Python 2 builtin; this module is Python 2.
        syll = self._hyphenator.syllables(unicode(word))
        length = len(syll)
        if length != 0:
            return length
        else:
            # Hyphenator found no break points: count the word as one syllable.
            return 1
class Evaluator(object):
    """
    Our Evaluator allows us to evaluate an individual word or a string of words
    to get the syllable count. It does this by first looking in the dictionary,
    if it finds nothing it uses the hyphenator algorithm to try and get a response
    """
    def __init__(self):
        self.h_dict = HyphenatorDictionary()
        self.h_algo = HyphenatorAlgorithm()
        self.h_dict.load_dict('dictionary files/mhyph-utf8.txt')
    def evaluate_word(self, word):
        """
        Evaluate word takes in one word and returns a tuple of
        the syllable count from our dictionary and our algorithm
        """
        # print word, self.h_dict.syllables(word), self.h_algo.syllables(word)
        return self.h_dict.syllables(word), self.h_algo.syllables(word)
    def strip_retweet(self, _string):
        """
        If a tweet starts with RT, it's a retweet. We want to strips the beginning
        returns original string minus RT :
        """
        if _string.startswith("RT"):
            # Drop everything up to and including the first ':'.
            _string = _string[_string.index(":")+1:]
        return _string
    def clean_word(self, word):
        """
        Cleans a word by:
            - Stripping symbols
            - converting ascii
            - stripping new line formatting
        We currently do the following but
        should figure out a better way to handle it
            - removing user mentions
            - removing links
        """
        regex = re.compile('[%s]' % re.escape(string.punctuation))
        # NOTE(review): Python 2 semantics — encode() keeps a byte string;
        # on Python 3 this would need rework.
        word = word.encode('ascii', 'ignore')
        word = regex.sub('', word)
        if word.startswith("@"):
            return ""
        elif word.startswith("http"):
            return ""
        else:
            return word
    def clean_string(self, _string):
        """
        Cleans up our strings by stripping retweets and cleaning each word in the tweet
        Returns a list of (word, dict_count, algo_count, best_count) tuples,
        where best_count prefers the dictionary value when it is non-zero.
        """
        word_value_tuples = []
        raw_word_list = self.strip_retweet(_string).replace('\n',' ').replace('-',' ').split(" ")
        # Drop empties and absurdly long tokens.
        raw_word_list = [word for word in raw_word_list if word != '' and len(word) < 100]
        for word in raw_word_list:
            w = self.clean_word(word)
            if w:
                dic, alg = self.evaluate_word(unicode(w.lower()))
                if dic != 0:
                    word_value_tuples.append((w, dic, alg, dic))
                else:
                    word_value_tuples.append((w, dic, alg, alg))
        return word_value_tuples
    def check_user_mentions(self, tweet, _string):
        """
        Checks whether or not there is a user mention.
        returns True or False (False when any mentioned screen name
        survives into the cleaned string)
        """
        mntns = tweet['entities']['user_mentions']
        for mtn in mntns:
            if self.clean_word(mtn['screen_name']) in _string:
                return False
        return True
    def evaluate_string(self, _string):
        """
        Evaluates a string by splitting it then passing along the unicode to
        the evaluate word method. It then returns a tuple of the mix, the dictionary's
        calculation and our algorithm's calculation for further comparison
        """
        word_val_list = self.clean_string(_string)
        mixed = [x[3] for x in word_val_list]
        # Scan for a 5-7-5 syllable partition (haiku structure), recording
        # the word index at which each phrase ends.
        temp = 0
        break_1 = 0
        break_2 = 0
        break_3 = 0
        for count, val in enumerate(mixed):
            temp += val
            if temp == 5 and break_1 == 0:
                temp = 0
                break_1 = count
            elif temp == 7 and break_1 != 0 and break_2 == 0:
                temp = 0
                break_2 = count
            elif temp == 5 and break_1 != 0 and break_2 != 0 and break_3 == 0:
                break_3 = count
                break
        breaks = (break_1, break_2, break_3)
        # All three breaks found (non-zero) => the text scans as a haiku.
        if break_1 and break_2 and break_3:
            return True, breaks, word_val_list
        else:
            return False, breaks, word_val_list
class TwitterWrap(object):
    """Thin wrapper around the tweepy API client used for posting haikus."""
    def __init__(self):
        self.auth = OAuthHandler(CONS_KEY, CONS_SECRET)
        self.auth.set_access_token(AXS_TKN, AXS_TKN_SECRET)
        self.twitter = API(self.auth)
    def tweet_length_check(self, user_name, tweet_id, haiku):
        """
        Makes sure our tweet length is short enough for twitter
        NOTE(review): despite the name, no length check is performed here —
        the method only formats the tweet text; confirm intent.
        """
        p1, p2, p3 = haiku
        tweet = "A #haiku: https://twitter.com/%s/status/%s\n\n%s\n%s\n%s" % (user_name, tweet_id, p1, p2, p3)
        return tweet
    def tweet(self, _string):
        """Updates the status of the twitter user, then sleeps for a random
        amount of time to avoid getting blocked by the API"""
        sleeptime1 = random.random()
        sleeptime2 = random.randint(0,50)
        print "Sleeping for " + str(sleeptime1 + sleeptime2)
        sleep(sleeptime1 + sleeptime2)
        self.twitter.update_status("%s" % (_string))
    def debug_tweet(self, tweet, to_tweet, word_val_list):
        """Prints out the information about a tweet"""
        # Fixed-width columns: word, dictionary count, algorithm count, best.
        template = "{0:30}{1:5}{2:5}{3:5}"
        print "ORIGINAL TWEET::"
        print tweet['text']
        print "BY::"
        print tweet['user']['screen_name']
        print "PARSED VERSION::"
        print to_tweet
        print "length %i" % len(to_tweet)
        print "Number of syllables in each word in the tweet..."
        print template.format(" ", ' DIC', ' ALG', ' Best')
        for count, val in enumerate(word_val_list):
            print template.format(*val)
def post_to_twitter(tweets, count=200):
    """Takes in a list of tweets then posts to twitter the ones that are good tweets
    count limits how many haikus are posted; -1 means no explicit cap
    (bounded by the input list instead).
    """
    evaluator = Evaluator()
    tw = TwitterWrap()
    # Each element of `tweets` is a raw JSON string from the stream.
    tweets = [json.loads(tweet) for tweet in tweets]
    print "%i tweets processed" % (len(tweets))
    if count == -1:
        count = len(tweets)
    for tweet in tweets:
        print tweet
        haiku, breaks, word_val_list = evaluator.evaluate_string(tweet['text'])
        # print tweet['entities']
        # Only English tweets that scan as 5-7-5 are considered.
        if haiku and tweet['lang'] == 'en':
            words = [_x[0] for _x in word_val_list]
            # Rebuild the three phrases from the break indices.
            p1 = " ".join(words[:breaks[0]+1])
            p2 = " ".join(words[breaks[0]+1:breaks[1]+1])
            p3 = " ".join(words[breaks[1]+1:breaks[2]+1])
            # print evaluator.check_user_mentions(tweet, p1+" "+p2+" "+p3)
            # Skip haikus that still contain a mentioned screen name.
            if evaluator.check_user_mentions(tweet, p1+" "+p2+" "+p3):
                to_tweet = tw.tweet_length_check(tweet['user']['screen_name'], tweet['id'], (p1,p2,p3))
                tw.debug_tweet(tweet, to_tweet, word_val_list)
                tw.tweet(to_tweet)
                if count == 0:
                    break
                count -= 1
def print_to_std_out(tweets, count=200):
    """Takes in a list of tweets then prints to stdout the ones
    that qualify as tweets
    Dry-run twin of post_to_twitter: identical filtering, but only
    debug-prints instead of posting.
    """
    evaluator = Evaluator()
    tw = TwitterWrap()
    tweets = [json.loads(tweet) for tweet in tweets]
    print "%i tweets processed" % (len(tweets))
    if count == -1:
        count = len(tweets)
    for tweet in tweets:
        haiku, breaks, word_val_list = evaluator.evaluate_string(tweet['text'])
        # print tweet['entities']
        if haiku and tweet['lang'] == 'en':
            words = [_x[0] for _x in word_val_list]
            # Rebuild the three phrases from the break indices.
            p1 = " ".join(words[:breaks[0]+1])
            p2 = " ".join(words[breaks[0]+1:breaks[1]+1])
            p3 = " ".join(words[breaks[1]+1:breaks[2]+1])
            # print evaluator.check_user_mentions(tweet, p1+" "+p2+" "+p3)
            if evaluator.check_user_mentions(tweet, p1+" "+p2+" "+p3):
                to_tweet = tw.tweet_length_check(tweet['user']['screen_name'], tweet['id'], (p1,p2,p3))
                tw.debug_tweet(tweet, to_tweet, word_val_list)
                if count == 0:
                    break
                count -= 1
# Module-level stream wiring: the listener feeds the global TWEETS list.
auth = OAuthHandler(CONS_KEY, CONS_SECRET)
auth.set_access_token(AXS_TKN, AXS_TKN_SECRET)
stream = Stream(auth, listenr())
def download_tweets():
    # We have to create the above in order for this to know what to do with stream,
    # this is a side effect and probably could be done better.
    # Blocks forever, filtering the public stream by these keywords.
    stream.filter(track=['World Cup', 'Brazil', 'WorldCup'])
def main():
    """Collect tweets on a background thread for ~30 s, then dry-run the
    haiku filter against whatever was gathered."""
    print "Creating Thread to Download Tweets..."
    th = Thread(target=download_tweets)
    # Daemon thread: the stream dies with the main process.
    th.daemon = True
    th.start()
    print "Thread created, now sleeping for 30 seconds..."
    sleep(15)
    print "15 seconds is up, waiting another 15 seconds..."
    sleep(15)
    print_to_std_out(TWEETS)
if __name__ == '__main__':
    # print sys.argv
    # for parsing command line args
    main()
|
from collections import deque
class MaximumIndependentSet:
    # Computes a maximum independent set of a bipartite graph via a
    # maximum matching (Hopcroft-Karp) plus Koenig's theorem: the set is
    # the complement of a minimum vertex cover.
    def __init__(self, bipartite_graph : dict):
        # bipartite_graph maps each left-side vertex (U) to an iterable
        # of its right-side neighbours (V).
        self.bipartite_graph = bipartite_graph
    def compute(self):
        # Hopcroft-Karp gets the maximal matching of the bipartite graph in O(E sqrt(V))
        self.hopcrofKarp()
        # Using Koenigs theorem we can retreive the minimum vertex cover in O(V)
        self.koenig()
        # Every vertex not in the minimum cover belongs to the set.
        self.max_independent_set = set()
        for vertex in self.U:
            if vertex not in self.min_vertex_cover:
                self.max_independent_set.add(vertex)
        for vertex in self.V:
            if vertex not in self.min_vertex_cover:
                self.max_independent_set.add(vertex)
    # Python implementation of Hopcroft-Karp Algorithm from https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm
    def hopcrofKarp(self):
        flatten = lambda x: [item for sublist in x for item in sublist]
        # NOTE(review): self.U is a dict view here (Python 3); it is only
        # iterated, never indexed, so that works — confirm no callers index it.
        self.U = self.bipartite_graph.keys()
        self.V = set(flatten(self.bipartite_graph.values()))
        # pairs: vertex -> matched partner (None when unmatched);
        # dist: BFS layer numbers used to find shortest augmenting paths.
        self.pairs = {}
        self.dist = {}
        self.Q = deque()
        # Initialize pairs and dist
        for u in self.U:
            self.pairs[u] = None
            self.dist[u] = float('inf')
        for v in self.V:
            self.pairs[v] = None
            self.dist[v] = float('inf')
        matching = 0
        # Repeat phases of BFS layering + DFS augmentation until no
        # augmenting path exists.
        while self.bfs():
            for u in self.U:
                if self.pairs[u] is None:
                    if self.dfs(u):
                        matching = matching + 1
        return matching
    # Used in HopcroftKarp
    def bfs(self):
        # Layer the graph starting from all unmatched left vertices.
        for u in self.U:
            if self.pairs[u] == None:
                self.dist[u] = 0
                self.Q.append(u)
            else:
                self.dist[u] = float('inf')
        # dist[None] acts as the sentinel layer of unmatched right vertices.
        self.dist[None] = float('inf')
        while len(self.Q) > 0:
            u = self.Q.popleft()
            if self.dist[u] < self.dist[None]:
                for v in self.bipartite_graph[u]:
                    if self.dist[self.pairs[v]] == float('inf'):
                        self.dist[self.pairs[v]] = self.dist[u] + 1
                        self.Q.append(self.pairs[v])
        # True when at least one augmenting path was found.
        return self.dist[None] != float('inf')
    # Used in HopcroftKarp
    def dfs(self, u):
        # Follow the BFS layering to find and flip one augmenting path.
        if u is not None:
            for v in self.bipartite_graph[u]:
                if self.dist[self.pairs[v]] == self.dist[u] + 1:
                    if self.dfs(self.pairs[v]) == True:
                        self.pairs[v] = u
                        self.pairs[u] = v
                        return True
            self.dist[u] = float('inf')
            return False
        return True
    # Use Koenigs theorem to get the minimum vertex cover of the bipartite_graph
    def koenig(self):
        # Alternating paths from unmatched left vertices: the cover is
        # (U - reached) union (V intersect reached).
        unmatched_left = set()
        alternating_connected = set()
        bfs = []
        for vertex in self.U:
            if self.pairs[vertex] is None:
                unmatched_left.add(vertex)
                for nbr in self.bipartite_graph[vertex]:
                    bfs.append((nbr, 0))
        while len(bfs) != 0:
            cur_vertex, edge_type = bfs.pop()
            alternating_connected.add(cur_vertex)
            # Added via an unmatched edge therefore need matched
            if edge_type == 0:
                bfs.append((self.pairs[cur_vertex], 1))
            # Otherwise added via a matched edge so add all except the edge we came from and any visited already
            else:
                for nbr in self.bipartite_graph[cur_vertex]:
                    # NOTE(review): `self.pairs[cur_vertex] != cur_vertex`
                    # looks like it was meant to compare against `nbr`
                    # (exclude the matched edge back) — confirm.
                    if nbr not in alternating_connected and self.pairs[cur_vertex] != cur_vertex:
                        bfs.append((nbr, 0))
        alternating_connected = alternating_connected.union(unmatched_left)
        self.min_vertex_cover = (set(self.U) - alternating_connected).union(set(self.V).intersection(alternating_connected))
import pandas as pd
import matplotlib.pyplot as plt
# Load the air-quality CSV into a DataFrame.
# NOTE(review): the hard-coded Windows path mixes \ and / and is not a raw
# string — it works here only because \c is not a recognised escape.
airquality_df=pd.read_csv('E:\csvdhf5xlsxurlallfiles/airquality.csv')
|
## ENTIRE RUN FILE FOR THE PUNE PROTOTYPE
# A __future__ import must be the first statement in the module; the
# original placed it after other imports/assignments, which is a
# SyntaxError.
from __future__ import division

__author__ = 'hassaankhan'

import datetime
import logging
import time

import numpy as np
import pandas as pd
import shapefile
from dateutil.relativedelta import relativedelta

from pynsim import Network
from pynsim import Simulator
from pune_components.nodes.network_nodes import Pune_Reservoir, FarmAgent, Junction, WTP, WWTP
from pune_components.links.links import River, Canal, Pipeline
from pune_components.institutions.institutions import MKVDC, PMC, WUA, WRD, AllNodesOfType
import pune_engines

log = logging.getLogger(__name__)
##############################################
# Load model setup, reservoir, farmer and crop data from Excel workbooks.
# NOTE(review): the non-raw Windows path contains "\U...", which is only
# valid on Python 2 (this file is Python 2 — see the print statement at
# the bottom); on Python 3 it would be a unicode-escape SyntaxError.
model_inputs_xlsx = pd.ExcelFile("C:\Users\hfkhan\Desktop\PycharmProjects\Pune\data\model_setup.xlsx")
simulation_inputs = model_inputs_xlsx.parse("simulation")
network_inputs = model_inputs_xlsx.parse("network")
engines_inputs = model_inputs_xlsx.parse("engines")
res_inputs = pd.ExcelFile("data/reservoirs.xlsx")
res_char = res_inputs.parse('res_char')
farmerdata = pd.ExcelFile("data/farmers.xlsx").parse()
cropdata = pd.ExcelFile("data/crops.xlsx").parse()
##############################################
############################################################################################################
# SIMULATOR
############################################################################################################
start_time = time.time()
# Setup Simulation
pune_simulation = Simulator()
# Pull run length and start month from the 'prototype' simulation row.
number_of_years = simulation_inputs[simulation_inputs.simulation_name == 'prototype']['number_of_years'].values[0]
start_month = simulation_inputs[simulation_inputs.simulation_name == 'prototype']['start_month'].values[0]
# NOTE(review): first timestep plus num_months increments gives exactly
# 12*number_of_years monthly timesteps — confirm the "- 1" is intentional.
num_months = 12 * number_of_years - 1
one_month = relativedelta(months=1)
# Build the monthly timestep list starting at start_month ('%b %Y').
timesteps = [datetime.datetime.strptime(start_month, '%b %Y')]
for m in range(num_months):
    new_timestep = timesteps[-1] + one_month
    timesteps.append(new_timestep)
pune_simulation.set_timesteps(timesteps)
############################################################################################################
# NETWORK
############################################################################################################
network_name = 'prototype network'
network = Network(name=network_name)
# NOTE(review): `global` at module level is a no-op; these two lines have
# no effect.
global network_nodes
global network_links
# Read in network nodes from shapefile
network_nodes_obj = shapefile.Reader('C:\Users\hfkhan\Desktop\PycharmProjects\Pune\data\shapefiles\prototype_nodes_ubb').shapeRecords()
network_nodes = []
count = 0
# Instantiate the node class matching each shapefile record's type code.
for i in network_nodes_obj:
    node_name = i.record[0]
    # NOTE(review): `type` shadows the builtin inside this loop.
    type = i.record[3]
    if type == 'res':
        network_nodes.append(Pune_Reservoir(x=i.shape.points[0][0], y=i.shape.points[0][1], name=node_name))
    if type == 'jnc':
        network_nodes.append(Junction(x=i.shape.points[0][0], y=i.shape.points[0][1], name=node_name))
    if type == 'farm':
        network_nodes.append(FarmAgent(x=i.shape.points[0][0], y=i.shape.points[0][1], name=node_name))
    if type == 'tp':
        network_nodes.append(WTP(x=i.shape.points[0][0], y=i.shape.points[0][1], name=node_name))
    if type == 'wwtp':
        network_nodes.append(WWTP(x=i.shape.points[0][0], y=i.shape.points[0][1], name=node_name))
    network_nodes[count].node_type = i.record[3]
    network_nodes[count].institution_names = ['mkvdc']
    count = count + 1
# Read in network links from shapefile
network_links_obj = shapefile.Reader('C:\Users\hfkhan\Desktop\PycharmProjects\Pune\data\shapefiles\prototype_links_ubb').shapeRecords()
network_links = []
count = 0
# Match each link's named endpoints against the nodes created above.
for i in network_links_obj:
    link_name = i.record[4]
    link_type = i.record[5]
    link_start = i.record[6]
    link_end = i.record[7]
    for start in network_nodes:
        for end in network_nodes:
            if start.name == link_start and end.name == link_end:
                if link_type == 'river':
                    templink = River(start_node=start, end_node=end, name=link_name)
                    templink.linktype = 'River'
                    network_links.append(templink)
                if link_type == 'pipeline':
                    templink = Pipeline(start_node=start, end_node=end, name=link_name)
                    templink.linktype = 'Pipeline'
                    network_links.append(templink)
                if link_type == 'canal':
                    templink = Canal(start_node=start, end_node=end, name=link_name)
                    templink.linktype = 'Irr Canals'
                    network_links.append(templink)
    network_links[count].institution_names = 'mkvdc' #FOR NOW, ASSUMING ALL THE NODES/LINKS ARE OPERATED BY MKVDC
    count = count + 1
network.add_nodes(*network_nodes)
network.add_links(*network_links)
### Read in reservoir characteristics
# Reservoir ids R01..R55 and farm ids T01..T44 (Python 2 xrange).
allres = ["R" + str(i).zfill(2) for i in xrange(1,56)]
allfarms = ["T" + str(i).zfill(2) for i in xrange(1,45)]
#res_char.iloc[:,1]
for p in allres:
    s_node = network.get_node(p)
    myrec = res_char.loc[res_char['Res_ID'] == p]
    s_node.min_stor = myrec.min_stor.values[0]
    s_node.max_stor = myrec.max_stor.values[0]
    s_node.init_stor = myrec.init_stor.values[0]
    # Columns 5..16: one release value per calendar month.
    s_node.release_schedule = myrec.iloc[:,5:17].values.tolist()[0]
for f in allfarms:
    f_node = network.get_node(f)
    sel_f = farmerdata.loc[farmerdata['agent']== f]
    f_node.area = sel_f.area.values[0] #area in hectares (randomly assigned)
    f_node.crop = sel_f.crop.values[0]
    f_node.y = sel_f.cyield.values[0] # yield in kg/hectares
network.cropdata = cropdata
pune_simulation.network = network
############################################################################################################
# INSTITUTIONS
############################################################################################################
# INSTITUTION SETUP #
institution_list =[]
institution_list.extend([MKVDC(name="mkvdc"), PMC(name="pmc"), WRD(name="wrd")])
institution_list.extend([WUA(name = "wua_pune"), WUA(name = "wua_solapur")])
# Convenience institutions that group every node of a given type.
all_nodes_of_type_institutions = []
all_nodes_of_type_institutions.append(AllNodesOfType('all_farms', 'FarmAgent', pune_simulation.network))
all_nodes_of_type_institutions.append(AllNodesOfType('all_reservoirs', 'Pune_Reservoir', pune_simulation.network))
pune_simulation.network.add_institutions(*institution_list)
pune_simulation.network.add_institutions(*all_nodes_of_type_institutions)
#add nodes/links to associated institutions
for n in pune_simulation.network.nodes:
    n.add_to_institutions(n.institution_names, pune_simulation.network) # add nodes to designated institutions
for l in pune_simulation.network.links:
    l.add_to_institutions(l.institution_names, pune_simulation.network) # add links to designated institutions
############################################################################################################
# ENGINES
############################################################################################################
# Instantiate each configured engine class (looked up by name in the
# pune_engines module) and attach it to its target institution, in the
# order given by the engines sheet.
no_of_engines = engines_inputs[(engines_inputs.simulation_name == 'prototype')].shape[0]
for e in range(no_of_engines):
    engine_class = engines_inputs[(engines_inputs.simulation_name == 'prototype') &
                                  (engines_inputs.order == e + 1)]['engine_class'].values[0]
    engine_target = engines_inputs[(engines_inputs.simulation_name == 'prototype') &
                                   (engines_inputs.order == e + 1)]['engine_target'].values[0]
    EngineClass = getattr(pune_engines, engine_class)
    target = pune_simulation.network.get_institution(engine_target)
    new_engine = EngineClass(target)
    pune_simulation.add_engine(new_engine)
##############################################################################################################################
##############################################################################################################################
# Run the simulation and report wall-clock time (Python 2 print statement).
pune_simulation.start()
end_time = time.time()
sim_time = end_time-start_time
print "Simulation took: %s" % sim_time
#!/usr/bin/env python
# Implementation of https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_majority_vote_algorithm
import logging
def get_max_vote_2(arr):
    """
    Get value that appears at least in n/2 + 1 places in list.

    Boyer-Moore majority vote: a single candidate/counter pair survives
    any strict majority; a final count verifies the candidate.
    Returns None when no strict majority exists.
    """
    candidate, votes = 0, 0
    for value in arr:
        if votes == 0:
            candidate, votes = value, 1
        elif candidate == value:
            votes += 1
        else:
            votes -= 1
    # Verification pass: the surviving candidate may not be a majority.
    return candidate if arr.count(candidate) > len(arr) / 2 else None
def get_max_vote_3(arr):
    """
    Get any of values that appears at least in n/3 places in list.

    Boyer-Moore vote generalized to n/3: two candidate counters are
    maintained; any element occurring more than len(arr)/3 times must
    survive as one of the two candidates, which a final count verifies.
    """
    counters = {0: 0, 1: 0}
    for x in arr:
        logging.debug("X=%d, C=%s", x, counters)
        if x in counters:
            counters[x] += 1
            logging.debug("IN, C=%s", counters)
        elif 0 in counters.values():
            # Bug fix: dict views are not indexable on Python 3, so the
            # original counters.keys()[counters.values().index(0)] raised
            # TypeError; locate the exhausted candidate directly instead.
            zero_key = next(k for k, v in counters.items() if v == 0)
            del counters[zero_key]
            counters[x] = 1
            logging.debug("Z, C=%s", counters)
        else:
            counters = {k: v - 1 for k, v in counters.items()}
            logging.debug("SUB, C=%s", counters)
    # Verification pass over the surviving candidates.
    for k in counters:
        if arr.count(k) > len(arr) / 3:
            return k
    return None
# Enable debug output for the traces inside get_max_vote_3.
logging.basicConfig(level=logging.DEBUG)
# Smoke tests (run on import).
assert get_max_vote_2([1, 2, 2, 3, 2, 4, 2, 6]) is None
assert get_max_vote_2([1, 2, 2, 3, 2, 4, 2, 6, 2]) == 2
assert get_max_vote_3([1, 2, 3, 2, 1, 3, 1]) == 1
assert get_max_vote_3([1, 2, 3, 1, 2, 3]) is None
|
import time
# Sample string replicated by stringCopies().
string_1="MNSS Rai"
def stringCopies(n):
    """Print n copies of the module-level string_1, or a prompt for a
    positive integer when n is negative (each path pauses one second)."""
    # Guard clause: reject negative counts.
    if n < 0:
        time.sleep(1)
        print("Please enter a positive integer :")
        return
    print("Printing", n, "copies of string ...")
    time.sleep(1)
    print(string_1 * n)
# Demo calls: one valid count and one invalid (negative) count.
stringCopies(3)
stringCopies(-2)
from django.contrib import admin
from django.urls import path, include
from pyweb.views import base_views
# Project-level URL routing: admin, two apps, and the site index.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('pyweb/', include('pyweb.urls')),
    path('common/', include('common.urls')),
    path('', base_views.index, name='index'),
]
|
from django.apps import AppConfig
class AcsysSystemConfig(AppConfig):
    """Django application configuration for the acsys_system app."""
    name = 'acsys_system'
|
# Read "s1 s2 n" from one line; build a character mapping from s1 to s2
# and count positions that conflict with an earlier mapping. Print "yes"
# when the conflict count is exactly n or there are no conflicts.
s1, s2, n = input().split()
# Bug fix: input().split() yields strings, so without this conversion
# the comparison c == n (int vs str) could never be true for c > 0.
n = int(n)
d = {}
c = 0
for i in range(len(s1)):
    if s1[i] not in d:
        d[s1[i]] = s2[i]
    elif d[s1[i]] != s2[i]:
        c += 1
if c == n or c == 0:
    print("yes")
else:
    print("no")
|
# Read a first line (consumed but unused) and a line of whitespace-
# separated tokens; print the tokens concatenated in descending
# lexicographic order.
a=input()
n=input().split()
# NOTE(review): the sort is lexicographic on strings — for multi-digit
# tokens this is not numeric descending order; confirm the intended spec.
s=sorted(n,reverse=True)
print("".join(s))
|
"""
Edit distance:
----------- Problem -----------
Given two words word1 and word2, compute the minimum number of
operations needed to convert word1 into word2.
Three operations are allowed on a word:
    insert a character
    delete a character
    replace a character
----------- Approach -----------
Dynamic programming:
Two indices i and j walk word1 and word2 respectively.
Define dp[i][j] as the minimum number of steps to turn word1[:i]
into word2[:j].
Initialisation: dp[i][0] = i, dp[0][j] = j.
If word1[i-1] == word2[j-1]:
    dp[i][j] = dp[i-1][j-1]      # nothing needs to be done
Otherwise:
    dp[i][j] = min(
        dp[i-1][j] + 1,    # delete a character from word1
        dp[i][j-1] + 1,    # insert a character into word1
        dp[i-1][j-1] + 1,  # replace the last characters of word1/word2
    )
The final answer is dp[word1_len][word2_len].
"""
class Solution(object):
    def minDistance(self, word1, word2):
        """
        :type word1: str
        :type word2: str
        :rtype: int

        Classic Levenshtein DP: table[i][j] is the minimum number of
        insert/delete/replace operations turning word1[:i] into word2[:j].
        """
        rows, cols = len(word1), len(word2)
        table = [[0] * (cols + 1) for _ in range(rows + 1)]
        # First column/row: distance to or from the empty prefix.
        for i in range(1, rows + 1):
            table[i][0] = i
        for j in range(1, cols + 1):
            table[0][j] = j
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                if word1[i - 1] == word2[j - 1]:
                    # Matching tail characters cost nothing extra.
                    table[i][j] = table[i - 1][j - 1]
                else:
                    table[i][j] = 1 + min(
                        table[i - 1][j],      # delete from word1
                        table[i][j - 1],      # insert into word1
                        table[i - 1][j - 1],  # replace
                    )
        return table[rows][cols]
# Demo: distance between 'intention' and 'execution' (expected: 5).
if __name__ == "__main__":
    word1 = 'intention'
    word2 = 'execution'
    solution = Solution()
    print(solution.minDistance(word1, word2))
"""豆瓣电影
"""
import json
from urllib.request import urlopen
from urllib.parse import urlencode
class Movie():
    """Minimal client for the Douban movie v2 HTTP API."""

    url = 'https://api.douban.com/v2/movie/'

    def __init__(self):
        pass

    def get_subject_by_id(self, movie_id):
        """Fetch the full subject record for a movie id."""
        endpoint = self.url + 'subject/' + movie_id
        payload = urlopen(endpoint).read().decode()
        return json.loads(payload)

    def search_by_name(self, name):
        """Search by title, returning only the first hit."""
        query = urlencode({'q': name, 'start': 0, 'count': 1})
        payload = urlopen(self.url + 'search?' + query).read().decode()
        return json.loads(payload)

    def get_movie_by_name(self, name):
        """Resolve a title to its first search hit and fetch that record.
        The result is not reliable: it blindly takes the top match.
        """
        movie_id = self.search_by_name(name)['subjects'][0]['id']
        return self.get_subject_by_id(movie_id)
# Demo: look up "Lust, Caution" (色,戒) by name and print the record.
if __name__ == '__main__':
    movie = Movie()
    m = movie.get_movie_by_name('色,戒')
    print(m)
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django import forms
from game.models import Game
from phase10Scorer.settings import PASSWORD
class LoginForm(forms.Form):
    """
    Form used to log in to the web application.
    Only a username is collected; authentication uses the shared
    PASSWORD constant from settings for every account.
    """
    username = forms.CharField(widget=forms.TextInput(attrs=dict(required=True, max_length=128)), label="Username")
    class Meta:
        fields = ['username', ]
    def clean(self):
        # Validate that the username exists, then authenticate with the
        # fixed site-wide password.
        try:
            User.objects.get(username__iexact=self.cleaned_data['username'])
            user = authenticate(username=self.cleaned_data['username'], password=PASSWORD)
            if user is not None:
                # NOTE(review): User.is_authenticated is always truthy on a
                # real User instance, so this branch can never raise —
                # confirm the intended failure check.
                if not user.is_authenticated:
                    raise forms.ValidationError("Username/password not found.")
            else:
                raise forms.ValidationError("Username not found.")
        except User.DoesNotExist:
            raise forms.ValidationError("Username doesn't exist")
        return self.cleaned_data
class UserCreateForm(forms.Form):
    """
    Form used to create a new user; only a username is collected.
    """
    username = forms.CharField(widget=forms.TextInput(attrs=dict(required=True, max_length=128)), label="Username")
    class Meta:
        model = User
        fields = ('username',)
    def clean(self):
        """
        Reject usernames that already exist (case-insensitive).

        Bug fix: the original wrapped both the lookup and the
        ValidationError in one try with a bare ``except:``, which
        swallowed the ValidationError itself — duplicate usernames
        silently passed validation. Raising outside the try and catching
        only User.DoesNotExist fixes that.
        """
        try:
            User.objects.get(username__iexact=self.cleaned_data['username'])
        except User.DoesNotExist:
            # No existing user with this name: the form data is valid.
            return self.cleaned_data
        raise forms.ValidationError('Username already exists.')
class NewGameForm(forms.Form):
    """
    Form for a new game
    """
    name = forms.CharField(widget=forms.TextInput(attrs=dict(required=True, max_length=128)), label="Unique Game Name")

    class Meta:
        fields = ['name', ]

    def clean(self):
        """Reject game names that are already taken (case-insensitive).

        BUG FIX: the original used a bare ``except:`` that also swallowed the
        ValidationError raised inside the ``try`` block, so duplicate names
        were silently accepted. Only Game.DoesNotExist marks the name as free.
        """
        try:
            Game.objects.get(name__iexact=self.cleaned_data['name'])
        except Game.DoesNotExist:
            # No such game: the name is available.
            return self.cleaned_data
        raise forms.ValidationError("Game name already taken.")
|
from flask import Flask, render_template, request, redirect

app = Flask(__name__)


@app.route("/")
def home():
    """Landing page."""
    return render_template("index.html")


@app.route("/ninja")
def ninja():
    """Overview page for the turtles."""
    return render_template("ninja.html")


@app.route("/ninja/<userinput>")
def userinput(userinput):
    """Render the turtle page matching the requested color, or a fallback."""
    templates = {
        "blue": "blue_leonardo.html",
        "orange": "orange_michelangelo.html",
        "red": "red_raphael.html",
        "purple": "purple_donatello.html",
    }
    # Dict lookup replaces the if/elif chain; unknown colors fall back.
    return render_template(templates.get(userinput, "notapril.html"))


app.run(debug=True)
# Compute the product of all list elements, then print it modulo n.
l = [100, 10, 5, 25, 35, 14]
n = 11
mul = 1
for factor in l:
    mul *= factor
print(mul % n)
|
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
from DynamicSchedulerGeneric import Utils as DynSchedUtils
class GLUE1Exception(Exception):
    """Error raised for malformed or incomplete GLUE 1.x LDIF content."""

    def __init__(self, msg):
        super(GLUE1Exception, self).__init__(msg)
# Precompiled patterns for scanning GLUE 1.x LDIF templates.
# Raw strings fix the invalid escape sequences ("\s", "\w") that trigger a
# DeprecationWarning on Python 3.6+ and are a SyntaxError in newer releases.
ce_regex = re.compile(r"dn:\s*GlueCEUniqueID\s*=\s*.+")
vo_regex = re.compile(r"dn:\s*GlueVOViewLocalID\s*=\s*.+")
attr_regex = re.compile(r"(Glue\w+)\s*:\s*(.+)")
chunk_key_regex = re.compile(r"GlueCEUniqueID\s*=\s*(.+)")
acbr_regex = re.compile(r"(VO|VOMS)\s*:\s*(.+)")
class ACBR:
    """One parsed GlueCEAccessControlBaseRule value ("VO:<name>" or "VOMS:<fqan>")."""

    def __init__(self, acbrStr):
        match = acbr_regex.match(acbrStr)
        if match is None:
            raise GLUE1Exception("Wrong ACBR definition: %s" % acbrStr)
        self.fmt = match.group(1)           # "VO" or "VOMS"
        self.name = match.group(2).strip()  # the VO name / FQAN
class GlueCEContainer:
    """Accumulates one GlueCEUniqueID entry parsed from an LDIF template."""

    def __init__(self):
        self.id = None       # GlueCEUniqueID value
        self.queue = None    # GlueCEName (batch queue name)
        self.acbrs = set()   # VO names collected from ACBR lines

    def check(self):
        """Raise GLUE1Exception if a mandatory attribute was not found."""
        if not self.queue:
            raise GLUE1Exception("Missing mandatory attribute GlueCEName")
        if not self.id:
            raise GLUE1Exception("Missing mandatory attribute GlueCEUniqueID")
class GlueVOViewContainer:
    """Accumulates one GlueVOViewLocalID entry parsed from an LDIF template."""

    def __init__(self):
        self.id = None     # GlueVOViewLocalID value
        self.name = None   # VO name taken from the first valid ACBR
        self.fkey = None   # foreign key: the owning GlueCEUniqueID

    def check(self):
        """Raise GLUE1Exception if a mandatory attribute was not found."""
        # `is None` is the idiomatic identity test (was `== None`).
        if self.fkey is None:
            raise GLUE1Exception("Missing foreing key for GlueCEUniqueID for %s" % self.id)
        if self.name is None:
            raise GLUE1Exception("Missing ACBR for %s" % self.id)
def parseGLUETemplate(tplFilename, glueCETable, voViewTable, ce_fkeys):
    """Scan a GLUE 1.x LDIF template and fill the given tables in place.

    glueCETable maps CE dn -> GlueCEContainer, voViewTable maps VO-view dn ->
    GlueVOViewContainer, and ce_fkeys maps GlueCEUniqueID -> queue name.
    Raises GLUE1Exception on malformed or incomplete entries.
    """
    # `with` guarantees the file is closed even on parse errors
    # (replaces the manual try/finally bookkeeping of the original).
    with open(tplFilename) as static_file:
        currGLUECE = None
        currVOView = None
        for line in static_file:
            parsed = ce_regex.match(line)
            if parsed:
                # Start of a new CE record.
                currGLUECE = line.strip()
                glueCETable[currGLUECE] = GlueCEContainer()
                continue
            parsed = vo_regex.match(line)
            if parsed:
                # Start of a new VO-view record.
                currVOView = line.strip()
                voViewTable[currVOView] = GlueVOViewContainer()
                continue
            parsed = attr_regex.match(line)
            if parsed:
                key = parsed.group(1)
                value = parsed.group(2).strip()
                if key == "GlueCEName" and currGLUECE:
                    glueCETable[currGLUECE].queue = value
                elif key == 'GlueCEUniqueID' and currGLUECE:
                    glueCETable[currGLUECE].id = value
                elif key == "GlueCEAccessControlBaseRule" and currGLUECE:
                    acbrItem = ACBR(value)
                    glueCETable[currGLUECE].acbrs.add(acbrItem.name)
                elif key == "GlueVOViewLocalID" and currVOView:
                    voViewTable[currVOView].id = value
                elif key == "GlueChunkKey" and currVOView:
                    parsed = chunk_key_regex.match(value)
                    if parsed:
                        voViewTable[currVOView].fkey = parsed.group(1).strip()
                elif key == "GlueCEAccessControlBaseRule" and currVOView:
                    # select the first valid ACBR to be the vo name
                    if voViewTable[currVOView].name is None:
                        acbrItem = ACBR(value)
                        voViewTable[currVOView].name = acbrItem.name
                continue
            if len(line.strip()) == 0:
                # A blank line terminates the current LDIF record:
                # validate it and reset the state.
                if currGLUECE:
                    glueCETable[currGLUECE].check()
                    tmpid = glueCETable[currGLUECE].id
                    tmpqueue = glueCETable[currGLUECE].queue
                    ce_fkeys[tmpid] = tmpqueue
                if currVOView:
                    voViewTable[currVOView].check()
                currGLUECE = None
                currVOView = None
        # Close the last record if the file does not end with a blank line.
        if currGLUECE:
            glueCETable[currGLUECE].check()
            tmpid = glueCETable[currGLUECE].id
            tmpqueue = glueCETable[currGLUECE].queue
            ce_fkeys[tmpid] = tmpqueue
        if currVOView:
            voViewTable[currVOView].check()
def process(config, collector, out=sys.stdout):
    """Emit dynamic GLUE 1.x state attributes for every CE and VO view.

    Reads the static LDIF templates listed by *config*, then writes job
    counts, response times and free-slot numbers obtained from *collector*
    to *out* as LDIF fragments. (Also removes the stray C-style semicolons
    and uses the idiomatic `not in` membership test.)
    """
    glueCETable = dict()
    voViewTable = dict()
    ce_fkeys = dict()
    ldifList = DynSchedUtils.getLDIFFilelist(config, 'static-file-CE.ldif')
    for ldifFilename in ldifList:
        parseGLUETemplate(ldifFilename, glueCETable, voViewTable, ce_fkeys)
    # Per-CE dynamic attributes.
    for glueceDN in glueCETable:
        ceData = glueCETable[glueceDN]
        out.write("%s\n" % glueceDN)
        nwait = collector.queuedJobsOnQueue(ceData.queue)
        nrun = collector.runningJobsOnQueue(ceData.queue)
        out.write("GlueCEStateWaitingJobs: %d\n" % nwait)
        out.write("GlueCEStateRunningJobs: %d\n" % nrun)
        out.write("GlueCEStateTotalJobs: %d\n" % (nrun + nwait))
        if collector.isSetERT(ceData.queue):
            out.write("GlueCEStateEstimatedResponseTime: %d\n" % collector.getERT(ceData.queue))
        else:
            out.write("GlueCEStateEstimatedResponseTime: 0\n")
        if collector.isSetWRT(ceData.queue):
            out.write("GlueCEStateWorstResponseTime: %d\n" % collector.getWRT(ceData.queue))
        else:
            out.write("GlueCEStateWorstResponseTime: 0\n")
        if collector.free >= 0:
            out.write("GlueCEStateFreeJobSlots: %d\n" % collector.free)
        out.write("\n")
    # Per-VO-view dynamic attributes.
    for voviewDN in voViewTable:
        voData = voViewTable[voviewDN]
        out.write("%s\n" % voviewDN)
        if voData.fkey not in ce_fkeys:
            raise GLUE1Exception("Invalid foreign key for " + voviewDN)
        queue = ce_fkeys[voData.fkey]
        nwait = collector.queuedJobsOnQueueForVO(queue, voData.name)
        nrun = collector.runningJobsOnQueueForVO(queue, voData.name)
        out.write("GlueCEStateWaitingJobs: %d\n" % nwait)
        out.write("GlueCEStateRunningJobs: %d\n" % nrun)
        out.write("GlueCEStateTotalJobs: %d\n" % (nrun + nwait))
        if collector.isSetERT(queue):
            out.write("GlueCEStateEstimatedResponseTime: %d\n" % collector.getERT(queue))
        else:
            out.write("GlueCEStateEstimatedResponseTime: 0\n")
        if collector.isSetWRT(queue):
            out.write("GlueCEStateWorstResponseTime: %d\n" % collector.getWRT(queue))
        else:
            out.write("GlueCEStateWorstResponseTime: 0\n")
        nfreeSlots = collector.freeSlots(queue, voData.name)
        if nfreeSlots >= 0:
            out.write("GlueCEStateFreeJobSlots: %d\n" % nfreeSlots)
        out.write("\n")
|
"""
This file builds on the C extension _distributions.c and contains python classes
that are used to evaluate PDFs and CDFs of distributions and draw samples from
a range of distributions.
The following distributions are currently implemented
- Exponential (http://en.wikipedia.org/wiki/Exponential_distribution)
- Lognormal (http://en.wikipedia.org/wiki/Lognormal_distribution)
- Gamma (http://en.wikipedia.org/wiki/Gamma_distribution)
- Pareto (http://en.wikipedia.org/wiki/Pareto_distribution)
- Rayleigh (http://en.wikipedia.org/wiki/Rayleigh_distribution)
- Uniform (http://en.wikipedia.org/wiki/Uniform_distribution_(continuous))
Other distributions can be added easily by implementing the following template
class mydistribution:
def __init__(self, param1, param2):
#Initialize the parameters characterizing the distribution
self.param1 = param1
self.param2 = param2
def rvs(self):
#Sample the distribution
return draw_a_sample
def pdf(self, x):
#Evaluate the PDF at x
        return evaluate_the_pdf
def cdf(self, x):
#Evaluate the CDF at x
return evaluate_the_cdf
"""
from random import Random
import numpy as np
import scipy.special as special
import _distributions
random = Random()
class exponential:
    """Exponential distribution parameterized by its mean (1 / rate)."""

    def __init__(self, mean):
        self.mean = mean

    def rvs(self):
        """Draw a single sample."""
        lam = 1 / self.mean
        return random.expovariate(lam)

    def pdf(self, x):
        """Probability density at x."""
        scale = self.mean
        return np.exp(-x / scale) / scale

    def cdf(self, x):
        """Cumulative probability at x."""
        return 1 - np.exp(-x / self.mean)
class lognormal:
    """
    Lognormal distribution with mean 'exp(mu + sigma ** 2 / 2)' and
    variance 'mean ** 2 * (exp(sigma ** 2) - 1)'.
    """

    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma

    def pdf(self, x):
        """Density at x, evaluated by the C extension."""
        return _distributions.lognormal_pdf(self.mu, self.sigma, x)

    def cdf(self, x):
        """Cumulative probability at x, evaluated by the C extension."""
        return _distributions.lognormal_cdf(self.mu, self.sigma, x)

    def rvs(self):
        """Draw a single sample."""
        return random.lognormvariate(self.mu, self.sigma)

    @staticmethod
    def from_moments(mean, std):
        """
        Create a lognormal distribution with the given mean and standard
        deviation by inverting the moment formulas above.
        """
        mean = float(mean)  # cast avoids integer division
        log_var = np.log(1 + (std / mean) ** 2)
        location = np.log(mean) - .5 * log_var
        return lognormal(location, np.sqrt(log_var))
class gamma:
    """Gamma distribution with shape `alpha` and scale `beta`."""

    def __init__(self, alpha, beta):
        self.alpha = alpha
        self.beta = beta

    def rvs(self):
        """Draw a single sample."""
        return random.gammavariate(self.alpha, self.beta)

    def pdf(self, x):
        """Density at x, evaluated by the C extension."""
        return _distributions.gamma_pdf(self.alpha, self.beta, x)

    def cdf(self, x):
        """Regularized lower incomplete gamma function at x / beta."""
        return special.gammainc(self.alpha, x / self.beta)

    def __str__(self):
        return "gamma: alpha = {0}; beta = {1}".format(self.alpha, self.beta)

    @staticmethod
    def from_moments(mean, std):
        """
        Create a gamma distribution with the given mean and standard
        deviation (shape = (mean/std)^2, scale = std^2/mean).
        """
        shape = (mean / std) ** 2
        scale = std ** 2 / mean
        return gamma(shape, scale)
class pareto:
    """Pareto distribution with scale `k` (minimum value) and shape `alpha`."""

    def __init__(self, k, alpha):
        self.k = k
        self.alpha = alpha

    def rvs(self):
        """Draw a single sample by inverse-transform sampling."""
        u = random.random()
        return self.k * (1 - u) ** (-1. / self.alpha)

    def pdf(self, x):
        """Density: zero below k, alpha * k**alpha / x**(alpha + 1) above."""
        if x < self.k:
            return 0
        return self.alpha * self.k ** self.alpha * x ** -(self.alpha + 1)

    def cdf(self, x):
        """Cumulative probability at x."""
        if x < self.k:
            return 0
        return 1 - (self.k / x) ** self.alpha

    @staticmethod
    def from_moments(mean, std):
        """
        Create a Pareto distribution with the given mean and standard
        deviation.
        """
        var = std ** 2
        scale = mean + var / mean - std * np.sqrt(1 + var / mean ** 2)
        shape = 1 + np.sqrt(1 + mean ** 2 / var)
        return pareto(scale, shape)
class rayleigh:
    """Rayleigh distribution with scale parameter `sigma`."""

    def __init__(self, sigma):
        self.sigma = sigma

    def pdf(self, x):
        """Probability density at x."""
        z = x / self.sigma
        return x / self.sigma ** 2 * np.exp(-.5 * z ** 2)

    def cdf(self, x):
        """Cumulative probability at x."""
        z = x / self.sigma
        return 1 - np.exp(-.5 * z ** 2)

    def rvs(self):
        """Draw a single sample by inverse-transform sampling."""
        return self.sigma * np.sqrt(-2 * np.log(random.random()))
class uniform:
    """Continuous uniform distribution on [lower, upper]."""

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper

    def rvs(self):
        """Draw a single sample."""
        span = self.upper - self.lower
        return self.lower + span * random.random()

    def pdf(self, x):
        """Constant density inside the interval, zero outside."""
        if self.lower <= x <= self.upper:
            return 1. / (self.upper - self.lower)
        return 0

    def cdf(self, x):
        """Piecewise-linear cumulative probability at x."""
        if x > self.upper:
            return 1
        if x < self.lower:
            return 0
        return (x - self.lower) / (self.upper - self.lower)
|
import json
import datetime
import time
import os
import dateutil.parser
import logging
import boto3
import re
region = 'us-east-1'  # every AWS client below is pinned to this region
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)  # verbose logging while developing the bot
# Clients are created at module level so a warm Lambda container reuses them.
ec2 = boto3.resource('ec2', region_name=region)
ec2_client = boto3.client('ec2')
lex_client = boto3.client('lex-models')
# Start an instance
def action_instance(intent_request):
    """Handle the 'action_instances' intent: list, start or stop EC2 instances.

    Only the 'show'/'list' action is implemented; 'start' and 'stop' are
    still stubs that just log the requested action.
    """
    instance_action = intent_request['currentIntent']['slots']['instance_actions']
    instance_identifier = intent_request['currentIntent']['slots']['instance_identifiers']
    # Match instances whose Name tag contains the requested identifier.
    response_describe = ec2_client.describe_instances(
        Filters=[{'Name': 'tag:Name', 'Values': ['*' + instance_identifier + '*']}])
    print(response_describe)  # was a Python 2 print statement
    words_show = ['show', 'list']
    words_start = ['start']
    words_stop = ['stop']
    instance_names = []
    if instance_action in words_show:
        # BUG FIX: the original indexed the response with the string
        # literals 'i'/'j'/'k' instead of the loop variables, which raised
        # a KeyError/TypeError on every request.
        for reservation in response_describe['Reservations']:
            for instance in reservation['Instances']:
                for tag in instance['Tags']:
                    if tag['Key'] == 'Name':
                        instance_names.append(tag['Value'])
                        break
        str1 = ''.join(instance_names)
        print(str1)
        return close(
            'Fulfilled',
            {
                'contentType': 'PlainText',
                'content': 'These are the instances:-'
            }
        )
    if instance_action in words_start:
        # TODO: actually start the matched instances.
        print('startAction')
    if instance_action in words_stop:
        # TODO: actually stop the matched instances.
        print('stopAction')
    return close(
        'Fulfilled',
        {
            'contentType': 'PlainText',
            'content': 'The instance you have requested has been started.' + instance_identifier + instance_action
        }
    )
# Greetings
def greetings(intent_request):
    """Respond to a greeting by eliciting the next intent from the user."""
    welcome = {
        'contentType': 'PlainText',
        'content': 'Hello!! My name is LabRat How can I help you today?'
    }
    return elicit('Fulfilled', welcome)
def close(fulfillment_state, message):
    """Build a Lex 'Close' dialog action that ends the conversation."""
    return {
        'dialogAction': {
            'type': 'Close',
            'fulfillmentState': fulfillment_state,
            'message': message,
        }
    }
def elicit(fulfillment_state, message):
    """Build a Lex 'ElicitIntent' dialog action.

    *fulfillment_state* is accepted for symmetry with close() but is not
    part of an ElicitIntent payload, so it is unused.
    """
    return {
        'dialogAction': {
            'type': 'ElicitIntent',
            'message': message,
        }
    }
# --- Intent handler ---
def dispatch(intent_request):
    """Route an incoming Lex request to the matching intent handler."""
    logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))
    intent_name = intent_request['currentIntent']['name']
    print(intent_request)
    # Dispatch to your bot's intent handlers
    if intent_name == 'action_instances':
        return action_instance(intent_request)
    unsupported = {
        'contentType': 'PlainText',
        'content': 'The request which you are looking for does not support with the current release '
    }
    return close('Fulfilled', unsupported)
# --- Main handler ---
def lambda_handler(event, context):
    """AWS Lambda entry point: delegate the Lex event to dispatch()."""
    # By default, treat the user request as coming from the America/New_York time zone.
    os.environ['TZ'] = 'America/New_York'
    time.tzset()
    logger.debug('event.bot.name={}'.format(event['bot']['name']))
    return dispatch(event)
import numpy as np
def one_hot_encode_along_channel_axis(sequence):
    """Return a (len(sequence), 4) int8 one-hot encoding of a DNA string."""
    encoded = np.zeros((len(sequence), 4), dtype=np.int8)
    seq_to_one_hot_fill_in_array(zeros_array=encoded,
                                 sequence=sequence, one_hot_axis=1)
    return encoded
def seq_to_one_hot_fill_in_array(zeros_array, sequence, one_hot_axis):
    """One-hot encode *sequence* into the preallocated *zeros_array* in place.

    one_hot_axis selects the layout: 0 -> (4, len(sequence)),
    1 -> (len(sequence), 4). 'N'/'n' positions are left all-zero; any other
    unsupported character raises RuntimeError.
    """
    assert one_hot_axis == 0 or one_hot_axis == 1
    if one_hot_axis == 0:
        assert zeros_array.shape[1] == len(sequence)
    elif one_hot_axis == 1:
        assert zeros_array.shape[0] == len(sequence)
    # Table lookup replaces the long if/elif chain; None marks 'N' (skip).
    char_to_idx = {"A": 0, "C": 1, "G": 2, "T": 3, "N": None}
    # will mutate zeros_array
    for i, char in enumerate(sequence):
        try:
            char_idx = char_to_idx[char.upper()]
        except KeyError:
            raise RuntimeError("Unsupported character: " + str(char))
        if char_idx is None:
            continue  # leave that position as all 0's
        if one_hot_axis == 0:
            zeros_array[char_idx, i] = 1
        else:
            zeros_array[i, char_idx] = 1
def normalize_scores(impscores, hyp_impscores, onehot_data):
    """Rescale hypothetical importance scores per position.

    At every position, the hypothetical scores that share the sign of the
    actual per-position importance are scaled so that they sum to that
    importance. Returns (normed_impscores, normed_hyp_impscores), parallel
    to the input lists.
    """
    normed_hyp_impscores = []
    normed_impscores = []
    for idx in range(len(impscores)):
        actual = np.sum(impscores[idx], axis=-1)
        actual_sign = np.sign(actual)
        same_sign = (np.sign(hyp_impscores[idx])
                     * actual_sign[:, None] > 0)
        same_sign_total = np.sum(
            hyp_impscores[idx] * same_sign, axis=-1)
        ratio = actual / same_sign_total
        scaled = hyp_impscores[idx] * ratio[:, None]
        normed_hyp_impscores.append(scaled)
        normed_impscores.append(scaled * onehot_data[idx])
    return normed_impscores, normed_hyp_impscores
def get_impscores(hyp_impscores, onehot_data):
    """Project hypothetical scores onto the observed bases (elementwise product)."""
    result = []
    for hyp, onehot in zip(hyp_impscores, onehot_data):
        result.append(hyp * onehot)
    return result
def get_hyp_scores(hyp_file, seqs):
    """Parse per-position hypothetical scores from *hyp_file*.

    Each line is tab-separated with the scores in column 3, formatted as
    ';'-separated positions of ','-separated floats. The result is
    truncated to len(seqs) entries (one per sequence). The file is now
    closed deterministically via a context manager (the original leaked
    the handle opened inside the comprehension).
    """
    with open(hyp_file) as fh:
        scores = [
            np.array([[float(z) for z in y.split(",")]
                      for y in x.rstrip().split("\t")[2].split(";")])
            for x in fh
        ]
    return [w[0] for w in zip(scores, seqs)]
def get_scores(hyp_file):
    """Like get_hyp_scores() but returns plain nested lists and keeps every
    line of the file. Uses a context manager so the file handle is closed
    (the original leaked the handle opened inside the comprehension)."""
    with open(hyp_file) as fh:
        return [[[float(z) for z in y.split(",")]
                 for y in x.rstrip().split("\t")[2].split(";")]
                for x in fh]
|
import json
import unittest
import sqlparse
from sje import extractor
# Flat schema: flatten_all() should return it unchanged.
ONE_LEVEL_JSON = [
    {"name": "name", "type": "STRING"},
    {"name": "age", "type": "NUMERIC"},
]
# Schema with RECORD fields nested three levels deep.
NESTED_JSON = [
    {"name": "name", "type": "STRING"},
    {
        "name": "metadata",
        "type": "RECORD",
        "fields": [
            {
                "name": "skills",
                "type": "RECORD",
                "fields": [
                    {
                        "name": "java",
                        "type": "RECORD",
                        "fields": [{"name": "rating", "type": "NUMERIC"}],
                    }
                ],
            },
            {"name": "age", "type": "NUMERIC"},
        ],
    },
]
# Expected flattening of NESTED_JSON: nested names joined with underscores.
NESTED_JSON_FLATTEN = [
    {"name": "name", "type": "STRING"},
    {"name": "metadata_skills_java_rating", "type": "NUMERIC"},
    {"name": "metadata_age", "type": "NUMERIC"},
]
# Clause appended after the generated SELECT.
# NOTE(review): the backtick is unbalanced -- presumably intentional test data.
TRAILER_CLAUSE = "from `project:dataset.table limit 1000"
# SQL expected for NESTED_JSON before sqlparse re-formatting.
NESTED_JSON_SQL = "SELECT CAST(name AS STRING), \
CAST(JSON_EXTRACT(metadata, '$.skills.java.rating') AS NUMERIC) AS metadata_skills_java_rating, \
CAST(JSON_EXTRACT(metadata, '$.age') AS NUMERIC) AS metadata_age"
class TestExtractor(unittest.TestCase):
    """Unit tests for sje.extractor schema flattening and SQL generation."""

    def test_flatten_all_empty(self):
        assert extractor.flatten_all([]) == []

    def test_flatten_all_one_level(self):
        flattened = extractor.flatten_all(ONE_LEVEL_JSON)
        assert flattened == ONE_LEVEL_JSON

    def test_flatten_all_nested(self):
        flattened = extractor.flatten_all(NESTED_JSON)
        assert flattened == NESTED_JSON_FLATTEN

    def test_flatten_all_as_sql_nested(self):
        generated = extractor.flatten_all_as_sql(NESTED_JSON)
        expected = sqlparse.format(
            NESTED_JSON_SQL, reindent=True, keyword_case="upper"
        )
        assert generated == expected

    def test_flatten_all_as_sql_nested_with_trailer_clause(self):
        generated = extractor.flatten_all_as_sql(
            NESTED_JSON, trailer_clause=TRAILER_CLAUSE
        )
        expected = sqlparse.format(
            f"{NESTED_JSON_SQL} {TRAILER_CLAUSE}", reindent=True, keyword_case="upper"
        )
        assert generated == expected
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>.py`.
    unittest.main()
# Post-process a PCIe protocol-analyzer CSV trace into an annotated NVMe log.
import csv, math
strInputFilename = 'PcieTrace.csv'   # analyzer export (input)
strOutputFilename = 'PcieTrace.log'  # annotated CSV log (output)
adminQueueBaseAddress = 0  # placeholder, not used below
adminQueueSize = 0
barAddress = 0xDF000000  # NVMe controller BAR0 base address seen in this trace
# Queue Id, SQ Base Address, SQ Size, CQ Base Address, CQ Size, SQ Tail Doorbell, CQ Head Doorbell, SQ Head Pointer
QueueTable = [
    # Qid, SqBase, SqSize, CqBase, CqSize, SqTdbl, CqHdbl, SqHptr
    #[0, 0, 0x100, 0, 0, 0, 0, 0],
    #[1, 0, 0x20, 0, 0, 0, 0, 0],
    #[2, 0, 0x100, 0, 0, 0, 0, 0],
    #[3, 0, 0x20, 0, 0, 0, 0, 0],
    [4, 0x4F1E0000, 0x100, 0x50340000, 0x100, 0, 0, 0],
    [5, 0x4F200000, 0x100, 0x4F1F0000, 0x100, 0, 0, 0],
    [6, 0x4f220000, 0x10, 0x4F210000, 0x10, 0, 0, 0]]
fieldNames = ['PACKET', 'DESCRIPTION', 'QTYPE', 'QID', 'ADDRESS', 'DOORBELL', 'OPCODE', 'CID', 'NSPACE', 'LBA', 'NLB', 'SQHD', 'DATA']
fileInput = open(strInputFilename, newline='')
csvReader = csv.DictReader(fileInput)
fileOutput = open(strOutputFilename, 'w', newline = '')
csvWriter = csv.DictWriter(fileOutput, fieldnames = fieldNames)
# Parser state: an SQ memory write is held as "pending" until the matching
# completion (CplD) TLP arrives; the sav* variables remember its identity.
pendingCommand = False
savTlpType = ""
savQueueId = -1
savQueueType = -1
savDescription = ""
savAddress = 0
numberOfLines = 0        # unused counters, kept as-is
cMaxNumberOfLines = 1000
csvWriter.writeheader()
# Classify each trace row as a register access, doorbell write, SQ command
# or CQ entry, then emit an annotated record. The exact data-slicing offsets
# assume each dword is rendered as 8 hex chars plus one separator character
# (hence the (8+1) strides) -- TODO confirm against the analyzer export.
for row in csvReader:
    nvmePacketType = 0xFFFF # Invalid
    address = 0;
    if(row['Address']!=''):
        address = int(row['Address'].replace(':',''), 16)
    description = ""
    linkDir = row['Link Dir'].replace('stream','')
    dllpType = row['DLLP Type']
    tlpType = row['TLP Type']
    psn = row['PSN']
    ackNakSeqNum = row['AckNak_Seq_Num']
    queueId = -1
    queueType = -1
    doorbell = ""
    opCode = 0
    cid = 0
    nspace = 0
    lba = 0
    nlb = 0
    sqhd = 0
    data = row['DATA']
    # Accesses inside BAR0's first 4 KiB are controller register traffic.
    if(barAddress <= address < barAddress + 0x1000):
        nvmePacketType = 0 # NVMe Controller Registers
    # Doorbell checking
    # Doorbell registers live in the second 4 KiB page; even 8-byte slots
    # are SQ tail doorbells, odd slots are CQ head doorbells.
    if( barAddress+0x1000 <= address < barAddress + 0x2000):
        if( (row['Length'] == '1') and (row['TLP Type'] == 'MWr(32)')):
            queueId = math.floor((address - (barAddress+0x1000)) / 8)
            # if(data.find('E') != -1): doorbell = 0xFFFF
            # else: doorbell = int(data, 16)
            doorbell = data
            if( (address - (barAddress+0x1000)) % 8 == 0 ):
                queueType = 0 # SQ
                nvmePacketType = 1 # NVME SQ Tail Doorbell
            else:
                queueType = 1 # CQ
                nvmePacketType = 2 # NVME CQ Head Doorbell
            if(queueType == 0):
                description = "SQ " + str(queueId) + " TDBL " + doorbell[4:]
            else:
                description = "CQ " + str(queueId) + " HDBL " + doorbell[4:]
    # SQ Command checking
    sizeOfIoSqEntry = 0x40
    for queue in QueueTable:
        # SQ check
        # A write that lands inside a known SQ ring is a submission-queue
        # command; remember it until the completion TLP shows the payload.
        if(queue[1]!=0 and queue[1] <= address < queue[1]+queue[2]*sizeOfIoSqEntry):
            queueId = queue[0];
            queueType = 0 # SQ
            nvmePacketType = 103 # NVME SQ Command pending
            description = "SQ " + str(queueId) + " CMND "
            pendingCommand = True
            savQueueId = queueId
            savQueueType = queueType
            savDescription = description
            savAddress = address
            savTlpType = tlpType
    # Pending Command checking
    # The CplD carries the command payload: decode opcode/CID/NSID/LBA/NLB.
    if(tlpType == 'CplD' and pendingCommand == True):
        nvmePacketType = 3 # NVME SQ Command
        pendingCommand = False
        queueId= savQueueId
        queueType = savQueueType
        address = savAddress
        tlpType = savTlpType + '-' + tlpType
        opCode = data[4:8]
        cid = data[0:4]
        nspace = data[1*(8+1):1 * (8+1)+8]
        lba = data[11*(8+1):11 * (8+1)+8] + data[10*(8+1):10 * (8+1)+8]
        nlb = data[12*(8+1):12 * (8+1)+8]
        description = savDescription + cid
    # CQE checking
    sizeOfCqEntry = 0x10
    for queue in QueueTable:
        # A write inside a known CQ ring is a completion queue entry.
        if(queue[3]!=0 and queue[3] <= address < queue[3]+queue[4]*sizeOfCqEntry):
            queueId = queue[0]
            queueType = 1 # CQ
            nvmePacketType = 4 # NVME CQE
            sqhd = data[2 * (8+1)+4:2 * (8+1)+8]
            cid = data[3 * (8+1)+4:3 *(8+1)+8]
            description = "CQ " + str(queueId) + " CQE " + cid
    # Emit fully-classified rows (types < 100); skip ACK DLLPs and rows
    # still waiting for their completion.
    if(pendingCommand != True and dllpType != 'ACK' and nvmePacketType < 100):
        print('{:^7}{:5}{:4}{:12}{:>5}{:>5}{:16}{:>10X}{:>10}{:>5}{:>5}{:>9}{:>17}{:>9}{:>9},{:24}'.format(row['Packet'], linkDir, dllpType, tlpType, psn, ackNakSeqNum, description, address, doorbell, opCode, cid, nspace, lba, nlb, sqhd, data[:9*8]))
        if(queueType == 0) : qTypeStr = "SQ"
        else: qTypeStr = "CQ"
        #fieldNames = ['PACKET', 'DESCRIPTION', 'QTYPE', 'QID', 'ADDRESS', 'DOORBELL', 'OPCODE', 'CID', 'NSPACE', 'LBA', 'NLB', 'SQHD', 'DATA']
        csvWriter.writerow({'PACKET': row['Packet'], 'DESCRIPTION':description, 'QTYPE':qTypeStr, 'QID':queueId, 'ADDRESS':address, 'DOORBELL':doorbell, 'OPCODE': opCode, 'CID':cid, 'NSPACE':nspace, 'LBA':lba, 'NLB':nlb, 'SQHD':sqhd, 'DATA':data})
fileInput.close()
fileOutput.close()
|
# Conway's Game of Life - Python Implementation
# Author: Daniel: dan@imdany.com
import pygame
from config import *
# Logic of the Cell
class Cell:
    """One cell of the Game of Life grid.

    Knows its own (row, column) position and state, can draw itself, and can
    compute its next state from a 2-D grid (list of rows) of Cell objects.
    """

    def __init__(self, gameDisplay, c, r, state=0):
        self.c = c          # column index
        self.r = r          # row index
        self.state = state  # 1 = alive, 0 = dead
        self.gameDisplay = gameDisplay

    def display(self):
        """Draw the cell as a filled square (BLACK = alive, WHITE = dead)."""
        color = BLACK if self.state == 1 else WHITE
        pygame.draw.rect(
            self.gameDisplay,
            color,
            [SIZE_C * self.r, SIZE_C * self.c, SIZE_C, SIZE_C],
        )

    def checkNeighbors(self, cellList):
        """Return this cell's next state (0/1) given the full grid.

        BUG FIX: the original relied on catching IndexError for out-of-range
        neighbors, but Python's negative indices silently wrap around, so
        cells on the top/left edges also counted neighbors from the opposite
        edges. Bounds are now checked explicitly on both sides.
        """
        rows = len(cellList)
        neighbs = 0
        for dr, dc in (
            (-1, -1), (-1, 0), (-1, 1),
            (0, -1),           (0, 1),
            (1, -1),  (1, 0),  (1, 1),
        ):
            nr = self.r + dr
            nc = self.c + dc
            if 0 <= nr < rows and 0 <= nc < len(cellList[nr]):
                if cellList[nr][nc].state == 1:
                    neighbs += 1
        if self.state == 1:
            # Survival requires exactly 2 or 3 live neighbors.
            return 1 if neighbs in (2, 3) else 0
        # A dead cell is born with exactly 3 live neighbors.
        return 1 if neighbs == 3 else 0
|
from elements.code_elements import GenericElement, CommandElement
from abc import abstractproperty
from elements.return_types import *
class CompareElement(CommandElement):
    """Base class for binary comparison / boolean operators.

    Subclasses only supply `sign`; write_out renders "(a <sign> b)".
    """

    def __init__(self, a: GenericElement, b: GenericElement):
        # Warn when the operand types differ and neither is already boolean.
        types_differ = a.return_type != b.return_type
        a_not_bool = a.return_type is not self.return_type
        b_not_bool = b.return_type is not self.return_type
        if types_differ and a_not_bool and b_not_bool:
            self.input_warning()
        self.a = a
        self.b = b

    def write_out(self, sqf=False):
        left = self.a.write_out(sqf)
        right = self.b.write_out(sqf)
        return "({} {} {})".format(left, self.sign, right)

    @abstractproperty
    def sign(self):
        return "=="

    @property
    def return_type(self):
        return BOOL
# Concrete comparison / boolean operators: each subclass only supplies the
# operator token inserted between the two operands.
class EqualsElement(CompareElement):
    sign = "=="


class NotEqualsElement(CompareElement):
    sign = "!="


class AndElement(CompareElement):
    sign = "&&"


class OrElement(CompareElement):
    sign = "||"


class GreaterElement(CompareElement):
    sign = ">"


class GreaterOrEqualElement(CompareElement):
    sign = ">="


class LessElement(CompareElement):
    # BUG FIX: was "<=", which made LessElement emit the same operator as
    # LessOrEqualElement; strict less-than is "<".
    sign = "<"


class LessOrEqualElement(CompareElement):
    sign = "<="
|
def insertionsort(list):
    """Sort *list* in place using insertion sort and return it.

    NOTE: the parameter name shadows the builtin `list`; it is kept to stay
    backward-compatible with keyword callers.
    """
    for i in range(1, len(list)):
        j = i
        # Shift the element left while it is smaller than its predecessor,
        # using idiomatic tuple swapping instead of a temp variable.
        while j > 0 and list[j - 1] > list[j]:
            list[j - 1], list[j] = list[j], list[j - 1]
            j -= 1
    return list
if __name__ == "__main__":
    # BUG FIX: `print x` was Python 2 syntax (SyntaxError on Python 3);
    # also stop shadowing the builtin `list`.
    data = [55, 51, 74, 86, 36, 67, 88, 0, 49]
    print(insertionsort(data))
|
#! /usr/bin/env python
"""
Author: Zhiqiang Liang
Program: fd_faceboxes.py
Date: Tuesday, Mar. 12 2019
Description: re-write demo.py functionalities of faceboxes.
"""
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
import argparse
caffe_root = r'../'
# os.chdir(caffe_root)
sys.path.insert(0, caffe_root + '/python')
import caffe
import datetime
def imglist_in_folder(root_dir, ext_names):
    """Recursively collect files under *root_dir* whose extension is in
    *ext_names* (extensions include the leading dot, e.g. ".jpg").

    The matched paths are also printed, mirroring the original debug output.
    """
    imglist = []
    for root, dirs, files in os.walk(root_dir, topdown=False):
        # Iterate entries directly instead of range(len(files)).
        for fname in files:
            _, fileext = os.path.splitext(fname)
            if fileext in ext_names:
                imglist.append(os.path.join(root, fname))
    for path in imglist:
        print(path)
    # (removed the leftover debug assignment `aaaaaa=0`)
    return imglist
def predict_imgs_infolder(root_dir, ext_names):
    """Run the FaceBoxes detector on every image under *root_dir* and save
    annotated copies into ./algo_output.

    BUG FIX: the original overwrote every loop image path with one
    hard-coded debug file ("D:/datasets/.../0_Parade_..."), so only that
    single image was ever processed. Leftover debug assignments and the
    inner loop shadowing the outer index variable are removed as well.
    """
    algo_output_folder = 'algo_output'
    if not os.path.exists(algo_output_folder):
        os.makedirs(algo_output_folder)
    img_list = imglist_in_folder(root_dir, ext_names)
    fd = FD_Faceboxes(args.network_file, args.trained_model_file)
    for img_path in img_list:
        filepath, fullfilename = os.path.split(img_path)
        shortname, extension = os.path.splitext(fullfilename)
        # Time one forward pass in milliseconds.
        t0 = datetime.datetime.now()
        prediction = fd.predict_one_image(img_path)
        t1 = datetime.datetime.now()
        delta_t = t1 - t0
        msecond = delta_t.microseconds / 1000 + delta_t.seconds * 1000
        print(msecond)
        det_label = prediction[0, 0, :, 1]
        det_conf = prediction[0, 0, :, 2]
        det_xmin = prediction[0, 0, :, 3]
        det_ymin = prediction[0, 0, :, 4]
        det_xmax = prediction[0, 0, :, 5]
        det_ymax = prediction[0, 0, :, 6]
        # Keep only boxes with confidence >= args.face_threshold.
        top_indices = [k for k, conf in enumerate(det_conf) if conf >= args.face_threshold]
        top_conf = det_conf[top_indices]
        top_xmin = det_xmin[top_indices]
        top_ymin = det_ymin[top_indices]
        top_xmax = det_xmax[top_indices]
        top_ymax = det_ymax[top_indices]
        img = cv2.imread(img_path)
        if img is None:
            print("can not open image:", img_path)
            return
        for box in range(top_conf.shape[0]):
            # Box coordinates are normalized; scale to pixel positions.
            xmin = int(round(top_xmin[box] * img.shape[1]))
            ymin = int(round(top_ymin[box] * img.shape[0]))
            xmax = int(round(top_xmax[box] * img.shape[1]))
            ymax = int(round(top_ymax[box] * img.shape[0]))
            cv2.rectangle(img, (xmin, ymin + 4), (xmax, ymax), (0, 255, 0), 1)
            font_size = 1 if (xmax - xmin) / 60.0 > 1 else (xmax - xmin) / 60.0
            cv2.putText(img, '{:.3f}'.format(top_conf[box]), (xmin, ymin), cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size, (0, 255, 0))
        cv2.putText(img, 'face_threshold == {}'.format(args.face_threshold), (20, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0))
        cv2.imshow('facebox', img)
        cv2.imwrite(os.path.join(algo_output_folder, fullfilename), img)
        cv2.waitKey(1)
    return
class FD_Faceboxes():
    """Face detector wrapping a pre-trained FaceBoxes Caffe network (CPU mode)."""
    def __init__(self, model_def, model_weights):
        """Load the network from a prototxt (*model_def*) + caffemodel pair."""
        # caffe.set_device(0)
        # caffe.set_mode_gpu()
        caffe.set_mode_cpu()
        self.model_def = model_def
        self.model_weights = model_weights
        self.net = caffe.Net(self.model_def, self.model_weights, caffe.TEST)
        return
    def predict_one_image(self, image_path):
        """Run detection on one image file and return the raw 'detection_out'
        blob; the caller indexes it as [0, 0, :, 1..6] (label, confidence and
        normalized box corners)."""
        image_file = image_path
        image = caffe.io.load_image(image_file)
        im_scale = 1.0
        if im_scale != 1.0:
            # Dead branch while im_scale is hard-coded to 1.0 above.
            image = cv2.resize(image, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        # Reshape the input blob to this image's size, then apply the
        # standard Caffe preprocessing pipeline.
        self.net.blobs['data'].reshape(1, 3, image.shape[0], image.shape[1])
        transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2, 0, 1))
        transformer.set_mean('data', np.array([104, 117, 123])) # mean pixel
        transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
        transformer.set_channel_swap('data', (2, 1, 0)) # the reference model has channels in BGR order instead of RGB
        transformed_image = transformer.preprocess('data', image)
        self.net.blobs['data'].data[...] = transformed_image
        detections = self.net.forward()['detection_out']
        return detections
def parse_args():
    """Build and evaluate the command line for the FaceBoxes demo."""
    parser = argparse.ArgumentParser(
        description='Faceboxes prediction of the original code of the author',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--trained_model', dest='trained_model_file',
                        default='../models/faceboxes/faceboxes.caffemodel',
                        help='model file of facboxes', type=str)
    parser.add_argument('--network_file', dest='network_file',
                        default='../models/faceboxes/deploy.prototxt',
                        help='network_file', type=str)
    parser.add_argument('--face_threshold', dest='face_threshold', default=0.4,
                        help='threshold of face', type=float)
    return parser.parse_args()
if __name__ == '__main__':
    # --------------------------------------
    args = parse_args()
    # Removed the immediately-overwritten `root_dir = 'test_images'`
    # assignment and the leftover debug variable at the end.
    root_dir = r'D:\datasets\test_images\fd_fl'
    ext_names = ['.jpg', '.png']
    predict_imgs_infolder(root_dir, ext_names)
|
"""Entry point for pydantic-aioredis"""
from .config import RedisConfig # noqa: F401
from .model import Model # noqa: F401
from .store import Store # noqa: F401
|
import base64

# Base64-encode the "user:pass" credential pair (HTTP Basic-Auth style).
raw = 'user:pass'.encode('UTF-8')
encoded = base64.b64encode(raw).decode('ASCII')
print(encoded)
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 14:54:43 2015
@author: Martin Nguyen
"""
from __future__ import division
import simpy
import csv
from MonitorClass import Monitor
from PatientClass import Patient
simulationTime = 500  # simpy time units each SystemSimulation run covers
class SimulationSystem(object):
    """Drives a simpy-based patient progression simulation for a cohort.

    Patient baseline data is loaded from a CSV file; one Patient process is
    created per row and tracked by a shared Monitor instance.
    """

    def __init__(self, size, file_name):
        self.size = size            # number of patients to simulate
        self.file_name = file_name  # CSV file with baseline data
        self.list_IOP = []
        self.list_MD = []
        self.list_MDR = []
        self.list_Age = []
        self.list_Death = []
        self.list_Gender = []
        self.patientlist = []
        self.monitor = Monitor(self.size)

    def csv_dict_reader(self, file_obj):
        """Read patient baseline rows from *file_obj* into the per-field lists."""
        for row in csv.DictReader(file_obj, delimiter=','):
            self.list_IOP.append(float(row["IOP"]))
            self.list_MD.append(float(row["MD"]))
            self.list_MDR.append(float(row["MDR"]))
            self.list_Age.append(float(row["Age"]))
            self.list_Death.append(float(row["DeathAge"]))
            self.list_Gender.append(float(row["Gender"]))

    def final_cost_calculate(self):
        """Push each patient's utilisation counters into the monitor."""
        for idx, patient in enumerate(self.patientlist):
            records = patient.medicalRecords
            self.monitor.finalCostPatient(idx,
                                          records['NumberTrabeculectomy'],
                                          records['PatientVisits'],
                                          records['NumberVF'],
                                          records['SurgeryCataract'])

    def SystemSimulation(self):
        """Load the cohort, run the simpy environment and collect final costs."""
        with open(self.file_name) as f_obj:
            self.csv_dict_reader(f_obj)
        env = simpy.Environment()
        for i in range(self.size):
            self.patientlist.append(Patient(env, i, self.monitor, {
                'IOP': self.list_IOP[i],
                'MD': self.list_MD[i],
                'MDR': self.list_MDR[i],
                'CumulativeMDR': 0,
                'IOPTarget': 24,
                'Age': self.list_Age[i],
                'TrabeculectomyIOP': 0,
                'Death': self.list_Death[i],
                'Gender': self.list_Gender[i]}))
        env.run(until=simulationTime)
        self.final_cost_calculate()
|
import os
import pandas as pd
import torch
import time
from train import train
from test import test
def main(model_name, parameters_name, save_name, cuda_device=0):
    """Run a hyper-parameter sweep: one train/test cycle per CSV row.

    Args:
        model_name: Model identifier forwarded to train()/test().
        parameters_name: CSV next to this script with batch_size, epochs,
            learning_rate columns.
        save_name: Directory (and summary-CSV stem) for checkpoints/results.
        cuda_device: CUDA device index; falls back to CPU when unavailable.

    Side effects: writes checkpoints under save_name/<state> and a
    tab-separated summary CSV sorted by test then validation accuracy.
    """
    train_loss_list = []
    valid_loss_list = []
    train_accuracy_list = []
    valid_accuracy_list = []
    test_accuracy_list = []
    time_list = []
    base_path = os.path.dirname(os.path.abspath(__file__))
    parameters = pd.read_csv(os.path.join(base_path, parameters_name), header=0)
    device = torch.device(f'cuda:{cuda_device}' if torch.cuda.is_available() else 'cpu')
    for index in range(parameters.shape[0]):
        batch_size = int(parameters.iloc[index]['batch_size'])
        epochs = int(parameters.iloc[index]['epochs'])
        learning_rate = parameters.iloc[index]['learning_rate']
        state_name = f"{batch_size}_{epochs}_{learning_rate}"
        save_path = os.path.join(base_path, save_name, state_name)
        # Create parent and leaf in one call; the original's two
        # exists()+mkdir pairs were redundant and race-prone.
        os.makedirs(save_path, exist_ok=True)
        start_time = time.time()
        (
            train_loss,
            valid_loss,
            train_accuracy,
            valid_accuracy
        ) = train(
            model_name=model_name,
            batch_size=batch_size,
            epochs=epochs,
            learning_rate=learning_rate,
            device=device,
            base_path=base_path,
            save_path=save_path
        )
        test_accuracy = test(
            model_name=model_name,
            device=device,
            base_path=base_path,
            save_path=save_path
        )
        # Format elapsed wall time as HH:MM:SS (wraps past 24 h).
        cost_time = time.time() - start_time
        time_list.append(time.strftime("%H:%M:%S", time.gmtime(cost_time)))
        train_loss_list.append(train_loss)
        valid_loss_list.append(valid_loss)
        train_accuracy_list.append(train_accuracy)
        valid_accuracy_list.append(valid_accuracy)
        test_accuracy_list.append(test_accuracy)
    parameters['train_loss'] = pd.Series(train_loss_list)
    parameters['valid_loss'] = pd.Series(valid_loss_list)
    parameters['train_accuracy'] = pd.Series(train_accuracy_list)
    parameters['valid_accuracy'] = pd.Series(valid_accuracy_list)
    parameters['test_accuracy'] = pd.Series(test_accuracy_list)
    parameters['cost_time'] = pd.Series(time_list)
    parameters.sort_values(
        by=['test_accuracy', 'valid_accuracy'],
        axis=0,
        ascending=False,
        inplace=True,
        na_position='last'
    )
    parameters.to_csv(os.path.join(base_path, f'{save_name}.csv'), sep='\t', na_rep='NAN', index=False, float_format='%.4f')
# Sweep both models when executed as a script.
if __name__ == '__main__':
    # part 1: baseline CNN over the main parameter grid
    main(model_name="ExampleCNN", parameters_name='parameters.csv', save_name="part_1_result", cuda_device=0)
    # part 2: custom CNN over the test parameter grid
    main(model_name="MyCNN", parameters_name='parameters_test.csv', save_name="part_2_result", cuda_device=0)
|
## Santosh Khadka - Tic Tac Toe Game
#Globals
row1 = ['-', '-', '-']
row2 = ['-', '-', '-']
row3 = ['-', '-', '-']
def print_start():
    """Print the welcome banner and prompt until the player picks X or O.

    Returns:
        str: 'X' or 'O' (always upper-cased).
    """
    print("===============================")
    print(' Lets play Tic-Tac-Toe!')
    print("===============================")
    print("[Type 'exit' to leave anytime.]")
    print()
    while True:
        choice = input("Would you like to be X or O?")
        # Membership test replaces the original four-way or-chain.
        if choice.upper() in ('X', 'O'):
            print()
            return choice.upper()
        print('Invalid input try again..')
def print_board(r1, r2, r3):
    """Render the 3x3 board with column numbers and row letters A-C.

    Args:
        r1, r2, r3: three-element lists of cell markers ('X', 'O' or '-').
    """
    # The original assigned h_bar three times; only the last value was used.
    h_bar = ' -------------'
    print('    1   2   3')
    print(h_bar)
    print('A: |',r1[0],'|',r1[1],'|',r1[2],'|')
    print(h_bar)
    print('B: |',r2[0],'|',r2[1],'|',r2[2],'|')
    print(h_bar)
    print('C: |',r3[0],'|',r3[1],'|',r3[2],'|')
    print(h_bar)
    print()
def get_input():
    """Prompt for a board coordinate like 'c1' until it is valid.

    Returns:
        list[int]: [column, row], each in 1..3 (column a/b/c mapped to 1/2/3).

    Typing 'exit' quits the program.
    """
    col_map = {'a': 1, 'b': 2, 'c': 3}
    while True:
        print("Pick a location to add your piece...")
        location = input("Input column letter then row number (i.e. c1): ")
        if location.lower() == "exit":
            quit()
        # Length guard fixes an IndexError the original raised on input
        # shorter than two characters.
        if (len(location) >= 2
                and location[0].lower() in col_map
                and location[1] in '123'):
            break
        print("Invalid input, try again...")
    return [col_map[location[0].lower()], int(location[1])]
def replace_board(col, row, choice):
    """Place *choice* ('X'/'O') at (col, row) on the global board.

    NOTE(review): unimplemented stub — the commented body below was never
    finished, so calling this currently does nothing.
    """
    global row1
    global row2
    global row3
    # if row == 1:
    #     row1[col] =
    # elif row == 2:
    # elif row == 3:
def reset_board():
    """Reset all three global board rows to empty '-' cells."""
    global row1, row2, row3
    row1, row2, row3 = (['-'] * 3 for _ in range(3))
def end_game(winner):
    """Announce the result and offer a rematch.

    Args:
        winner: 'computer' if the computer won; anything else means the
            player won.

    Entering '1' resets the board and returns print_start()'s choice;
    entering '2' quits the program.
    """
    if winner != 'computer':
        print("Congratulations you WON!")
    else:
        print("Better luck next time!")
    choice = input("Make your next choice:")
    # Bug fix: input() returns a string, so compare against '1'/'2' —
    # the original compared against ints 1/2, which never matched.
    if choice == '1':
        print("Ok, lets play again!")
        reset_board()
        return(print_start())
    elif choice == '2':
        print("Thanks for playing, bye!")
        quit()
def main():
    """Game loop: pick a piece, then repeatedly show the board and echo the
    player's chosen square."""
    piece = print_start()
    while True:
        print_board(row1, row2, row3)
        print(get_input())
        print()
# Run the game only when executed as a script.
if __name__ == "__main__":
    main()
from symbolTable import ST
from util import strToBinary16
from lockuptable import getMachineCode
def assemblePh3(code):
    """Third assembler pass: resolve $VAR/$AVAR/$BCH placeholders to binary.

    Args:
        code: newline-separated intermediate output; unresolved lines carry a
            '$' marker ($VAR name / $AVAR name-offset / $BCH op label).

    Returns:
        str: the output with each placeholder replaced by a binary word
        (presumably 16-bit, per strToBinary16) or opcode + 8-bit branch
        offset; other lines pass through unchanged.

    Raises:
        Exception: missing variable/label in the symbol table, or a branch
            offset wider than 8 bits.
    """
    # // out = open("output.txt", 'w')
    # // linesAgain = open(assemblesFile.name, 'r').readlines()
    ph3_out = ''
    addressNumber = 0  # current instruction address; used for branch offsets
    for line in code.splitlines(True):
        if(line.find('$') != -1):
            if(line.find('$VAR') != -1):
                # $VAR name -> value of the variable from the symbol table
                line = line.replace('$VAR', '')
                variable = ST.get(line[0:-1])
                if(not variable):
                    raise Exception("not defined variable {}".format(line[0:-1]))
                ph3_out += strToBinary16(str(variable)) + '\n'
            elif(line.find('$AVAR') != -1):
                # $AVAR name-offset -> variable value minus a constant offset
                line = line.replace('$AVAR', '')
                minusIndex = line.find('-')
                variable, offset = line.strip()[0:minusIndex], line.strip()[minusIndex+1:]
                if(not variable):
                    raise Exception("not defined variable {}".format(variable))
                value = int(ST.get(variable)) - int(offset) # strToBinary
                ph3_out += strToBinary16(str(value)) + '\n'
            elif(line.find('$BCH') != -1):
                # $BCH op label -> opcode + PC-relative offset (target - PC - 1)
                line = line.replace('$BCH', '')
                branchOperation, label = line.strip().split(' ')
                if(not label):
                    raise Exception("not defined label {}".format(label))
                offset = strToBinary16(str(int(ST.get(label)) - addressNumber - 1),fill=8) # TODO: confirm branch width (author note, translated from Arabic: "can't remember how many bits the branch takes")
                if(len(offset)>8):
                    raise Exception("can't branch to that address ")
                ph3_out += "{}{}\n".format(getMachineCode(branchOperation), offset)
        else:
            ph3_out += line
        addressNumber += 1
    return ph3_out
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from contextlib import contextmanager
def assert_logged(
    caplog, expect_logged: list[tuple[int, str]] | None = None, exclusively: bool = True
) -> None:
    """Assert that *caplog* holds the expected (level, message-substring) records.

    With ``exclusively`` (default) the record count must match exactly and
    records are compared positionally; otherwise each expected message may
    match any record. With no expectations, assert that nothing was logged.
    """
    if not expect_logged:
        assert not caplog.records
        return
    if exclusively:
        assert len(caplog.records) == len(expect_logged), (
            f"Expected {len(expect_logged)} records, but got {len(caplog.records)}."
        )
    for position, (level, text) in enumerate(expect_logged):
        if exclusively:
            record = caplog.records[position]
        else:
            record = next(
                (candidate for candidate in caplog.records if text in candidate.message),
                None,
            )
            assert record is not None, f"Expected log message {text!r} was not found."
        assert text in record.message, (
            f"The text {text!r} was not found in {record.message!r}."
        )
        assert level == record.levelno, (
            f"Expected level {level}, but got level {record.levelno}."
        )
@contextmanager
def no_exception():
    """Stand-in for `pytest.raises()` when no exception is expected.

    Lets parametrized tests take an "exception expectation" argument and
    always wrap the code under test in a with-block:

    @pytest.mark.parametrize(
        "answer, expect_raises",
        [
            (42, no_exception()),
            (12, pytest.raises(WrongAnswer)),
        ]
    )
    def test_search_for_the_meaning_of_life_universe_and_everything(
        answer: int,
        expect_raises: typing.ContextManager,
    ):
        with expect_raises:
            computer.validate_result(answer)
    """
    yield
|
# functions that implement the Logistic Regression
from scipy import exp, dot, log, array, asarray, linspace, zeros, ones, append
import matplotlib.pyplot as plt
def sigmoid(z):
    '''
    Element-wise logistic function: g = 1 / (1 + e^-z).
    z may be a scalar, vector or matrix; the result has the same shape.
    '''
    return 1.0 / (1.0 + exp(-z))
def costLogReg (theta, X, y):
    '''
    Return the cross-entropy cost J of a logistic regression.
    theta ~ hypothesis parameters
    X ~ design matrix (m examples x n features)
    y ~ binary labels
    '''
    m = X.shape[0]  # number of training examples
    h = sigmoid(dot(X, theta))  # predicted probabilities
    return (dot(-y.T, log(h)) - dot(1 - y.T, log(1 - h))) / m
def costwReg (theta, X, y, lam):
    '''
    Return the regularized logistic-regression cost: the costLogReg value
    plus an L2 penalty lam/(2m) * theta'theta (excluding the bias term).
    theta ~ hypothesis parameters
    X ~ design matrix
    y ~ binary labels
    lam ~ (lambda) regularization strength
    '''
    m = X.shape[0]
    penalty_theta = array(theta)
    penalty_theta[0] = 0  # the bias theta_0 is never regularized
    base_cost = costLogReg(theta, X, y)
    return base_cost + lam / (2 * m) * dot(penalty_theta.T, penalty_theta)
def gradLogReg (theta, X, y):
    '''
    Return the gradient of the unregularized logistic-regression cost:
    grad = (1/m) * X' (h - y).
    theta ~ hypothesis parameters
    X ~ design matrix (m examples x n features)
    y ~ binary labels
    '''
    # The original also initialized J = 0 and grad = zeros(n); both were
    # dead assignments, immediately overwritten or never used.
    m = X.shape[0]
    h = sigmoid(dot(X, theta))
    return (1 / m) * dot(X.T, (h - y))
def gradwReg (theta, X, y, lam):
    '''
    Return the regularized logistic-regression gradient: gradLogReg plus
    the L2 term lam*theta/m (excluding the bias term).
    theta ~ hypothesis parameters
    X ~ design matrix
    y ~ binary labels
    lam ~ (lambda) regularization strength
    '''
    m = X.shape[0]
    penalty_theta = array(theta)
    penalty_theta[0] = 0  # the bias theta_0 is never regularized
    return gradLogReg(theta, X, y) + (lam * penalty_theta / m)
def plotData(X, y):
    '''
    Scatter-plot positive (y==1) and negative (y==0) examples on a new figure.
    Assumes
      * X is an M x 2 matrix
      * y is a series of 1s and 0s
    '''
    X = asarray(X)
    y = asarray(y)
    positives = X[y == 1]
    negatives = X[y == 0]
    plt.figure()
    plt.plot(positives[:, 0], positives[:, 1], 'yo', label='y = 1')
    plt.plot(negatives[:, 0], negatives[:, 1], 'k+', label='y = 0')
def mapFeature(X1, X2, degree=6):
    '''
    Map two feature columns to every polynomial term up to *degree*:
    a leading column of ones, then X1^(d-j) * X2^j for d = 1..degree,
    j = 0..d, in that order.
    '''
    mapped = ones([X1.size, 1])
    for total_power in range(1, degree + 1):
        for power2 in range(total_power + 1):
            term = (X1 ** (total_power - power2) * X2 ** power2).reshape([X1.size, 1])
            mapped = append(mapped, term, axis=1)
    return mapped
def plotDecisionBoundary(theta, X, y, degree=6):
    '''
    Plot the data with the decision boundary implied by theta.
    Assumes X is the design matrix with a leading intercept column; columns
    1-2 hold the two raw features. For polynomial-mapped features (more than
    3 columns), the boundary is drawn as the zero contour of
    mapFeature(u, v) . theta over a fixed grid.
    '''
    X0 = X[:,1:3]
    plotData(X0, y)
    if X.shape[1] <= 3:
        # Linear (2-feature) case was never implemented.
        print('Error, Need to code this section')
    else:
        # Grid Range
        u = linspace(-1, 1.5, 50)
        v = linspace(-1, 1.5, 50)
        z = zeros([len(u), len(v)])
        # Evaluate z = theta*x over the grid
        for i in range(len(u)):
            for j in range(len(v)):
                z[i,j] = dot(mapFeature(asarray(u[i]), asarray(v[j]), degree), theta)
        # Transpose z before calling contour
        z = z.T
        CS = plt.contour(u, v, z, 0, label='Decision Boundary')
        #plt.clabel(CS, inline=1, fontsize=10)
|
import time
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
from Vertex import Vertex
from matplotlib import pyplot as plt
from true_field import true_field
# starting with nu = 0 and Neumann boundary conditions for simple precision matrix
class GMRF_Regression:
    """Gaussian Markov random field regression on a regular 2-D grid.

    Builds a dense prior precision matrix node-by-node with a 13-point
    stencil (negative/modular indexing wraps the grid into a torus), then
    absorbs point measurements sequentially in information form.
    """
    def __init__(self, theta, grid, f, var):
        """
        :param theta: hyperparameters [t, k]; only k is used, via a = k**2 + 4
        :param grid: rows x cols x 2 array of node coordinates
        :param f: mean-function regressors (length p)
        :param var: measurement noise variance
        """
        self.num_agents = 1
        self.theta = theta
        self.grid = grid
        self.f = f
        self.var = var
        # grid.shape = first dimension length, second dimension length..., num dimensions
        self.rows = grid.shape[0]
        self.cols = grid.shape[1]
        n = self.rows * self.cols
        p = len(self.f)
        t = self.theta[0]
        k = self.theta[1]
        a = k ** 2 + 4
        # Information vector b; updated by regression_update.
        self.b = np.zeros(n + p).reshape(n + p, 1)
        # INITIALIZE PRECISION MATRIX: one row per grid node; negative and
        # modular indices wrap around, giving periodic boundary conditions.
        self.precision = np.zeros((1, n))
        for i in range(0, self.rows):
            for j in range(0, self.cols):
                A = np.zeros((self.rows, self.cols))
                A[i][j] = 4 + a ** 2
                A[i][j - 1] = -2 * a
                A[i][(j + 1) % self.cols] = -2 * a
                A[i - 1][j] = -2 * a
                A[(i + 1) % self.rows][j] = -2 * a
                A[(i + 1) % self.rows][(j + 1) % self.cols] = 2
                A[i - 1][j - 1] = 2
                A[i - 1][(j + 1) % self.cols] = 2
                A[(i + 1) % self.rows][j - 1] = 2
                A[i][j - 2] = 1
                A[i][(j + 2) % self.cols] = 1
                A[(i + 2) % self.rows][j] = 1
                A[i - 2][j] = 1
                self.precision = np.append(self.precision, A.reshape(1, n), axis=0)
        # Drop the all-zero seed row used to start np.append.
        self.precision = np.delete(np.array(self.precision), 0, 0)
        # END INITIALIZE PRECISION MATRIX
        # Augment with the mean-regressor block (precision T on the p extras).
        T = 1 / 100 * np.eye(p)
        F = np.ones((n, p))
        upper = np.concatenate((self.precision, -self.precision @ F), axis=1)
        lower = np.concatenate((-F.T @ self.precision, F.T @ self.precision @ F + T), axis=1)
        self.full_precision = np.concatenate((upper, lower), axis=0)
        # use covariance to check
        full_cov = np.linalg.inv(self.full_precision)
        self.cov_diag = np.diag(full_cov)
        self.precision = self.full_precision
        self.sparse_precision = sps.csc_matrix(self.precision)
        # print("cov diag: ", self.cov_diag)
        # print("covariance matrix: ", cov)
        # cov_diag = np.diagonal(cov)
        # print("cov_diag: ", cov_diag)
    def regression_update(self, locations, measurements):
        """Absorb point measurements and return the posterior mean.

        :param locations: sequence of (x, y) measurement positions
        :param measurements: measured values aligned with *locations*
        :return: (mu, cov_diag) — posterior mean solved from the updated
            information form, and the (prior, not re-updated) covariance
            diagonal computed in __init__.
        """
        for k in range(0, len(locations)):
            phi_k = self.compute_phi(locations[k], self.grid)
            # print(measurements[k])
            # print("at location: ", locations[k])
            # Information-form update: b += phi' z, Lambda += phi' phi / var.
            self.b = self.b + phi_k.T * measurements[k]
            self.sparse_precision += sps.csc_matrix(1 / self.var * phi_k.T @ phi_k)
            # h = spsl.spsolve(self.sparse_precision, phi_k.T)
            # self.cov_diag = self.cov_diag - (np.multiply(h, h)) / (self.var + phi_k @ h) # conditional variance
            # draw precision matrix
            # plt.clf()
            #
            # x, y = np.mgrid[0:900:1, 0:900:1]
            # grid = np.dstack((x, y))
            # grid_points = grid.reshape(len(x) * len(y), 2)
            # prec = np.zeros((900, 900))
            # for [xi, yi] in grid_points:
            #     prec[xi][yi] = self.precision[xi][yi]
            # plt.title("precision matrix")
            # plt.pcolormesh(x, y, prec.reshape(x.shape))
            # plt.colorbar()
            # plt.show()
            # draw field and variance
            # if(k%10 ==0):
            #     x, y = np.mgrid[0:self.rows:1, 0:self.cols:1]
            #     grid = np.dstack((x, y))
            #     grid_points = grid.reshape(len(x) * len(y), 2)
            #     z = np.zeros((30, 30))
            #     var = np.zeros((30, 30))
            #     for [xi, yi] in grid_points:
            #         # z[xi][yi] = mu[self.cols * yi + xi]
            #         var[xi][yi] = self.cov_diag[self.cols * yi + xi]
            #     plt.subplot(2, 2, 1)
            #     plt.title("learned field")
            #     plt.pcolormesh(x, y, z.reshape(x.shape))
            #     plt.colorbar()
            #     plt.subplot(2, 2, 2)
            #     plt.title("variance field")
            #     plt.pcolormesh(x, y, var.reshape(x.shape))
            #     plt.colorbar()
            #     plt.show()
        # Posterior mean: solve Lambda mu = b.
        mu = spsl.spsolve(self.sparse_precision, self.b)
        # mu = spsl.inv(self.sparse_precision) @ self.b
        return mu, self.cov_diag
    def compute_phi(self, location, grid):
        """Bilinear interpolation weights for a measurement location.

        :param location: (x, y) of the measurement
        :param grid: grid of node coordinates (used for spacing)
        :return: phi — 1 x (n + p) row vector with up to four non-zero
            weights on the cell corners surrounding the location
        """
        x, y = location[0], location[1]
        # a, b are the grid spacings in x and y.
        a = grid[1][0][0] - grid[0][0][0]
        b = grid[0][1][1] - grid[0][0][1]
        vertices = self.get_vertices(x, y, a, b)  # tuple of four vertices forming rectangle around location
        center = Vertex(vertices[0].x + a / 2, vertices[0].y + b / 2)
        # Offsets from the cell center.
        x_e = x - center.x
        y_e = y - center.y
        phi_temp = np.zeros(4)
        phi_temp[0] = 1 / (a * b) * (x_e - a / 2) * (y_e - b / 2)
        phi_temp[1] = -1 / (a * b) * (x_e + a / 2) * (y_e - b / 2)
        phi_temp[2] = 1 / (a * b) * (x_e + a / 2) * (y_e + b / 2)
        phi_temp[3] = -1 / (a * b) * (x_e - a / 2) * (y_e + b / 2)
        # Scatter the four corner weights into the full-length vector.
        phi = np.zeros((self.rows * self.cols + len(self.f), 1))
        for i in range(len(phi_temp) - 1, -1, -1):
            phi[int(self.cols * vertices[i].y + vertices[i].x)] = phi_temp[i]
        phi = phi.T
        return phi
    def get_vertices(self, x, y, a, b):
        """
        :param x, y: location of measurement
        :param a, b: grid spacings; a horizontal, b vertical (see page 17 as below)
        :return: tuple of the four closest vertices, ordered
            (-x,-y), (+x,-y), (+x,+y), (-x,+y) as in page 17,
            Andre Rene Geist master thesis
        """
        if y % b == 0:
            low_y = y
            high_y = y
        else:
            low_y = y - y % b
            high_y = y + (b - y % b)
        if x % a == 0:
            low_x = x
            high_x = x
        else:
            low_x = x - x % a
            high_x = x + (a - x % a)
        v1 = Vertex(low_x, low_y)
        v2 = Vertex(high_x, low_y)
        v3 = Vertex(high_x, high_y)
        v4 = Vertex(low_x, high_y)
        vertices = v1, v2, v3, v4
        return vertices
def main():
    """Demo: fit the GMRF to a synthetic true field sampled on a coarse
    sub-grid, then plot true field, learned mean and variance."""
    start_time = time.time()
    step_size = 1  # dimension of one side of grid
    field = true_field(step_size=step_size)
    x, y = np.mgrid[0:30:1, 0:30:1]
    grid = np.dstack((x, y))
    grid_points = grid.reshape(len(x) * len(y), 2)
    gmrf = GMRF_Regression(theta=[1, 1], grid=grid, f=[1], var=1)
    # Measure the field at every second grid point.
    x2, y2 = np.mgrid[0:30:2, 0:30:2]
    grid2 = np.dstack((x2, y2))
    grid_points2 = grid2.reshape(len(x2) * len(y2), 2)
    locations = np.array(grid_points2)
    measurements = field.get_measurement(locations.T)  # measurements
    mu, conditional_var = gmrf.regression_update(locations, measurements)
    plt.subplot(2, 2, 3)
    plt.title("true field")
    field.draw(plt)
    z = np.zeros((gmrf.rows, gmrf.cols))
    var = np.zeros((gmrf.rows, gmrf.cols))
    # mu[-1] is the estimated mean-regressor weight; subtract it to recover
    # the zero-mean field values at each node.
    for [xi, yi] in grid_points:
        z[xi][yi] = mu[gmrf.cols * yi + xi] - mu[-1]
        var[xi][yi] = conditional_var[gmrf.cols * yi + xi]
    # FOR VALUE COMPARISONS AT GRID POINTS
    # for i in range(0, len(field.xi)):
    #     for j in range(0, len(field.xi)):
    #         print(field.xi[i][j], field.yi[i][j], field.zi.reshape(field.xi.shape)[i][j])
    # print("NOW THE OTHER ONE")
    # for i in range(0, len(x)):
    #     for j in range(0, len(field.xi)):
    #         print(x[i][j], y[i][j], z.reshape(x.shape)[i][j])
    plt.subplot(2, 2, 1)
    plt.title("learned field")
    plt.contourf(x, y, z.reshape(x.shape))
    plt.colorbar()
    plt.subplot(2, 2, 2)
    plt.title("variance")
    plt.contourf(x, y, var.reshape(x.shape))
    plt.colorbar()
    print("--- %s seconds ---" % (time.time() - start_time))
    plt.show()
# Run the demo only when executed as a script; the original called main()
# unconditionally, which triggered the whole fit-and-plot run on import.
if __name__ == "__main__":
    main()
|
import sys, os, operator
'''
Sorts grouped characters from greatest number of words to least.
'''
def sort_grouped(input, output):
    '''
    Sort grouped characters from the group with the most words to the fewest.

    Reads lines of the form "key: word1, word2, ..." from *input* and writes
    them to *output* ordered by descending word count.

    Exits with status 1 when a line does not contain exactly one ": "
    delimiter; prints a message on IOError.
    '''
    try:
        with open(input, 'r') as inputf, open(output, 'w') as outputf:
            words = {}
            lineDelim = ": "
            wordDelim = ", "
            for line in inputf:
                line_info = line.strip('\n').split(lineDelim)
                if len(line_info) == 2:
                    words[line_info[0]] = line_info[1].split(wordDelim)
                else:
                    print("Line delim wrong. Please change.")
                    sys.exit(1)
            # Most words first.
            sorted_list = sorted(words.items(), key=lambda x: len(x[1]), reverse=True)
            for key, wordlist in sorted_list:
                # Fix: Python 3 syntax throughout; the original used Python-2
                # `except IOError, err`, `print err`, and redundant
                # decode('UTF-8').encode('UTF-8') round-trips.
                outputf.write('%s: %s\n' % (key, ", ".join(wordlist)))
    except IOError as err:
        print("IOError.")
        print(err)
if __name__ == "__main__":
    if len(sys.argv) == 3:
        # Bug fix: this module defines sort_grouped; the original called an
        # undefined append_to_row, so the script always crashed with NameError.
        sort_grouped(sys.argv[1], sys.argv[2])
    else:
        print("Usage: %s path/to/input path/to/output" % (sys.argv[0]))
        sys.exit(1)
#!/usr/bin/env python
import sys
import pandas as pd
import fetch
from progress import ProgressBar
# ImageNet API credentials and endpoint templates used to build request URLs.
USERNAME = 'stephenroller'
ACCESS_KEY = '5b22bd3303fe4c21a4463d37cca0353813a56109'
MAPPING_URL = 'http://www.image-net.org/api/text/imagenet.synset.geturls.getmapping?wnid=%s'
HYPO_URL = 'http://www.image-net.org/api/text/wordnet.structure.hyponym?wnid=%s&full=1'
TARBALL_URL = 'http://www.image-net.org/download/synset?username=%s&accesskey=%s&release=latest&wnid=%%s' % (USERNAME, ACCESS_KEY)
# Read the tab-separated mapping named on the command line and flatten every
# space-separated synset id from the non-null Synset column into one list.
mappings = pd.read_csv(sys.argv[1], sep="\t")
synsets = mappings.Synset[mappings.Synset.notnull()]
synsets = [y for x in synsets.map(lambda z: z.split()) for y in x]
def fetch_image_urls(synset):
    """Fetch the (image_id, url) pairs registered for one synset id."""
    raw = fetch.fetch_data(MAPPING_URL % synset)
    return [entry.split() for entry in raw.split("\r\n") if entry]
def fetch_hypos(synset):
    """Fetch the full hyponym subtree of a synset as a flat id list
    (the API's leading '-' depth markers are stripped)."""
    raw = fetch.fetch_data(HYPO_URL % synset)
    return raw.replace("-", "").split("\r\n")
# Emit one "synset<TAB>imgid<TAB>url" row per image (Python 2 print syntax),
# reporting progress on stderr via the ProgressBar.
pb = ProgressBar(len(synsets))
pb.errput()
for synset in synsets:
    image_urls = fetch_image_urls(synset)
    # Fall back to the whole hyponym subtree when a synset has no direct images.
    if len(image_urls) == 0:
        children_synsets = fetch_hypos(synset)
        children_urls = [fetch_image_urls(cs) for cs in children_synsets]
        image_urls = [y for x in children_urls for y in x]
    for imgid, url in image_urls:
        print "%s\t%s\t%s" % (synset, imgid, url)
    pb.incr_and_errput()
|
import itertools
import sys
import classes
import string
import csv
import exceptions
class Strand:
    """A named DNA or RNA strand with a mutable base sequence."""
    def __init__(self, material, name, sequence):
        """Record the material ('DNA'/'RNA'), display name and base sequence."""
        self.material = material
        self.name = name
        self.sequence = sequence
    def update_seq(self, new_seq):
        """Replace the sequence in place; returns self for chaining."""
        self.sequence = new_seq
        return self
class Permutations:
    """Wraps every ordering of a strand list as Permutation objects."""
    def __init__(self, lis):
        self.permutation_list = itertools.permutations(lis)
        # Materialize each itertools tuple into a project Permutation.
        self.actual_permutation_list = [
            classes.Permutation(list(ordering))
            for ordering in self.permutation_list
        ]
    def permutations(self):
        """
        Return all permutations
        """
        return self.actual_permutation_list
class ScoreMatrix:
    """2D triangular matrix containing scores of optimal substructures."""
    def __init__(self, i, j):
        """
        Initialize an i-wide, j-high matrix of None entries.
        """
        self.matrix = [[None for n in range(i)] for m in range(j)]
        self.width = i
        self.height = j
    def get(self, i, j):
        """Gets the element at i,j"""
        return self.matrix[i][j]
    def set(self, i, j, value):
        """Updates the element at i,j with value"""
        self.matrix[i][j] = value
    def has(self, i, j):
        """True if there is a value in the matrix at i,j; false otherwise"""
        return self.matrix[i][j] is not None
    def get_width(self):
        """Returns the j-dimension (width) of the matrix"""
        return self.width
    def get_height(self):
        """Returns the i-dimension (height) of the matrix"""
        return self.height
    def insert(self, k):
        """Inserts both a row and a column at k (not implemented)"""
        pass
    def remove(self, k):
        """Removes both a row and a column at k (not implemented)"""
        pass
    def print_matrix(self, format="csv"):
        """Write the matrix to stdout: tab-separated values rounded to 3
        decimals by default, otherwise one "row(len) : values" line per row.

        Fix: the original used Python 2 print statements, which are a syntax
        error under Python 3; print() calls work on both.
        """
        matrix = self.matrix
        if(format=="csv"):
            writer = csv.writer(sys.stdout, delimiter="\t")
            writer.writerows(
                [[None if x is None else round(x, 3) for x in row] for row in matrix]
            )
        else:
            print("Rows: " + str(len(matrix)))
            i = 0
            for row in matrix:
                i = i + 1
                print(str(i) + "(" + str(len(row)) + ") : " + str(row))
    def __str__(self):
        return str(self.matrix)
class Permutation:
    """Represents a single circular permutation of named strands"""
    def __init__(self, strands):
        """Accepts an ordered list of Strands"""
        if(isinstance(strands, list)):
            self.strands = strands
            self.namelist = []
            self.nameconcatenation = ""
            self.seqconcatenation = ""
        else:
            raise Exception
    def get_names(self):
        """Returns a list of names of the strands, in order.

        Fix: rebuild the list on every call — the original appended to
        self.namelist each time, so repeated calls returned duplicates.
        """
        self.namelist = [strand.name for strand in self.strands]
        return self.namelist
    def get_strands(self):
        """Returns a list of Strands, in order"""
        return self.strands
    def get_concatamer(self, separator=""):
        """
        Returns a string containing the sequences concatenated together,
        separated by an optional separator
        """
        return separator.join(strand.sequence for strand in self.strands)
    def get_name(self, separator=""):
        """
        Returns the names of the strands, concatenated together; can be used
        as a unique identifier for this Permutation within the ensemble.
        """
        return separator.join(strand.name for strand in self.strands)
    def substitution(self, strand_name, index, new_base):
        """
        Substitutes new_base at *index* within the named strand.

        Returns (self, overall_index) where overall_index is the position of
        the change within the concatenated sequence.

        Fix: str.upper() replaces string.upper(...), which was removed in
        Python 3.
        """
        # tries to find strand in question to be updated
        try:
            strands_index = (self.get_names()).index(strand_name.upper())
        except ValueError:
            raise classes.StrandNameError
        # checks to make sure base substitution is valid for the material
        sub = new_base.upper()
        if (self.strands[strands_index]).material == "DNA":
            if sub not in ('A', 'T', 'C', 'G'):
                raise classes.DNABaseError
        elif (self.strands[strands_index]).material == "RNA":
            if sub not in ('A', 'U', 'C', 'G'):
                raise classes.RNABaseError
        # performs update to strand
        strand_as_list = list((self.strands[strands_index]).sequence)
        try:
            strand_as_list[index] = sub
        except IndexError:
            raise classes.BaseIndexError
        strand_as_string = "".join(strand_as_list)
        self.strands[strands_index] = (self.strands[strands_index]).update_seq(strand_as_string)
        # calculates overall index of change within the concatenated sequence
        overall_index = sum(len(s.sequence) for s in self.strands[:strands_index]) + index
        # returns this permutation with the modification applied
        return (self, overall_index)
class Structure:
    """Represents the secondary structure of a given strand"""
    def __init__(self, pairs, permutation):
        """Builds an initial structure from a list of (int,int) tuples"""
        self.pairs = pairs
        self.permutation = permutation
    def __str__(self):
        """Informal string representation: just the pair list"""
        return str(self.pairs)
    def get_permutation(self):
        """Returns the underlying Permutation object"""
        return self.permutation
    def get_sequence(self):
        """Returns the Permutation's concatenated sequence"""
        return self.permutation.get_concatamer()
    def get_pairs(self):
        """Returns the structure as a list of (int,int) tuples"""
        return self.pairs
# Domain-specific error types.  Fix: inherit from the builtin Exception —
# the Python 2 `exceptions` module no longer exists in Python 3.
class StrandNameError(Exception):
    """Raised when a strand name is not present in the permutation."""
    pass
class BaseIndexError(Exception):
    """Raised when a substitution index falls outside the strand."""
    pass
class DNABaseError(Exception):
    """Raised for a base that is not valid in DNA (A/T/C/G)."""
    pass
class RNABaseError(Exception):
    """Raised for a base that is not valid in RNA (A/U/C/G)."""
    pass
# Generated by Django 3.0.6 on 2020-05-31 20:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the Internship model — a post/reply thread
    with link-preview metadata and liked/attended many-to-many relations.

    Auto-generated by Django; do not edit by hand.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Internship',
            fields=[
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('uuid_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('content', models.TextField(default='', max_length=1000)),
                ('reply', models.BooleanField(default=False, verbose_name='Is a reply?')),
                ('meta_url', models.CharField(max_length=2048, null=True)),
                ('meta_type', models.CharField(max_length=255, null=True)),
                ('meta_title', models.CharField(max_length=255, null=True)),
                ('meta_description', models.TextField(max_length=255, null=True)),
                ('meta_image', models.CharField(max_length=255, null=True)),
                ('attended', models.ManyToManyField(blank=True, related_name='attended_internship', to=settings.AUTH_USER_MODEL)),
                ('liked', models.ManyToManyField(blank=True, related_name='liked_internship', to=settings.AUTH_USER_MODEL)),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='thread', to='internship.Internship')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='publisher3', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Internship',
                'verbose_name_plural': 'Internship',
                'ordering': ('-timestamp',),
            },
        ),
    ]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
# Use the GPU when available; tensors/models are placed on this device.
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
def printLines(file, n=10):
    """Print the first *n* raw (bytes) lines of *file* for a quick peek."""
    with open(file, 'rb') as datafile:
        for raw_line in datafile.readlines()[:n]:
            print(raw_line)
#printLines(r"C:\Users\wxwyl\Desktop\wyl code\cornell-movie\cornell movie-dialogs corpus\movie_lines.txt")
# Splits each line of the file into a dictionary of fields
# Splits each line of the file into a dictionary of fields
def loadLines(fileName, fields):
    """Parse movie_lines.txt into {lineID: {field_name: value}}.

    Each line holds " +++$+++ "-separated columns matching *fields*.
    """
    lines = {}
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for raw_line in f:
            parts = raw_line.split(" +++$+++ ")
            lineObj = {field: parts[i] for i, field in enumerate(fields)}
            lines[lineObj['lineID']] = lineObj
    return lines
# Groups fields of lines from `loadLines` into conversations based on *movie_conversations.txt*
# Groups fields of lines from `loadLines` into conversations based on *movie_conversations.txt*
def loadConversations(fileName, lines, fields):
    """Group parsed lines into conversations.

    Each output dict carries the *fields* columns plus a "lines" list holding
    the referenced line dicts, resolved through *lines* in utterance order.
    """
    conversations = []
    # Hoisted: utteranceIDs look like "['L598485', 'L598486', ...]".
    line_id_pattern = re.compile('L[0-9]+')
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for raw_line in f:
            parts = raw_line.split(" +++$+++ ")
            convObj = {field: parts[i] for i, field in enumerate(fields)}
            lineIds = line_id_pattern.findall(convObj["utteranceIDs"])
            convObj["lines"] = [lines[line_id] for line_id in lineIds]
            conversations.append(convObj)
    return conversations
# Extracts pairs of sentences from conversations
# Extracts pairs of sentences from conversations
def extractSentencePairs(conversations):
    """Turn each conversation into [question, answer] pairs of stripped text.

    Adjacent utterances form a pair; the final line has no answer, and pairs
    with an empty side are dropped.
    """
    qa_pairs = []
    for conversation in conversations:
        turns = conversation["lines"]
        for prompt, answer in zip(turns, turns[1:]):
            inputLine = prompt["text"].strip()
            targetLine = answer["text"].strip()
            if inputLine and targetLine:
                qa_pairs.append([inputLine, targetLine])
    return qa_pairs
# Define paths for the formatted train/validation output files.
datafile = r"C:\Users\wxwyl\Desktop\wylcode\cornell-movie\cornell movie-dialogs corpus\formatted_movie_lines.txt"
datafile_validation = r"C:\Users\wxwyl\Desktop\wylcode\cornell-movie\cornell movie-dialogs corpus\formatted_movie_lines_validation.txt"
delimiter = '\t'
# Unescape the delimiter
delimiter = str(codecs.decode(delimiter, "unicode_escape"))
# Initialize lines dict, conversations list, and field ids
lines = {}
conversations = []
# Column names matching the " +++$+++ "-separated corpus files.
MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
if __name__ == '__main__':
    # Load lines and process conversations
    print("\nProcessing corpus...")
    lines = loadLines(r"C:\Users\wxwyl\Desktop\wylcode\cornell-movie\cornell movie-dialogs corpus\movie_lines.txt", MOVIE_LINES_FIELDS)
    print("\nLoading conversations...")
    conversations = loadConversations(r"C:\Users\wxwyl\Desktop\wylcode\cornell-movie\cornell movie-dialogs corpus\movie_conversations.txt",
                                      lines, MOVIE_CONVERSATIONS_FIELDS)
    # Write validation and training splits.
    print("\nWriting newly formatted file...")
    pairs = extractSentencePairs(conversations)
    # Bug fix: the original's second pass re-iterated ALL pairs with a
    # counter already past 60000, so the training file received every pair —
    # including the 60000 already written to the validation file.  Split by
    # position instead: first 60000 pairs -> validation, rest -> training.
    VALIDATION_SIZE = 60000
    with open(datafile_validation, 'w', encoding='utf-8') as outputfile:
        writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n')
        writer.writerows(pairs[:VALIDATION_SIZE])
    with open(datafile, 'w', encoding='utf-8') as outputfile:
        writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n')
        writer.writerows(pairs[VALIDATION_SIZE:])
    # Print a sample of lines
    #print("\nSample lines from file:")
    #printLines(datafile)
|
# Anki addon for SRS Collector
# Author: Eric Kidd <http://kiddsoftware.com/>
#
# This is free and unencumbered software released into the public domain.
# This program comes with ABSOLUTELY NO WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# For more information, please refer to <http://unlicense.org/>
from anki.consts import MODEL_CLOZE
from aqt import mw
class CardModel:
    """Information from the server describing a model."""
    def __init__(self, json):
        """Parse the server's model description and make sure a matching
        Anki note type exists locally.

        :param json: dict with id/short_name/name/cloze/anki_css plus
            card_model_fields and card_model_templates lists.
        """
        self.id = json["id"]
        self.shortName = json["short_name"]
        self.name = json["name"]
        self.cloze = json["cloze"]
        self.css = json["anki_css"]
        self.fields = []
        # Maps each field name to the server-side card attribute it fills.
        self.fieldCardAttrs = {}
        for field in json["card_model_fields"]:
            self.fields.append(field["name"])
            self.fieldCardAttrs[field["name"]] = field["card_attr"]
        self.templates = json["card_model_templates"]
        self.ensureModelExists()
    def ensureModelExists(self):
        """If the model doesn't exist yet, create it."""
        mm = mw.col.models
        self.model = mm.byName(self.name)
        if self.model is None:
            # Build the note type from scratch: type, CSS, fields, then
            # templates, and only then register it with the collection.
            self.model = mm.new(self.name)
            if self.cloze:
                self.model['type'] = MODEL_CLOZE
            self.model["css"] = self.css
            for f in self.fields:
                mm.addField(self.model, mm.newField(f))
            for template in self.templates:
                t = mm.newTemplate(template["name"])
                t['qfmt'] = template["anki_front_template"]
                t['afmt'] = template["anki_back_template"]
                mm.addTemplate(self.model, t)
            mm.add(self.model)
|
OUTPUT = "I don't think you {} today, I think you {} {} {}!".format


def alan_annoying_kid(phrase):
    """Contradict a "Today I ..." statement the annoying-kid way.

    "Today I played football."      -> ...you didn't play at all!
    "Today I didn't play football." -> ...you did play it!
    """
    words = phrase.split()
    claimed = ' '.join(words[2:]).rstrip('.')
    if "didn't" in phrase:
        # Negated claim: insist they did do it (verb is words[3]).
        return OUTPUT(claimed, 'did', words[3], 'it')
    # Positive claim: insist they didn't; drop the "-ed" ending of words[2].
    return OUTPUT(claimed, "didn't", words[2][:-2], 'at all')
|
from nab.database import Database
class AltTitles(Database):
    """
    Adds additional titles for shows and seasons.
    """

    def __init__(self, **shows):
        """
        Args:
            shows: Mapping of show title to a structure of alternate titles.
        """
        # BUG FIX: .items() instead of the Python-2-only .iteritems(), so the
        # plugin also loads on Python 3 (.items() works on both versions).
        # Keys are lower-cased for case-insensitive lookup.
        self.shows = dict((s.lower(), v) for s, v in shows.items())

    def add_data(self, sh):
        """Merge configured alternate titles into show *sh* and its seasons."""
        # Snapshot the titles first: the loop body updates sh.titles, and
        # mutating a collection while iterating it is an error.
        for t in [title.lower() for title in sh.titles]:
            try:
                sh.titles.update(self.shows[t]["titles"])
            except KeyError:
                # No alternate titles configured under this name.
                pass
            for se in sh:
                try:
                    sh[se].titles.update(self.shows[t][se]["titles"])
                    sh[se].title = self.shows[t][se]["titles"][0]
                except KeyError:
                    # No per-season titles for this show/season combination.
                    pass
AltTitles.register("alt_titles")
|
import simplegui
import random
# Screensaver state: current text position and the message to display.
pos_x = 500
pos_y = 500
screen_text = "Khoa doesn't do homework"


def change_text(screensaver):
    """Input-field handler: replace the screensaver message."""
    global screen_text
    screen_text = screensaver


def draw(canvas):
    """Draw handler: paint the message at its current position."""
    canvas.draw_text(screen_text, (pos_x, pos_y), 20, "Red")


def timer_handler():
    """Timer handler: jump the message to a random position once a second."""
    global pos_x, pos_y
    pos_x = random.randrange(100, 900)
    pos_y = random.randrange(100, 900)


frame = simplegui.create_frame("Screensaver", 1000, 1000)
inp = frame.add_input('enter text', change_text, 100)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(1000, timer_handler)
frame.start()
timer.start()
|
from __future__ import unicode_literals, absolute_import
import django
# Test settings for the qa app: in-memory SQLite, console e-mail backend.
DEBUG = True
# Each test run gets a throwaway in-memory database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
TIME_ZONE = 'America/Chicago'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
# NOTE: hard-coded key is acceptable only because these are test settings.
SECRET_KEY = 'l#^#iad$8$4=dlh74$!xs=3g4jb(&j+y6*ozy&8k1-&d+vruzy'
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'qa.urls'
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django_markdown',
    'taggit',
    'qa',
    'hitcount',
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Print outgoing e-mail to stdout instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_REDIRECT_URL = '/foo/list/'
# App-specific settings: all reputation rewards zeroed for tests.
QA_SETTINGS = {
    'qa_messages': True,
    'qa_description_optional': False,
    'count_hits': True,
    'reputation': {
        'CREATE_QUESTION': 0,
        'CREATE_ANSWER': 0,
        'CREATE_ANSWER_COMMENT': 0,
        'CREATE_QUESTION_COMMENT': 0,
        'ACCEPT_ANSWER': 0,
        'UPVOTE_QUESTION': 0,
        'UPVOTE_ANSWER': 0,
        'DOWNVOTE_QUESTION': 0,
        'DOWNVOTE_ANSWER': 0,
    }
}
|
#! /usr/bin/env python3.10
from User import User, Credential
def create_user(user_name, password):
    """Return a new User with the given credentials."""
    return User(user_name, password)


def save_user(user):
    """Persist *user*."""
    user.save_user()


def display_user():
    """Return every saved user."""
    return User.display_user()


def login_user(user_name, password):
    """Verify the credentials and return Credential.verify_user's result."""
    return Credential.verify_user(user_name, password)


def make_acc(account, user_name, password):
    """Return a new Credential for *account*."""
    return Credential(account, user_name, password)


def save_account(new_acc):
    """Persist the credential."""
    new_acc.save_account()


def display_acc():
    """Return every saved credential."""
    return Credential.display_account()


def delete_acc(credentials):
    """Remove the credential from storage."""
    credentials.del_account()


def find_credential(account):
    """Look up a credential by account name."""
    return Credential.find_by_acc(account)


def generate_password():
    """Return a randomly generated password."""
    return Credential.generate_password()
def pass_locker():
    """Interactive console loop: sign up / log in, then manage stored credentials.

    Menu codes: CC create, DC display, FC find, D delete, EX exit.
    """
    print("Hi,please enter one of the following to proceed.\n CA --- Create New Account\n LI --- Have An Account")
    the_code = input('').lower().strip()
    if the_code == 'ca':
        print('Sign Up')
        print('*' * 50)
        user_name = input('User_name : ')
        while True:
            print("TP - To type your own password:\n GP - To generate Password")
            password_choice = input().lower().strip()
            if password_choice == 'tp':
                password = input('Enter Password\n')
                break
            elif password_choice == 'gp':
                password = generate_password()
                break
            else:
                print('Invalid Password please try again')
        save_user(create_user(user_name, password))
        print('*' * 85)
        print(f'Hello {user_name}, Your account has been created successfully! Your Password is:{password}')
        print('*' * 85)
    elif the_code == 'li':
        print('*' * 50)
        print('Enter your username and password to login:')
        print('*' * 50)
        user_name = input('User Name: ')
        password = input('Password: ')
        login = login_user(user_name, password)
        # BUG FIX: the original tested `login == login_user`, comparing the
        # result against the login_user FUNCTION object itself — always False,
        # so the welcome message could never print. Test truthiness instead.
        if login:
            print(f'Hello{user_name}.Welcome Back')
            print('\n')
    while True:
        print('Use these Short Codes to proceed: \n CC - Create new Credential\n DC - Display Credentials \n FC -Find '
              'Credential \n D - Delete Credential \n EX - Exit \n')
        the_code = input().lower().strip()
        if the_code == 'cc':
            print('Create New Credential')
            print('.' * 20)
            account = input('Account Name : ').lower()
            user_name = input('User Name : ')
            while True:
                print('TP - To type your own password if you already have an account:\n GP - To generate random '
                      'Password')
                password_choice = input().lower().strip()
                if password_choice == 'tp':
                    password = input("Enter Your Own Password\n")
                    break
                elif password_choice == 'gp':
                    password = generate_password()
                    break
                else:
                    print("Invalid password please try again")
            save_account(make_acc(account, user_name, password))
            print('\n')
            print(f'Account: {account} Username: {user_name} Password: {password} created successfully ')
            print('\n')
        elif the_code == 'dc':
            if display_acc():
                print("Here's your list of accounts:")
                # NOTE(review): this prints the SAME user_name/password (from the
                # most recent input) for every listed account; it should read the
                # fields off each credential object — confirm the Credential
                # attribute names before changing.
                for account in display_acc():
                    print(f'Account:{account}\n UserName:{user_name}\n Password:{password}')
            else:
                print('You have no saved accounts')
        elif the_code == 'fc':
            search_acc = input('Enter the account you are looking for: ').lower()
            if find_credential(search_acc):
                # NOTE(review): prints leftover variables from earlier branches,
                # not the credential that was just found — verify and fix once
                # Credential's attribute names are confirmed.
                print(f'Account Name: {account}')
                print(f'User Name: {user_name}')
                print(f'Password: {password}')
        elif the_code == 'd':
            print('Which account do you want to delete?')
            search_acc = input().lower()
            if find_credential(search_acc):
                search_acc = find_credential(search_acc)
                print('_' * 50)
                search_acc.del_account()
                print('\n')
                print(f'the credentials for : {account} has been deleted')
        elif the_code == 'ex':
            print(f'Goodbye {user_name}')
            break
        else:
            print('Please enter valid input')
pass_locker()
|
from .dataset_utils import get_dataset_per_pixel_mean, make_batch_one_hot, make_single_pattern_one_hot
from .theano_utils import make_theano_training_function, make_theano_validation_function, \
make_theano_feature_extraction_function, make_theano_inference_function, LasagneHeNormal
from .training_utils import extract_features_from_layer
|
import sys
from loguru import logger
from {{cookiecutter.project_slug}}.config import settings
async def setup_logger() -> None:
    """Configure loguru's stdout handler according to the app settings."""
    if settings.is_debug:
        level, diagnose = 'DEBUG', True
    else:
        level, diagnose = 'INFO', False
    # Drop loguru's default handler before installing ours.
    logger.remove()
    logger.add(sys.stdout, level=level, diagnose=diagnose)
|
#to run: python turnON.py GPIO_number
#where GPIO_number have to be an integer. e.g. 29
import RPi.GPIO as GPIO
import sys
# Set the GPIO number where LEDs control is conected
if len(sys.argv) == 2:
    # BUG FIX: the original kept the argument as a string (str(sys.argv[1])),
    # but RPi.GPIO's setup()/output() require an integer channel number —
    # matching the usage note at the top of this script.
    GPIO_num = int(sys.argv[1])
else:
    GPIO_num = 29  # GPIO 29 is used as the default
GPIO.setmode(GPIO.BOARD)  # interpret the channel as a physical board pin number
GPIO.setup(GPIO_num, GPIO.OUT)
GPIO.output(GPIO_num, GPIO.HIGH)  # drive the pin high to turn the LEDs on
|
'''David Naccache based Identity-Based Encryption
| From: "David Naccache Secure and Practical Identity-Based Encryption Section 4"
| Available from: http://eprint.iacr.org/2005/369.pdf
* type: encryption (identity-based)
* setting: bilinear groups (asymmetric)
:Authors: Gary Belvin
:Date: 06/2011
'''
from __future__ import print_function
from charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,GT,pair
from charm.toolbox.IBEnc import IBEnc
from charm.toolbox.bitstring import Bytes
from charm.toolbox.hash_module import Waters
import hashlib, math
debug = False
class IBE_N04(IBEnc):
    """
    >>> from charm.toolbox.pairinggroup import PairingGroup,GT
    >>> from charm.toolbox.hash_module import Waters
    >>> group = PairingGroup('SS512')
    >>> waters_hash = Waters(group)
    >>> ibe = IBE_N04(group)
    >>> (master_public_key, master_key) = ibe.setup()
    >>> ID = "bob@mail.com"
    >>> kID = waters_hash.hash(ID)
    >>> secret_key = ibe.extract(master_key, kID)
    >>> msg = group.random(GT)
    >>> cipher_text = ibe.encrypt(master_public_key, kID, msg)
    >>> decrypted_msg = ibe.decrypt(master_public_key, secret_key, cipher_text)
    >>> decrypted_msg == msg
    True
    """
    """Implementation of David Naccahe Identity Based Encryption"""
    def __init__(self, groupObj):
        # Record the security properties of the scheme on the IBEnc base.
        IBEnc.__init__(self)
        IBEnc.setProperty(self, secdef='IND_ID_CPA', assumption='DBDH', secmodel='Standard')
        #, other={'id':ZR}
        #message_space=[GT, 'KEM']
        # Module-level globals shared with setup/extract/encrypt/decrypt.
        global group
        group = groupObj
    def setup(self, l=32):
        '''l is the security parameter
        with l = 32, and the hash function at 160 bits = n * l with n = 5'''
        global waters
        sha1_func, sha1_len = 'sha1', 20
        g = group.random(G1) # generator for group G of prime order p
        hLen = sha1_len * 8
        # n identity blocks of l bits each must cover the hash output length.
        n = int(math.floor(hLen / l))
        waters = Waters(group, n, l, sha1_func)
        alpha = group.random() #from Zp
        g1 = g ** alpha # G1
        g2 = group.random(G2) #G2
        uprime = group.random(G2)
        # One random group element per identity block.
        U = [group.random() for x in range(n)]
        # pk is the master public key; mk additionally holds the master secret.
        pk = {'g':g, 'g1':g1, 'g2': g2, 'uPrime':uprime, 'U': U,
            'n':n, 'l':l}
        mk = pk.copy()
        mk['g2^alpha'] = g2 ** alpha #master secret
        if debug:
            print(mk)
        return (pk, mk)
    def extract(self, mk, v):
        '''v = (v1, .., vn) is an identity'''
        r = group.random()
        # d1 = g2^alpha * (u' * prod_i U_i^{v_i})^r, d2 = g^r
        d1 = mk['uPrime']
        for i in range(mk['n']):
            d1 *= mk['U'][i] ** v[i]
        d1 = mk['g2^alpha'] * (d1 ** r)
        d2 = mk['g'] ** r
        if debug:
            print("D1 =>", d1)
            print("D2 =>", d2)
        return {'d1': d1, 'd2':d2}
    def encrypt(self, pk, ID, M): # M:GT
        # c1 = e(g1, g2)^t * M masks the message; c2/c3 enable decryption.
        t = group.random()
        c1 = (pair(pk['g1'], pk['g2']) ** t) * M
        c2 = pk['g'] ** t
        c3 = pk['uPrime']
        for i in range(pk['n']):
            c3 *= pk['U'][i] ** ID[i]
        c3 = c3 ** t
        if debug:
            print("Encrypting")
            print("C1 =>", c1)
            print("C2 =>", c2)
            print("C3 =>", c3)
        return {'c1':c1, 'c2': c2, 'c3':c3}
    def decrypt(self, pk, sID, ct):
        # M = c1 * e(d2, c3) / e(c2, d1): the pairing terms cancel the mask.
        num = pair(sID['d2'], ct['c3'])
        dem = pair(ct['c2'], sID['d1'])
        if debug:
            print("Decrypting")
            print("arg1 =>", sID['d2'].type)
            print("arg2 =>", ct['c3'].type)
            print("Num: =>", num)
            print("Dem: =>", dem)
        return ct['c1'] * num / dem
def main():
    """Demo: round-trip a random GT element through setup/extract/encrypt/decrypt."""
    pairing_group = PairingGroup('SS512')
    hash_fn = Waters(pairing_group)
    scheme = IBE_N04(pairing_group)
    (master_public_key, master_key) = scheme.setup()
    identity = "bob@mail.com"
    hashed_id = hash_fn.hash(identity)
    secret_key = scheme.extract(master_key, hashed_id)
    message = pairing_group.random(GT)
    ciphertext = scheme.encrypt(master_public_key, hashed_id, message)
    recovered = scheme.decrypt(master_public_key, secret_key, ciphertext)
    assert message == recovered, "invalid decryption"
    if debug: print("Successful Decryption!")


if __name__ == "__main__":
    debug = True
    main()
|
from collections import deque
class Vertex:
    """Graph node holding a value and the indices of adjacent vertices."""

    def __init__(self, value, adj_list=None):
        self.value = value
        # Default to a fresh list; a shared mutable default would alias edges.
        if adj_list is None:
            adj_list = []
        self.adj_list = adj_list


class Graph:
    """Undirected graph stored as a vertex list with adjacency-index lists."""

    def __init__(self):
        self.vertices = []

    def insert(self, value, adj_list):
        """Append a vertex connected to the existing vertex indices in adj_list."""
        v = Vertex(value, adj_list)
        self.vertices.append(v)
        v_ind = len(self.vertices) - 1
        # Mirror the edge on each neighbour so the graph stays undirected.
        for adj_v_ind in v.adj_list:
            self.vertices[adj_v_ind].adj_list.append(v_ind)

    def bfs(self, vert_ind, value):
        """Breadth-first search from vert_ind; True iff value is reachable."""
        queue = deque([vert_ind])
        visited = [False] * len(self.vertices)
        while queue:
            v_ind = queue.popleft()
            if visited[v_ind]:
                continue
            visited[v_ind] = True
            v = self.vertices[v_ind]
            if v.value == value:
                return True
            queue.extend(n for n in v.adj_list if not visited[n])
        return False

    def dfs(self, vert_ind, value):
        """Depth-first search from vert_ind; True iff value is reachable."""
        visited = [False] * len(self.vertices)

        def visit(ind):
            # Returns True as soon as the value is found, short-circuiting
            # the rest of the traversal (replaces the old nonlocal flag).
            visited[ind] = True
            if self.vertices[ind].value == value:
                return True
            return any(not visited[n] and visit(n)
                       for n in self.vertices[ind].adj_list)

        return visit(vert_ind)
# Build a small demo graph and exercise both search strategies.
graph = Graph()
for value, neighbours in [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [0, 2, 3])]:
    graph.insert(value, neighbours)
print(graph.bfs(0, 2))
print(graph.dfs(0, 3))
|
# -*- coding: utf-8 -*-
# Since NoneType is not actually defined, yet is returned by 'type(None)'
NoneType = type(None)


def optional(t):
    """Return *t* (a type or tuple of types) widened to also accept None."""
    if not isinstance(t, tuple):
        t = (t,)
    return t + (NoneType,)
class Mixin(object):
    """
    Base mixin class used for describing objects that share mutliple parents.
    """

    def __init__(self, *args, **kwargs):
        # Cooperative multiple inheritance: forward construction to the next
        # class in the MRO so every parent gets initialized.
        super().__init__(*args, **kwargs)
class SlotDefinedClass(object):
    """Base class whose constructor fills __slots__ from args/kwargs/defaults.

    Subclasses declare attributes in __slots__, may constrain them via
    __types__ (attribute -> expected type, or [elem_type] / {k: v} for
    containers), and may supply fallbacks in __defaults__.
    """
    __slots__ = tuple()  # Names of attributes
    __types__ = {}  # Optional mapping of attribute to expected type
    __defaults__ = {}  # Optional default values for an attribute

    def __init__(self, *args, **kwargs):
        """
        - Note this must be placed last in the chain of parents since this
        method does not call super()

        Raises:
            RuntimeError: on extra positional args or a slot with no value.
        """
        slots = self.__slots__
        defaults = self.__defaults__
        set_attrs = set()
        # Go through args first, then kwargs
        if len(args) > len(slots):
            raise RuntimeError("Too many arguments provided. Args {} provided for {} when args {} were expected.".format(args, type(self), slots))
        for i, val in enumerate(args):
            attr = slots[i]
            self.__check_and_set_attr(attr, val)
            set_attrs.add(attr)
        for attr in slots:
            if attr in set_attrs:
                # We already set this value in the args
                continue
            if attr in kwargs:
                val = kwargs[attr]
            elif attr in defaults:
                val = defaults[attr]
            else:
                raise RuntimeError("No value for attribute '{}' provided in class '{}'".format(attr, type(self)))
            self.__check_and_set_attr(attr, val)

    def __check_and_set_attr(self, attr, val):
        # Validate the value (when a type is declared) before assigning.
        if attr in self.__types__:
            self.__check_type(attr, val, self.__types__[attr])
        setattr(self, attr, val)

    def __check_type(self, attr, val, expected):
        """Check that the appropriate type is used.
        If the attribute is meant to be a container, check the contents of the
        container. In this case, just the first element of the container is
        checked.
        Args:
            attr (str)
        """
        if isinstance(expected, (type, tuple)):
            # Base class
            assert isinstance(val, expected), \
                "Expected type '{}' for attribute '{}' in class '{}'. Got '{}'".format(
                    expected, attr, type(self), type(val)
                )
        elif isinstance(expected, list):
            # Check container
            self.__check_type(attr, val, list)
            # Check elements
            if val:
                self.__check_type(attr, val[0], expected[0])
        elif isinstance(expected, dict):
            # Check container
            self.__check_type(attr, val, dict)
            # Check keys and vals
            if val:
                self.__check_type(attr, next(iter(val.keys())), next(iter(expected.keys())))
                self.__check_type(attr, next(iter(val.values())), next(iter(expected.values())))
        else:
            # BUG FIX: error message previously read "Uknown".
            raise RuntimeError("Unknown type handling for type '{}'".format(expected))
def merge_dicts(d1, d2):
    """Return a new dict with d1's entries overridden by d2's; inputs untouched."""
    merged = dict(d1)
    merged.update(d2)
    return merged
|
import unittest
from katas.beta.find_the_middle_element import gimme
class GimmeTestCase(unittest.TestCase):
    """Tests for gimme: index of the middle-valued element of a triple."""

    def test_equals(self):
        result = gimme([2, 3, 1])
        self.assertEqual(result, 0)

    def test_equals_2(self):
        result = gimme([5, 10, 14])
        self.assertEqual(result, 1)
|
def mean(arr):
    """Split *arr* into digit and non-digit characters.

    Returns [average of the digit values, the remaining characters joined].
    """
    digit_sum = 0.0
    digit_count = 0
    others = []
    for ch in arr:
        try:
            digit_sum += int(ch)
        except ValueError:
            others.append(ch)
        else:
            digit_count += 1
    return [digit_sum / digit_count, ''.join(others)]
|
#! /usr/bin/env python3
# # -*- coding: utf-8 -*-
#################################################################
# get modbus data from CU and save to log file
# Norihito Nov.2020
#
# Usage: python client_modbus_sample.py
#################################################################
import time
import datetime
# --------------------------------------------------------------------------- #
# import the various server implementations
# --------------------------------------------------------------------------- #
# from pymodbus.client.sync import ModbusTcpClient as ModbusClient
# from pymodbus.client.sync import ModbusUdpClient as ModbusClient
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
# --------------------------------------------------------------------------- #
# configure the client logging
# --------------------------------------------------------------------------- #
#import logging
#FORMAT = ('%(asctime)-15s %(threadName)-15s '
# '%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
#logging.basicConfig(format=FORMAT)
#log = logging.getLogger()
#log.setLevel(logging.DEBUG)
UNIT = 0x1  # Modbus slave (unit) id of the CU


def run_sync_client():
    """Poll 10 holding registers once a second, ten times, printing each read.

    Connects over Modbus RTU on /dev/ttyUSB0 at 9600 8N1 and reads from
    register address 0xFE00.
    """
    # Other transports/framings are available; see the pymodbus client docs,
    # e.g. ModbusClient('localhost', port=5020, framer=ModbusRtuFramer) or
    # method='ascii' / 'binary' serial variants.
    client = ModbusClient(method="rtu", port="/dev/ttyUSB0", stopbits=1, bytesize=8, baudrate=9600)
    client.connect()
    # CONSISTENCY FIX: use the module-level UNIT constant (same value as the
    # previous hard-coded 1) so the slave id is defined in one place; replace
    # the manual `n = 0; while n < 10` counter with a for loop.
    for _ in range(10):
        time.sleep(1)
        temps = client.read_holding_registers(0xFE00, 10, unit=UNIT)  # address, count, slave address
        print(temps.registers)
    # ----------------------------------------------------------------------- #
    # close the client
    # ----------------------------------------------------------------------- #
    client.close()


if __name__ == "__main__":
    run_sync_client()
|
'''
Implement the following functions:
- Range compression (slide 7 of the presentation "Point operators on image pixels").
  The function takes as input arguments the image to transform and the parameter C.
'''
import cv2
import math
def range_compression(img, c):
    """Apply logarithmic range compression s = c * log10(1 + r) to every pixel.

    Args:
        img: image array (H x W x channels, e.g. as returned by cv2.imread);
             modified in place.
        c: the scaling constant C of the transform.

    Returns:
        The same array, with every pixel value replaced (values are cast back
        to the array's dtype, truncating any fractional part — same effect as
        the original per-pixel assignment).
    """
    # PERFORMANCE: replaces the original per-pixel Python double loop with a
    # single vectorized NumPy expression over the whole image.
    import numpy as np  # local import keeps this block drop-in for the original module
    img[:] = c * np.log10(1.0 + img.astype(np.float64))
    return img
# Load the test image (cv2.imread returns a 3-channel array by default).
img = cv2.imread('zelda.pgm')
# Apply log range compression with C = 100 and display the result.
img1 = range_compression(img, 100)
cv2.imshow("img", img1)
cv2.waitKey(0)  # wait for any key press before tearing the window down
cv2.destroyAllWindows()
|
#Exercise 1
# n = 1
# while n < 101:
# print(n*(n+1)/2)
# n += 1
#Exercise 2 Given a number, print its factors.
#Exercise 3
# name = "" #empty string is giving an empty value here, b/c variables must be given a value
# while name != "clint":
# name = input("please say your name is clint\n")
# print("Great I knew you could it.")
# num = 5
# guess = 0
# while guess != num:
# guess = int(input("What's the number?\n"))
# if guess == num:
# print("Correct! You Win!")
# elif guess < num:
# print("Number too low!")
# else:
# print("Number too high!")
#adding end program for too many guesses and random integer for number:
import random

# Number-guessing game: random target 1..10, at most 6 guesses.
num = random.randint(1, 10)
guess_count = 0
guess = None
while guess != num:
    guess = int(input("What's the number?\n"))
    if guess == num:
        print("Correct! You Win!")
    elif guess < num:
        print("Number too low!")
    else:
        print("Number too high!")
    guess_count += 1
    if guess_count > 5:
        print("too many guess bye.")
        # BUG FIX: the comment above promises to "end program for too many
        # guesses", but the loop previously kept asking forever; stop here.
        break
#Exercise 4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-31 20:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused 'description' field from school_photo_gallery."""
    dependencies = [
        ('catalogues', '0017_auto_20170831_2304'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='school_photo_gallery',
            name='description',
        ),
    ]
|
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
import random
def web_submit(submit,chrome_driver,debug=0):
    """Fill in and submit the two-page CPAGrip survey/lead form for one record.

    Args:
        submit: dict of form data; personal fields live under
            submit['Ukchoujiang'], the target URL under submit['Site'].
        chrome_driver: an already-created selenium Chrome driver.
        debug: when 1, overwrite submit['Site'] with a hard-coded test URL.

    Returns:
        1 after the submit button has been clicked and the final wait elapsed.

    NOTE(review): relies on fixed sleeps and absolute XPaths, so it is tightly
    coupled to the current page layout.
    """
    # test
    if debug == 1:
        site = 'https://www.cpagrip.com/show.php?l=0&u=218456&id=23359'
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    chrome_driver.maximize_window()
    chrome_driver.refresh()
    sleep(3)
    # page1
    # click
    chrome_driver.find_element_by_xpath('//*[@id="ytta"]').click()
    # how old: pick one of the four answer labels at random
    old = ['//*[@id="q2"]/div[1]/label','//*[@id="q2"]/div[2]/label','//*[@id="q2"]/div[3]/label','//*[@id="q2"]/div[4]/label']
    age_num = random.randint(0,3)
    age = old[age_num]
    chrome_driver.find_element_by_xpath(age).click()
    # how many times to use facebook: again a random answer
    old = ['//*[@id="q3"]/div[1]/label','//*[@id="q3"]/div[2]/label','//*[@id="q3"]/div[3]/label','//*[@id="q3"]/div[4]/label']
    age_num = random.randint(0,3)
    age = old[age_num]
    chrome_driver.find_element_by_xpath(age).click()
    sleep(30)
    # page2
    # firstname
    chrome_driver.find_element_by_xpath('//*[@id="fn"]').send_keys(submit['Ukchoujiang']['firstname'])
    # lastname
    chrome_driver.find_element_by_xpath('//*[@id="ln"]').send_keys(submit['Ukchoujiang']['lastname'])
    # email
    chrome_driver.find_element_by_xpath('//*[@id="em"]').send_keys(submit['Ukchoujiang']['email'])
    # primary phone, reformatted to UK conventions first
    phone = submit['Ukchoujiang']['homephone']
    phone = Submit_handle.get_uk_phone1(phone)
    chrome_driver.find_element_by_xpath('//*[@id="tel"]').send_keys(phone)
    # postcode
    chrome_driver.find_element_by_xpath('//*[@id="pc"]').send_keys(submit['Ukchoujiang']['zip'])
    # street address
    chrome_driver.find_element_by_xpath('//*[@id="ad"]').send_keys(submit['Ukchoujiang']['address'])
    # city
    chrome_driver.find_element_by_xpath('//*[@id="city"]').send_keys(submit['Ukchoujiang']['city'])
    # country
    chrome_driver.find_element_by_xpath('//*[@id="pt"]').send_keys(submit['Ukchoujiang']['country'])
    # date_of_birth
    date_of_birth = Submit_handle.get_auto_birthday('')
    # mm
    # NOTE(review): the '# mm' label pairs with the 'dobday' select (and '# dd'
    # with 'dobmonth') — possibly swapped; confirm against the live form and
    # the index layout returned by get_auto_birthday.
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="dobday"]'))
    s1.select_by_value(date_of_birth[1])
    sleep(3)
    # dd
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="dobmonth"]'))
    s1.select_by_value(date_of_birth[0])
    sleep(3)
    # year
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="DOBYEAR"]'))
    s1.select_by_value(date_of_birth[2])
    sleep(3)
    # gender: choose one of the two options at random
    num_ = random.randint(0,1)
    if num_ == 0:
        chrome_driver.find_element_by_xpath('//*[@id="fieldslider"]/div[10]').click()
    else:
        chrome_driver.find_element_by_xpath('//*[@id="fieldslider"]/div[11]').click()
    sleep(1)
    # checkbox (GDPR consent)
    chrome_driver.find_element_by_xpath('//*[@id="gdpr"]').click()
    sleep(1)
    # button
    chrome_driver.find_element_by_xpath('//*[@id="subbtn"]').click()
    sleep(300)
    return 1
def test():
    """Manual driver: pull record 10023 from the excel DB and run web_submit in debug mode."""
    # db.email_test()
    # date_of_birth = Submit_handle.get_auto_birthday('')
    Mission_list = ['10023']
    excel = 'Ukchoujiang'
    Excel_name = [excel,'']
    Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
    submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    # [print(item,':',submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
    # Echo only the home phone field for a quick sanity check.
    [print(item,':',submit[excel][item]) for item in submit[excel] if item == 'homephone']
    submit['Mission_Id'] = '10023'
    phone = submit[excel]['homephone']
    phone = Submit_handle.get_uk_phone1(phone)
    print(phone)
    chrome_driver = Chrome_driver.get_chrome(submit)
    web_submit(submit,chrome_driver,1)
if __name__=='__main__':
    test()
|
from wob_data_upload.handles.spark_handle import SparkCassandra
from cassandra.cluster import Cluster
from django.core.cache import cache
from unicodedata import normalize
from datetime import timedelta
import pandas as pd
import numpy as np
import time
import os
def getDevConnection():
    """Open and return a session on the cassandra_dev keyspace."""
    cluster = Cluster([os.environ['CASSANDRA_PORT_9042_TCP_ADDR']])
    metadata = cluster.metadata
    session = cluster.connect('cassandra_dev')
    print("Conectado ao cluster cassandra: " + metadata.cluster_name)
    return session
def closeConnection(session):
    """Shut down the session and its owning cluster, logging the cluster name."""
    session.cluster.shutdown()
    session.shutdown()
    print("Conexao ao cluster cassandra {} fechada".format(session.cluster.metadata.cluster_name))
def createTableFromDataFrame(table_name, alias_column_names, real_column_names, df):
    """Create a Cassandra table whose columns mirror the dataframe.

    Column names come from alias_column_names (normalized to ASCII, lower-case,
    underscores); column types are inferred from df through real_column_names.
    A synthetic bigint 'id' primary key column is always added.
    """
    session = getDevConnection()
    query = 'CREATE TABLE '+table_name+' ( id bigint, '
    # CLEANUP: dropped the unused local `col = f` from the original loop.
    for (alias, real) in zip(alias_column_names, real_column_names):
        # Normalize the alias into a valid, accent-free identifier.
        alias = normalize('NFKD', alias).encode('ascii', 'ignore').decode('ascii')
        alias = alias.lower().strip().replace(' ', '_')
        query += alias + " " + getCassandraTypeFromDf(df, real) + ", "
    query += " PRIMARY KEY(id));"
    session.execute(query)
    print("Tabela "+table_name+" criada")
    closeConnection(session)
def dropTableFromCassandra(table_name):
    """Drop *table_name* from the dev keyspace."""
    session = getDevConnection()
    session.execute('DROP TABLE '+table_name+';')
    print("Tabela "+table_name+" deletada")
    closeConnection(session)
def insertIntoTableFromDataFrame(table_name, df, alias_column_names):
    """Bulk-insert the dataframe rows into *table_name* via the Spark-Cassandra bridge.

    NOTE(review): this still adds an 'id' column to *df* in place, as the
    original did — confirm callers don't depend on the cached dataframe's
    original layout before changing that.
    """
    df['id'] = range(len(df.index))
    df = np.round(df, decimals=1)
    # BUG FIX: the original appended 'id' to the CALLER's list, so any repeat
    # call saw a corrupted column list; work on a local copy instead.
    column_names = list(alias_column_names) + ['id']
    my_tuples = [tuple(x) for x in df.values]
    rdd = SparkCassandra.sc.parallelize([{
        column_names[index].lower().strip().replace(' ', '_'): value
        if not isinstance(value, np.float64) else float(value) \
        for index, value in enumerate(tuple_entry)
    } for tuple_entry in my_tuples
    ])
    rdd.saveToCassandra( \
        "cassandra_dev", \
        table_name
    )
def processDfToCassandra(session, metadata):
    """Create the Cassandra table for the session's cached dataframe and load the data.

    Raises:
        Exception: if table creation or the data load fails; on a load failure
        the freshly created table is dropped so a retry starts clean.
    """
    session_id = session['session_id']
    cache_id = 'my_data_set_' + session_id
    df = cache.get(cache_id)
    alias_column_names = metadata.aliasColumns
    real_column_names = metadata.originalColumns
    table_name = metadata.tableId
    try:
        createTableFromDataFrame(table_name, alias_column_names, real_column_names, df)
    except Exception as exc:
        # Was a bare `except:`: keep the message, but stop swallowing
        # SystemExit/KeyboardInterrupt and preserve the original cause.
        raise Exception("Error when creating table on Cassandra") from exc
    try:
        insertIntoTableFromDataFrame(table_name, df, alias_column_names)
    except Exception as exc:
        dropTableFromCassandra(table_name)
        raise Exception("Error when adding data to table on Cassandra") from exc
def getCassandraTypeFromDf(df, col):
    """Map the dtype of df[col] to the matching Cassandra type (default: text)."""
    dtype_to_cassandra = {
        'float64': 'double',
        'int64': 'bigint',
        'object': 'text',
    }
    return dtype_to_cassandra.get(str(df[col].dtypes), 'text')
# Generated by Django 2.0.5 on 2018-06-28 08:40
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
    """Move dish/out off the Menu model into a new MenuItems line-item model."""
    dependencies = [
        ('calculation', '0038_auto_20180628_0714'),
    ]
    operations = [
        migrations.CreateModel(
            name='MenuItems',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('out', models.CharField(blank=True, max_length=255, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')], verbose_name='выход порции')),
                ('dish', models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.PROTECT, related_name='menu_dish', to='calculation.Dish', verbose_name='блюдо')),
            ],
            options={
                'ordering': ['invoce_doc', 'dish'],
                'verbose_name': 'комплектующие меню',
                'verbose_name_plural': 'комплектующие меню',
            },
        ),
        migrations.RemoveField(
            model_name='menu',
            name='dish',
        ),
        migrations.RemoveField(
            model_name='menu',
            name='out',
        ),
        migrations.AddField(
            model_name='menuitems',
            name='invoce_doc',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='calculation.Menu', verbose_name='меню'),
        ),
    ]
|
import os
import pathlib
from setuptools import setup, find_packages
# Package meta-data.
NAME = "greenponik-sht40"
DESCRIPTION = "Read SHT40 through Python3 on raspberry pi"
URL = "https://github.com/GreenPonik/GreenPonik_SHT40"
EMAIL = "contact@greenponik.com"
AUTHOR = "GreenPonik SAS"
REQUIRES_PYTHON = ">=3.7.0"
VERSION = "0.0.1"
# What packages are required for this module to be executed?
REQUIRED = [
    # 'requests', 'maya', 'records',
    'adafruit-blinka',
    'adafruit-circuitpython-sht4x',
    'adafruit-extended-bus',
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
# Load the package's version.py module as a dictionary.
about = {}
if not VERSION:
    with open(os.path.join(here, "version.py")) as f:
        exec(f.read(), about)
else:
    about["__version__"] = VERSION
setup(
    name=NAME,
    version=about["__version__"],
    author=AUTHOR,
    author_email=EMAIL,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url=URL,
    license="MIT",
    install_requires=REQUIRED,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # BUG FIX: exclude takes a sequence of patterns; the previous value
    # ("docs") is just a parenthesized string, so the docs package was
    # never actually excluded. It must be a one-element tuple.
    packages=find_packages(exclude=("docs",)),
    python_requires=REQUIRES_PYTHON,
    project_urls={  # Optional
        "Source": "https://github.com/GreenPonik/GreenPonik_SHT40/",
        "Bug Reports": "https://github.com/GreenPonik/GreenPonik_SHT40/issues",
    },
    keywords="GreenPonik hydroponics sht40 \
    light reader python hardware diy iot raspberry pi",
)
|
# -*- coding: utf-8 -*-
class Solution:
    def licenseKeyFormatting(self, S, K):
        """Reformat a license key: dashes removed, upper-cased, regrouped into
        chunks of K from the right (the first group may be shorter)."""
        chars = S.replace("-", "").upper()
        groups = []
        end = len(chars)
        # Walk from the right so only the leading group can be short.
        while end > 0:
            start = max(0, end - K)
            groups.append(chars[start:end])
            end = start
        return "-".join(reversed(groups))
if __name__ == "__main__":
    # Smoke-test with the two LeetCode examples.
    solution = Solution()
    assert "5F3Z-2E9W" == solution.licenseKeyFormatting("5F3Z-2e-9-w", 4)
    assert "2-5G-3J" == solution.licenseKeyFormatting("2-5g-3-J", 2)
|
import numpy as np
import sys
input_file_path = "in.txt"  # matrices are read from this file
output_file_path = "out.txt"  # the result is written here
class TempMatrix:
    """Scratch matrix: a rows x columns zero array for intermediate results."""

    def __init__(self, rows, columns):
        self.rows = rows
        self.columns = columns
        self.elements = self.initializeElements()

    def initializeElements(self):
        """Return a fresh zero array of this matrix's dimensions."""
        return np.zeros((self.rows, self.columns))
class Matrix:
    """Matrix padded to a power-of-two square so Strassen can split it evenly."""

    def extraSize(self, square_size):
        """Return the smallest power of two that is >= square_size.

        BUG FIX: the original only returned a value when square_size was NOT
        already a power of two; otherwise it fell off the end and returned
        None, which then crashed initializeElements (int(None)).
        """
        if square_size > 0 and (square_size & (square_size - 1)) == 0:
            return square_size
        new_size = 1
        while new_size < square_size:
            new_size = new_size << 1
        return new_size

    def __init__(self, base_rows, base_columns, square_size):
        self.base_rows = base_rows
        self.base_columns = base_columns
        self.square_size = self.extraSize(square_size)
        self.elements = self.initializeElements()

    def initializeElements(self):
        """Return a square_size x square_size zero array."""
        return np.zeros((int(self.square_size) * int(self.square_size))).reshape(self.square_size, self.square_size)

    def fillMatrix(self, input_file):
        """Read base_rows lines of whitespace-separated ints into the top-left corner."""
        for i in range(self.base_rows):
            row = [int(x) for x in input_file.readline().split()]
            for j in range(self.base_columns):
                self.elements[i][j] = row[j]
def exit(input_file, output_file):
    """Close whichever file handles are open, then terminate with status 0."""
    for handle in (input_file, output_file):
        if handle is not None:
            handle.close()
    sys.exit(0)
def matrixAdd(matrix1, matrix1_index, matrix2, matrix2_index, output_matrix, output_matrix_index):
    """Elementwise-add the indexed sub-blocks of matrix1 and matrix2 into
    the indexed sub-block of output_matrix.

    Each *_index argument is ((row_start, row_end), (col_start, col_end)),
    inclusive on both ends.  As in the original, the extent of the region is
    taken from matrix1's index; the other indices supply only the offsets.
    """
    (r1_lo, r1_hi), (c1_lo, c1_hi) = matrix1_index
    (r2_lo, _), (c2_lo, _) = matrix2_index
    (ro_lo, _), (co_lo, _) = output_matrix_index
    height = r1_hi - r1_lo + 1
    width = c1_hi - c1_lo + 1
    # Vectorized numpy slice assignment replaces the original O(n^2)
    # element-by-element Python loops.
    output_matrix.elements[ro_lo:ro_lo + height, co_lo:co_lo + width] = (
        matrix1.elements[r1_lo:r1_lo + height, c1_lo:c1_lo + width]
        + matrix2.elements[r2_lo:r2_lo + height, c2_lo:c2_lo + width])
def matrixSubtract(matrix1, matrix1_index, matrix2, matrix2_index, output_matrix, output_matrix_index):
    """Elementwise-subtract (matrix1 sub-block - matrix2 sub-block) into the
    indexed sub-block of output_matrix.

    Each *_index argument is ((row_start, row_end), (col_start, col_end)),
    inclusive on both ends.  As in the original, the extent of the region is
    taken from matrix1's index; the other indices supply only the offsets.
    """
    (r1_lo, r1_hi), (c1_lo, c1_hi) = matrix1_index
    (r2_lo, _), (c2_lo, _) = matrix2_index
    (ro_lo, _), (co_lo, _) = output_matrix_index
    height = r1_hi - r1_lo + 1
    width = c1_hi - c1_lo + 1
    # Vectorized numpy slice assignment replaces the original O(n^2)
    # element-by-element Python loops.
    output_matrix.elements[ro_lo:ro_lo + height, co_lo:co_lo + width] = (
        matrix1.elements[r1_lo:r1_lo + height, c1_lo:c1_lo + width]
        - matrix2.elements[r2_lo:r2_lo + height, c2_lo:c2_lo + width])
def strassen(matrix1, matrix1_index, matrix2, matrix2_index, output_matrix, output_matrix_index):
    """Multiply the indexed square sub-blocks of matrix1 and matrix2 into the
    indexed sub-block of output_matrix using Strassen's algorithm.

    Each *_index is ((row_start, row_end), (col_start, col_end)), inclusive.
    The sub-blocks are assumed square with a power-of-two side (guaranteed by
    Matrix's padding).  output_matrix is filled in place and also returned.
    """
    matrix_size = (matrix1_index[0][1] - matrix1_index[0][0] + 1)
    if (matrix_size == 1):
        # Base case added in review: a 1x1 block is just a scalar product.
        # (The original had no size-1 case, so 1x1 inputs produced no result.)
        output_matrix.elements[output_matrix_index[0][0]][output_matrix_index[1][0]] = \
            matrix1.elements[matrix1_index[0][0]][matrix1_index[1][0]] * \
            matrix2.elements[matrix2_index[0][0]][matrix2_index[1][0]]
    elif (matrix_size == 2):
        # 2x2 base case: classical multiplication (4 scalar products).
        matrix1_i = matrix1_index[0][0]
        matrix1_j = matrix1_index[1][0]
        matrix2_i = matrix2_index[0][0]
        matrix2_j = matrix2_index[1][0]
        output_matrix_i = output_matrix_index[0][0]
        output_matrix_j = output_matrix_index[1][0]
        output_matrix.elements[output_matrix_i][output_matrix_j] = int((matrix1.elements[matrix1_i][matrix1_j] * matrix2.elements[matrix2_i][
            matrix2_j]) + (matrix1.elements[matrix1_i][matrix1_j + 1] * matrix2.elements[matrix2_i + 1][matrix2_j]))
        output_matrix.elements[output_matrix_i][output_matrix_j + 1] = int((matrix1.elements[matrix1_i][matrix1_j] * matrix2.elements[matrix2_i][
            matrix2_j + 1]) + (matrix1.elements[matrix1_i][matrix1_j + 1] * matrix2.elements[matrix2_i + 1][matrix2_j + 1]))
        output_matrix.elements[output_matrix_i + 1][output_matrix_j] = int((matrix1.elements[matrix1_i + 1][matrix1_j] * matrix2.elements[matrix2_i][
            matrix2_j]) + (matrix1.elements[matrix1_i + 1][matrix1_j + 1] * matrix2.elements[matrix2_i + 1][matrix2_j]))
        output_matrix.elements[output_matrix_i + 1][output_matrix_j + 1] = int((matrix1.elements[matrix1_i + 1][matrix1_j] *
                                                                                matrix2.elements[matrix2_i][matrix2_j + 1]) + (
                                                                                   matrix1.elements[matrix1_i + 1][matrix1_j + 1] *
                                                                                   matrix2.elements[matrix2_i + 1][matrix2_j + 1]))
    elif (matrix_size > 2):
        # Split each operand (and the output) into four quadrants a, b, c, d.
        matrix1_i = matrix1_index[0]
        matrix1_j = matrix1_index[1]
        matrix1_mid_i = int((matrix1_i[0] + matrix1_i[1]) / 2)
        matrix1_mid_j = int((matrix1_j[0] + matrix1_j[1]) / 2)
        matrix2_i = matrix2_index[0]
        matrix2_j = matrix2_index[1]
        matrix2_mid_i = int((matrix2_i[0] + matrix2_i[1]) / 2)
        matrix2_mid_j = int((matrix2_j[0] + matrix2_j[1]) / 2)
        output_matrix_i = output_matrix_index[0]
        output_matrix_j = output_matrix_index[1]
        output_matrix_mid_i = int((output_matrix_i[0] + output_matrix_i[1]) / 2)
        output_matrix_mid_j = int((output_matrix_j[0] + output_matrix_j[1]) / 2)
        matrix1_a_index = ((matrix1_i[0], matrix1_mid_i), (matrix1_j[0], matrix1_mid_j))
        matrix1_b_index = ((matrix1_i[0], matrix1_mid_i), (matrix1_mid_j + 1, matrix1_j[1]))
        matrix1_c_index = ((matrix1_mid_i + 1, matrix1_i[1]), (matrix1_j[0], matrix1_mid_j))
        matrix1_d_index = ((matrix1_mid_i + 1, matrix1_i[1]), (matrix1_mid_j + 1, matrix1_j[1]))
        matrix2_a_index = ((matrix2_i[0], matrix2_mid_i), (matrix2_j[0], matrix2_mid_j))
        matrix2_b_index = ((matrix2_i[0], matrix2_mid_i), (matrix2_mid_j + 1, matrix2_j[1]))
        matrix2_c_index = ((matrix2_mid_i + 1, matrix2_i[1]), (matrix2_j[0], matrix2_mid_j))
        matrix2_d_index = ((matrix2_mid_i + 1, matrix2_i[1]), (matrix2_mid_j + 1, matrix2_j[1]))
        output_matrix_a_index = ((output_matrix_i[0], output_matrix_mid_i), (output_matrix_j[0], output_matrix_mid_j))
        output_matrix_b_index = (
            (output_matrix_i[0], output_matrix_mid_i), (output_matrix_mid_j + 1, output_matrix_j[1]))
        output_matrix_c_index = (
            (output_matrix_mid_i + 1, output_matrix_i[1]), (output_matrix_j[0], output_matrix_mid_j))
        output_matrix_d_index = (
            (output_matrix_mid_i + 1, output_matrix_i[1]), (output_matrix_mid_j + 1, output_matrix_j[1]))
        # The ten sums/differences of quadrants used by Strassen's seven products.
        temp_1 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_2 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_3 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_4 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_5 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_6 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_7 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_8 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_9 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_10 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        temp_matrix_index = ((0, int((matrix_size / 2)) - 1), (0, int((matrix_size / 2)) - 1))
        matrixSubtract(matrix2, matrix2_b_index, matrix2, matrix2_d_index, temp_1, temp_matrix_index)
        matrixAdd(matrix1, matrix1_a_index, matrix1, matrix1_b_index, temp_2, temp_matrix_index)
        matrixAdd(matrix1, matrix1_c_index, matrix1, matrix1_d_index, temp_3, temp_matrix_index)
        matrixSubtract(matrix2, matrix2_c_index, matrix2, matrix2_a_index, temp_4, temp_matrix_index)
        matrixAdd(matrix1, matrix1_a_index, matrix1, matrix1_d_index, temp_5, temp_matrix_index)
        matrixAdd(matrix2, matrix2_a_index, matrix2, matrix2_d_index, temp_6, temp_matrix_index)
        matrixSubtract(matrix1, matrix1_b_index, matrix1, matrix1_d_index, temp_7, temp_matrix_index)
        matrixAdd(matrix2, matrix2_c_index, matrix2, matrix2_d_index, temp_8, temp_matrix_index)
        matrixSubtract(matrix1, matrix1_a_index, matrix1, matrix1_c_index, temp_9, temp_matrix_index)
        matrixAdd(matrix2, matrix2_a_index, matrix2, matrix2_b_index, temp_10, temp_matrix_index)
        # Strassen's seven recursive products (the aux matrices are filled in place).
        aux_1 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_2 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_3 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_4 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_5 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_6 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_7 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        aux_1 = strassen(matrix1, matrix1_a_index, temp_1, temp_matrix_index, aux_1, temp_matrix_index)
        aux_2 = strassen(temp_2, temp_matrix_index, matrix2, matrix2_d_index, aux_2, temp_matrix_index)
        aux_3 = strassen(temp_3, temp_matrix_index, matrix2, matrix2_a_index, aux_3, temp_matrix_index)
        aux_4 = strassen(matrix1, matrix1_d_index, temp_4, temp_matrix_index, aux_4, temp_matrix_index)
        aux_5 = strassen(temp_5, temp_matrix_index, temp_6, temp_matrix_index, aux_5, temp_matrix_index)
        aux_6 = strassen(temp_7, temp_matrix_index, temp_8, temp_matrix_index, aux_6, temp_matrix_index)
        aux_7 = strassen(temp_9, temp_matrix_index, temp_10, temp_matrix_index, aux_7, temp_matrix_index)
        # Recombine the seven products into the four output quadrants.
        output_aux_1 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        output_aux_2 = TempMatrix(int((matrix_size / 2)), int((matrix_size / 2)))
        matrixAdd(aux_5, temp_matrix_index, aux_4, temp_matrix_index, output_aux_1, temp_matrix_index)
        matrixAdd(aux_2, temp_matrix_index, aux_6, temp_matrix_index, output_aux_2, temp_matrix_index)
        matrixSubtract(output_aux_1, temp_matrix_index, output_aux_2, temp_matrix_index, output_matrix,
                       output_matrix_a_index)
        matrixAdd(aux_1, temp_matrix_index, aux_2, temp_matrix_index, output_matrix, output_matrix_b_index)
        matrixAdd(aux_3, temp_matrix_index, aux_4, temp_matrix_index, output_matrix, output_matrix_c_index)
        matrixAdd(aux_5, temp_matrix_index, aux_1, temp_matrix_index, output_aux_1, temp_matrix_index)
        matrixSubtract(aux_3, temp_matrix_index, aux_7, temp_matrix_index, output_aux_2, temp_matrix_index)
        matrixSubtract(output_aux_1, temp_matrix_index, output_aux_2, temp_matrix_index, output_matrix,
                       output_matrix_d_index)
    # Bug fix: this return was previously inside the `> 2` branch only, so a
    # 2x2 base call returned None.  The `aux_i = strassen(...)` rebindings
    # above then replaced the filled aux matrices with None, crashing every
    # multiplication of size > 2 (and the top-level call for size 2).
    return output_matrix
def main():
    """Read two matrices from in.txt, multiply them with Strassen's
    algorithm, and write the product to out.txt.

    Expected input format: a header line "r1 c1 r2 c2" followed by the rows
    of matrix 1 and then the rows of matrix 2 (whitespace-separated ints).
    """
    try:
        input_file = open(input_file_path)
    except IOError as exception:  # NOTE: exception is intentionally unused
        print("Error opening the input file!")
        # Calls the module-level exit() helper (which shadows the builtin):
        # closes any open handles, then sys.exit(0).
        exit(None, None)
    try:
        output_file = open(output_file_path, "w")
    except IOError as exception:
        print("Error creating/opening the output file!")
        exit(input_file, None)
    try:
        # Header line: dimensions of both operands.
        matrix1_rows, matrix1_columns, matrix2_rows, matrix2_columns = [int(x) for x in input_file.readline().split()]
        if (matrix1_columns != matrix2_rows):
            print("These matrices can not be multiplied!")
            exit(input_file, output_file)
        # Pad both operands (and the product) to a common square size;
        # Matrix rounds it up to the next power of two for Strassen.
        square_size = int(max(matrix1_rows, max(matrix2_columns, matrix1_columns)))
        matrix1 = Matrix(matrix1_rows, matrix1_columns, square_size)
        matrix2 = Matrix(matrix2_rows, matrix2_columns, square_size)
    except IOError as exception:
        print("Error reading the input file!")
        exit(input_file, output_file)
    output_matrix = Matrix(matrix1_rows, matrix2_columns, square_size)
    matrix1.fillMatrix(input_file)
    matrix2.fillMatrix(input_file)
    # Index tuples covering each full padded square: ((row_lo, row_hi), (col_lo, col_hi)).
    matrix1_index = ((0, matrix1.square_size - 1), (0, matrix1.square_size - 1))
    matrix2_index = ((0, matrix2.square_size - 1), (0, matrix2.square_size - 1))
    output_matrix_index = ((0, output_matrix.square_size - 1), (0, output_matrix.square_size - 1))
    output_matrix = strassen(matrix1, matrix1_index, matrix2, matrix2_index, output_matrix, output_matrix_index)
    # presumably the author's submission/student id header — TODO confirm
    output_file.write("16113134\n")
    output_file.write(str(output_matrix.base_rows) + " " + str(output_matrix.base_columns) + "\n")
    # Emit only the un-padded (base) portion of the product.
    for i in range(output_matrix.base_rows):
        for j in range(output_matrix.base_columns):
            output_file.write(str(int(output_matrix.elements[i][j])) + " ")
        output_file.write("\n")
# Script entry point.
if __name__ == '__main__':
    main()
__author__ = "Narwhale"
import socket
# Minimal one-shot TCP server: accept a single connection on localhost:8080
# and send it a greeting.
server_socket = socket.socket()
server_socket.bind(("localhost", 8080))
server_socket.listen()
connection, client_address = server_socket.accept()
connection.sendall(bytes("Hello world", encoding="utf-8"))
"""
Model objects for images.
"""
from __future__ import absolute_import, division, unicode_literals
import attr
from json import dumps
from mimic.model.rackspace_images import OnMetalImage
from mimic.model.nova_objects import not_found
from mimic.canned_responses.mimic_presets import get_presets
@attr.s
class RegionalNovaImageCollection(object):
    """
    A collection of nova images, in a given region, for a given tenant.
    """
    tenant_id = attr.ib()
    region_name = attr.ib()
    clock = attr.ib()
    image_store = attr.ib()

    def list_images(self, include_details, absolutize_url):
        """
        Serialize every image visible in this region as a JSON listing.

        OnMetal images are listed only in the IAD region.
        """
        store = self.image_store.create_image_store(self.tenant_id)
        if include_details:
            serialize = lambda img: img.detailed_json(absolutize_url)
        else:
            serialize = lambda img: img.brief_json(absolutize_url)
        visible = [
            img for img in store
            if self.region_name == "IAD" or not isinstance(img, OnMetalImage)
        ]
        return dumps({"images": [serialize(img) for img in visible]})

    def get_image(self, http_get_request, image_id, absolutize_url):
        """
        Return an image object if one exists from the list `/images` api,
        else return 404 Image not found.
        """
        is_invalid = (image_id in get_presets['servers']['invalid_image_ref']
                      or image_id.endswith('Z'))
        if is_invalid:
            return dumps(not_found("The resource could not be found.",
                                   http_get_request))
        self.image_store.create_image_store(self.tenant_id)
        found = self.image_store.get_image_by_id(image_id)
        if found is None:
            return dumps(not_found('Image not found.', http_get_request))
        return dumps({"image": found.detailed_json(absolutize_url)})
@attr.s
class GlobalNovaImageCollection(object):
    """
    A :obj:`GlobalNovaImageCollection` is a set of all the
    :obj:`RegionalNovaImageCollection` objects owned by a given tenant. In other
    words, all the image objects that a single tenant owns globally.
    """
    tenant_id = attr.ib()
    clock = attr.ib()
    regional_collections = attr.ib(default=attr.Factory(dict))

    def collection_for_region(self, region_name, image_store):
        """
        Return the per-region image collection for ``region_name``,
        creating and caching it on first use.
        """
        try:
            return self.regional_collections[region_name]
        except KeyError:
            collection = RegionalNovaImageCollection(
                tenant_id=self.tenant_id, region_name=region_name,
                clock=self.clock, image_store=image_store)
            self.regional_collections[region_name] = collection
            return collection
|
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.formula.api as smf
def get_data_Rmagic():
    """Load the replication dataset and derive the treatment, centered
    running-variable and pass-course columns."""
    data = pd.read_stata("../data/ReplicationDataset_ThePriceofForcedAttendance.dta")
    data["grade"] = data["grade"].astype(float)
    # Treatment indicator: first-year GPA below the cutoff of 7, plus the
    # running variable centered (and sign-flipped) around that cutoff.
    data["treat"] = 0
    data.loc[data["firstyeargpa"] < 7, ["treat"]] = 1
    data["firstyeargpa_centered"] = -1 * (data["firstyeargpa"] - 7)
    # Pass indicator: a grade of 5.5 or higher counts as passing the course.
    data["passcourse"] = 0
    data.loc[data["grade"] >= 5.5, ["passcourse"]] = 1
    return data
def get_truncated_data(df, bandwidth, cohort, coursetype):
    """Return the subsample selected by cohort, GPA bandwidth and course type.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain "cohort", "firstyeargpa" and "coursepolicy" columns.
    bandwidth : float or "total range"
        Half-width of the GPA window around the cutoff of 7;
        "total range" keeps all observations.
    cohort : 1, 6 or "all cohorts"
        1 keeps cohorts before 6, 6 keeps cohort 6 only, "all cohorts"
        keeps everything.
    coursetype : str
        One of "voluntary"/"encouraged"/"forced" to filter on coursepolicy;
        any other value (e.g. "all courses") keeps all courses.

    Raises
    ------
    ValueError
        If cohort is not one of the recognised selectors.
    """
    if cohort == 1:
        df_temp = df.loc[df["cohort"] < 6]
    elif cohort == 6:
        df_temp = df.loc[df["cohort"] == 6]
    elif cohort == "all cohorts":
        # Bug fix: the original left df_temp undefined on this branch,
        # raising NameError whenever "all cohorts" was requested.  Copy so
        # the reset_index below cannot mutate the caller's frame.
        df_temp = df.copy()
    else:
        raise ValueError("unknown cohort selector: %r" % (cohort,))
    if bandwidth != "total range":
        df_temp = df_temp.loc[df_temp["firstyeargpa"] <= 7 + bandwidth]
        df_temp = df_temp.loc[df_temp["firstyeargpa"] >= 7 - bandwidth]
    if coursetype in ["voluntary", "encouraged", "forced"]:
        df_temp = df_temp.loc[df_temp["coursepolicy"] == coursetype]
    df_temp.reset_index(inplace=True)
    return df_temp
def collect_each_student(df):
    """Collapse consecutive rows of the same student into one row,
    keeping each student's id and first-year GPA.

    Assumes rows for a given student are contiguous in df.
    """
    ids = df["studentid"]
    gpas = df["firstyeargpa"]
    unique_ids = [ids[0]]
    unique_gpas = [gpas[0]]
    # Keep a row only where the student id changes from the previous row.
    for pos in range(1, len(df)):
        if ids[pos] != ids[pos - 1]:
            unique_ids.append(ids[pos])
            unique_gpas.append(gpas[pos])
    result = pd.DataFrame(unique_ids, columns=["studentid"])
    result["firstyeargpa"] = unique_gpas
    return result
def get_fakecutoff_val(df, coursetype, c, y_var):
    """Build the regression frame for a placebo ("fake") cutoff at GPA ``c``.

    Adds the centered running variable, fake treatment dummy, their
    interaction and a triangular kernel weight within a 0.365 bandwidth.
    ``y_var`` is unused here; kept for interface compatibility with callers.
    """
    ### data: cohorts before 6, restricted to the requested course type
    df_reg = get_truncated_data(df, "total range", 1, coursetype)
    ### create running variable centered at fake cutoff and fake treatment variables
    df_reg["X_fake"] = -1 * (df_reg["firstyeargpa"] - c)
    df_reg["treat_fake"] = 0
    df_reg.loc[df_reg["firstyeargpa"] < c, ["treat_fake"]] = 1
    df_reg["treat_X_fake"] = df_reg["treat_fake"] * df_reg["X_fake"]
    ### triangular kernel weight inside the 0.365 bandwidth.
    # Bug fix: the weight was computed from the full `df` instead of the
    # truncated `df_reg`, so its index did not align with the rows being
    # assigned (and picked up excluded observations).
    df_reg["kwgt_fake"] = 0
    df_reg.loc[abs(df_reg["X_fake"]) <= 0.365, ["kwgt_fake"]] = (
        1 - abs((df_reg["firstyeargpa"] - c) / 0.365))
    return df_reg
|
# for_loop_basic1.py — basic for-loop drills (prompts translated from Spanish).

# Basic: print every integer from 0 through 150.
for number in range(151):
    print(number)

# Multiples of five: print every multiple of 5 from 5 up to 1,000.
for multiple in range(5, 1001, 5):
    print(multiple)

# Counting, the Dojo way: integers 1..100, but print "Coding" for multiples
# of 5 and "Coding Dojo" for multiples of 10.
for number in range(1, 101):
    if number % 10 == 0:
        print("Coding Dojo")
    elif number % 5 == 0:
        print("Coding")
    else:
        print(number)

# Whoa, that's big: sum the odd integers from 0 to 500,000 and print the total.
sumaimpares = sum(range(1, 500001, 2))
print("Suma final de enteros impares de 0 a 500.000 ", sumaimpares)

# Countdown by fours: print the positive numbers from 2018 down toward 0,
# subtracting 4 each iteration.
for positi in range(2018, 0, -4):
    print(positi)

# Flexible counter: starting at lowNum and going through highNum, print only
# the integers that are multiples of mult (e.g. 2..9 with mult=3 -> 3, 6, 9).
lowNum = 2
highNum = 9
mult = 3
for candidate in range(lowNum, highNum + 1):
    if candidate % mult == 0:
        print(candidate)

# BONUS (left unattempted in the original): detect whether a number is prime
# and return the list of primes between 1 and 1000.
|
# Write a Python program to create a list of empty dictionaries
def createEmptyDict(n):
    """Return a list of n distinct empty dictionaries."""
    return [{} for _ in range(n)]
# Prompt for how many empty dictionaries to build, then display the list.
n = int(input("Please enter the number of empty dict you want to create :"))
output = createEmptyDict(n)
print(output)
# @Author : bamtercelboo
# @Datetime : 2018/07/19 22:35
# @File : model_CNN.py
# @Last Modify Time : 2018/07/19 22:35
# @Contact : bamtercelboo@{gmail.com, 163.com}
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
import torch.nn.init as init
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_normal_
"""
Neural Network: CNN
"""
class SubNet(nn.Module):
    '''
    Pre-fusion subnetwork used by LMF for the video and audio modalities:
    batch-norm + dropout followed by three ReLU-activated linear layers.
    '''
    def __init__(self, in_size, hidden_size, dropout):
        '''
        Args:
            in_size: input dimension
            hidden_size: hidden layer dimension
            dropout: dropout probability
        Output:
            (return value in forward) a tensor of shape (batch_size, hidden_size)
        '''
        super(SubNet, self).__init__()
        # Submodule names and creation order kept identical to preserve
        # state_dict compatibility.
        self.norm = nn.BatchNorm1d(in_size)
        self.drop = nn.Dropout(p=dropout)
        self.linear_1 = nn.Linear(in_size, hidden_size)
        self.linear_2 = nn.Linear(hidden_size, hidden_size)
        self.linear_3 = nn.Linear(hidden_size, hidden_size)

    def forward(self, x):
        '''
        Args:
            x: tensor of shape (batch_size, in_size)
        '''
        hidden = self.drop(self.norm(x))
        hidden = F.relu(self.linear_1(hidden))
        hidden = F.relu(self.linear_2(hidden))
        return F.relu(self.linear_3(hidden))
class CNN_Text(nn.Module):
    """Multimodal text CNN classifier.

    Text features are convolved (one Conv2d per kernel size), max-pooled
    over time, concatenated with the raw user and image feature vectors,
    and passed through a single linear layer to produce class logits.
    The in_fea arithmetic implies user is 12-dim and image 205-dim —
    presumably; verify against the caller.
    """
    def __init__(self, args):
        super(CNN_Text, self).__init__()
        self.args = args
        # V = args.embed_num
        D = args.embed_dim  # embedding width (conv kernel width)
        C = args.class_num  # number of output classes
        Ci = 1  # text enters as a single input channel
        Co = args.kernel_num  # feature maps per kernel size
        Ks = args.kernel_sizes  # convolution window heights
        # if args.max_norm is not None:
        #     print("max_norm = {} ".format(args.max_norm))
        #     self.embed = nn.Embedding(V, D, max_norm=5, scale_grad_by_freq=True, padding_idx=args.paddingId)
        # else:
        #     print("max_norm = {} ".format(args.max_norm))
        #     self.embed = nn.Embedding(V, D, scale_grad_by_freq=True, padding_idx=args.paddingId)
        # if args.word_Embedding:
        #     self.embed.weight.data.copy_(args.pretrained_weight)
        #     # fixed the word embedding
        #     self.embed.weight.requires_grad = True
        # print("dddd {} ".format(self.embed.weight.data.size()))
        if args.wide_conv is True:
            print("using wide convolution")
            # "Wide": pad by K//2 so the output length matches the input length.
            self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D), stride=(1, 1),
                                                   padding=(K // 2, 0), dilation=1, bias=False) for K in Ks])
        else:
            print("using narrow convolution")
            self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D),
                                                   bias=True) for K in Ks])
        print(self.convs1)
        if args.init_weight:
            print("Initing W .......")
            for conv in self.convs1:
                init.xavier_normal_(conv.weight.data, gain=np.sqrt(args.init_weight_value))
                fan_in, fan_out = CNN_Text.calculate_fan_in_and_fan_out(conv.weight.data)
                print(" in {} out {} ".format(fan_in, fan_out))
                # NOTE(review): std is computed but never used afterwards.
                std = np.sqrt(args.init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
        # for cnn cuda
        # if self.args.cuda is True:
        #     for conv in self.convs1:
        #         conv = conv.cuda()
        self.dropout = nn.Dropout(args.dropout)
        self.dropout_embed = nn.Dropout(args.dropout_embed)
        # Pooled text width plus the user (12) and image (205) feature widths
        # concatenated in forward().
        in_fea = len(Ks) * Co + 12 + 205
        self.fc = nn.Linear(in_features=in_fea, out_features=C, bias=True)
        # whether to use batch normalizations
        if args.batch_normalizations is True:
            print("using batch_normalizations in the model......")
            # NOTE(review): forward()'s batch_normalizations branch calls
            # self.fc1/self.fc2, which are never defined anywhere in this
            # class — that path would raise AttributeError.  Confirm the
            # option is never enabled.
            self.convs1_bn = nn.BatchNorm2d(num_features=Co, momentum=args.bath_norm_momentum,
                                            affine=args.batch_norm_affine)
            self.fc1_bn = nn.BatchNorm1d(num_features=in_fea // 2, momentum=args.bath_norm_momentum,
                                         affine=args.batch_norm_affine)
            self.fc2_bn = nn.BatchNorm1d(num_features=C, momentum=args.bath_norm_momentum,
                                         affine=args.batch_norm_affine)
    def calculate_fan_in_and_fan_out(tensor):
        # NOTE: defined without self/@staticmethod — call it through the
        # class (CNN_Text.calculate_fan_in_and_fan_out(t)), not an instance.
        dimensions = tensor.ndimension()
        if dimensions < 2:
            raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
        if dimensions == 2:  # Linear
            fan_in = tensor.size(1)
            fan_out = tensor.size(0)
        else:
            num_input_fmaps = tensor.size(1)
            num_output_fmaps = tensor.size(0)
            receptive_field_size = 1
            if tensor.dim() > 2:
                receptive_field_size = tensor[0][0].numel()
            fan_in = num_input_fmaps * receptive_field_size
            fan_out = num_output_fmaps * receptive_field_size
        return fan_in, fan_out
    def forward(self, user, image, text):
        """Return class logits for a batch of (user, image, text) inputs.

        text is assumed pre-embedded as (N, W, D) — TODO confirm with caller.
        """
        # text = self.embed(text)  # (N,W,D)
        # text = self.dropout_embed(text)
        text = text.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            text = [self.convs1_bn(F.tanh(conv(text))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
            text = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in text]  # [(N,Co), ...]*len(Ks)
        else:
            text = [F.relu(conv(text)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
            text = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in text]  # [(N,Co), ...]*len(Ks)
        text = torch.cat(text, 1)
        text = self.dropout(text)  # (N,len(Ks)*Co)
        # print(text.shape)
        # Late fusion: simple concatenation of the three modalities.
        cat = torch.cat((user, image, text), 1)
        if self.args.batch_normalizations is True:
            # NOTE(review): self.fc1/self.fc2 do not exist — see __init__.
            cat = self.fc1_bn(self.fc1(cat))
            logit = self.fc2_bn(self.fc2(F.tanh(cat)))
        else:
            logit = self.fc(cat)
        return logit
class TUI(nn.Module):
    """Text CNN with low-rank multimodal fusion (LMF) of text, user and image.

    The pooled text vector and the raw user/image feature vectors are each
    projected through `rank`-many factor matrices and fused multiplicatively
    (LMF-style) into an output_dim-sized logit vector.
    """
    def __init__(self, args, rank):
        super(TUI, self).__init__()
        self.args = args
        self.rank = rank  # rank of the low-rank fusion factors
        self.user_hidden = 12  # user feature width fed to the fusion
        self.image_hidden = 205  # image feature width fed to the fusion
        self.output_dim = 8  # fused logit width
        # V = args.embed_num
        D = args.embed_dim
        C = args.class_num
        Ci = 1
        Co = args.kernel_num
        Ks = args.kernel_sizes
        # if args.max_norm is not None:
        #     print("max_norm = {} ".format(args.max_norm))
        #     self.embed = nn.Embedding(V, D, max_norm=5, scale_grad_by_freq=True, padding_idx=args.paddingId)
        # else:
        #     print("max_norm = {} ".format(args.max_norm))
        #     self.embed = nn.Embedding(V, D, scale_grad_by_freq=True, padding_idx=args.paddingId)
        # if args.word_Embedding:
        #     self.embed.weight.data.copy_(args.pretrained_weight)
        #     # fixed the word embedding
        #     self.embed.weight.requires_grad = True
        # print("dddd {} ".format(self.embed.weight.data.size()))
        if args.wide_conv is True:
            print("using wide convolution")
            self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D), stride=(1, 1),
                                                   padding=(K // 2, 0), dilation=1, bias=False) for K in Ks])
        else:
            print("using narrow convolution")
            self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D),
                                                   bias=True) for K in Ks])
        print(self.convs1)
        if args.init_weight:
            print("Initing W .......")
            for conv in self.convs1:
                init.xavier_normal_(conv.weight.data, gain=np.sqrt(args.init_weight_value))
                fan_in, fan_out = CNN_Text.calculate_fan_in_and_fan_out(conv.weight.data)
                print(" in {} out {} ".format(fan_in, fan_out))
                # NOTE(review): std is computed but never used afterwards.
                std = np.sqrt(args.init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
        # for cnn cuda
        # if self.args.cuda is True:
        #     for conv in self.convs1:
        #         conv = conv.cuda()
        self.dropout = nn.Dropout(args.dropout)
        self.dropout_embed = nn.Dropout(args.dropout_embed)
        in_fea = len(Ks) * Co + 12 + 205
        # NOTE(review): self.fc is created but the LMF forward path below
        # never uses it (only the commented-out "origin part" did).
        self.fc = nn.Linear(in_features=in_fea, out_features=C, bias=True)
        # whether to use batch normalizations
        if args.batch_normalizations is True:
            print("using batch_normalizations in the model......")
            self.convs1_bn = nn.BatchNorm2d(num_features=Co, momentum=args.bath_norm_momentum,
                                            affine=args.batch_norm_affine)
            self.fc1_bn = nn.BatchNorm1d(num_features=in_fea // 2, momentum=args.bath_norm_momentum,
                                         affine=args.batch_norm_affine)
            self.fc2_bn = nn.BatchNorm1d(num_features=C, momentum=args.bath_norm_momentum,
                                         affine=args.batch_norm_affine)
        # Low-rank fusion factors.  The +1 row absorbs the constant 1 that
        # forward() prepends to each modality (bias trick).
        self.user_factor = Parameter(torch.Tensor(self.rank, self.user_hidden + 1, self.output_dim))
        self.image_factor = Parameter(torch.Tensor(self.rank, self.image_hidden + 1, self.output_dim))
        # NOTE(review): text_factor sizes the text modality as kernel_num+1,
        # but forward() produces len(kernel_sizes) * kernel_num pooled text
        # features — confirm len(kernel_sizes) == 1 in the configs used.
        self.text_factor = Parameter(torch.Tensor(self.rank, args.kernel_num + 1, self.output_dim))
        self.fusion_weights = Parameter(torch.Tensor(1, self.rank))
        self.fusion_bias = Parameter(torch.Tensor(1, self.output_dim))
        xavier_normal_(self.user_factor)
        xavier_normal_(self.image_factor)
        xavier_normal_(self.text_factor)
        xavier_normal_(self.fusion_weights)
        self.fusion_bias.data.fill_(0)
    def calculate_fan_in_and_fan_out(tensor):
        # NOTE: defined without self/@staticmethod — call it through the
        # class, not an instance (mirrors CNN_Text's helper).
        dimensions = tensor.ndimension()
        if dimensions < 2:
            raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
        if dimensions == 2:  # Linear
            fan_in = tensor.size(1)
            fan_out = tensor.size(0)
        else:
            num_input_fmaps = tensor.size(1)
            num_output_fmaps = tensor.size(0)
            receptive_field_size = 1
            if tensor.dim() > 2:
                receptive_field_size = tensor[0][0].numel()
            fan_in = num_input_fmaps * receptive_field_size
            fan_out = num_output_fmaps * receptive_field_size
        return fan_in, fan_out
    def forward(self, user, image, text):
        """Return LMF-fused logits of shape (N, output_dim)."""
        # text = self.embed(text)  # (N,W,D)
        # text = self.dropout_embed(text)
        text = text.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            text = [self.convs1_bn(F.tanh(conv(text))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
            text = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in text]  # [(N,Co), ...]*len(Ks)
        else:
            text = [F.relu(conv(text)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
            text = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in text]  # [(N,Co), ...]*len(Ks)
        text = torch.cat(text, 1)
        text = self.dropout(text)  # (N,len(Ks)*Co)
        '''
        position of dropout
        '''
        # # origin part
        # cat = torch.cat((user, image, text), 1)
        #
        # if self.args.batch_normalizations is True:
        #     cat = self.fc1_bn(self.fc1(cat))
        #     logit = self.fc2_bn(self.fc2(F.tanh(cat)))
        # else:
        #     logit = self.fc(cat)
        # lmf part
        batch_size = text.data.shape[0]
        if text.is_cuda:
            DTYPE = torch.cuda.FloatTensor
        else:
            DTYPE = torch.FloatTensor
        # Prepend a constant 1 to each modality so the factor matrices can
        # model a per-modality bias (LMF bias trick).
        _text = torch.cat((Variable(torch.ones(batch_size, 1).type(DTYPE), requires_grad=False), text), dim=1)
        _image = torch.cat((Variable(torch.ones(batch_size, 1).type(DTYPE), requires_grad=False), image), dim=1)
        _user = torch.cat((Variable(torch.ones(batch_size, 1).type(DTYPE), requires_grad=False), user), dim=1)
        fusion_text = torch.matmul(_text, self.text_factor)
        fusion_image = torch.matmul(_image, self.image_factor)
        fusion_user = torch.matmul(_user, self.user_factor)
        # Elementwise product fuses the three modalities per rank slice.
        fusion_zy = fusion_text * fusion_image * fusion_user
        # output = torch.sum(fusion_zy, dim=0).squeeze()
        # use linear transformation instead of simple summation, more flexibility
        output = torch.matmul(self.fusion_weights, fusion_zy.permute(1, 0, 2)).squeeze() + self.fusion_bias
        logit = output.view(-1, self.output_dim)
        return logit
class TUI_img_user(nn.Module):
def __init__(self, args, rank):
super(TUI_img_user, self).__init__()
self.args = args
self.rank = rank
self.user_hidden = 8
self.image_hidden = 128
self.output_dim = 8
# V = args.embed_num
D = args.embed_dim
C = args.class_num
Ci = 1
Co = args.kernel_num
Ks = args.kernel_sizes
self.user_subnet = SubNet(12, self.user_hidden, self.args.dropout)
self.image_subnet = SubNet(205, self.image_hidden, self.args.dropout)
# if args.max_norm is not None:
# print("max_norm = {} ".format(args.max_norm))
# self.embed = nn.Embedding(V, D, max_norm=5, scale_grad_by_freq=True, padding_idx=args.paddingId)
# else:
# print("max_norm = {} ".format(args.max_norm))
# self.embed = nn.Embedding(V, D, scale_grad_by_freq=True, padding_idx=args.paddingId)
# if args.word_Embedding:
# self.embed.weight.data.copy_(args.pretrained_weight)
# # fixed the word embedding
# self.embed.weight.requires_grad = True
# print("dddd {} ".format(self.embed.weight.data.size()))
if args.wide_conv is True:
print("using wide convolution")
self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D), stride=(1, 1),
padding=(K // 2, 0), dilation=1, bias=False) for K in Ks])
else:
print("using narrow convolution")
self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D),
bias=True) for K in Ks])
print(self.convs1)
if args.init_weight:
print("Initing W .......")
for conv in self.convs1:
init.xavier_normal_(conv.weight.data, gain=np.sqrt(args.init_weight_value))
fan_in, fan_out = CNN_Text.calculate_fan_in_and_fan_out(conv.weight.data)
print(" in {} out {} ".format(fan_in, fan_out))
std = np.sqrt(args.init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
# for cnn cuda
# if self.args.cuda is True:
# for conv in self.convs1:
# conv = conv.cuda()
self.dropout = nn.Dropout(args.dropout)
self.dropout_embed = nn.Dropout(args.dropout_embed)
in_fea = len(Ks) * Co + 12 + 205
self.fc = nn.Linear(in_features=in_fea, out_features=C, bias=True)
# whether to use batch normalizations
if args.batch_normalizations is True:
print("using batch_normalizations in the model......")
self.convs1_bn = nn.BatchNorm2d(num_features=Co, momentum=args.bath_norm_momentum,
affine=args.batch_norm_affine)
self.fc1_bn = nn.BatchNorm1d(num_features=in_fea // 2, momentum=args.bath_norm_momentum,
affine=args.batch_norm_affine)
self.fc2_bn = nn.BatchNorm1d(num_features=C, momentum=args.bath_norm_momentum,
affine=args.batch_norm_affine)
self.user_factor = Parameter(torch.Tensor(self.rank, self.user_hidden + 1, self.output_dim))
self.image_factor = Parameter(torch.Tensor(self.rank, self.image_hidden + 1, self.output_dim))
self.text_factor = Parameter(torch.Tensor(self.rank, args.kernel_num + 1, self.output_dim))
self.fusion_weights = Parameter(torch.Tensor(1, self.rank))
self.fusion_bias = Parameter(torch.Tensor(1, self.output_dim))
xavier_normal_(self.fc.weight)
xavier_normal_(self.user_factor)
xavier_normal_(self.image_factor)
xavier_normal_(self.text_factor)
xavier_normal_(self.fusion_weights)
self.fusion_bias.data.fill_(0)
def calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def forward(self, user, image, text):
    """Fuse user, image and CNN-encoded text features into class logits.

    :param user: user feature batch (width 12, per the fc input sizing) — TODO confirm
    :param image: image feature batch (width 205, per the fc input sizing) — TODO confirm
    :param text: pre-embedded text batch (N, W, D); a channel dim is added here.
    :return: logits tensor of shape (N, class_num).
    """
    # text = self.embed(text)  # (N,W,D)
    # text = self.dropout_embed(text)
    text = text.unsqueeze(1)  # (N,Ci,W,D) — add the conv "channel" dimension
    if self.args.batch_normalizations is True:
        # NOTE(review): this branch batch-norms after tanh; the else branch uses ReLU.
        text = [self.convs1_bn(F.tanh(conv(text))).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        text = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in text]  # [(N,Co), ...]*len(Ks)
    else:
        text = [F.relu(conv(text)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        text = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in text]  # [(N,Co), ...]*len(Ks)
    # Concatenate the max-pooled features of every kernel size: (N, len(Ks)*Co)
    text = torch.cat(text, 1)
    # text = self.dropout(text)  # (N,len(Ks)*Co)
    # origin part
    # user = self.user_subnet(user)
    # image = self.image_subnet(image)
    # Early fusion: plain concatenation of the three modalities, then dropout.
    cat = torch.cat((user, image, text), 1)
    cat = self.dropout(cat)
    if self.args.batch_normalizations is True:
        # NOTE(review): self.fc1/self.fc2 are not defined in the visible
        # __init__ (only self.fc is) — this branch would fail; confirm upstream.
        cat = self.fc1_bn(self.fc1(cat))
        logit = self.fc2_bn(self.fc2(F.tanh(cat)))
    else:
        logit = self.fc(cat)
    # # lmf part
    #
    # user = self.user_subnet(user)
    # image = self.image_subnet(image)
    #
    # batch_size = text.data.shape[0]
    # if text.is_cuda:
    #     DTYPE = torch.cuda.FloatTensor
    # else:
    #     DTYPE = torch.FloatTensor
    #
    # _text = torch.cat((Variable(torch.ones(batch_size, 1).type(DTYPE), requires_grad=False), text), dim=1)
    # _image = torch.cat((Variable(torch.ones(batch_size, 1).type(DTYPE), requires_grad=False), image), dim=1)
    # _user = torch.cat((Variable(torch.ones(batch_size, 1).type(DTYPE), requires_grad=False), user), dim=1)
    #
    # fusion_text = torch.matmul(_text, self.text_factor)
    # fusion_image = torch.matmul(_image, self.image_factor)
    # fusion_user = torch.matmul(_user, self.user_factor)
    # fusion_zy = fusion_text * fusion_image * fusion_user
    #
    # # output = torch.sum(fusion_zy, dim=0).squeeze()
    # # use linear transformation instead of simple summation, more flexibility
    # output = torch.matmul(self.fusion_weights, fusion_zy.permute(1, 0, 2)).squeeze() + self.fusion_bias
    #
    # logit = output.view(-1, self.output_dim)
    return logit
if __name__ == '__main__':
    # Smoke test: build the model from default CLI arguments and run one
    # forward pass on random tensors of the expected shapes.
    parser = argparse.ArgumentParser(description='PyTorch for image-user CNN')
    # parser.add_argument('--embed_num', default=)
    parser.add_argument('--embed_dim', default=300)
    parser.add_argument('--class_num', default=8)
    parser.add_argument('--kernel_num', default=100)
    parser.add_argument('--kernel_sizes', default=[1])
    # parser.add_argument('--max_norm', default=)
    # parser.add_argument('--paddingId', default=)
    # parser.add_argument('--word_Embedding', default=)
    # parser.add_argument('--pretrained_weight', default=)
    parser.add_argument('--wide_conv', default=True)
    parser.add_argument('--init_weight', default=True)
    parser.add_argument('--init_weight_value', default=2.0)
    # parser.add_argument('--cuda', default=False)
    parser.add_argument('--dropout', default=0.75)
    parser.add_argument('--dropout_embed', default=0.75)
    parser.add_argument('--batch_normalizations', default=False)
    parser.add_argument('--batch_norm_affine', default=False)
    # NOTE(review): no type= on the arguments — values given on the command
    # line would arrive as strings; fine for this defaults-only smoke test.
    config = parser.parse_args()
    net = TUI_img_user(config, 4)
    user = torch.randn((4, 12))        # (batch, user feature dim)
    image = torch.randn((4, 205))      # (batch, image feature dim)
    text = torch.randn((4, 214, 300))  # (batch, seq len, embed dim)
    # print(text)
    y = net(user, image, text)
    print(y.shape)
|
def is_balanced(s, pairs):
    """Return True if every delimiter character in *s* is correctly balanced.

    :param s: string to check.
    :param pairs: flat string of opener/closer pairs, e.g. ``"()[]{}"`` or ``'""'``.
        A character that is both opener and closer (e.g. a quote) toggles: it
        closes when it matches the most recently expected closer, otherwise it opens.
    :return: True when balanced, False otherwise.
    """
    # BUG FIX: the original called dict.iterkeys()/itervalues(), which were
    # removed in Python 3 and raise AttributeError there.
    expected = []  # stack of closing characters still awaited
    pair_dict = dict(zip(pairs[::2], pairs[1::2]))
    openers = set(pair_dict)
    closers = set(pair_dict.values())
    for ch in s:
        is_opener = ch in openers
        is_closer = ch in closers
        if is_opener and is_closer:
            # Symmetric delimiter: close if it matches the stack top, else open.
            last = expected[-1] if expected else None
            if last != ch:
                expected.append(pair_dict[ch])
            else:
                expected.pop()
        elif is_opener:
            expected.append(pair_dict[ch])
        elif is_closer:
            try:
                if ch == expected.pop():
                    continue
            except IndexError:
                pass
            return False
    # Balanced only when nothing is left open.
    return not expected
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import sys
sys.path.append('/home/oushu/lixingyu/git_repo/attention_OCR')
from tensorflow.contrib import rnn
from models import cnn_basenet
from config.global_config import CFG
import numpy as np
class ShadowNet(cnn_basenet.CNNBaseModel):
    """CRNN-style OCR network ("shadow net"): a VGG-like CNN feature extractor
    followed by an attention decoder with auto-regression, producing
    per-character logits, predictions, scores and (in training mode) a loss.
    """

    def __init__(self, phase, is_train):
        """
        :param phase: string / tf string tensor; compared against 'train' to
            build the graph-level `_is_training` boolean (drives batch norm).
        :param is_train: Python bool; when True build_shadownet also builds the loss.
        """
        super(ShadowNet, self).__init__()
        self._train_phase = tf.constant('train', dtype=tf.string)
        self._test_phase = tf.constant('test', dtype=tf.string)
        self._is_training = tf.equal(self._train_phase, phase)
        self.flag_train = is_train

    @property
    def phase(self):
        # NOTE(review): `_phase` is only assigned by the setter below, never in
        # __init__ — reading this property before setting it raises AttributeError.
        return self._phase

    @phase.setter
    def phase(self, value):
        # Accepts 'train'/'test' case-insensitively and stores it lower-cased.
        if not isinstance(value, str):
            raise TypeError('value should be a str \'Test\' or \'Train\'')
        if value.lower() not in ['test', 'train']:
            raise ValueError('value should be a str \'Test\' or \'Train\'')
        self._phase = value.lower()
        return

    def _conv_stage(self, inputdata, out_dims, name=None):
        """One CNN stage: 3x3 conv (no bias) -> ReLU -> 2x2 max-pool."""
        conv = self.conv2d(inputdata=inputdata, out_channel=out_dims,
                           kernel_size=3, stride=1, use_bias=False,
                           name=name)
        relu = self.relu(inputdata=conv)
        max_pool = self.maxpooling(inputdata=relu, kernel_size=2, stride=2)
        return max_pool

    def _feature_sequence_extraction(self, inputdata):
        """CNN stack that collapses the image height to 1 while widening channels.

        :param inputdata: image batch (shape comments assume batch*32*100*3).
        :return: (final feature map, dict of named intermediate tensors for debugging).
        """
        tensor_dict = dict()
        conv1 = self._conv_stage(inputdata=inputdata, out_dims=64, name='conv1')  # batch*16*50*64
        tensor_dict['conv1'] = conv1
        conv2 = self._conv_stage(inputdata=conv1, out_dims=128, name='conv2')  # batch*8*25*128
        tensor_dict['conv2'] = conv2
        conv3 = self.conv2d(inputdata=conv2, out_channel=256,
                            kernel_size=3, stride=1, use_bias=False,
                            name='conv3')  # batch*8*25*256
        relu3 = self.relu(conv3)  # batch*8*25*256
        tensor_dict['conv3'] = conv3
        tensor_dict['relu3'] = relu3
        conv4 = self.conv2d(inputdata=relu3, out_channel=256,
                            kernel_size=3, stride=1, use_bias=False,
                            name='conv4')  # batch*8*25*256
        relu4 = self.relu(conv4)  # batch*8*25*256
        # Asymmetric 2x1 pooling: halve the height only, keep the width (sequence length).
        max_pool4 = self.maxpooling(inputdata=relu4, kernel_size=[2, 1], stride=[2, 1],
                                    padding='VALID')  # batch*4*25*256
        tensor_dict['conv4'] = conv4
        tensor_dict['relu4'] = relu4
        tensor_dict['max_pool4'] = max_pool4
        conv5 = self.conv2d(inputdata=max_pool4, out_channel=512,
                            kernel_size=3, stride=1, use_bias=False,
                            name='conv5')  # batch*4*25*512
        # Batch norm follows `_is_training` so statistics are frozen at test time.
        conv5_bn5 = self.layerbn(inputdata=conv5, is_training=self._is_training, name='bn5')
        relu5 = self.relu(conv5_bn5)  # batch*4*25*512
        tensor_dict['conv5'] = conv5
        tensor_dict['relu5'] = relu5
        tensor_dict['bn5'] = conv5_bn5
        conv6 = self.conv2d(inputdata=relu5, out_channel=512,
                            kernel_size=3, stride=1, use_bias=False,
                            name='conv6')  # batch*4*25*512
        conv6_bn6 = self.layerbn(inputdata=conv6, is_training=self._is_training, name='bn6')
        relu6 = self.relu(conv6_bn6)  # batch*4*25*512
        max_pool6 = self.maxpooling(inputdata=relu6,
                                    kernel_size=[2, 1], stride=[2, 1])  # batch*2*25*512
        tensor_dict['conv6'] = conv6
        tensor_dict['relu6'] = relu6
        tensor_dict['bn6'] = conv6_bn6
        tensor_dict['max_pool6'] = max_pool6
        # Final 2x2 conv with stride [2, 1] reduces height to 1.
        conv7 = self.conv2d(inputdata=max_pool6, out_channel=512,
                            kernel_size=2, stride=[2, 1], use_bias=False,
                            name='conv7')  # batch*1*25*512
        relu7 = self.relu(conv7)  # batch*1*25*512
        tensor_dict['conv7'] = conv7
        tensor_dict['relu7'] = relu7
        return relu7, tensor_dict

    def _map_to_sequence(self, inputdata):
        """Squeeze the unit-height axis: (B, 1, W, C) -> (B, W, C)."""
        shape = inputdata.get_shape().as_list()
        assert shape[1] == 1  # H of the feature map must equal to 1
        return self.squeeze(inputdata=inputdata, axis=1)

    def build_shadownet(self, inputdata, labels):
        """Assemble the full network: CNN -> sequence -> coordinate encoding ->
        attention decoder -> per-character predictions (and loss when training).

        :param inputdata: input image batch.
        :param labels: sparse ground-truth labels, or a falsy value at inference.
        :return: (loss_or_None, predicted_ids, char_scores, tensor_dict)
        """
        with tf.variable_scope('cnn_subnetwork'):
            # print([labels.shape.as_list()[0], CFG.MAX_SEQ_LEN])
            if labels:
                # Densify the sparse labels and one-hot them for teacher forcing.
                labels = tf.sparse_to_dense(labels.indices, [CFG.BATCH_SIZE, CFG.MAX_SEQ_LEN], labels.values)
                labels_one_hot = slim.one_hot_encoding(labels, num_classes=CFG.CLASSES_NUMS)
            else:
                labels_one_hot = None
            cnn_out, tensor_dict = self._feature_sequence_extraction(inputdata=inputdata)
            sequence = self._map_to_sequence(inputdata=cnn_out)
            net_out = self.encode_coordinate_fn(sequence)
            sequence_logit = self.sequence_logit_fn(net_out, labels_one_hot)
            ids, logit_prob, scores = self.get_char_prdict(sequence_logit)
            if self.flag_train:
                loss = self.create_loss(labels, logit_prob)
                # tensor_dict['loss_weights'] = weights
                return loss, ids, scores, tensor_dict
            else:
                return None, ids, scores, tensor_dict

    def encode_coordinate_fn(self, net):
        """Append a one-hot horizontal-position encoding to every time step."""
        batch_size, w, _ = net.shape.as_list()
        x = tf.range(w)
        w_loc = slim.one_hot_encoding(x, num_classes=w)
        loc = tf.tile(tf.expand_dims(w_loc, 0), [batch_size, 1, 1])
        return tf.concat([net, loc], 2)

    def sequence_logit_fn(self, net, labels_one_hot):
        """Decode the feature sequence into per-step character logits."""
        sequence_layer = AttentionWithAutoRegression(net, labels_one_hot)
        return sequence_layer.create_logits()

    def get_char_prdict(self, chars_logit):
        """Numerically-stable log-softmax plus argmax ids and per-char scores.

        :param chars_logit: logits of shape (batch, MAX_SEQ_LEN, CLASSES_NUMS).
        :return: (ids, log_probs, scores); scores shaped (batch, MAX_SEQ_LEN).
        """
        with tf.variable_scope('log_probabilities'):
            reduction_indices = len(chars_logit.shape.as_list())-1
            # Subtract the per-position max before exp() to avoid overflow.
            max_logits = tf.reduce_max(
                chars_logit, reduction_indices=reduction_indices, keepdims=True)
            safe_logits = tf.subtract(chars_logit, max_logits)
            sum_exp = tf.reduce_sum(
                tf.exp(safe_logits),
                reduction_indices=reduction_indices,
                keepdims=True
            )
            log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
            ids = tf.to_int32(tf.argmax(log_probs, axis=2), name='predicted_chars')
            # Pick each predicted character's softmax probability via a boolean mask.
            mask = tf.cast(
                slim.one_hot_encoding(ids, CFG.CLASSES_NUMS), tf.bool
            )
            all_scores = tf.nn.softmax(chars_logit)
            selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores')
            scores = tf.reshape(selected_scores, shape=(-1, CFG.MAX_SEQ_LEN))
            return ids, log_probs, scores

    def sequence_loss_fn(self, chars_logits, chars_labels):
        """Per-step cross-entropy sequence loss with uniform (all-ones) weights.

        The loss is also registered with tf.losses for get_total_loss().
        """
        with tf.variable_scope('sequence_loss_fn/SLF'):
            labels_list = tf.unstack(chars_labels, axis=1)
            batch_size, seq_length, _ = chars_logits.shape.as_list()
            reject_char = tf.constant(
                1,
                shape=(batch_size, seq_length),
                dtype=tf.int32
            )
            # known_char = tf.not_equal(chars_labels, reject_char)
            # unknown_char = tf.equal(chars_labels, reject_char)
            # unknown_mask = tf.to_float(unknown_char)
            # unknown_origin_weight = tf.constant(
            #     0.02,
            #     shape=(batch_size, seq_length),
            #     dtype=tf.float32
            # )
            # known_origin_weights = tf.to_float(known_char)
            # weights = tf.add(known_origin_weights, tf.multiply(unknown_mask, unknown_origin_weight))
            # Uniform weighting (the unknown-char down-weighting above is disabled).
            weights = tf.to_float(reject_char)
            logits_list = tf.unstack(chars_logits, axis=1)
            weights_list = tf.unstack(weights, axis=1)
            loss = tf.contrib.legacy_seq2seq.sequence_loss(
                logits_list,
                labels_list,
                weights_list,
                softmax_loss_function=get_softmax_fn(),
                average_across_timesteps=False
            )
            tf.losses.add_loss(loss)
            return loss

    def create_loss(self, labels, chars_logit):
        """Register the sequence loss and return the graph's total loss
        (includes any regularization losses collected by tf.losses)."""
        self.sequence_loss_fn(chars_logit, labels)
        total_loss = tf.losses.get_total_loss()
        return total_loss
def get_softmax_fn():
    """Return a softmax-loss callable with the (labels, logits) signature
    expected by legacy_seq2seq.sequence_loss."""
    return lambda labels, logits: tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
def arr2sparse(arr_tensor):
    """Convert a dense tensor to a tf.SparseTensor keeping non-zero entries."""
    nz_indices = tf.where(tf.not_equal(arr_tensor, 0))
    nz_values = tf.gather_nd(arr_tensor, nz_indices)
    return tf.SparseTensor(nz_indices, nz_values, arr_tensor.get_shape())
class AttentionWithAutoRegression(object):
    """Attention decoder predicting one character per step, feeding the previous
    character back in: ground truth during training (teacher forcing), its own
    argmax prediction during evaluation (auto-regression).
    """

    def __init__(self, net, labels_one_hot):
        """
        :param net: encoder feature sequence (batch, width, depth) used as attention states.
        :param labels_one_hot: one-hot ground-truth labels for training, or None at inference.
        """
        self.net = net
        self.batch_size = self.net.get_shape().dims[0].value
        # All-zero "start" token fed to the first decoding step.
        self.zero_labels = tf.zeros([self.batch_size, CFG.CLASSES_NUMS])
        self.labels_one_hot = labels_one_hot
        self.char_logits = {}  # memoizes the output projection per step index
        regularizer = slim.l2_regularizer(0.0)  # zero-weight L2 (effectively disabled)
        # Shared output projection applied to every LSTM step.
        self.softmax_w = slim.model_variable(
            'softmax_w',
            [CFG.NUM_LSTM_UNITS, CFG.CLASSES_NUMS],
            initializer=orthogonal_initializer,
            regularizer=regularizer
        )
        self.softmax_b = slim.model_variable(
            'softmax_b',
            [CFG.CLASSES_NUMS],
            initializer=tf.zeros_initializer(),
            regularizer=regularizer
        )

    def char_logits_fn(self, inputs, char_index):
        # Project an LSTM output to class logits; cached so the same graph node
        # is reused when a step is queried more than once.
        if char_index not in self.char_logits:
            self.char_logits[char_index] = tf.nn.xw_plus_b(inputs, self.softmax_w, self.softmax_b)
        return self.char_logits[char_index]

    def char_one_hot(self, logit):
        """One-hot encode the argmax character of *logit*."""
        prediction = tf.argmax(logit, axis=1)
        return slim.one_hot_encoding(prediction, CFG.CLASSES_NUMS)

    def get_train_input(self, prev, i):
        """Teacher forcing: ground-truth character of step i-1 (start token at i=0)."""
        if i == 0:
            return self.zero_labels
        else:
            return self.labels_one_hot[:, i-1, :]

    def get_eval_input(self, prev, i):
        """Auto-regression: one-hot of the model's own previous prediction."""
        if i == 0:
            return self.zero_labels
        else:
            logit = self.char_logits_fn(prev, char_index=i-1)
            return self.char_one_hot(logit)

    def get_input(self, prev, i):
        # Mode dispatch: labels available -> training, otherwise evaluation.
        if self.labels_one_hot is not None:
            return self.get_train_input(prev, i)
        else:
            return self.get_eval_input(prev, i)

    def create_logits(self):
        """Run the attention decoder for CFG.MAX_SEQ_LEN steps.

        :return: logits tensor of shape (batch, MAX_SEQ_LEN, CLASSES_NUMS).
        """
        with tf.variable_scope('LSTM'):
            first_label = self.get_input(prev=None, i=0)
            # Only the first input is fixed; the remaining steps are produced
            # by loop_function (get_input) during decoding.
            decoder_inputs = [first_label] + [None] * (CFG.MAX_SEQ_LEN-1)
            lstm_cell = tf.contrib.rnn.LSTMCell(
                CFG.NUM_LSTM_UNITS,
                use_peepholes=False,
                cell_clip=CFG.LSTM_STATE_CLIP_VAL,
                state_is_tuple=True,
                initializer=orthogonal_initializer
            )
            lstm_outputs, _ = tf.contrib.legacy_seq2seq.attention_decoder(
                decoder_inputs=decoder_inputs,
                initial_state=lstm_cell.zero_state(self.batch_size, tf.float32),
                attention_states=self.net,
                loop_function=self.get_input,
                cell=lstm_cell
            )
        with tf.variable_scope('logits'):
            # Project each step's LSTM output and stack along the time axis.
            logits_list = [
                tf.expand_dims(self.char_logits_fn(logit, i), axis=1)
                for i, logit in enumerate(lstm_outputs)
            ]
        return tf.concat(logits_list, 1)
# def unroll_cell(self, decoder_input):
def orthogonal_initializer(shape, dtype=tf.float32, *args, **kwargs):
    """Initializer producing a tensor whose 2-D flattening is orthogonal (via SVD)."""
    del args, kwargs  # accepted only for initializer-API compatibility
    rows = shape[0]
    cols = np.prod(shape[1:])
    u, _, v = np.linalg.svd(np.random.randn(rows, cols), full_matrices=False)
    # Pick whichever SVD factor has the requested flat shape.
    ortho = u if u.shape == (rows, cols) else v
    return tf.constant(ortho.reshape(shape), dtype=dtype)
# def _sequence_label(self, inputdata):
# """
# Implement the sequence label part of the network
# :param inputdata:
# :return:
# """
# if self._rnn_cell_type == 'lstm':
# with tf.variable_scope('LSTMLayers'):
# # construct stack lstm rcnn layer
# # forward lstm cell
# fw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0) for nh in
# [self._hidden_nums, self._hidden_nums]]
# # Backward direction cells
# bw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0) for nh in
# [self._hidden_nums, self._hidden_nums]]
#
# stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
# fw_cell_list, bw_cell_list, inputdata, dtype=tf.float32)
#
# def f1():
# """
#
# :return:
# """
# return self.dropout(inputdata=stack_lstm_layer, keep_prob=0.5)
#
# def f2():
# """
#
# :return:
# """
# return stack_lstm_layer
#
# stack_lstm_layer = tf.cond(self._is_training, f1, f2)
#
# # [batch, width, 2*n_hidden]
# [batch_s, _, hidden_nums] = inputdata.get_shape().as_list()
#
# # [batch x width, 2*n_hidden]
# rnn_reshaped = tf.reshape(stack_lstm_layer, [-1, hidden_nums])
#
# var_w = tf.Variable(tf.truncated_normal([hidden_nums, self._num_classes],
# stddev=0.1), name="w")
#
# # Doing the affine projection
# # logits = tf.matmul(rnn_reshaped, var_w)
# logits = slim.fully_connected(inputs=rnn_reshaped, num_outputs=self._num_classes,
# activation_fn=None)
#
# logits = tf.reshape(logits, [batch_s, -1, self._num_classes])
#
# # raw_pred = tf.argmax(tf.nn.softmax(logits),
# # axis=2, name='raw_prediction')
#
# # Swap batch and batch axis
# rnn_out = tf.transpose(logits, (1, 0, 2),
# name='transpose_time_major') # [width, batch, n_classes]
# else:
# with tf.variable_scope('GRULayers'):
# # construct stack fru rcnn layer
# # forward gru cell
# fw_cell_list = [rnn.GRUCell(nh) for nh in
# [self._hidden_nums, self._hidden_nums]]
# # Backward direction cells
# bw_cell_list = [rnn.GRUCell(nh) for nh in
# [self._hidden_nums, self._hidden_nums]]
#
# stack_gru_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
# fw_cell_list, bw_cell_list, inputdata, dtype=tf.float32)
#
# def f3():
# """
#
# :return:
# """
# return self.dropout(inputdata=stack_gru_layer, keep_prob=0.5)
#
# def f4():
# """
#
# :return:
# """
# return stack_gru_layer
#
# stack_gru_layer = tf.cond(self._is_training, f3, f4)
#
# # [batch, width, 2*n_hidden]
# [batch_s, _, hidden_nums] = inputdata.get_shape().as_list()
#
# # [batch x width, 2*n_hidden]
# rnn_reshaped = tf.reshape(stack_gru_layer, [-1, hidden_nums])
#
# var_w = tf.Variable(tf.truncated_normal([hidden_nums, self._num_classes],
# stddev=0.1),
# name="w")
#
# # Doing the affine projection
# # logits = tf.matmul(rnn_reshaped, var_w)
# logits = slim.fully_connected(inputs=rnn_reshaped, num_outputs=self._num_classes,
# activation_fn=None)
#
# logits = tf.reshape(logits, [batch_s, -1, self._num_classes])
#
# # raw_pred = tf.argmax(tf.nn.softmax(logits),
# # axis=2, name='raw_prediction')
#
# # Swap batch and batch axis
# rnn_out = tf.transpose(logits, (1, 0, 2),
# name='transpose_time_major') # [width, batch, n_classes]
#
# return rnn_out
# def build_shadownet_cnn_subnet(self, inputdata):
# """
# Build the cnn feature extraction part of the crnn model used for classification
# :param inputdata:
# :return:
# """
# # first apply the cnn feture extraction stage
# with tf.variable_scope('cnn_subnetwork'):
# cnn_out = self._feature_sequence_extraction(inputdata=inputdata)
#
# fc1 = self.fullyconnect(inputdata=cnn_out, out_dim=4096, use_bias=False, name='fc1')
#
# relu1 = self.relu(inputdata=fc1, name='relu1')
#
# fc2 = self.fullyconnect(inputdata=relu1, out_dim=CFG.TRAIN.CLASSES_NUMS,
# use_bias=False, name='fc2')
#
# return fc2
if __name__ == '__main__':
    # Smoke test: build the graph on dummy data and initialize all variables.
    label = tf.ones([32, 10], tf.int64)
    label = arr2sparse(label)
    x = tf.ones([32, 32, 100, 3], tf.float32)
    # BUG FIX: ShadowNet.__init__ requires (phase, is_train); the original call
    # omitted is_train and raised TypeError. Pass True so the loss branch builds.
    network = ShadowNet('train', True)
    loss, ids, score, _ = network.build_shadownet(x, label)
    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        # l, id, s = sess.run([loss, ids, score])
        # print(l)
        # print(id)
        # print(s)
import json
import requests
from urllib.request import urlopen
'''
def get_json(url):
with urlopen(url) as json_file:
json_data = json.loads(json_file.read())
return json_data
'''
def get_json(url, timeout=None):
    """Fetch *url* over HTTP and return the parsed JSON body.

    :param url: HTTP(S) resource to fetch.
    :param timeout: optional requests timeout in seconds; the default of None
        preserves the original wait-indefinitely behavior.
    :return: deserialized JSON value (dict, list, ...).
    """
    # FIX: the previous json.loads(content.decode('utf-8')) assumed UTF-8;
    # Response.json() honors the response's declared encoding instead.
    response = requests.get(url, timeout=timeout)
    return response.json()
|
import minimalmodbus
import serial
import pandas as pd
import time
import random
# Special Functions Registers. Parameters registers range from 0 - 4095;
# addresses >= 4096 are device control/status registers.
# Read Only: indicates the total number of parameters
TOTAL_NUMBER_OF_PARAM = 4097
# Read Only: indicates the state of the inverter (on/off, cw/ccw, etc.)
STATE = 4100
# R/W: when 1, turns the inverter ON. When 0 turns OFF.
ON_OFF = 4101
# R/W: when 1, the output frequency is incremented by 0.1 Hz. When 0 it is decremented by 0.1 Hz
INC_DEC_FREQ = 4103
# Read Only: indicates the firmware version
VERSION = 4104
# R/W: resets the inverter
RESET = 4105
# R/W: sets the output frequency
FREQ_SET = 4106
# R/W: when 1 turns on the inverter CW. When 0 turns on the inverter CCW.
CW_CCW = 4107
# Read Only: parameter P003, output current.
OUT_CURRENT = 2
# Read Only: parameter P005, junction temperature.
TEMPERATURE = 4
# R/W: P301 (presumably the frequency reference — confirm in the drive manual)
FREQ_REF = 26
# R/W: P302 (presumably the control-source selection — confirm in the drive manual)
CTRL = 27
class NotConnectedError(Exception):
    """Raised when an operation is attempted with no target connected."""
class AgDrive:
    """Modbus RTU client for an AG inverter drive (via minimalmodbus).

    All command methods return True on success and False on any communication
    failure or when no target is connected; register reads return -1 on failure.
    """

    def __init__(self, port=None, address=0, baud=None, stop_bits=None, parity=None, timeout=None):
        """Store serial settings; the connection is opened by connect2instrument()."""
        self.instr = None  # minimalmodbus.Instrument, set by connect2instrument()
        self.port = port
        self.address = int(address)
        self.baud = baud
        self.stop_bits = stop_bits
        self.parity = parity
        self.timeout = timeout
        self.connected = False

    def connect2instrument(self):
        """Open the serial Modbus connection, applying defaults for blank settings.

        :return: True on success, False when the instrument cannot be opened.
        """
        if self.port == "":
            self.port = "COM100"
        try:
            baud_int = int(self.baud)
        except ValueError:
            baud_int = 2400  # fall back to the drive's default baud rate
        if self.parity == "":
            self.parity = 'N'
        if self.stop_bits == "":
            self.stop_bits = 1
        try:
            self.instr = minimalmodbus.Instrument(self.port, int(self.address))  # port name, slave address
            self.instr.serial.baudrate = baud_int
            self.instr.serial.stopbits = int(self.stop_bits)
            self.instr.serial.timeout = self.timeout
            self.instr.serial.parity = self.parity
            self.instr.clear_buffers_before_each_transaction = True
            self.instr.close_port_after_each_call = True
            self.connected = True
            return True
        except (minimalmodbus.ModbusException, NameError, serial.serialutil.SerialException):
            self.connected = False
            return False

    def _write(self, register, value):
        """Write *value* to *register* with function code 6 (no decimals).

        Shared by all command methods; previously each duplicated this
        try/except block. FIX: writing while disconnected now returns False
        (matching get()) instead of raising AttributeError on a None instr.
        """
        if not self.connected:
            return False
        try:
            self.instr.write_register(register, value, 0, 6, False)
            return True
        except (minimalmodbus.NoResponseError, minimalmodbus.SlaveReportedException,
                minimalmodbus.InvalidResponseError, NameError, NotConnectedError):
            return False

    def inv_on(self):
        """Turn the inverter ON."""
        return self._write(ON_OFF, 1)

    def inv_off(self):
        """Turn the inverter OFF."""
        return self._write(ON_OFF, 0)

    def inc_freq_step(self):
        """Increment the output frequency by one 0.1 Hz step."""
        return self._write(INC_DEC_FREQ, 1)

    def dec_freq_step(self):
        """Decrement the output frequency by one 0.1 Hz step."""
        return self._write(INC_DEC_FREQ, 0)

    def inv_cw(self):
        """Set the rotation direction to clockwise."""
        return self._write(CW_CCW, 0)

    def inv_ccw(self):
        """Set the rotation direction to counter-clockwise."""
        return self._write(CW_CCW, 1)

    def get(self, register):
        """Read *register*; return its value, or -1 when disconnected or on error."""
        if not self.connected:
            return -1
        try:
            return self.instr.read_register(register)
        except (minimalmodbus.NoResponseError, minimalmodbus.SlaveReportedException,
                minimalmodbus.InvalidResponseError, NameError, NotConnectedError):
            return -1

    def set(self, register, value):
        """Write *value* to an arbitrary *register*; True on success."""
        return self._write(register, value)
def error_handling(error):
    """Translate an internal test-failure code into its Portuguese report message.

    :param error: failure code produced by the test routine (e.g. 'display', 'on').
    :return: human-readable message string, or None for an unknown code
        (matching the original implicit fall-through behavior).
    """
    messages = {
        'display': ' Reprovado no teste do display.',
        'dp1': ' Tensão no display DP1 diferente da especificação.',
        'dec': ' Inversor não desacelera via comunicação.',
        'acc': ' Inversor não acelera via comunicação.',
        'di2': ' DI2 não funciona.',
        'di1': ' DI1 não funciona.',
        'multispeed': ' Falha no acionamento multispeed, não foi possível realizar a configuração via Modbus.',
        'di3': ' DI3 não funciona.',
        'no_current': ' Falha na leitura de corrente.',
        # BUG FIX: message previously started with ' ensão' — missing the 'T'.
        'dc_voltage': ' Tensão do barramento CC fora da especificação, erro de leitura.',
        'off': ' Inversor não desliga via comunicação.',
        'on': ' Inversor não liga via comunicação.',
    }
    return messages.get(error)
# def file_save():
# f = filedialog.asksaveasfile(initialdir="/", title="testing='.txt'", initialfile="file",
# defaultextension=".txt", filetypes=(("Text files", "*.txt"), ("all files", "*.*")))
# if f is None:
# return
# return f.name |
def computepay(h, r):
    """Compute weekly pay: straight time up to 40 hours, then time-and-a-half.

    :param h: hours worked.
    :param r: hourly rate.
    :return: total pay.
    """
    overtime = max(h - 40, 0)
    return (h - overtime) * r + overtime * 1.5 * r
# Prompt for hours and hourly rate, then print the computed pay.
h = float(input("Enter Hours:"))
r = float(input("Enter Rate per hour:"))
print(computepay(h, r))
|
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
# Application object and Flask-Bootstrap extension wiring.
app = Flask(__name__)
bootstrap = Bootstrap(app)
@app.route('/')
def index():
    """Render the landing page with a fixed title."""
    return render_template('index.html', title='Flask test')
if __name__ == '__main__':
    # Development server: debug mode on, reachable from any network interface.
    app.run(debug=True, host='0.0.0.0')
class MaxHeap:
    """Binary max-heap over a list of records, ordered by ``int(record[attr])``.

    The heap takes ownership of *data* and reorders it in place; extract()
    removes and returns records in descending key order.
    """

    def __init__(self, data, attr):
        self.__data = data
        self.__attr = attr
        self.__build()

    def __key(self, i):
        # Comparison key of the element at index i.
        return int(self.__data[i][self.__attr])

    def __build(self):
        # Bottom-up heapify, starting from the last internal node.
        for i in reversed(range(len(self.__data) // 2)):
            self.__heapify(i, len(self.__data))

    def __heapify(self, parent_index, n):
        # Iteratively sift the element at parent_index down within data[:n]
        # until the max-heap property holds.
        while True:
            largest = parent_index
            for child in (2 * parent_index + 1, 2 * parent_index + 2):
                if child < n and self.__key(child) > self.__key(largest):
                    largest = child
            if largest == parent_index:
                return
            self.__data[parent_index], self.__data[largest] = (
                self.__data[largest], self.__data[parent_index])
            parent_index = largest

    def extract(self):
        """Remove and return the record with the largest key, or None if empty."""
        if not self.__data:
            return None
        top = self.__data[0]
        # Move the last element to the root, shrink, and restore the heap.
        self.__data[0] = self.__data[-1]
        self.__data.pop()
        self.__heapify(0, len(self.__data))
        return top
|
"""Treadmill application environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
if os.name == 'nt':
from ._windows import WindowsAppEnvironment as AppEnvironment
else:
from ._linux import LinuxAppEnvironment as AppEnvironment
__all__ = ['AppEnvironment']
|
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator)
import numpy as np
import csv
from scipy.integrate import odeint
beginYear = 2011
endYear = 2018
endYearSim = 2025
scale = 100000
def f(p0, t, consta, constb, beta):
    """Right-hand side of the TB compartment ODE system for scipy.integrate.odeint.

    :param p0: state vector (LA, LB, I, T, SA, SB, N) of compartment proportions.
    :param t: time (unused directly; required by the odeint signature).
    :param consta: epidemiological rate constants (indices 0-10 used).
    :param constb: intervention constants (indices 0-4 used).
    :param beta: transmission coefficient.
    :return: np.array of the seven time derivatives, in state-vector order.
    """
    la, lb, infectious, treated, sa, sb, pop = p0
    excess_i = consta[4] - consta[8]   # excess mortality of infectious over baseline
    excess_t = consta[5] - consta[8]   # excess mortality of treated over baseline
    # Force of infection, with treated cases scaled by consta[6].
    force = beta * consta[9] * (infectious + consta[6] * treated)
    # Net population growth correction term.
    growth = pop - consta[8] - excess_i * infectious - excess_t * treated
    d_la = force * (sa + constb[0] * (sb + lb)) - la * (consta[0] + consta[1] + consta[8] + growth)
    d_lb = consta[1] * la + consta[3] * infectious - lb * (constb[0] * force + consta[2] + consta[8] + growth)
    d_i = consta[0] * la + consta[2] * lb + consta[10] * treated - infectious * (consta[3] + constb[4] + consta[4] + growth)
    d_t = constb[4] * infectious - treated * (constb[3] * constb[1] + consta[10] + consta[5] + growth)
    d_sa = (1 - constb[2]) * pop - sa * (force + consta[8] + growth)
    d_sb = constb[2] * pop + constb[3] * constb[1] * treated - sb * (constb[0] * force + consta[8] + growth)
    d_n = -pop * growth
    return np.array([d_la, d_lb, d_i, d_t, d_sa, d_sb, d_n])
def ploti(_ax, yr):
    """Format an incidence-rate axes: labels, yearly x ticks and, for the
    fitted (beginYear) or simulated (endYear) window, fixed y limits."""
    _ax.set_xlabel("year")
    _ax.set_ylabel(f"population proportion per {scale} persons")
    _ax.set_xticks(np.arange(yr, yr + endYear - beginYear + 1, 1))
    # Y-axis ceiling depends on which window is being drawn.
    if yr == beginYear:
        top = 0.00325 * scale
    elif yr == endYear:
        top = 0.00275 * scale
    else:
        top = None
    if top is not None:
        _ax.set_ylim(0, top)
        _ax.set_yticks(np.arange(0, top, 0.0005 * scale))
        _ax.yaxis.set_minor_locator(MultipleLocator(0.0001 * scale))
    _ax.legend()
def plotp(_ax, yr):
    """Format a prevalence-rate axes: labels, yearly x ticks and, for the
    fitted (beginYear) or simulated (endYear) window, fixed y limits."""
    _ax.set_xlabel("year")
    _ax.set_ylabel(f"population proportion per {scale} persons")
    _ax.set_xticks(np.arange(yr, yr + endYear - beginYear + 1, 1))
    # Both windows share the same ceiling; only the minor-tick step differs.
    if yr == beginYear:
        minor = 0.00025
    elif yr == endYear:
        minor = 0.0002
    else:
        minor = None
    if minor is not None:
        _ax.set_ylim(0, 0.00650 * scale)
        _ax.set_yticks(np.arange(0, 0.00650 * scale, 0.001 * scale))
        _ax.yaxis.set_minor_locator(MultipleLocator(minor * scale))
    _ax.legend()
def fnd(_y1, par, delt):
    """Derive a reported rate series from the ODE solution matrix.

    :param _y1: odeint output (rows = time, columns = compartments).
    :param par: 'inci' -> treated column scaled by 1/delt (incidence proxy);
        'preva' -> infectious + treated columns (prevalence proxy).
    :param delt: divisor for the incidence calculation (diagnosis rate).
    :return: 1-D array of rates, or None for an unknown *par*.
    """
    if par == "inci":
        return _y1[:, 3] / delt
    if par == "preva":
        return _y1[:, 2] + _y1[:, 3]
    return None
# --- Parameter loading -------------------------------------------------------
consta = []  # epidemiological rate constants (CSV rows 0-10)
constb = []  # intervention-related constants (CSV rows 12-16)
p0 = []      # initial compartment proportions (CSV rows 20+)
# NOTE(review): Windows-only backslash path; consider pathlib for portability.
with open("Files\Summary-of-model-parameters.csv", "r") as csvfile:
    csvreader = csv.reader(csvfile, delimiter = ",")
    i = 0
    for row in csvreader:
        # SECURITY NOTE: eval() on spreadsheet cells executes arbitrary code;
        # acceptable only because the CSV is a trusted local file.
        if i < 11:
            consta.append(float(eval(row[3])))
        elif i > 11 and i < 17:
            constb.append(float(eval(row[3])))
        elif i == 18:
            beta = float(eval(row[3]))
        elif i > 19:
            try:
                p0.append(float(eval(row[3])))
            except:
                # Fall back to column 1 when column 3 is empty/unparseable.
                p0.append(float(eval(row[1])))
        i += 1
consta = np.array(consta)
constb = np.array(constb)
p0 = np.array(p0)
# p0[0] = 0.022
t = np.arange(beginYear, endYear, 0.01)
# Fitted 2011 initial state and transmission coefficient beta, hard-coded
# from a previous fitting run (they override the CSV values above).
p0[0] = 0.037622363572376645
p0[1] = 0.1819693909643521
p0[2] = 0.0033316250000000004
p0[3] = 0.001428375
p0[4] = 0.4599141990276218
p0[5] = 0.31556717163564785
p0[6] = 0.02480499957476217
beta = 25.185185185185045
print("-------------------------\n(p at 2011)")
for i in range(len(p0)):
    print("p0[" + str(i) + "] = " + str(p0[i]))
print("beta: " + str(beta))
# Integrate the compartment ODE system over the fitted window 2011-2018.
y = odeint(f, p0, t, args=(consta, constb, beta))
print("-------------------------\n(p at 2018)")
for i in range(len(p0)):
    print("p[" + str(i) + "] = " + str(y[-1, i]))
# --- Fitted-model plots (beginYear..endYear) ---------------------------------
fig, ax = plt.subplots(1, 2, figsize=(15, 6))
fig.suptitle("Fitted Model for Years (" + str(beginYear) + "-" + str(endYear) +")", fontsize=20)
ax[0].set_title("Tuberculosis Incidence Rate in the Philippines")
ax[1].set_title("Tuberculosis Prevalence Rate in the Philippines")
# Incidence: treated-compartment column scaled by constb[4]; prevalence:
# infectious + treated proportions. Both scaled to "per `scale` persons".
ax[0].plot(t, (y[:, 3]/constb[4])*scale, linewidth=2, label="Incidence rate")
ax[1].plot(t, (y[:, 3]+y[:, 2])*scale, linewidth=2, label="Prevalence rate")
ploti(ax[0], beginYear)
plotp(ax[1], beginYear)
# --- Forward simulations (endYear..endYearSim) with varied interventions -----
t1 = np.arange(endYear, endYearSim, 0.01)
fig1, axs1 = plt.subplots(2, 2, figsize=(15, 10))
fig1.suptitle("Simulated Incidence Rates (Years " + str(endYear) + "-" + str(endYearSim) + ")", fontsize=20)
axs1[0, 0].set_title("Varied Partial Immunity")
axs1[0, 1].set_title("Varied Vaccine Coverage")
axs1[1, 0].set_title("Varied Treatment Success")
axs1[1, 1].set_title("Varied Treatment Duration")
fig2, axs2 = plt.subplots(2, 2, figsize=(15, 10))
fig2.suptitle("Simulated Prevalence Rates (Years " + str(endYear) + "-" + str(endYearSim) + ")", fontsize=20)
axs2[0, 0].set_title("Varied Partial Immunity")
axs2[0, 1].set_title("Varied Vaccine Coverage")
axs2[1, 0].set_title("Varied Treatment Success")
axs2[1, 1].set_title("Varied Treatment Duration")
for i in range(2):
    ax[i].set_xlim([beginYear, endYear])
    for j in range(2):
        axs1[i, j].set_xlim([endYear, endYearSim])
        axs2[i, j].set_xlim([endYear, endYearSim])
# Scenario 1: partial immunity (constb[0]) reduced by 0.04 per run.
# Each scenario restarts from the fitted 2018 state y[-1].
constbv = constb.copy()
for i in range(6):
    y1 = odeint(f, y[-1], t1, args=(consta, constbv, beta))
    y1i = fnd(y1, "inci", constbv[4])
    y1p = fnd(y1, "preva", constbv[4])
    axs1[0, 0].plot(t1, y1i*scale, linewidth=2, label="Partial immunity = {:.2f}".format(constbv[0]))
    axs2[0, 0].plot(t1, y1p*scale, linewidth=2, label="Partial immunity = {:.2f}".format(constbv[0]))
    ploti(axs1[0, 0], endYear)
    plotp(axs2[0, 0], endYear)
    constbv[0] -= 0.04
# Scenario 2: vaccine coverage (constb[2]) raised in six steps of 0.1336/6.
constbv = constb.copy()
for i in range(6):
    y1 = odeint(f, y[-1], t1, args=(consta, constbv, beta))
    y1i = fnd(y1, "inci", constbv[4])
    y1p = fnd(y1, "preva", constbv[4])
    axs1[0, 1].plot(t1, y1i*scale, linewidth=2, label="Vaccine Coverage = {:.2f}".format(constbv[2]))
    axs2[0, 1].plot(t1, y1p*scale, linewidth=2, label="Vaccine Coverage = {:.2f}".format(constbv[2]))
    ploti(axs1[0, 1], endYear)
    plotp(axs2[0, 1], endYear)
    constbv[2] += 0.1336/6
# Scenario 3: treatment success (constb[3]) increased by 0.04 per run.
constbv = constb.copy()
for i in range(7):
    y1 = odeint(f, y[-1], t1, args=(consta, constbv, beta))
    y1i = fnd(y1, "inci", constbv[4])
    y1p = fnd(y1, "preva", constbv[4])
    axs1[1, 0].plot(t1, y1i*scale, linewidth=2, label="Treatment Success = {:.2f}".format(constbv[3]))
    axs2[1, 0].plot(t1, y1p*scale, linewidth=2, label="Treatment Success = {:.2f}".format(constbv[3]))
    ploti(axs1[1, 0], endYear)
    plotp(axs2[1, 0], endYear)
    constbv[3] += 0.04
# Scenario 4: treatment duration varied via the completion rate constb[1]
# (duration in months = 12/constb[1]).
constbv = constb.copy()
for i in range(7):
    y1 = odeint(f, y[-1], t1, args=(consta, constbv, beta))
    y1i = fnd(y1, "inci", constbv[4])
    y1p = fnd(y1, "preva", constbv[4])
    axs1[1, 1].plot(t1, y1i*scale, linewidth=2, label="Treatment Duration = {:.2f} months".format(12/constbv[1]))
    axs2[1, 1].plot(t1, y1p*scale, linewidth=2, label="Treatment Duration = {:.2f} months".format(12/constbv[1]))
    ploti(axs1[1, 1], endYear)
    plotp(axs2[1, 1], endYear)
    # Shorten the duration by one month each run (rate-domain update).
    if constbv[1]==12:
        constbv[1] = 24
    else:
        constbv[1] = (constbv[1]*12)/(12-constbv[1])
plt.show()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-14 20:18:22
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# Lazy sequential id source: each next() yields 1, 2, ..., 99999.
# FIX: the generator variable was named `id`, shadowing the builtin id().
_id = (i for i in range(1, 100000))
print(next(_id))
print(next(_id))
"""
Train and Evaluation Script of Combined Model AutoEncoder and Classifier.
These are trained separatly.
"""
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from load_data import ImbalancedCIFAR10
from model import Combine, CAE3
# Run on GPU when available; all tensors and the model are moved to this device.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Fix RNG seeds for reproducibility (numpy, torch CPU, torch CUDA).
SEED = 40
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)

# Per-channel normalization statistics -- presumably computed on the
# (imbalanced) CIFAR-10 training split; TODO confirm their origin.
transform_mean = [0.4920, 0.4825, 0.4500]
transform_std = [0.2039, 0.2009, 0.2026]

# Training pipeline: light augmentation (flip / rotate / random crop to 32x32)
# followed by tensor conversion and normalization.
train_transform = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(degrees=20),
        transforms.RandomResizedCrop(32),
        transforms.ToTensor(),
        transforms.Normalize(transform_mean, transform_std)
    ])

# Evaluation pipeline: no augmentation, only tensor conversion + normalization.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(transform_mean, transform_std)]
)

# Load Train Data
# Per-class keep ratios: classes 2 (bird), 4 (deer) and 9 (truck) are
# subsampled to 50% to create the imbalanced training set.
train_imbalance_class_ratio = np.array([1., 1., .5, 1., .5, 1., 1., 1., 1., .5])
# train_imbalance_class_ratio = np.array([1.] * 10)
train_set = ImbalancedCIFAR10(train_imbalance_class_ratio, transform=train_transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)

# Load Test Data (full, balanced CIFAR-10 test split).
test_set = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=4, shuffle=False, num_workers=4)

# CIFAR-10 class names, indexed by label.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Combined auto-encoder + classifier model.
net = CAE3()
net = net.to(device)

criterion = nn.CrossEntropyLoss()
# ae_criterion = nn.MSELoss()
# NOTE(review): BCELoss expects values in [0, 1], but inputs went through
# Normalize() -- confirm the decoder output / target ranges are compatible.
ae_criterion = nn.BCELoss()
optimizer = optim.Adam(net.parameters(), lr=0.01)

# ---- Phase 1: train the auto-encoder, classifier frozen ----
print('Start Auto Encoder Training')
# BUGFIX: `net.classifier.requires_grad = False` merely set a plain Python
# attribute on the Module and froze nothing; freezing must toggle each
# parameter's requires_grad flag.
for p in net.classifier.parameters():
    p.requires_grad = False
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, _ = data  # labels are unused for reconstruction
        inputs = inputs.to(device)
        decoded = net(inputs)
        ae_loss = ae_criterion(decoded, inputs)
        optimizer.zero_grad()
        ae_loss.backward()
        optimizer.step()
        running_loss += ae_loss.item()
        if i % 2000 == 1999:  # report average loss every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
# ---- Phase 2: train the classifier, auto-encoder frozen ----
print('Start Classification Training')
# BUGFIX: assigning `requires_grad` on an nn.Module is a no-op; the flag
# must be toggled on each parameter to (un)freeze the submodules.
for p in net.classifier.parameters():
    p.requires_grad = True
for p in net.encoder.parameters():
    p.requires_grad = False
for p in net.decoder.parameters():
    p.requires_grad = False
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)
        predicted = net.classify(inputs)
        loss = criterion(predicted, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:  # report average loss every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
def imshow(img):
    """Un-normalize a (C, H, W) image tensor and display it with matplotlib."""
    # Undo the Normalize() step: map values back towards [0, 1].
    denorm = img / 2 + 0.5
    print(type(denorm))  # <class 'torch.Tensor'>
    # Convert to an ndarray so matplotlib can render it.
    pixels = denorm.numpy()
    print(type(pixels))
    print(pixels.shape)  # (C, H, W)
    # Reorder channels-first to channels-last: (C, H, W) -> (H, W, C).
    pixels = np.transpose(pixels, (1, 2, 0))
    print(pixels.shape)
    plt.imshow(pixels)
    plt.show()
# ---- Evaluation ----
# NOTE(review): consider calling net.eval() here if CAE3 contains dropout or
# batch-norm layers -- confirm against the model definition.
dataiter = iter(test_loader)
# BUGFIX: the `.next()` method was removed from DataLoader iterators in
# modern PyTorch; the builtin next() is the supported spelling.
images, labels = next(dataiter)
# imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
outputs = net.classify(images.to(device))
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))

# Overall accuracy over the full test set; no gradients needed at eval time.
correct = 0
total = 0
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        outputs = net.classify(images.to(device))
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted.to(device) == labels.to(device)).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

# Per-class accuracy.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        outputs = net.classify(images.to(device))
        _, predicted = torch.max(outputs, 1)
        c = (predicted.to(device) == labels.to(device)).squeeze()
        for i in range(4):  # test_loader batch_size is 4 (10000 divides evenly)
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
|
# -*- coding: utf-8 -*-
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = 0.1
#
# QPATH.MASKS - various functions for creating and manipulating image
# masks (i.e. binary images of 0s and 1s).
#
__all__ = ['add_region', 'masked_points', 'apply_mask']
import numpy as np
from skimage.draw import polygon
##-
def add_region(mask: np.ndarray, poly_line: np.ndarray) -> np.ndarray:
    """Burn a polygonal region into a mask by setting its pixels to 1.

    The mask is modified in place and returned for convenience.

    Args:
        mask (numpy.array): binary mask to update, possibly already
            containing other masked regions
        poly_line (numpy.array): an N x 2 array with the (x,y)
            coordinates of the polygon vertices as rows

    Returns:
        a numpy.array - the updated mask
    """
    cols, rows = masked_points(poly_line, mask.shape)
    mask[rows, cols] = 1

    return mask
##-
##-
def masked_points(poly_line: np.ndarray, shape: tuple) -> tuple:
    """Return the coordinates of all points inside a polygonal region.

    Args:
        poly_line (numpy.array): an N x 2 array with the (x,y)
            coordinates of the polygon vertices as rows
        shape (pair): extent of the rectangular region within which the
            polygon lies (typically image.shape[:2])

    Returns:
        a pair of lists (X, Y) where X[i], Y[i] are the coordinates of a
        point within the mask (polygonal region)
    """
    # Close the polygon if the last vertex does not repeat the first one.
    first = poly_line[0, ]
    if not np.all(first == poly_line[-1, ]):
        poly_line = np.concatenate((poly_line, [first]))

    # skimage's polygon() expects (row, col) order, hence the x/y swap.
    rows, cols = polygon(poly_line[:, 1], poly_line[:, 0], shape)

    return cols, rows
##-
##-
def apply_mask(img: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Apply a mask to each channel of an image. Pixels corresponding to 0s in
    the mask will be set to 0. Changes are made in situ.

    Args:
        img (numpy.array): an image as an N-dim array
            (height x width x no_of_channels), or 2-dim (height x width)
        mask (numpy.array): a mask as a 2-dim array (height x width)

    Return:
        numpy.array: the modified image (same object as img)
    """
    # BUGFIX: the original test `mask.dtype is np.bool` never matched (a dtype
    # object is never identical to the builtin alias) and `np.bool` itself was
    # removed in NumPy 1.24, making the line raise. Compare by equality.
    if mask.dtype == bool:
        # Work on a 0/1 uint8 copy so it can multiply into integer images.
        mask = mask.astype(np.uint8)
        mask[mask > 0] = 1

    if img.ndim == 2:
        img *= mask
    else:
        for k in np.arange(img.shape[2]):
            img[:, :, k] *= mask

    return img
##-
|
# coding=utf-8
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtWidgets, QtGui
import logging
import rospy
from rover_arm.msg import ArmStatusMessage
from rover_control.msg import GripperStatusMessage
#####################################
# Global Variables
#####################################
# ROS topics carrying arm joint status and gripper telemetry.
ARM_STATUS_TOPIC = "/rover_arm/status"
GRIPPER_STATUS_TOPIC = "/rover_control/gripper/status"

# Maps raw communication-status codes (power-of-two values, as received in
# the status message) to display strings for the UI.
COMMS_TO_STRING = {
    0: "NO STATUS",
    1: "COMMS OK",
    2: "NO DEVICE",
    4: "BUS ERROR",
    8: "GEN COMM ERROR",
    16: "PARAMETER ERROR",
    32: "LENGTH ERROR"
}

# Bit position of the "target reached" flag within the status word.
TARGET_REACHED_BIT_POSITION = 1

# Maps bit positions in the drive status word to display strings
# (tested via `(1 << bit_position) & statuses`).
STATUS_TO_STRING = {
    1: "TARGET REACHED",
    2: "ERROR RECOVERY",
    3: "RUN",
    4: "ENABLED",
    5: "FAULT STOP",
    6: "WARNING",
    7: "STO ACTIVE",
    8: "SERVO READY",
    10: "BRAKING",
    11: "HOMING",
    12: "INITIALIZED",
    13: "VOLT OK",
    15: "PERMANENT STOP"
}

# Maps bit positions in the drive fault word to display strings.
FAULT_TO_STRING = {
    1: "TRACKING ERROR",
    2: "OVER CURRENT",
    # 3: "COMMUNICATION ERROR",  # Was showing even though things were working???
    4: "ENCODER FAILURE",
    5: "OVER TEMP",
    6: "UNDER VOLTAGE",
    7: "OVER VOLTAGE",
    8: "PROG OR MEM ERROR",
    9: "HARDWARE ERROR",
    10: "OVER VELOCITY",
    11: "INIT ERROR",
    12: "MOTION ERROR",
    13: "RANGE ERROR",
    14: "POWER STAGE FORCED OFF",
    15: "HOST COMM ERROR"
}
#####################################
# Controller Class Definition
#####################################
class ArmIndication(QtCore.QObject):
    """Bridges ROS arm/gripper status topics to the right-screen Qt widgets.

    ROS subscriber callbacks run outside the Qt GUI thread, so every value is
    forwarded through a pyqtSignal; the signal/slot connections deliver the
    updates safely to the LCD and label widgets.
    """

    # Joint position read-outs (units as reported by the arm controller --
    # presumably degrees; TODO confirm against ArmStatusMessage).
    base_position_updated__signal = QtCore.pyqtSignal(float)
    shoulder_position_updated__signal = QtCore.pyqtSignal(float)
    elbow_position_updated__signal = QtCore.pyqtSignal(float)
    roll_position_updated__signal = QtCore.pyqtSignal(float)
    wrist_pitch_position_updated__signal = QtCore.pyqtSignal(float)
    wrist_roll_position_updated__signal = QtCore.pyqtSignal(float)

    # Human-readable communication state per joint.
    base_comms_state_update_ready__signal = QtCore.pyqtSignal(str)
    shoulder_comms_state_update_ready__signal = QtCore.pyqtSignal(str)
    elbow_comms_state_update_ready__signal = QtCore.pyqtSignal(str)
    roll_comms_state_update_ready__signal = QtCore.pyqtSignal(str)
    wrist_pitch_comms_state_update_ready__signal = QtCore.pyqtSignal(str)
    wrist_roll_comms_state_update_ready__signal = QtCore.pyqtSignal(str)

    # Decoded status-word text per joint (newline-separated flag names).
    base_status_update_ready__signal = QtCore.pyqtSignal(str)
    shoulder_status_update_ready__signal = QtCore.pyqtSignal(str)
    elbow_status_update_ready__signal = QtCore.pyqtSignal(str)
    roll_status_update_ready__signal = QtCore.pyqtSignal(str)
    wrist_pitch_status_update_ready__signal = QtCore.pyqtSignal(str)
    wrist_roll_status_update_ready__signal = QtCore.pyqtSignal(str)

    # Decoded fault-word text per joint (newline-separated fault names).
    base_faults_update_ready__signal = QtCore.pyqtSignal(str)
    shoulder_faults_update_ready__signal = QtCore.pyqtSignal(str)
    elbow_faults_update_ready__signal = QtCore.pyqtSignal(str)
    roll_faults_update_ready__signal = QtCore.pyqtSignal(str)
    wrist_pitch_faults_update_ready__signal = QtCore.pyqtSignal(str)
    wrist_roll_faults_update_ready__signal = QtCore.pyqtSignal(str)

    # Gripper telemetry.
    pinch_position_updated__signal = QtCore.pyqtSignal(int)
    pinch_current_updated__signal = QtCore.pyqtSignal(int)
    gripper_reported_distance_updated__signal = QtCore.pyqtSignal(int)
    gripper_reported_temp_updated__signal = QtCore.pyqtSignal(int)

    def __init__(self, shared_objects):
        """Grab widget references, subscribe to ROS topics and wire signals.

        Args:
            shared_objects: application-wide dict; this class uses
                shared_objects["screens"]["right_screen"] for its widgets.
        """
        super(ArmIndication, self).__init__()

        # ########## Reference to class init variables ##########
        self.shared_objects = shared_objects
        self.right_screen = self.shared_objects["screens"]["right_screen"]

        # Joint-position LCD widgets.
        self.base_position_lcd_number = self.right_screen.base_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.shoulder_position_lcd_number = self.right_screen.shoulder_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.elbow_position_lcd_number = self.right_screen.elbow_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.roll_position_lcd_number = self.right_screen.roll_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.wrist_pitch_position_lcd_number = self.right_screen.wrist_pitch_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.wrist_roll_position_lcd_number = self.right_screen.wrist_roll_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.pinch_position_lcd_number = self.right_screen.pinch_position_lcd_number  # type: QtWidgets.QLCDNumber
        self.pinch_current_lcd_number = self.right_screen.pinch_current_lcd_number  # type: QtWidgets.QLCDNumber

        # Per-joint comms / status / faults text labels.
        self.arm_controls_base_comms_label = self.right_screen.arm_controls_base_comms_label  # type:QtWidgets.QLabel
        self.arm_controls_base_status_label = self.right_screen.arm_controls_base_status_label  # type:QtWidgets.QLabel
        self.arm_controls_base_faults_label = self.right_screen.arm_controls_base_faults_label  # type:QtWidgets.QLabel
        self.arm_controls_shoulder_comms_label = self.right_screen.arm_controls_shoulder_comms_label  # type:QtWidgets.QLabel
        self.arm_controls_shoulder_status_label = self.right_screen.arm_controls_shoulder_status_label  # type:QtWidgets.QLabel
        self.arm_controls_shoulder_faults_label = self.right_screen.arm_controls_shoulder_faults_label  # type:QtWidgets.QLabel
        self.arm_controls_elbow_comms_label = self.right_screen.arm_controls_elbow_comms_label  # type:QtWidgets.QLabel
        self.arm_controls_elbow_status_label = self.right_screen.arm_controls_elbow_status_label  # type:QtWidgets.QLabel
        self.arm_controls_elbow_faults_label = self.right_screen.arm_controls_elbow_faults_label  # type:QtWidgets.QLabel
        self.arm_controls_roll_comms_label = self.right_screen.arm_controls_roll_comms_label  # type:QtWidgets.QLabel
        self.arm_controls_roll_status_label = self.right_screen.arm_controls_roll_status_label  # type:QtWidgets.QLabel
        self.arm_controls_roll_faults_label = self.right_screen.arm_controls_roll_faults_label  # type:QtWidgets.QLabel
        self.arm_controls_wrist_pitch_comms_label = self.right_screen.arm_controls_wrist_pitch_comms_label  # type:QtWidgets.QLabel
        self.arm_controls_wrist_pitch_status_label = self.right_screen.arm_controls_wrist_pitch_status_label  # type:QtWidgets.QLabel
        self.arm_controls_wrist_pitch_faults_label = self.right_screen.arm_controls_wrist_pitch_faults_label  # type:QtWidgets.QLabel
        self.arm_controls_wrist_roll_comms_label = self.right_screen.arm_controls_wrist_roll_comms_label  # type:QtWidgets.QLabel
        self.arm_controls_wrist_roll_status_label = self.right_screen.arm_controls_wrist_roll_status_label  # type:QtWidgets.QLabel
        self.arm_controls_wrist_roll_faults_label = self.right_screen.arm_controls_wrist_roll_faults_label  # type:QtWidgets.QLabel

        # Gripper telemetry LCD widgets.
        self.gripper_reported_distance_lcd_number = self.right_screen.gripper_reported_distance_lcd_number  # type: QtWidgets.QLCDNumber
        self.gripper_reported_temp_lcd_number = self.right_screen.gripper_reported_temp_lcd_number  # type: QtWidgets.QLCDNumber

        # ########## Get the settings instance ##########
        self.settings = QtCore.QSettings()

        # ########## Get the Pick And Plate instance of the logger ##########
        self.logger = logging.getLogger("groundstation")

        # ########## Class Variables ##########
        # ROS subscriptions; callbacks only emit Qt signals (thread-safe).
        self.arm_status_subscriber = rospy.Subscriber(ARM_STATUS_TOPIC, ArmStatusMessage, self.on_arm_status_update_received__callback)
        # NOTE(review): attribute name contains a typo ("subscriver").
        self.gripper_status_subscriver = rospy.Subscriber(GRIPPER_STATUS_TOPIC, GripperStatusMessage, self.on_gripper_status_update_received__callback)

        # ########## Connect Signals and Slots ##########
        self.connect_signals_and_slots()

    def connect_signals_and_slots(self):
        """Connect every update signal to its display widget slot."""
        # Joint positions -> LCD displays.
        self.base_position_updated__signal.connect(self.base_position_lcd_number.display)
        self.shoulder_position_updated__signal.connect(self.shoulder_position_lcd_number.display)
        self.elbow_position_updated__signal.connect(self.elbow_position_lcd_number.display)
        self.roll_position_updated__signal.connect(self.roll_position_lcd_number.display)
        self.wrist_pitch_position_updated__signal.connect(self.wrist_pitch_position_lcd_number.display)
        self.wrist_roll_position_updated__signal.connect(self.wrist_roll_position_lcd_number.display)

        # Comms states -> labels.
        self.base_comms_state_update_ready__signal.connect(self.arm_controls_base_comms_label.setText)
        self.shoulder_comms_state_update_ready__signal.connect(self.arm_controls_shoulder_comms_label.setText)
        self.elbow_comms_state_update_ready__signal.connect(self.arm_controls_elbow_comms_label.setText)
        self.roll_comms_state_update_ready__signal.connect(self.arm_controls_roll_comms_label.setText)
        self.wrist_pitch_comms_state_update_ready__signal.connect(self.arm_controls_wrist_pitch_comms_label.setText)
        self.wrist_roll_comms_state_update_ready__signal.connect(self.arm_controls_wrist_roll_comms_label.setText)

        # Status words -> labels.
        self.base_status_update_ready__signal.connect(self.arm_controls_base_status_label.setText)
        self.shoulder_status_update_ready__signal.connect(self.arm_controls_shoulder_status_label.setText)
        self.elbow_status_update_ready__signal.connect(self.arm_controls_elbow_status_label.setText)
        self.roll_status_update_ready__signal.connect(self.arm_controls_roll_status_label.setText)
        self.wrist_pitch_status_update_ready__signal.connect(self.arm_controls_wrist_pitch_status_label.setText)
        self.wrist_roll_status_update_ready__signal.connect(self.arm_controls_wrist_roll_status_label.setText)

        # Fault words -> labels.
        self.base_faults_update_ready__signal.connect(self.arm_controls_base_faults_label.setText)
        self.shoulder_faults_update_ready__signal.connect(self.arm_controls_shoulder_faults_label.setText)
        self.elbow_faults_update_ready__signal.connect(self.arm_controls_elbow_faults_label.setText)
        self.roll_faults_update_ready__signal.connect(self.arm_controls_roll_faults_label.setText)
        self.wrist_pitch_faults_update_ready__signal.connect(self.arm_controls_wrist_pitch_faults_label.setText)
        self.wrist_roll_faults_update_ready__signal.connect(self.arm_controls_wrist_roll_faults_label.setText)

        # Gripper telemetry -> LCD displays.
        self.pinch_position_updated__signal.connect(self.pinch_position_lcd_number.display)
        self.pinch_current_updated__signal.connect(self.pinch_current_lcd_number.display)
        self.gripper_reported_distance_updated__signal.connect(self.gripper_reported_distance_lcd_number.display)
        self.gripper_reported_temp_updated__signal.connect(self.gripper_reported_temp_lcd_number.display)

    def on_arm_status_update_received__callback(self, data):
        """ROS callback: decode an ArmStatusMessage and emit all UI updates.

        Runs in the rospy subscriber thread; only emits Qt signals.
        """
        # Communication state per joint.
        self.base_comms_state_update_ready__signal.emit(self.process_comms_to_string(data.base_comm_status))
        self.shoulder_comms_state_update_ready__signal.emit(self.process_comms_to_string(data.shoulder_comm_status))
        self.elbow_comms_state_update_ready__signal.emit(self.process_comms_to_string(data.elbow_comm_status))
        self.roll_comms_state_update_ready__signal.emit(self.process_comms_to_string(data.roll_comm_status))
        self.wrist_pitch_comms_state_update_ready__signal.emit(
            self.process_comms_to_string(data.wrist_pitch_comm_status))
        self.wrist_roll_comms_state_update_ready__signal.emit(self.process_comms_to_string(data.wrist_roll_comm_status))

        # Status word per joint.
        self.base_status_update_ready__signal.emit(self.process_statuses_to_string(data.base_status))
        self.shoulder_status_update_ready__signal.emit(self.process_statuses_to_string(data.shoulder_status))
        self.elbow_status_update_ready__signal.emit(self.process_statuses_to_string(data.elbow_status))
        self.roll_status_update_ready__signal.emit(self.process_statuses_to_string(data.roll_status))
        self.wrist_pitch_status_update_ready__signal.emit(self.process_statuses_to_string(data.wrist_pitch_status))
        self.wrist_roll_status_update_ready__signal.emit(self.process_statuses_to_string(data.wrist_roll_status))

        # Fault word per joint.
        self.base_faults_update_ready__signal.emit(self.process_faults_to_string(data.base_faults))
        self.shoulder_faults_update_ready__signal.emit(self.process_faults_to_string(data.shoulder_faults))
        self.elbow_faults_update_ready__signal.emit(self.process_faults_to_string(data.elbow_faults))
        self.roll_faults_update_ready__signal.emit(self.process_faults_to_string(data.roll_faults))
        self.wrist_pitch_faults_update_ready__signal.emit(self.process_faults_to_string(data.wrist_pitch_faults))
        self.wrist_roll_faults_update_ready__signal.emit(self.process_faults_to_string(data.wrist_roll_faults))

        # Raw joint positions.
        self.base_position_updated__signal.emit(data.base)
        self.shoulder_position_updated__signal.emit(data.shoulder)
        self.elbow_position_updated__signal.emit(data.elbow)
        self.roll_position_updated__signal.emit(data.roll)
        self.wrist_pitch_position_updated__signal.emit(data.wrist_pitch)
        self.wrist_roll_position_updated__signal.emit(data.wrist_roll)

    def on_gripper_status_update_received__callback(self, data):
        """ROS callback: forward gripper telemetry fields to the UI."""
        data = data  # type: GripperStatusMessage
        self.pinch_position_updated__signal.emit(data.position_raw)
        self.pinch_current_updated__signal.emit(data.current)
        self.gripper_reported_distance_updated__signal.emit(data.distance)
        self.gripper_reported_temp_updated__signal.emit(data.temp)

    @staticmethod
    def process_faults_to_string(faults):
        """Decode a fault bit-field into newline-separated fault names."""
        fault_output = ""
        for bit_position in FAULT_TO_STRING:
            if (1 << bit_position) & faults:
                fault_output += FAULT_TO_STRING[bit_position] + "\n"
        # [:-1] drops the trailing newline; an empty string stays empty.
        return fault_output[:-1]

    @staticmethod
    def process_statuses_to_string(statuses):
        """Decode a status bit-field into newline-separated flag names."""
        status_output = ""
        for bit_position in STATUS_TO_STRING:
            if (1 << bit_position) & statuses:
                status_output += STATUS_TO_STRING[bit_position] + "\n"
        # [:-1] drops the trailing newline; an empty string stays empty.
        return status_output[:-1]

    @staticmethod
    def process_comms_to_string(comms):
        """Map a raw comm-status code to its label, or "UNKNOWN"."""
        return COMMS_TO_STRING[comms] if comms in COMMS_TO_STRING else "UNKNOWN"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.