text stringlengths 8 6.05M |
|---|
# Generated by Django 2.2.1 on 2019-07-05 10:56
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add audit timestamp columns to the ``bbs.UserInfo`` model.

    ``created_time`` gets a one-off default (``timezone.now``) so existing
    rows can be backfilled during the schema change; ``preserve_default=False``
    tells Django the default applies only to this migration, not to the model
    definition itself.
    """

    dependencies = [
        ('bbs', '0008_auto_20190625_1518'),
    ]

    operations = [
        migrations.AddField(
            model_name='userinfo',
            name='created_time',
            # auto_now_add stamps the column once, on row INSERT.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='创建时间'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userinfo',
            name='last_modified_time',
            # auto_now re-stamps the column on every save().
            field=models.DateTimeField(auto_now=True, verbose_name='修改时间'),
        ),
    ]
|
# Generated by Django 2.1.2 on 2018-10-31 08:22
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add the non-nullable ``keterangan`` char column to ``etl.EtlStage1``.

    ``preserve_default=False`` indicates the default used to backfill existing
    rows was supplied interactively at makemigrations time and is not kept on
    the model field.
    """

    dependencies = [
        ('etl', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='etlstage1',
            name='keterangan',
            field=models.CharField(max_length=20),
            preserve_default=False,
        ),
    ]
|
import os,arcpy, dla, dlaCreateSourceTarget,dlaPreview,dlaPublish,dlaStage
projFolder = r"C:\Users\Steve\Documents\ArcGIS\Projects\DomainTest"
def main():
    """Entry point: run whichever manual test is currently selected (test1)."""
    test1()
def test0():
    """Stage a single source-to-target configuration XML via dlaStage."""
    dlaStage.stage(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject\ForSteve\CurbStopValves.xml")
def test01():
    """Preview the DomainTest SourceTarget config; earlier stage/publish
    experiments are kept commented out for reference."""
    #dla.setProject(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject1\transformerUnit_to_ElectricDevice2.xml","MyProject1.aprx")
    #dlaStage.stage(r"C:\Users\Steve\Documents\ArcGIS\Projects\DomainTest\SourceTarget.xml")
    #dlaPublish.useReplaceSettings = True
    #dlaPreview.preview(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject1\transformerUnit_to_ElectricDevice2.xml")
    #dlaPublish.publish(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject\testjoin.xml")
    dlaPreview.preview(r"C:\Users\Steve\Documents\ArcGIS\Projects\DomainTest\SourceTarget.xml")
def testCreate():
    """Generate SourceTarget.xml mapping two feature classes in DomainTest.gdb."""
    projFolder = r"C:\Users\Steve\Documents\ArcGIS\Projects\DomainTest"  # shadows the module-level projFolder on purpose
    dla.setProject(os.path.join(projFolder,"SourceTarget.xml"),(os.path.join(projFolder,"DomainTest.aprx")))
    dlaCreateSourceTarget.createDlaFile(os.path.join(projFolder,r'DomainTest.gdb\WaterValve'),
                                        os.path.join(projFolder,r'DomainTest.gdb\WaterDevice'),
                                        os.path.join(projFolder,'SourceTarget.xml'))
def test1():
    """Create a dla config file from a GIS-server source layer and a hosted
    feature-service target.

    The commented code below is scratch from earlier join/layer-file
    experiments (connectionProperties inspection, MakeFeatureLayer round
    trips), kept for reference.
    """
    #dla._project = arcpy.mp.ArcGISProject(os.path.join(projFolder,"MyProject.aprx"))
    dlaCreateSourceTarget.createDlaFile(r"GIS Servers\https://services2.arcgis.com/EmOtS7q6cfSmspIo/arcgis/rest/services/MCC_Map/FeatureServer/L0Boat",
                                        r"https://services3.arcgis.com/xoil9QbyeUaxAZtF/arcgis/rest/services/Stormwater_OpenDrain_5015368ed0dc4f75a2103bb8dc7b3208/FeatureServer/0",
                                        r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject\testjoin.xml")
    #fcfields = arcpy.ListFields(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject\SampleData.gdb\SampleData")
    #tfields = arcpy.ListFields(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject\SampleData.gdb\SampleTable")
    #vfields = getViewString(tfields,fcfields)
    #result = arcpy.MakeFeatureLayer_management(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject\SampleData.gdb\SampleData","Join1")
    #layer = dla.getMapLayer("JoinTest")
    #lname = layer.name
    #result = arcpy.MakeFeatureLayer_management(layer,lname)
    #layer = result.getOutput(0)
    #lyrFile = dla.getLayerPath(layer)
    #layer = dla.getMapLayer("JoinTest")
    #arcpy.env.overwriteOutput = True
    #lyrFile = os.path.join(projFolder,lname)
    #arcpy.SaveToLayerFile_management(layer,lyrFile)
    #desc = arcpy.Describe(lyrFile)
    #pth = dla.getLayerPath(lyrFile)
    #tmp = desc.catalogPath
    #fields = arcpy.ListFields(lyrFile)
    #result = arcpy.MakeFeatureLayer_management(lyrFile,lname)
    #layer = result.getOutput(0)
    #try:
    #    tmp = layer.connectionProperties
    #    for item in tmp:
    #        print(str(item),tmp[item])
    #    src = tmp['source']
    #    dest = tmp['destination']
    #    print(src['dataset'])
    #    print(src['connection_info']['database'])
    #    print(dest['dataset'])
    #    print(dest['connection_info']['database'])
    #except:
    #    pass
    #result = arcpy.MakeFeatureLayer_management(r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject15\MyProject15.gdb\test",
    #                                           "testTHIS")
    #layer = result.getOutput(0)
    #layer = dla.getLayer("Test")
    #if isinstance(layer, arcpy._mp.Layer):
    #    print(layer.dataSource)
    #else:
    #    desc = arcpy.Describe(layer)
    #    print(desc.catalogPath)
def getViewString(fields, fields2):
    """Build the field_info string used when creating a table/feature view.

    For each source field, any map-join prefix ("table.Field" -> "Field") is
    dropped, and if the target field list contains the same name with
    different casing, the target's casing is adopted so the created view
    matches the target schema exactly.

    fields  -- source field objects (each with a .name attribute)
    fields2 -- target field objects (each with a .name attribute)
    Returns a semicolon-delimited "<source> <alias> VISIBLE NONE;" string.
    """
    viewStr = ""
    for field in fields:  # drop any field prefix from the source layer (happens with map joins)
        thisFieldName = field.name[field.name.rfind(".") + 1:]
        for field2 in fields2:
            matchname = field2.name[field2.name.rfind(".") + 1:]
            if matchname != thisFieldName and matchname.upper() == thisFieldName.upper():
                # Same name, different case: use the target's casing so there
                # is no conflict later.
                # BUG FIX: the original assigned the undefined name
                # `targetname` here, raising NameError whenever this
                # case-mismatch branch was taken.
                thisFieldName = matchname
        thisFieldStr = field.name + " " + thisFieldName + " VISIBLE NONE;"
        viewStr += thisFieldStr
    return viewStr
def test2():
    """Chunk a large feature class into 100k-row slices by OBJECTID, export
    each slice to its own feature class, and emit a dla config per slice
    targeting the hosted TaxDistribution service."""
    dla._project = arcpy.mp.ArcGISProject(r"C:\Users\Steve\Documents\ArcGIS\Projects\pbmpolygons\pbmpolygons.aprx")
    ws = r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject11\shp\Data Assistant 10.4 Testing\pbmnorepair.gdb"
    base = "pbmpoly"
    res = arcpy.GetCount_management(os.path.join(ws,base))
    cnt = int(res.getOutput(0))
    chunk = 100000  # rows per slice
    lngth = int(cnt/chunk)  # number of full chunks (any final partial chunk is skipped)
    for group in range (0,lngth):
        minoid = group * chunk
        # Half-open OBJECTID window (minoid, minoid+chunk]
        where = 'OBJECTID > '+ str(minoid) + ' AND OBJECTID <= ' + str(minoid+chunk)
        dla.addMessage(where)
        layername = "pbmpolys"
        if arcpy.Exists(layername):
            arcpy.Delete_management(layername)  # drop a stale layer from a prior pass
        result = arcpy.MakeFeatureLayer_management(in_features=os.path.join(ws,base),where_clause=where,workspace=ws,out_layer=layername)
        cnt = result.getOutput(0)
        outpath = r"C:\Users\Steve\Documents\ArcGIS\Projects\MyProject11\shp\Data Assistant 10.4 Testing\pbm.gdb"
        outname = "pbmpoly"+str(group)
        ds = os.path.join(outpath,outname)
        if arcpy.Exists(ds):
            arcpy.Delete_management(ds)  # overwrite any previous export
        arcpy.FeatureClassToFeatureClass_conversion(in_features=layername,out_path=outpath,out_name=outname)
        outdoc = r"C:\Users\Steve\Documents\ArcGIS\Projects\pbmpolygons\pbm" + str(group) + ".xml"
        svce = r"http://services.arcgis.com/b6gLrKHqgkQb393u/arcgis/rest/services/TaxDistribution/FeatureServer/0"
        dlaCreateSourceTarget.createDlaFile(ds,svce,outdoc)
def test3():
    """Create a dla config between two SDE tables in the Trails project."""
    dla._project = arcpy.mp.ArcGISProject(r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\Trails.aprx")
    dlaCreateSourceTarget.createDlaFile(r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\prawn.sde\GDB_D.DBO.GWS_FACILITY",
                                        r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\prawn.sde\GDB_D.DBO.GWS_TANK",
                                        r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\tables.xml")
def test4():
    """Preview the Trails tables.xml config created by test3."""
    dlaPreview.preview(r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\tables.xml")
def test5():
    """Publish the Trails tables.xml config using replace (not append) mode."""
    dlaPublish.useReplaceSettings = True
    dlaPublish.publish(r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\tables.xml")
def test6():
    """Resolve a layer's data source by name from the project's maps."""
    dla._project = arcpy.mp.ArcGISProject(r"C:\Users\Steve\Documents\ArcGIS\Projects\Trails\Trails.aprx")
    layer = "Trails"
    try:
        desc = arcpy.Describe(layer) # never works in scripts
    except:
        arcpy.AddMessage("Describe error")
        dla.showTraceback()
    layer = dla.getLayer("Trails") # loop through maps/layers to find matching name
    if layer != None and layer.supports("DataSource"):
        try:
            arcpy.AddMessage(layer.dataSource)
        except:
            arcpy.AddMessage("Print error")
# Script entry point.
if __name__ == "__main__":
    main()
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Say you have an array for which the ith element is the price of a given stock on day i.
# If you were only permitted to complete at most one transaction
# (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
# Example 1:
# Input: [7, 1, 5, 3, 6, 4]
# Output: 5
# max. difference = 6-1 = 5 (not 7-1 = 6, as selling price needs to be larger than buying price)
# Example 2:
# Input: [7, 6, 4, 3, 1]
# Output: 0
# In this case, no transaction is done, i.e. max profit = 0.
class Solution(object):
    def maxProfit(self, prices):
        """
        Single-transaction max stock profit (LeetCode 121).

        Two-cursor scan: ``buy`` marks the candidate buy day and ``step``
        widens the sell window while prices stay above the buy price; the
        buy cursor jumps forward whenever a cheaper-or-equal day appears.

        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        buy, end = 0, len(prices)
        best = 0
        step = 1
        while buy + step < end:
            sell_price = prices[buy + step]
            if sell_price > prices[buy]:
                profit = sell_price - prices[buy]
                if profit > best:
                    best = profit
                step += 1
            else:
                # Cheaper (or equal) day found: restart the window there.
                buy += step
                step = 1
        return best
# Quick manual checks; expected output: 5, 2, 0.
if __name__ == '__main__':
    print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))
    print(Solution().maxProfit([2, 4, 1]))
    print(Solution().maxProfit([]))
# Here are some solutions that are easier to understand than mine.
class Solution:
    # @param prices, a list of integer
    # @return an integer
    def maxProfit(self, prices):
        """One pass: remember the cheapest price seen so far and take the
        best sell-today profit against it. float("inf") serves as the
        positive-infinity sentinel (float("-inf") is its negative twin)."""
        best_profit = 0
        cheapest = float("inf")
        for today in prices:
            if today < cheapest:
                cheapest = today
            gain = today - cheapest
            if gain > best_profit:
                best_profit = gain
        return best_profit
class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int

        Track the running low and the running high seen since that low;
        a new low resets the high. The answer is the widest low-to-high
        spread observed.
        """
        if not prices:
            return 0
        low = high = prices[0]
        spread = 0
        for price in prices:
            if price < low:
                # New minimum: the sell window restarts here.
                low = high = price
            elif price > high:
                high = price
            spread = max(spread, high - low)
        return spread
# ##!!!!!!! So important
# The logic to solve this problem is same as "max subarray problem" using Kadane's Algorithm.
# Since no body has mentioned this so far, I thought it's a good thing for everybody to know.
#
# All the straight forward solution should work,
# but if the interviewer twists the question slightly by giving the difference array of prices,
# Ex: for {1, 7, 4, 11}, if he gives {0, 6, -3, 7}, you might end up being confused.
# Here, the logic is to calculate the difference (maxCur += prices[i] - prices[i-1]) of the original array,
# and find a contiguous subarray giving maximum profit. If the difference falls below 0, reset it to zero.
# public int maxProfit(int[] prices) {
# int maxCur = 0, maxSoFar = 0;
# for(int i = 1; i < prices.length; i++) {
# maxCur = Math.max(0, maxCur += prices[i] - prices[i-1]);
#         // if maxCur falls below 0, it is reset to zero in the next iteration.
# maxSoFar = Math.max(maxCur, maxSoFar);
# }
# return maxSoFar;
# } |
# Read an integer and report whether it is NOT prime:
# prints "yes" for 1 and for composite numbers, "no" for primes.
s = int(input())
flag = 1
# BUG FIX: the loop previously ran over range(2, s//2), which excludes
# s//2 itself, so numbers like 4 and 6 (smallest divisor == s//2 or the
# range being empty) were misreported as prime. The inclusive upper bound
# fixes that.
for x in range(2, s // 2 + 1):
    if s % x == 0:
        flag = 0  # found a proper divisor -> s is composite
        break
    else:
        flag = 1
if flag == 0 or s == 1:
    print("yes")
else:
    print("no")
|
from django.apps import AppConfig
class BrowseModeConfig(AppConfig):
    """Django application configuration for the ``browse_mode`` app."""
    name = 'browse_mode'
|
import os
from db_config import db
import pandas as pd
from models.performance_analysis import performance_model
from models.domain import domain_model
from models.networks import network_model
from models.probe_monitoring import probe_model
from models.webservices import web_service_model
from models.fault_management import fault_model
import xlrd
from datetime import datetime
# Re-enable xlrd's legacy .xlsx reader paths so the commented Excel loaders
# below can run (needed by some xlrd builds when reading .xlsx workbooks).
xlrd.xlsx.ensure_elementtree_imported(False, None)
xlrd.xlsx.Element_has_iter = True
# Create tables for every imported model (no-op for tables that already exist).
db.create_all()
# fault_data = [
#     {'url_name': 'www.google.com'},
#     {'url_name': 'www.youtube.com'},
#     {'url_name': 'www.twitter.com'},
#     {'url_name': 'www.instagram.com'},
#     {'url_name': 'www.facebook.com'},
#     {'url_name': 'www.whatsapp.com'},
# ]
#
# for site in fault_data:
#     p = web_service_model.WebService(url_name=site.get("url_name"))
#     db.session.add(p)
#     db.session.commit()
# fault_data = [
#     {'application_name': 'Dropbox'},
#     {'application_name': 'Facebook'},
#     {'application_name': 'Whatsapp'},
#     {'application_name': 'Snapchat'},
#     {'application_name': 'Twitter'},
#     {'application_name': 'Instagram'},
#     {'application_name': 'Skype'},
#     {'application_name': 'Ookla'},
#     {'application_name': 'SMS'},
#     {'application_name': 'Youtube'},
#     {'application_name': 'Dialer pad'},
#     {'application_name': 'My Etisalat'},
#
# ]
#
# for site in fault_data:
#     p = performance_model.Applications(application_name=site.get("application_name"))
#     db.session.add(p)
#     db.session.commit()
# Seed the probe-domain lookup table.
domain_data = [
    {'domain_name': 'eProbe-UX'},
    {'domain_name': 'eProbe-NW'},
    {'domain_name': 'eProbe-5G'},
    {'domain_name': 'CreaNORD'},
    {'domain_name': 'EXFO'},
]
for site in domain_data:
    p = domain_model.Domain(domain_name=site.get("domain_name"))
    db.session.add(p)
    db.session.commit()  # commit per row, matching the commented loaders above
# Give the location of the file
# loc = (r'D:\etislat\DB structure\pms-db_1.1.xlsx')
#
# wb = xlrd.open_workbook(loc)
# sheet = wb.sheet_by_name('t_probe')
# for i in range(1, sheet.nrows):
#     probe_id = db.Column(db.Integer, db.Sequence('seq_probe_id', start=1, increment=1), primary_key=True)
#     device_id = sheet.cell_value(i, 1)
#     probe_display_name = sheet.cell_value(i, 2)
#     region = sheet.cell_value(i, 3)
#     site_name = sheet.cell_value(i, 4)
#     mobile_technology = sheet.cell_value(i, 5)
#     mobile_model = sheet.cell_value(i, 6)
#     mobile_number = sheet.cell_value(i, 7)
#     mobile_os = sheet.cell_value(i, 8)
#     mobile_os_version = sheet.cell_value(i, 9)
#     current_version = sheet.cell_value(i, 10)
#     current_status = sheet.cell_value(i, 11)
#     cordinates = sheet.cell_value(i, 12)
#     default_select = sheet.cell_value(i, 13)
#     installation_date = sheet.cell_value(i, 14)
#     # 10-06-2021 12:27:23
#     installation_date = datetime.strptime(installation_date, '%d-%m-%Y %H:%M:%S')
#     installation_by = sheet.cell_value(i, 15)
#     # print(installation_date)
#     p = probe_model.ProbeMonitoring(device_id=device_id, probe_display_name=probe_display_name, region=region,
#                                     site_name=site_name, mobile_technology=mobile_technology, mobile_model=mobile_model,
#                                     mobile_number=mobile_number, mobile_os=mobile_os,
#                                     mobile_os_version=mobile_os_version, current_version=current_version,
#                                     current_status=current_status, cordinates=cordinates, default_select=default_select,
#                                     installation_date=installation_date, installation_by=installation_by)
#
#     db.session.add(p)
#     db.session.commit()
#
# wb = xlrd.open_workbook(loc)
# sheet = wb.sheet_by_name('t_network')
# for i in range(1, sheet.nrows):
#     network_id = sheet.cell_value(i, 0)
#     network_name = sheet.cell_value(i, 1)
#     dpi = sheet.cell_value(i, 2)
#     probe_display_name = sheet.cell_value(i, 3)
#
#     # print(installation_date)
#     p = network_model.Network(network_id=network_id, network_name=network_name, dpi=dpi,
#                               network_display_name=probe_display_name)
#
#     db.session.add(p)
#     db.session.commit()
|
from rest_framework import serializers, viewsets
from .models import Todo
class TodoSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing the writable fields of the ``Todo`` model."""
    class Meta:
        model = Todo
        fields = [
            'title',
            'description',
            'created',
            'due',
        ]
class TodoViewSet(viewsets.ModelViewSet):
    """CRUD viewset over all ``Todo`` rows using ``TodoSerializer``."""
    queryset = Todo.objects.all()
    serializer_class = TodoSerializer
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 20 19:00:59 2014
@author: jgr42_000
"""
from __future__ import division
import numpy as np
import math
#import operator as op
#import itertools
#from collections import OrderedDict
import cPickle
from scipy.optimize import minimize
from scipy.optimize import basinhopping
from scipy.spatial.distance import euclidean
from scipy.misc import factorial2
from sklearn.linear_model import LinearRegression
#from scipy import optimize
import core_objects_v5 as core
# Decorator that flips the sign of a function's return value (turns a
# maximization objective into a minimization one for scipy optimizers).
def make_neg(func):
    def wrapper(*args, **kwargs):
        outcome = func(*args, **kwargs)
        if 'eval_MSE' not in kwargs:
            return outcome * -1.0
        # Kriging-style predictors called with eval_MSE return (value, MSE):
        # negate only the predicted value, pass the MSE through untouched.
        return (outcome[0] * -1.0, outcome[-1])
    return wrapper
#def constr_cycle_len(func, min_cyc_len=200.0): # cycle len in EPFD | TAG: Constraint
# def inner(*args, **kwargs):
# return func(*args, **kwargs) - min_cyc_len
# return inner
def constr_cycle_len(func, min_cyc_len=200.0):  # cycle length | TAG: Constraint
    """Wrap a cycle-length predictor as an inequality constraint:
    feasible when predicted length - min_cyc_len >= 0."""
    def wrapper(*args, **kwargs):
        predicted = func(*args, **kwargs)
        if 'eval_MSE' in kwargs:
            # (value, MSE) pair from a kriging predictor: shift only the value.
            return (predicted[0] - min_cyc_len, predicted[-1])
        return predicted - min_cyc_len
    return wrapper
# Adds a safety margin to the void-worth constraint and negates the result,
# so "void worth + margin <= 0" becomes a ">= 0" inequality for COBYLA.
def void_worth_adj(func, void_w_margin=50.0):  # Void worth in pcm | TAG: Constraint
    def wrapper(*args, **kwargs):
        predicted = func(*args, **kwargs)
        if 'eval_MSE' in kwargs:
            # (value, MSE) pair: adjust/negate only the value, keep the MSE.
            return ((predicted[0] + void_w_margin) * -1.0, predicted[-1])
        return (predicted + void_w_margin) * -1.0
    return wrapper
def reac_coeff_adj(func, reac_coeff_margin=0.01):  # Coolant temperature coefficient of reactivity in pcm/K | TAG: Constraint
    """Add a margin to the reactivity-coefficient prediction and negate it,
    turning "coefficient + margin <= 0" into a ">= 0" inequality."""
    def wrapper(*args, **kwargs):
        predicted = func(*args, **kwargs)
        if 'eval_MSE' in kwargs:
            # (value, MSE) pair: adjust/negate only the value, keep the MSE.
            return ((predicted[0] + reac_coeff_margin) * -1.0, predicted[-1])
        return (predicted + reac_coeff_margin) * -1.0
    return wrapper
# Combine every box-bound constraint into one function: returns the first
# violated slack (a negative number) or, when every feature lies in [0, 1],
# the last feature (non-negative, i.e. feasible under COBYLA's convention).
# TAG: Improve, TAG: Test
def all_bounds_constr(X):
    result = None
    for value in X:
        upper_slack = 1 - value
        if upper_slack < 0.0:
            return upper_slack
        if value < 0.0:
            return value
        result = value
    return result
def get_optim_opts(fit_dict, data_opts, fit_opts, case_info):
    """Assemble the options dict driving the surrogate-based optimization.

    Wraps the fitted surrogate predictors in fit_dict as a negated objective
    plus COBYLA inequality constraints (reactivity coefficient, void worth,
    cycle length, TRISO peak power, fuel temperature, box bounds) and bundles
    them with solver settings.

    NOTE(review): merge_dict and MyConstr are defined elsewhere in this
    module/project - confirm they are in scope at call time.
    """
    num_feat = fit_dict['X_t'].shape[-1]
    x_guess = np.array([0.8]*num_feat) # Improve guess spot? | TAG: Improve
    # Negate the objective predictor: scipy minimizes, we maximize.
    obj_eval = make_neg(fit_dict['obj_val'].predict)
    reac_co_eval = reac_coeff_adj(fit_dict['reac_co'].predict)
    void_w_eval = void_worth_adj(fit_dict['void_w'].predict)
    max_cycle_eval = constr_cycle_len(fit_dict['max_cycle'].predict)
    sur_type = fit_opts['sur_type']
    dv_bounds = case_info['dv_bounds']
    if sur_type == 'regress':
        igpm_obj_eval = make_neg(fit_dict['igpm_obj_val'].predict)
#    bounds_eval = all_bounds_constr
    #global meta_dict
    # Constraints for COBYLA
    # Correlation (linear fit) for peak temp [K] in homogenized fuel
    # as a function of volumetric power dens [W/m^3]
    homog_peak_fuel_temps = np.array([1191.0, 1265.0, 1296.0, 1405.0])
    vol_powdens = np.array([5.789E7, 7.815E7, 8.683E7, 1.172E8])[:,np.newaxis]
    peak_fuel_temp_regress = LinearRegression()
    peak_fuel_temp_regress.fit(vol_powdens, homog_peak_fuel_temps)
    def calc_peak_bulk_fuel_temp(core_pow, core_height,
                                 powdens_calc = core.AssemblyPowerPeak(),
                                 regress_func=peak_fuel_temp_regress.predict):
        # Peak homogenized fuel temperature [K] from the linear fit above.
        core_powdens = powdens_calc.set_core_conditions(core_pow, core_height)
        return regress_func(core_powdens)
    # Want to find a way to make work for any length of features
    # Is there a way to make all the bound constraints all into one function,
    # such that if any constraint is violated, the function returns a violation?
    # after getting optimization to work with the hardwired way, test with new way
    # TAG: Improve
    def make_boxbound_constr_dict(x):
        # One upper and one lower 0..1 bound constraint per feature; the
        # index=feature default binds each closure to its own feature index.
        boxbound_constr_dict = []
        for feature in xrange(len(x)):
            def upper_constr(x, index=feature):
                return 1 - x[index]
            #upper_name = 'upper_bound_feature_{}'.format(feature)
            def lower_constr(x, index=feature):
                return x[index]
            #lower_name = 'upper_bound_feature_{}'.format(feature)
            boxbound_constr_dict.append({'type':'ineq', 'fun':upper_constr})
            boxbound_constr_dict.append({'type':'ineq', 'fun':lower_constr})
        return boxbound_constr_dict
    def triso_pow_eval(dv_vec_scaled, bounds=dv_bounds):
        # Peak per-particle TRISO power constraint: >= 0 when below the limit.
        dv_vec = core.dv_scaler(dv_vec_scaled, dv_bounds=bounds, scal_type='real').sum(0)
        pf = dv_vec[1]
        coreh = dv_vec[0]*1e-2 # core height [input: cm, output: m]
        krad = dv_vec[2]*1e-2 # kernel radius [input: cm, output: m]
        power = dv_vec[5]*1e6 # TAG: Hardcode
        pow_max_constr = 0.340 # peak triso power, in W/particle | TAG: Constraint
        npins = 3240.0 # number of pins in core, 60 pin/assm*54 assm/core
        pinrad = 0.007 # fuel pin radius, [m]
        layer_thick = np.array([0.0, 0.01, 0.004, 0.0035, 0.004]) # [cm], convert to [m] later
        tot_lay_thick = layer_thick.cumsum()
        layerrad = krad + tot_lay_thick*1e-2
        vol_pin = np.pi * pinrad**2.0 * coreh
        vol_triso = vol_pin * pf
        vol_single_triso = 4.0/3.0 * np.pi * layerrad[-1]**3.0
        num_triso_pin = vol_triso / vol_single_triso
        pow_pin = power / npins
        pow_triso = pow_pin / num_triso_pin
        # axial, radial, and pin peaking
        pow_triso_peak = pow_triso * 1.2856 * 1.5159 * 0.9067780
        pow_max_constr_eval = pow_max_constr - pow_triso_peak
        return pow_max_constr_eval
    def fuel_temp_eval(dv_vec_scaled, dvbounds=dv_bounds):
        # Peak fuel temperature constraint: >= 0 when below the 1610 K limit.
        dv_vec = core.dv_scaler(dv_vec_scaled, dv_bounds=dvbounds, scal_type='real').sum(0)
        krad = dv_vec[2]*1e-2 # kernel radius [input: cm, output: m]
        core_pow = dv_vec[5]
        core_h = dv_vec[0]
        t_surf = calc_peak_bulk_fuel_temp(core_pow, core_h) # 1209.0 + 50.0 # triso surface temp [k]
        t_max_constr = 1610.0 # TAG: Constraint
        layer_thick = np.array([0.0, 0.01, 0.004, 0.0035, 0.004]) # [cm], convert to [m] later
        tot_lay_thick = layer_thick.cumsum()
        layer_k = [3.5, 0.5, 4.0, 30.0, 4.0] # Thermal cond of layers [W/m-k], jianwei's thesis, pg. 18
        layerrad = krad + tot_lay_thick*1e-2
        # axial, radial, and pin peaking
        pow_triso_peak = 0.340 - triso_pow_eval(dv_vec_scaled, bounds = dvbounds) # | TAG: Constraint
        t_max = t_surf + pow_triso_peak/(6.0*layer_k[0]*layerrad[0])
        for idx in xrange(1,len(layer_k)):
            t_max = t_max - pow_triso_peak*(1.0/(layer_k[idx]*layerrad[idx]) \
                    - 1.0/(layer_k[idx]*layerrad[idx - 1]))
        t_max_constr_eval = t_max_constr - t_max
        return t_max_constr_eval
#    def constr_x1_upper(x):
#        return 1 - x[0]
#    def constr_x2_upper(x):
#        return 1 - x[1]
#    def constr_x3_upper(x):
#        return 1 - x[2]
#    def constr_x4_upper(x):
#        return 1 - x[3]
#    def constr_x1_lower(x):
#        return x[0]
#    def constr_x2_lower(x):
#        return x[1]
#    def constr_x3_lower(x):
#        return x[2]
#    def constr_x4_lower(x):
#        return x[3]
    boxbound_constr_dict = make_boxbound_constr_dict(x_guess)
    # Put into dictionary for use
#    cobyla_constr = [{'type':'ineq', 'fun':constr_x1_upper},{'type':'ineq', 'fun':constr_x2_upper},
#                     {'type':'ineq', 'fun':constr_x3_upper},{'type':'ineq', 'fun':constr_x4_upper},
#                     {'type':'ineq', 'fun':constr_x1_lower},{'type':'ineq', 'fun':constr_x2_lower},
#                     {'type':'ineq', 'fun':constr_x3_lower},{'type':'ineq', 'fun':constr_x4_lower},
#                     {'type':'ineq', 'fun':reac_co_eval},{'type':'ineq', 'fun':void_w_eval},
#                     {'type':'ineq', 'fun':max_cycle_eval}]
#    cobyla_constr = [{'type':'ineq', 'fun':bounds_eval},
#                     {'type':'ineq', 'fun':reac_co_eval},{'type':'ineq', 'fun':void_w_eval},
#                     {'type':'ineq', 'fun':max_cycle_eval}]
    # GPM (surrogate) constraints vs. closed-form (non-GPM) constraints.
    cobyla_constr_gpm = [{'type':'ineq', 'fun':reac_co_eval}, {'type':'ineq', 'fun':void_w_eval},
                         {'type':'ineq', 'fun':max_cycle_eval}]
    cobyla_constr_nongpm = [{'type':'ineq', 'fun':fuel_temp_eval}, {'type':'ineq', 'fun':triso_pow_eval}]
    cobyla_constr_search = cobyla_constr_nongpm + boxbound_constr_dict
    cobyla_constr_obj_fun = cobyla_constr_gpm + cobyla_constr_search
    gpm_constr = [constr['fun'] for constr in cobyla_constr_gpm]
    cobyla_opts = {'catol':1E-3}
    basinhopping_opts = {'interval':15, 'disp':False}
    randomized_opts = {'niter':100, 'repeat_stop':15}
    min_kwargs = {"method":"COBYLA", "options":cobyla_opts}
    min_kwargs_obj_fun = merge_dict(min_kwargs, {'constraints':cobyla_constr_obj_fun})
    min_kwargs_search = merge_dict(min_kwargs, {'constraints':cobyla_constr_search})
    myaccept = MyConstr(reac_co_eval, void_w_eval, max_cycle_eval, fuel_temp_eval, triso_pow_eval, num_feat)
    global_type = 'random'
    optim_options = {'fmin_opts_obj_fun':min_kwargs_obj_fun, 'fmin_opts_search':min_kwargs_search ,'accept_test':myaccept,
                     'x_guess':x_guess, 'obj_eval':obj_eval, 'search_constr_gpm':gpm_constr,
                     'basin_opts':basinhopping_opts, 'global_type':global_type,
                     'random_opts':randomized_opts} # want the constr_dict here explicitly? | TAG: Question
    if sur_type == 'regress':
        optim_options.update({'igpm_obj_eval':igpm_obj_eval})
#    with open(data_opts['opt_inp_fname'], 'wb') as f:
#        cPickle.dump(optim_options, f)
    return optim_options
def optimize_dv(optim_options, data_opts):
    """Run the global design-variable optimization (basinhopping or repeated
    random restarts) over the surrogate objective, pickle and return the
    result.

    NOTE(review): this reads optim_options['fmin_opts'], but get_optim_opts
    builds 'fmin_opts_obj_fun'/'fmin_opts_search' keys - confirm which caller
    populates 'fmin_opts'.
    NOTE(review): on the 'random' path opt_res is never assigned, so the
    cPickle.dump below would raise NameError - looks superseded by
    optimize_wrapper; confirm before use.
    """
    x_guess = optim_options['x_guess']
    obj_eval = optim_options['obj_eval']
    min_kwargs = optim_options['fmin_opts']
    myaccept = optim_options['accept_test']
    global_type = optim_options['global_type']
    if global_type == 'basin':
        basin_interval = optim_options['basin_opts']['interval']
        basin_disp = optim_options['basin_opts']['disp']
    elif global_type == 'random':
        random_iter = optim_options['random_opts']['niter']
    # Basinhopping global search
    if global_type == 'basin':
        # As with search, try local dv first and make sure guess
        # results in a successful initial local optimum
        x_guess_ok = False
        while not x_guess_ok:
            local_res = minimize(obj_eval, x_guess, **min_kwargs)
            if local_res.success:
                print 'x_guess ok! = {}'.format(x_guess)
                x_guess_ok = True
            else:
                print 'x_guess not ok! = {}'.format(x_guess)
                print 'making new x_guess'
                # Resample a fresh random start in [0, 1) per feature.
                x_guess = np.random.random_sample([len(x_guess)])
                print 'new x_guess: {}'.format(x_guess)
        opt_res = basinhopping(func=obj_eval, x0=x_guess, minimizer_kwargs=min_kwargs, \
                               accept_test=myaccept, disp=basin_disp, interval=basin_interval) # niter = 10, accept_test=mybounds1 or accept_test=myaccept1, stepsize=0.01, callback=print_fun
    elif global_type == 'random':
        # Repeated local minimizations; RandGlobal (defined elsewhere)
        # accumulates the best result.
        dv_global = RandGlobal()
        for local_iter in xrange(random_iter):
            local_res = minimize(obj_eval, x_guess, **min_kwargs)
            dv_global.add_result(local_res)
        dv_global.print_results()
    else:
        msg = """
        {} is not a recognized global opt type,
        please specify either 'basin' or 'random'""".format(global_type)
        raise Exception(msg)
    # save results to file
    with open(data_opts['opt_fname'], 'wb') as optf:
        cPickle.dump(opt_res, optf, 2)
    return opt_res
def optimize_search(opt_results, optim_options):
    """Globally maximize the expected-improvement (EI) acquisition function
    around the current optimum to pick the next infill sample point.

    NOTE(review): the EI terms below use '//' (floor division) on floats;
    with 'from __future__ import division' in effect this floors the erf/exp
    arguments - presumably '/' was intended. Confirm before trusting EI values.
    NOTE(review): on the 'random' path search_res is never assigned, so the
    final return would raise NameError - looks superseded by optimize_wrapper.
    """
    x_guess = optim_options['x_guess']
    obj_eval = optim_options['obj_eval']
    min_kwargs = optim_options['fmin_opts']
    myaccept = optim_options['accept_test']
    ymin = opt_results.fun
    global_type = optim_options['global_type']
    if global_type == 'basin':
        basin_interval = optim_options['basin_opts']['interval']
        basin_disp = optim_options['basin_opts']['disp']
    elif global_type == 'random':
        random_iter = optim_options['random_opts']['niter']
    def expect_improve(x, y_min=ymin, obj_eval_func=obj_eval):
        # Standard EI formula from the surrogate's prediction and MSE.
        y_eval, MSE = obj_eval_func(x, eval_MSE=True)
        sigma = np.sqrt(MSE)
        if MSE == 0.0: # Check tolerances here!
            exp_imp = 0.0
        else:
            ei_term1 = (y_min-y_eval) * (0.5 + 0.5 * math.erf( (y_min-y_eval)//(sigma*math.sqrt(2.0)) ))
            ei_term2 = (sigma * 1.0//math.sqrt(2.0*math.pi))*math.exp( -1.0 * (y_min - y_eval)**2.0//(2.0*MSE) )
            exp_imp = ei_term1 + ei_term2
        return exp_imp
    neg_expect_improve = make_neg(expect_improve)
    if global_type == 'basin':
        # Try the local minimizer first, make sure that it doesn't fail
        # otherwise try a new starting guess and repeat
        x_guess_ok = False
        while not x_guess_ok:
            # do a local optimize with current x_guess
            local_res = minimize(neg_expect_improve, x_guess, **min_kwargs)
            if local_res.success:
                print 'x_guess ok! = {}'.format(x_guess)
                x_guess_ok = True
            else:
                print 'x_guess not ok! = {}'.format(x_guess)
                print 'making new x_guess'
                x_guess = np.random.random_sample([len(x_guess)])
                print 'new x_guess: {}'.format(x_guess)
        # Once the inital local fmin works, start the basinhopping
        search_res = basinhopping(func=neg_expect_improve, x0=x_guess, minimizer_kwargs=min_kwargs, \
                                  accept_test=myaccept, disp=basin_disp, interval=basin_interval)
    elif global_type == 'random':
        search_global = RandGlobal()
        for local_iter in xrange(random_iter):
            local_res = minimize(obj_eval, x_guess, **min_kwargs)
            search_global.add_result(local_res)
        search_global.print_results()
    else:
        msg = """
        {} is not a recognized global opt type,
        please specify either 'basin' or 'random'""".format(global_type)
        raise Exception(msg)
    return search_res
def optimize_wrapper(optim_options, prev_opt_data, opt_purpose, outp_name = None, opt_results=None, fit_opts=None):
    """Unified driver for both optimization phases.

    opt_purpose == 'dv_opt'     : minimize the (negated) surrogate objective.
    opt_purpose == 'search_opt' : maximize a constrained log-expected-
                                  improvement acquisition to pick the next
                                  infill point.
    Global strategy is either scipy basinhopping ('basin') or repeated random
    restarts accumulated by RandGlobal ('random', defined elsewhere). Results
    are optionally pickled to outp_name.

    NOTE(review): the EI/probability terms use '//' (floor division) on
    floats; with 'from __future__ import division' active this floors the
    erf/exp arguments - presumably '/' was intended. Confirm.
    """
    # Warm-start from the previous iteration's result when available.
    if prev_opt_data is not None:
        if opt_purpose == 'dv_opt':
            x_guess = prev_opt_data['opt_res'].x
        elif opt_purpose == 'search_opt':
            x_guess = prev_opt_data['search_res']['new_doe_scaled']
    else:
        x_guess = optim_options['x_guess']
    obj_eval = optim_options['obj_eval']
    myaccept = optim_options['accept_test']
    global_type = optim_options['global_type']
    outp_fname = outp_name
    if global_type == 'basin':
        basin_interval = optim_options['basin_opts']['interval']
        basin_disp = optim_options['basin_opts']['disp']
    elif global_type == 'random':
        random_iter = optim_options['random_opts']['niter']
        #random_repeat_stop = optim_options['random_opts']['repeat_stop'] # |TAG: outtest
    if opt_purpose == 'dv_opt':
        opt_fun = obj_eval
        min_kwargs = optim_options['fmin_opts_obj_fun']
    elif opt_purpose == 'search_opt':
        min_kwargs = optim_options['fmin_opts_search']
        gpm_constr_list = optim_options['search_constr_gpm']
        sur_type = fit_opts['sur_type']
        if sur_type == 'regress':
            obj_eval = optim_options['igpm_obj_eval']
        # Use same ymin here, or use the igpm to estimate it? For now, use same | TAG: Check
        ymin = opt_results.fun
        def expect_improve(x, y_min=ymin, obj_eval_func=obj_eval, constr_info=gpm_constr_list):
            # Log-EI penalized by the log-probability of satisfying each
            # GPM-modeled constraint (constrained EI acquisition).
            y_eval, MSE = obj_eval_func(x, eval_MSE=True)
            sigma = np.sqrt(MSE)
            if MSE == 0.0: # Check tolerances here!
                exp_imp = 0.0
#            else: #a_sub < some_num:
#                a_sub = (y_min - y_eval) // (math.sqrt(2.0) * sigma)
#                mac_term = np.arange(1, 21)
#                mac_term = (-1.0)**(mac_term)*factorial2(2*mac_term - 1)//(2.0**mac_term) \
#                           * a_sub**(-(2*mac_term + 1))
#                ei_term1 = (y_min-y_eval) * 1.0//(2.0*math.sqrt(math.pi)) * np.sum(mac_term)
#                ei_term2 = sigma // math.sqrt(2.0*math.pi)
#                exp_imp = (ei_term1 + ei_term2) * math.exp(-1.0*a_sub**2.0)
            else:
                ei_term1 = (y_min-y_eval) * (0.5 + 0.5 * math.erf( (y_min-y_eval)//(sigma*math.sqrt(2.0)) ))
                ei_term2 = (sigma * 1.0//math.sqrt(2.0*math.pi))*math.exp( -1.0 * (y_min - y_eval)**2.0//(2.0*MSE) )
                exp_imp = ei_term1 + ei_term2
            # Clamp to machine epsilon before taking the log.
            if np.isclose(exp_imp, 0.0):
                exp_imp = np.finfo(np.array(exp_imp).dtype).eps
            exp_imp = np.log(exp_imp)
            #now get probability of exceeding constraints
            c_min = 0.0
            prob_f_list = []
            for constr_gpm in constr_info:
                gpm_eval, gpm_MSE = constr_gpm(x, eval_MSE=True)
                p_f_single = 0.5 + 0.5*math.erf((gpm_eval - c_min)//(np.sqrt(2.0*gpm_MSE)))
                if np.isclose(p_f_single, 0.0):
                    p_f_single = np.finfo(np.array(p_f_single).dtype).eps
                p_f_single = np.log(p_f_single)
                prob_f_list.append(p_f_single)
            # Now sum all P(F(x))
            prob_f_list = np.array(prob_f_list).sum()
            exp_imp = exp_imp + prob_f_list
            return exp_imp
        neg_expect_improve = make_neg(expect_improve)
        opt_fun = neg_expect_improve
    if global_type == 'basin':
        # Try the local minimizer first, make sure that it doesn't fail
        # otherwise try a new starting guess and repeat
        x_guess_ok = False
        while not x_guess_ok:
            # do a local optimize with current x_guess
            local_res = minimize(opt_fun, x_guess, **min_kwargs)
            if local_res.success:
                print 'x_guess ok! = {}'.format(x_guess)
                x_guess_ok = True
            else:
                print 'x_guess not ok! = {}'.format(x_guess)
                print 'making new x_guess'
                x_guess = np.random.random_sample([len(x_guess)])
                print 'new x_guess: {}'.format(x_guess)
        # Once the inital local fmin works, start the basinhopping
        global_res = basinhopping(func=opt_fun, x0=x_guess, minimizer_kwargs=min_kwargs, \
                                  accept_test=myaccept, disp=basin_disp, interval=basin_interval)
    elif global_type == 'random':
        global_obj = RandGlobal()
        for local_iter in xrange(random_iter):
            # Fresh random start in [0, 1) per feature for every restart.
            x_guess = np.random.random_sample([len(x_guess)])
            global_obj.add_x_guess(x_guess)
            local_res = minimize(opt_fun, x_guess, **min_kwargs)
            global_obj.add_result(local_res)
            # Check to see if not finding improved result | TAG: outtest
#            if global_obj.best_count >= random_repeat_stop:
#                print 'Have not found improved global opt after {} iter'.format(random_repeat_stop)
#                print 'stopping global optimization on iteration {}'.format(local_iter)
#                break
        global_obj.finish_step()
        global_res = global_obj
    else:
        msg = """
        {} is not a recognized global opt type,
        please specify either 'basin' or 'random'""".format(global_type)
        raise Exception(msg)
    if outp_fname != None:
        with open(outp_fname, 'wb') as optf:
            cPickle.dump(global_res, optf, 2)
    return global_res
# Optimization search and infill function
def search_infill(opt_result, optim_options, exist_opt, case_info, data_opts, fit_op):
    """Choose the next design point (DOE infill) after an optimization pass.

    'exploit' reuses the optimizer's best point directly; 'hybrid' maximizes
    the constrained expected-improvement acquisition via optimize_wrapper,
    retrying recursively if basinhopping raises ValueError. The chosen point
    is returned both scaled (unit cube) and in real units, and pickled.
    """
    dv_bounds = case_info['dv_bounds']
    search_type = optim_options['search_type']
    #First, select whether exploitation or hybrid
    if search_type == 'exploit':
        search_res = {'new_doe_scaled':opt_result.x, 'search_val':opt_result.fun,
                      'new_doe':core.dv_scaler(opt_result.x, dv_bounds, 'real'),
                      }
    elif search_type == 'hybrid':
        try:
            search_point = optimize_wrapper(optim_options, exist_opt, opt_purpose = 'search_opt',
                                            opt_results = opt_result, fit_opts = fit_op)
        except ValueError:
            # Retry from scratch on basinhopping numerical failures.
            print 'ValueError in Basinhopping, trying again....'
            search_res = search_infill(opt_result, optim_options, exist_opt, case_info, data_opts, fit_op)
            return search_res
        search_res = {'new_doe_scaled':search_point.x, 'search_val':search_point.fun,
                      'new_doe':core.dv_scaler(search_point.x, dv_bounds, 'real'),
                      'search_res_obj':search_point}
    with open(data_opts['search_fname'], 'wb') as f:
        cPickle.dump(search_res, f, 2)
#    new_doe = core.dv_scaler(new_doe_scaled, dv_bounds, 'real')
    return search_res
#def converge_check(prev_obs_vals, thresh_inp):
#
# obs_obj_vals = prev_obs_vals
# range_obs = np.abs(np.max(obs_obj_vals) - np.min(obs_obj_vals))
# thresh = thresh_inp
# stop_criterion = thresh * range_obs
# delta_set = np.array(np.abs([obs_obj_vals[-idx] - obs_obj_vals[-idx - 1] for idx in xrange(1, len(obs_obj_vals))]))
# if delta_set[0] < stop_criterion: # can check a set here if desired | TAG: Improve
# converged = True
# else:
# converged = False
# return converged
def converge_check(prev_obs_vals, converge_opts):
    """Decide whether an optimization history has converged.

    Parameters
    ----------
    prev_obs_vals : sequence of float
        Observed objective values in iteration order (needs >= 2 entries).
    converge_opts : dict
        'converge_tol' (float), 'converge_points' (int, how many trailing
        deltas must pass), and 'converge_type' ('range' or 'rel').

    Returns
    -------
    bool
        True when the last `converge_points` successive deltas all fall
        below the threshold.

    Raises
    ------
    ValueError
        If converge_opts['converge_type'] is neither 'range' nor 'rel'.
    """
    converge_tol = converge_opts['converge_tol']
    converge_points = converge_opts['converge_points']
    converge_type = converge_opts['converge_type']
    obs_obj_vals = np.array(prev_obs_vals)
    # Successive differences between observations.
    delta_set = np.diff(obs_obj_vals)
    if converge_type == 'range':
        # Absolute criterion: threshold scales with the spread of all but
        # the newest observation.
        range_obs = np.abs(np.max(obs_obj_vals[:-1]) - np.min(obs_obj_vals[:-1]))
        stop_criterion = converge_tol * range_obs
        pos_delta_set = np.abs(delta_set)
        converged = np.all(np.less(pos_delta_set[-converge_points:], stop_criterion))
    elif converge_type == 'rel':
        # Relative criterion: each delta normalized by the preceding value.
        rel_delta_set = delta_set / obs_obj_vals[:-1]
        pos_rel_delta_set = np.abs(rel_delta_set)
        converged = np.all(np.less(pos_rel_delta_set[-converge_points:], converge_tol))
    else:
        # Bug fix: previously an unknown type fell through with `converged`
        # unbound and raised NameError at the return statement.
        raise ValueError(
            "converge_type must be 'range' or 'rel', got {!r}".format(converge_type))
    return converged
def prox_check(doe_sets, new_search_dv, euclid_tol):
    """Return True when new_search_dv lies within euclid_tol (Euclidean
    distance) of any previously sampled scaled DOE point.

    doe_sets['doe_scaled'] is iterated row by row; the first hit short-
    circuits.  Used as a proximity-based convergence check: a new sample
    too close to an existing one adds no information.
    """
    for dv_set in doe_sets['doe_scaled']:
        new_point_distance = euclidean(dv_set, new_search_dv)
        if new_point_distance < euclid_tol:
            print 'new point {} is {} away from previous point {}, within tol {}'.format(
                new_search_dv, new_point_distance, dv_set, euclid_tol)
            print 'thus counting as converged!'
            converged_temp = True
            return converged_temp
    # No existing point was within tolerance.
    print 'new point is not euclidean proximal within {} of any existing points'.format(
        euclid_tol)
    print 'thus calculation is not proximally converged'
    converged_temp = False
    return converged_temp
#class MyBounds(object):
# def __init__(self, xmax=[1.0,1.0,1.0,1.0], xmin=[0.0,0.0,0.0,0.0] ):
# self.xmax = np.array(xmax)
# self.xmin = np.array(xmin)
# def __call__(self, **kwargs):
# x = kwargs["x_new"]
# tmax = bool(np.all(x <= self.xmax))
# tmin = bool(np.all(x >= self.xmin))
## if False in [tmax, tmin]:
## return False
## else:
## return True
# return tmax and tmin
class MyConstr(object):
    """Accept-test callable for scipy.optimize.basinhopping.

    Each *_eval argument is a callable x -> float that must evaluate to
    >= 0.0 for the design point to be feasible (COBYLA-style inequality
    convention).  __call__ additionally enforces 0 <= x <= 1 box bounds
    on the scaled design vector of length x_len.
    """
    def __init__(self, reac_co_eval, void_w_eval, max_cycle_eval,
                 fuel_temp_eval, triso_pow_eval, x_len): # need to make work for n-length feature set | TAG: Improve
        # Box bounds for the scaled design vector.
        self.xmax = np.ones(x_len)
        self.xmin = np.zeros(x_len)
        # Constraint evaluators; each returns >= 0.0 when satisfied.
        self.reac_co_eval = reac_co_eval
        self.void_w_eval = void_w_eval
        self.max_cycle_eval = max_cycle_eval
        self.fuel_temp_eval = fuel_temp_eval
        self.triso_pow_eval = triso_pow_eval
    def __call__(self, **kwargs):
        """Return True iff basinhopping's proposed point x_new is feasible."""
        x = kwargs["x_new"]
        # now evaluate the constraint function here using x_new
        tmax = bool(np.all(x <= self.xmax))
        tmin = bool(np.all(x >= self.xmin))
        reac_coeff = bool(self.reac_co_eval(x) >= 0.0) # Changed to match cobyla constr
        void_worth = bool(self.void_w_eval(x) >= 0.0) # TAG: Note
        max_cycle = bool(self.max_cycle_eval(x) >= 0.0)
        fuel_temp = bool(self.fuel_temp_eval(x) >= 0.0)
        triso_pow = bool(self.triso_pow_eval(x) >= 0.0)
        # cycle_len next
        #return reac_coeff and void_worth
        if False in [reac_coeff, void_worth, max_cycle, fuel_temp, triso_pow, tmax, tmin]:
            return False
        else:
            return True
    def print_result(self, x):
        """Print each constraint's raw value at x (diagnostic output)."""
        print 'Reac coeff constr is {}'.format(self.reac_co_eval(x))
        print 'Void worth constr is {}'.format(self.void_w_eval(x))
        print 'Max cycle len constr is {}'.format(self.max_cycle_eval(x))
        print 'Peak fuel temp constr is {}'.format(self.fuel_temp_eval(x))
        print 'Peak power per particle constr is {}'.format(self.triso_pow_eval(x))
class RandGlobal(object):
    """Accumulator for multi-start ('random') global optimization.

    Collects scipy.optimize.minimize results from repeated random starting
    guesses, tracks the best successful one, and (after finish_step())
    exposes .x / .fun so downstream code can treat it like a scipy
    OptimizeResult.
    """
    def __init__(self):
        self.num_success = 0
        self.num_failure = 0
        self.res_success = []   # full OptimizeResult objects, successful runs
        self.res_failure = []
        self.loc_success = []   # x locations of successful local minimizations
        self.loc_failure = []
        self.fun_success = []   # objective values of successful runs
        self.fun_failure = []
        self.x_guesses = []     # every starting guess tried
        self.feval_tot = 0      # total function evaluations across all runs
        self.best = None        # best (lowest .fun) successful result so far
        self.best_count = 0     # consecutive results without a new best
    def add_result(self, result): # Can add a check here to make sure result is a OptimizeResult object | TAG: Improve
        """Record one local-minimization result and update the running best."""
        if result.success:
            self.num_success += 1
            print 'Successful local iter completed ({} successful so far)'.format(self.num_success)
            if self.best == None:
                self.best = result
            elif self.best.fun > result.fun:
                # Found a new best location
                self.best = result
                self.best_count = 0
                print 'Found new optimal location {} with value {}'.format(
                    self.best.x, self.best.fun)
            else:
                # not any better than current best
                # increment counter that tracks number of iterations
                # with same global best
                self.best_count += 1
            self.loc_success.append(result.x)
            self.fun_success.append(result.fun)
            self.res_success.append(result)
        else:
            self.num_failure += 1
            print 'Failed local opt iteration, status={}, {} total failed'.format(result.status, self.num_failure)
            self.loc_failure.append(result.x)
            self.fun_failure.append(result.fun)
            self.res_failure.append(result)
        # Count evaluations from both successful and failed runs.
        self.feval_tot += result.nfev
    def add_x_guess(self, x):
        """Remember a starting guess (stacked into an array in finish_step())."""
        self.x_guesses.append(x)
    def print_results(self):
        """Print a summary of the finished global optimization."""
        selfstr = """
Global optimization result
Total func evals: {}
Final optimum: {}
Final opt location: {}
""".format(self.feval_tot, self.best.fun, self.best.x)
        print selfstr
    def make_scipy_like(self):
        """Expose the best result's x/fun at top level (scipy-like API)."""
        self.x = self.best.x
        self.fun = self.best.fun
    def combine_internal_lists(self):
        """Stack per-iteration lists into arrays; require >= 1 success."""
        self.x_guesses = np.vstack(self.x_guesses)
        if self.num_success > 0:
            self.loc_success = np.vstack(self.loc_success)
            self.fun_success = np.array(self.fun_success)
        else:
            raise Exception('No local iterations were successful!')
        if self.num_failure > 0 :
            self.loc_failure = np.vstack(self.loc_failure)
            self.fun_failure = np.array(self.fun_failure)
    def finish_step(self):
        """Finalize: build scipy-like fields, stack arrays, print summary."""
        self.make_scipy_like()
        self.combine_internal_lists()
        self.print_results()
#msg = """New optimum found: {} at loc {}"""
class MaxTrisoTemp(object):
    """Estimate the peak TRISO fuel-particle temperature for a design vector.

    dv_vec: [core height (cm), packing fraction, kernel radius (cm)].
    All computation happens in __init__; get_tmax()/get_tmax_constr()
    just read the cached result.

    NOTE(review): layer thicknesses/conductivities and the peaking
    factors are hard-coded; the conductivities are attributed to
    "jianwei's thesis, pg. 18" in the original comment.
    """
    def __init__(self, dv_vec):
        self.coreh = dv_vec[0]*1e-2 # core height [input: cm, output: m]
        self.pf = dv_vec[1]
        self.krad = dv_vec[2]*1e-2 # kernel radius [input: cm, output: m]
        self.pow = 20E6  # presumably total core power [W] -- confirm
        self.t_surf = 1209+50 # triso surface temp [k]
        self.t_max_constr = 1623.0  # temperature limit used by get_tmax_constr()
        self.npins = 3240.0 # number of pins in core, 60 pin/assm*54 assm/core
        self.pinrad = 0.007 # fuel pin radius, [m]
        self.layer_thick = np.array([0.0, 0.01, 0.004, 0.0035, 0.004]) # [cm], convert to [m] later
        self.tot_lay_thick = self.layer_thick.cumsum()
        self.layer_k = [3.5, 0.5, 4.0, 30.0, 4.0] # Thermal cond of layers [W/m-k], jianwei's thesis, pg. 18
        # Outer radius of each coating layer [m].
        self.layerrad = self.krad + self.tot_lay_thick*1e-2
        self.vol_pin = np.pi * self.pinrad**2.0 * self.coreh
        self.vol_triso = self.vol_pin * self.pf
        self.vol_single_triso = 4.0/3.0 * np.pi * self.layerrad[-1]**3.0
        self.num_triso_pin = self.vol_triso / self.vol_single_triso
        self.pow_pin = self.pow / self.npins
        self.pow_triso = self.pow_pin / self.num_triso_pin
        # axial, radial, and pin peaking
        self.pow_triso_peak = self.pow_triso * 1.2856 * 1.5159 * 0.9067780
        # Temperature rise from the surface through each layer inward.
        self.t_max = self.t_surf + self.pow_triso_peak/(6.0*self.layer_k[0]*self.layerrad[0])
        for idx in xrange(1,len(self.layer_k)):
            self.t_max = self.t_max - self.pow_triso_peak*(1.0/(self.layer_k[idx]*self.layerrad[idx]) \
                - 1.0/(self.layer_k[idx]*self.layerrad[idx - 1]))
    def get_tmax(self):
        """Return the estimated peak particle temperature [K]."""
        return self.t_max
    def get_tmax_constr(self):
        """Return limit minus peak temperature (>= 0.0 means satisfied)."""
        return self.t_max_constr - self.t_max
def print_fun(x, f, accepted):
    """Basinhopping callback: report each local minimum and whether the
    step was accepted (x is unused but required by the callback API)."""
    status = int(accepted)
    message = "at minima %.4f accepted %d" % (f, status)
    print(message)
def merge_dict(dict1, dict2):
    """Return a new dict with dict1's entries overridden by dict2's.

    Neither input is modified; key collisions resolve in dict2's favor.
    """
    combined = dict1.copy()
    combined.update(dict2)
    return combined
|
print("Efficient program to print all prime factors of a given number")
import math
def prime_factor(n):
    """Print the prime factors of n, one per line, in nondecreasing order.

    Bug fixed: the original used true division (n = n/2, n = n/i), which
    turns n into a float in Python 3 so the final factor printed as e.g.
    "5.0" instead of "5".  Floor division keeps everything integral.
    """
    # Strip out all factors of 2 first so the main loop can step by 2.
    while n % 2 == 0:
        print(2)
        n //= 2
    # Only odd candidates up to sqrt(n) need checking.
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            print(i)
            n //= i
    # Whatever remains (> 2) is itself a prime factor.
    if n > 2:
        print(n)
# Read n from stdin and print its prime factorization.
n = int(input("n = "))
prime_factor(n)
print("prime number and fibonacci")
def fibonacci(n):
    """Print Fibonacci terms F(2)..F(n) and return F(n).

    The surrounding script prints "0" and "1" (F(0), F(1)) itself, so this
    function only prints from F(2) onward, matching the original layout.

    Bug fixed: the original returned None, so the caller's
    print(fibonacci(n)) displayed the literal "None"; the nth term is now
    returned, and n <= 0 no longer relies on loop-local state.
    """
    if n <= 0:
        # Degenerate input: treat as F(0).
        return 0
    a, b = 0, 1  # F(i-1), F(i)
    for _ in range(2, n + 1):
        a, b = b, a + b
        print(b)
    return b
n = int(input("n = "))
# F(0) and F(1) are printed here; fibonacci() prints from F(2) onward.
print("0")
print("1")
# NOTE(review): fibonacci() returns None in the current implementation,
# so this line prints the literal "None".
print(fibonacci(n))
print("prime numbers")
# NOTE(review): `max` and `min` shadow the builtins, and the prime-listing
# loop that should consume them appears to have been truncated here.
max = int(input("max = "))
min = int(input("min = "))
|
#!/home/roshan/Documents/extra/cms/cmsenv/bin/python3
# Minimal Django management entry point (equivalent to manage.py).
# NOTE(review): unlike a stock manage.py, DJANGO_SETTINGS_MODULE is not set
# here -- presumably the environment/virtualenv provides it; confirm.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
import argparse
import pyfaidx
import pyBigWig
import pandas as pd
import numpy as np
import os
import json
import chrombpnet.helpers.hyperparameters.param_utils as param_utils
def parse_data_args():
    """Build the argument parser for the bias-model hyper-parameter step.

    Only data-related arguments are attached here; parse_model_args()
    adds the model-architecture arguments and performs the actual parse.

    Returns:
        argparse.ArgumentParser: parser with the data arguments attached.
    """
    parser=argparse.ArgumentParser(description="find hyper-parameters for chrombpnet defined in src/training/models/chrombpnet_with_bias_model.py")
    parser.add_argument("-g", "--genome", type=str, required=True, help="Genome fasta")
    parser.add_argument("-i", "--bigwig", type=str, required=True, help="Bigwig of tn5 insertions. Ensure it is +4/-4 shifted")
    parser.add_argument("-p", "--peaks", type=str, required=True, help="10 column bed file of peaks. Sequences and labels will be extracted centered at start (2nd col) + summit (10th col).")
    parser.add_argument("-n", "--nonpeaks", type=str, required=True, help="10 column bed file of non-peak regions, centered at summit (10th column)")
    parser.add_argument("-b", "--bias-threshold-factor", type=float, default=0.5, help="A threshold is applied on maximum count of non-peak region for training bias model, which is set as this threshold x min(count over peak regions)")
    # Typo fix in user-facing help text: "outlies" -> "outliers".
    parser.add_argument("-oth", "--outlier-threshold", type=float, default=0.9999, help="threshold to use to filter outliers")
    parser.add_argument("-j", "--max-jitter", type=int, default=50, help="Maximum jitter applied on either side of region (default 500 for chrombpnet model)")
    parser.add_argument("-fl", "--chr-fold-path", type=str, required=True, help="Fold information - dictionary with test,valid and train keys and values with corresponding chromosomes")
    return parser
def parse_model_args(parser):
    """Attach model-architecture arguments to *parser* and parse sys.argv.

    Args:
        parser: the ArgumentParser produced by parse_data_args().
    Returns:
        argparse.Namespace: all parsed arguments (data + model).
    """
    # arguments here defined the following model - src/training/models/chrombpnet_with_bias_model.py
    parser.add_argument("-il", "--inputlen", type=int, help="Sequence input length")
    parser.add_argument("-ol", "--outputlen", type=int, help="Prediction output length")
    parser.add_argument("-fil", "--filters", type=int, default=128, help="Number of filters to use in chrombpnet mode")
    parser.add_argument("-dil", "--n-dilation-layers", type=int, default=4, help="Number of dilation layers to use in chrombpnet model")
    parser.add_argument("-op", "--output-prefix", required=True, help="output prefix for storing hyper-param TSV for chrombpnet")
    args = parser.parse_args()
    return args
def main(args):
    """Filter bias-model training regions and write hyper-parameter TSVs.

    Works on the train+valid chromosomes; the test split is carried
    through untouched.  Three filtering steps:
      1. drop regions too close to chromosome edges,
      2. drop non-peaks whose counts exceed
         bias_threshold_factor * (1st percentile of peak counts),
      3. drop count outliers outside the (1-oth, oth) quantile band.
    Writes <prefix>filtered.bias_nonpeaks.bed, <prefix>bias_data_params.tsv
    and <prefix>bias_model_params.tsv.
    """
    # read the fold information - we will evaluate hyperparams and filter outliers on the train+valid set
    # do nothing on the test set
    splits_dict=json.load(open(args.chr_fold_path))
    chroms_to_keep=splits_dict["train"]+splits_dict["valid"]
    test_chroms_to_keep=splits_dict["test"]
    print("evaluating hyperparameters on the following chromosomes",chroms_to_keep)
    # read from bigwigs and fasta file
    bw = pyBigWig.open(args.bigwig)
    genome = pyfaidx.Fasta(args.genome)
    # read peaks and non peaks
    in_peaks = pd.read_csv(args.peaks,
                           sep='\t',
                           header=None,
                           names=["chr", "start", "end", "1", "2", "3", "4", "5", "6", "summit"])
    in_nonpeaks = pd.read_csv(args.nonpeaks,
                              sep='\t',
                              header=None,
                              names=["chr", "start", "end", "1", "2", "3", "4", "5", "6", "summit"])
    # NOTE(review): asserts are stripped under `python -O`; raise explicit
    # errors if these validations must always run.
    assert(in_peaks.shape[0] != 0) # peaks file is empty
    assert(in_nonpeaks.shape[0] !=0) # non peaks file is empty
    assert(args.inputlen >= args.outputlen) # inputlen should be greater than the outputlen
    # inputlen and outlen are chosen based on the filters and dilations layers used
    # get train/valid peaks and test peaks seperately
    peaks = in_peaks[(in_peaks["chr"].isin(chroms_to_keep))]
    test_peaks = in_peaks[(in_peaks["chr"].isin(test_chroms_to_keep))]
    nonpeaks = in_nonpeaks[(in_nonpeaks["chr"].isin(chroms_to_keep))]
    test_nonpeaks = in_nonpeaks[(in_nonpeaks["chr"].isin(test_chroms_to_keep))]
    # step 1 filtering: filter nonpeaks that are in the edges - prevents us from making the inputlen regions - do this for all train/test/valid
    nonpeaks = param_utils.filter_edge_regions(nonpeaks, bw, args.inputlen, peaks_bool=0)
    test_nonpeaks = param_utils.filter_edge_regions(test_nonpeaks, bw, args.inputlen, peaks_bool=0)
    peaks = param_utils.filter_edge_regions(peaks, bw, args.inputlen, peaks_bool=1)
    # step 2 filtering: filter nonpeaks that have counts less than a threshold_factor (minimum of peak counts)
    peak_cnts, _ = param_utils.get_seqs_cts(genome, bw, peaks, args.inputlen, args.outputlen)
    nonpeak_cnts, _ = param_utils.get_seqs_cts(genome, bw, nonpeaks, args.inputlen, args.outputlen)
    assert(len(peak_cnts) == peaks.shape[0])
    assert(len(nonpeak_cnts) == nonpeaks.shape[0])
    final_cnts = nonpeak_cnts
    # Cap derived from the 1st percentile of the peak counts.
    counts_threshold = np.quantile(peak_cnts,0.01)*args.bias_threshold_factor
    assert(counts_threshold > 0) # counts threshold is 0 - all non peaks will be filtered!
    final_cnts = final_cnts[final_cnts < counts_threshold]
    print("Upper bound counts cut-off for bias model training: ", counts_threshold)
    print("Number of nonpeaks after the upper-bount cut-off: ", len(final_cnts))
    assert(len(final_cnts) > 0) # Upper bound cut-off is too stringent so there are no points left for training
    # step 3 filtering: filter nonpeaks that are outliers in train and valid set - no filtering on test set
    # Quantile band computed on the capped set, but applied to the full
    # nonpeak_cnts vector, so the cap and the band combine here.
    upper_thresh = np.quantile(final_cnts, args.outlier_threshold)
    lower_thresh = np.quantile(final_cnts, 1-args.outlier_threshold)
    nonpeaks = nonpeaks[(nonpeak_cnts<upper_thresh) & (nonpeak_cnts>lower_thresh)]
    print("Number of nonpeaks after applying upper-bound cut-off and removing outliers : ", nonpeaks.shape[0])
    # combine train valid and test peak set and store them in a new file
    frames = [nonpeaks, test_nonpeaks]
    all_nonpeaks = pd.concat(frames)
    all_nonpeaks.to_csv("{}filtered.bias_nonpeaks.bed".format(args.output_prefix), sep="\t", header=False, index=False)
    # find counts loss weight for model training - using train and validation set
    counts_loss_weight = np.median(final_cnts[(final_cnts < upper_thresh) & (final_cnts>lower_thresh)])/10
    print("counts_loss_weight:", counts_loss_weight)
    assert(counts_loss_weight != 0)
    if counts_loss_weight < 1.0:
        # Floor the weight at 1.0 for low-depth data.
        counts_loss_weight = 1.0
        print("WARNING: you are training on low-read depth data")
    # store the parameters being used - in a TSV file
    # NOTE(review): `file` shadows the builtin; a `with open(...)` block
    # would also guarantee closure on error.
    file = open("{}bias_data_params.tsv".format(args.output_prefix),"w")
    file.write("\t".join(["counts_sum_min_thresh", str(round(lower_thresh,2))]))
    file.write("\n")
    file.write("\t".join(["counts_sum_max_thresh", str(round(upper_thresh,2))]))
    file.write("\n")
    file.write("\t".join(["trainings_pts_post_thresh", str(sum((final_cnts<upper_thresh) & (final_cnts>lower_thresh)))]))
    file.write("\n")
    file.close()
    file = open("{}bias_model_params.tsv".format(args.output_prefix),"w")
    file.write("\t".join(["counts_loss_weight", str(round(counts_loss_weight,2))]))
    file.write("\n")
    file.write("\t".join(["filters", str(args.filters)]))
    file.write("\n")
    file.write("\t".join(["n_dil_layers", str(args.n_dilation_layers)]))
    file.write("\n")
    file.write("\t".join(["inputlen", str(args.inputlen)]))
    file.write("\n")
    file.write("\t".join(["outputlen", str(args.outputlen)]))
    file.write("\n")
    file.write("\t".join(["max_jitter", str(args.max_jitter)]))
    file.write("\n")
    file.write("\t".join(["chr_fold_path", str(args.chr_fold_path)]))
    file.write("\n")
    file.write("\t".join(["negative_sampling_ratio", str(1.0)])) # this is just a dummy variable because the train.py pipeline needs it - all negatives will be used for bias model training
    file.close()
if __name__=="__main__":
    # read the arguments
    # Two-stage parse: data args build the parser, model args complete it
    # and call parse_args().
    parser = parse_data_args()
    args = parse_model_args(parser)
    main(args)
|
from django.db import models
from mpesa_api.util.managers import AuthTokenManager
class AuthToken(models.Model):
    """M-Pesa OAuth access token cached in the database."""
    access_token = models.CharField(max_length=40)
    # NOTE(review): `type` shadows the builtin; kept for schema compatibility.
    type = models.CharField(max_length=3)
    # Token lifetime as reported by the API (seconds, per field name).
    expires_in = models.BigIntegerField()
    objects = AuthTokenManager()

    def __str__(self):
        return self.access_token

    class Meta:
        db_table = "tbl_access_token"
class B2CRequest(models.Model):
    """
    Handles B2C requests: one outbound business-to-customer payment plus
    the synchronous response/error fields returned by the M-Pesa API.
    """
    id = models.BigAutoField(primary_key=True)
    phone = models.BigIntegerField()
    amount = models.DecimalField(max_digits=20, decimal_places=2)
    # Identifiers used to correlate the asynchronous result callback.
    conversation_id = models.CharField(max_length=40, blank=True, null=True)
    originator_conversation_id = models.CharField(max_length=40, blank=True, null=True)
    response_code = models.CharField(max_length=5, blank=True, null=True)
    response_description = models.TextField(blank=True, null=True)
    request_id = models.CharField(max_length=20, blank=True, null=True)
    error_code = models.CharField(max_length=20, blank=True, null=True)
    error_message = models.TextField(blank=True, null=True)
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.phone)

    class Meta:
        db_table = "tbl_b2c_requests"
        verbose_name_plural = "B2C Requests"
class B2CResponse(models.Model):
    """
    Handles B2C Response: the asynchronous result callback for a B2C
    payment, including transaction details and account balances.
    """
    id = models.BigAutoField(primary_key=True)
    phone = models.BigIntegerField(blank=True, null=True)
    amount = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
    # Correlates back to the originating B2CRequest.
    conversation_id = models.CharField(max_length=40, blank=True, null=True)
    originator_conversation_id = models.CharField(max_length=40, blank=True, null=True)
    result_type = models.CharField(max_length=5, blank=True, null=True)
    result_code = models.CharField(max_length=5, blank=True, null=True)
    result_description = models.TextField(blank=True, null=True)
    transaction_id = models.CharField(max_length=20, blank=True, null=True)
    transaction_receipt = models.CharField(max_length=20, blank=True, null=True)
    transaction_amount = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True
    )
    # Account balances reported with the result.
    working_funds = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True
    )
    utility_funds = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True
    )
    paid_account_funds = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True
    )
    transaction_date = models.DateTimeField(blank=True, null=True)
    mpesa_user_name = models.CharField(max_length=100, blank=True, null=True)
    # Single-character flag as delivered by the API.
    is_registered_customer = models.CharField(max_length=1, blank=True, null=True)

    def __str__(self):
        return str(self.phone)

    class Meta:
        db_table = "tbl_b2c_response"
        verbose_name_plural = "B2C Responses"
class C2BRequest(models.Model):
    """
    Handles C2B Requests: an inbound customer-to-business payment
    notification, tracked through validation and completion.
    """
    id = models.BigAutoField(primary_key=True)
    transaction_type = models.CharField(max_length=20, blank=True, null=True)
    # Unique per M-Pesa transaction; enforced at the DB level.
    transaction_id = models.CharField(max_length=20, unique=True)
    transaction_date = models.DateTimeField(blank=True, null=True)
    amount = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
    business_short_code = models.CharField(max_length=20, blank=True, null=True)
    bill_ref_number = models.CharField(max_length=50, blank=True, null=True)
    invoice_number = models.CharField(max_length=50, blank=True, null=True)
    org_account_balance = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True, default=0.0
    )
    third_party_trans_id = models.CharField(max_length=50, blank=True, null=True)
    phone = models.BigIntegerField(blank=True, null=True)
    first_name = models.CharField(max_length=50, blank=True, null=True)
    middle_name = models.CharField(max_length=50, blank=True, null=True)
    last_name = models.CharField(max_length=50, blank=True, null=True)
    # Processing-state flags for the two callback phases.
    is_validated = models.BooleanField(default=False)
    is_completed = models.BooleanField(default=False)
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "{} {} {}".format(self.first_name, self.middle_name, self.last_name)

    class Meta:
        db_table = "tbl_c2b_requests"
        verbose_name_plural = "C2B Requests"

    @property
    def name(self):
        """Customer full name (duplicates __str__ for template use)."""
        return "{} {} {}".format(self.first_name, self.middle_name, self.last_name)
class OnlineCheckout(models.Model):
    """
    Handles Online Checkout (STK push) requests and the synchronous
    acknowledgement fields returned when the push is initiated.
    """
    id = models.BigAutoField(primary_key=True)
    phone = models.BigIntegerField()
    amount = models.DecimalField(max_digits=20, decimal_places=2)
    # True = paybill flow; False presumably buy-goods/till -- confirm.
    is_paybill = models.BooleanField(default=True)
    checkout_request_id = models.CharField(max_length=50, default="")
    account_reference = models.CharField(max_length=50, default="")
    transaction_description = models.CharField(max_length=50, blank=True, null=True)
    customer_message = models.CharField(max_length=100, blank=True, null=True)
    merchant_request_id = models.CharField(max_length=50, blank=True, null=True)
    response_code = models.CharField(max_length=5, blank=True, null=True)
    response_description = models.CharField(max_length=100, blank=True, null=True)
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.phone)

    class Meta:
        db_table = "tbl_online_checkout_requests"
        verbose_name_plural = "Online Checkout Requests"
class OnlineCheckoutResponse(models.Model):
    """
    Handles Online Checkout Response: the asynchronous result callback
    for an STK push, correlated via checkout/merchant request ids.
    """
    id = models.BigAutoField(primary_key=True)
    merchant_request_id = models.CharField(max_length=50, blank=True, null=True)
    checkout_request_id = models.CharField(max_length=50, default="")
    result_code = models.CharField(max_length=5, blank=True, null=True)
    result_description = models.CharField(max_length=100, blank=True, null=True)
    mpesa_receipt_number = models.CharField(max_length=50, blank=True, null=True)
    transaction_date = models.DateTimeField(blank=True, null=True)
    phone = models.BigIntegerField(blank=True, null=True)
    amount = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return str(self.phone)

    class Meta:
        db_table = "tbl_online_checkout_responses"
        verbose_name_plural = "Online Checkout Responses"
|
"""
Merge function for 2048 game.
"""
def merge(line):
    """
    Merge one row/column toward the front, 2048-style.

    Non-zero tiles slide to the front preserving order, each adjacent
    equal pair combines exactly once (left to right), and the result is
    zero-padded back to the input length.  A length-1 line is returned
    unchanged.
    """
    if len(line) == 1:
        return line
    # Slide step: keep the non-zero tiles in their original order.
    tiles = [value for value in line if value != 0]
    merged = []
    idx = 0
    while idx < len(tiles):
        if idx + 1 < len(tiles) and tiles[idx] == tiles[idx + 1]:
            # Combine the adjacent pair; a merged tile never merges again
            # within the same pass.
            merged.append(tiles[idx] * 2)
            idx += 2
        else:
            merged.append(tiles[idx])
            idx += 1
    # Pad with zeros so the output length matches the input length.
    merged.extend([0] * (len(line) - len(merged)))
    return merged
# Manual smoke checks (Python 2 print statements); expected results are
# [4, 2, 16, 8, 0, 0, 0, 0] and [4, 4, 16, 8, 2, 0, 0, 0].
print merge([2, 2, 0, 2, 8, 8, 8, 0])
print merge([2, 2, 2, 2, 8, 8, 8, 2])
|
import pymysql
CHARSET = 'utf8'
class Db(object):
    """Thin pymysql convenience wrapper.

    Connects on construction; select/insert/update/delete execute raw SQL
    strings.  Mutating methods return True on success and False on error;
    select returns cursor.fetchall() or None on error.

    NOTE(review): all methods take raw SQL strings -- callers must build
    parameterized queries themselves for any untrusted input.
    """
    def __init__(self, ip, port, user_name, password, db_name):
        self.ip = ip
        self.port = port
        self.userName = user_name
        self.password = password
        self.dbName = db_name
        self.__connect()

    def __connect(self):
        """Open the connection and cursor; on failure set both to None."""
        try:
            self.connection = pymysql.connect(host=self.ip,
                                              port=self.port,
                                              user=self.userName,
                                              passwd=self.password,
                                              db=self.dbName,
                                              charset=CHARSET)
            self.cursor = self.connection.cursor()
        except Exception as e:
            # Bug fix: also define self.cursor here.  Previously a failed
            # connect left it unset, so every later query raised
            # AttributeError instead of being skipped gracefully.
            self.connection = None
            self.cursor = None
            print("Connect mysql error!", e)
            return

    def __del__(self):
        # getattr guard: __del__ may run on a partially initialized object.
        if getattr(self, 'connection', None):
            self.connection.close()

    def __commit(self, sqlStr):
        """Execute sqlStr and commit; return True when a cursor exists."""
        cursor = self.cursor
        if cursor:
            cursor.execute(sqlStr)
            self.connection.commit()
            return True

    # Query helper.  Not for large result sets: fetchall() loads the whole
    # result into memory.  (Comment translated from the original Chinese.)
    def select(self, sqlStr):
        try:
            cursor = self.cursor
            if cursor:
                cursor.execute(sqlStr)
                result = cursor.fetchall()
                return result
        except Exception as e:
            print("Excute select method error!", e)
            return None

    # Insert helper.
    def insert(self, sqlStr):
        try:
            # Bug fix: propagate __commit's result; the original fell
            # through and returned None even on success.
            return self.__commit(sqlStr)
        except Exception as e:
            print("Excute insert method error!", e)
            return False

    # Update helper.
    def update(self, sqlStr):
        try:
            return self.__commit(sqlStr)
        except Exception as e:
            print("Excute update method error!", e)
            return False

    # Delete helper.
    def delete(self, sqlStr):
        try:
            return self.__commit(sqlStr)
        except Exception as e:
            print("Excute delete method error!", e)
            # Consistency fix: return False (was None) to match
            # insert/update error returns.
            return False
if __name__ =='__main__':
    # Ad-hoc manual test against a dev MySQL instance.
    # NOTE(review): credentials are hard-coded; acceptable only for a
    # throwaway local test, never for shipped code.
    HOST = '192.168.240.197'
    PORT = 8888
    USER = 'root'
    PASSWD = 'root'
    DB = "mall_admin"
    db = Db(HOST, PORT, USER, PASSWD, DB)
    # # Normal case
    # result = db.select("select * from sys_role")
    # print(result)
    #
    # # Error case: nonexistent table
    # result1 = db.select("select * from sys_role1")
    # print(result1)
    #
    # # Error case: nonexistent column
    # result2 = db.select("select column_One from sys_role")
    # print(result2)
    # Normal case
    # NOTE(review): SQL is built as a literal string here; use
    # parameterized queries for any non-literal input.
    sql = "insert into sys_role (`role_id`, `role_name`, `role_code`, `description`, `status`, `create_time`, `create_by`, `modify_by`, `modify_time`) values ('6386818388630437124', '仓管', 'storer', '仓管', '1', '2018-10-27 13:54:28', 'luhaitao', 'luhaitao', '2018-10-27 13:54:28')"
    result3 = db.insert(sql)
    print(result3)
'''
def letterCombinations(digits):
n=len(digits)
if n==0:
return []
ds={
'2': list('abc'),
'3': list('def'),
'4': list('ghi'),
'5': list('jkl'),
'6': list('mno'),
'7': list('pqrs'),
'8': list('tuv'),
'9': list('wxyz'),
}
if n==1:
return ds[digits]
res=ds[digits[0]]
i=0#代表digits的下标
while i <n-1:#对digits循环
i+=1
di=ds[digits[i]]
#print(di)
temp=[]
m=len(res)#res的长度
for j in res:#res循环
for k in di:#di循环
temp.append(j+k)#将每一个最新元素添加至临时列表中
res=temp#更新输出列表
return res
'''
def letterCombinations(digits):
    """Return every letter string a phone-keypad digit string can spell.

    Combinations come out in keypad order, e.g. '23' ->
    ['ad', 'ae', 'af', 'bd', ...].  An empty input yields an empty list.
    """
    if not digits:
        return []
    keypad = {
        '2': 'abc',
        '3': 'def',
        '4': 'ghi',
        '5': 'jkl',
        '6': 'mno',
        '7': 'pqrs',
        '8': 'tuv',
        '9': 'wxyz',
    }
    combos = ['']
    for digit in digits:
        letters = keypad[digit]
        # Extend every partial combination by each letter of this digit.
        next_combos = []
        for prefix in combos:
            for ch in letters:
                next_combos.append(prefix + ch)
        combos = next_combos
    return combos
# Smoke run: 1 * 3 * 3**4 = 243 combinations for '23333'.
digits='23333'
print(letterCombinations(digits))
from Crypto.PublicKey import RSA
from base64 import b64decode
# NOTE(review): 1024-bit RSA is below modern minimums (>= 2048 recommended).
key = RSA.generate(1024)
print key.exportKey('PEM')
pub_key = key.publickey()
print pub_key.exportKey('PEM')
# SECURITY: a complete RSA private key is embedded below in base64 --
# anything encrypted to its public key is readable by whoever has this file.
p_key = b64decode('LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlDV3dJQkFBS0JnUUNjVDRaalluR2lNWXRFZDcrL1l0RmdHRTNlZ1RrV0Jpd2hXQ240MUxQU1lzdXErNE9RCnBuQk1ZUjVZdExiOTRXTzdpNnZHYU9PTnNzSE5kWUFkblJETThpTFN2L3JUMHVPdGdTd2RaTmlMQzduNmdILzgKOFJqRlFFTGptSldRemdzWDhDVXFkWm80SnJNZkJTbXd3RFlBNUJtMGI3Nmd6bXFoK3lMWGErdW5PUUlEQVFBQgpBb0dBYWNhUy9adzNvM2Q5Yy9iSkpqMDd6SmlGMFdXRytQVnlWWm93eFBkRFBNS29hbXRMYTg2RnZkb1d6QloyCm9yVXNaVlN1Q0ZVZ2I5b2d0ZVdtcmVPRTR1d0FQK0RGKzJpU1h0MlZxTEdJZ29ieDZib0YrTktjMXNvUUFEaFQKNkw2emZNSzFNVzZwSDVYUGNVNUg4QU1TWUREYVFxeEVtWEp0azg4OUxJTVpVUUVDUVFDNDRYVjRqdEwwcjFkcQpjY1ByTlIrTEp4UjExMkJPMEhXLzduRVEwQUtUbUhIZ2EvbmV1ejMycHF1TVNRM2xodXRTNW5kanNzdmJ3dHJuCjNWdVhKRUJaQWtFQTJIQ1E0bE12MHI5Y3B2b2lCTVR0cDlYS2dIeU5Vbmd3dE1SODZ6VE0vK1dudlhTWjlDZk8KWXhyMlVvd2daMTlPNXpCZDFrVGRZQnNMbFVoL2syekI0UUpBWWtMeVBIRXNqZi9qWmgreEVZSGFrZ3JqUlA2RApvV0FLTlVoMXI0bmUxTE5oVXZZUWgrRGN2Z3MzZ2dnUjZyd2F0cVRuTDRZSDgzVk5BNDhTN3ZIRmdRSkFkeFZvCkFiNDNQOExkM1ZrZVFuVi9OS3FpSWhObFJneXU3Nlp6L0kwdWhWVDc5M2NpQlgycFJrbmRZUW1NQXBRanUzdVgKQlg4YU5maHJaUlZnYStLWXdRSkFYY1RKemE3ODNFeVk1YmxTZWIvWlJGYzZjdnFERDlRSDRXRVQ1b000ZjFnWgprZlI3Ti9qcEc4b09sZkl5d2o1RStaaHJaSTlEY1RmQjVnQWlFRHFrQmc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==')
p_key = RSA.importKey(p_key)
# NOTE(review): Key.decrypt() is raw (textbook) RSA with no padding check;
# PyCrypto deprecates it in favor of Crypto.Cipher.PKCS1_OAEP.
data = p_key.decrypt(b64decode('L/R8ejlvVP4+JvgvsSI+JaLn6YCArf5fTAIfUwMNCrJ8HkRkQLEB5RH5COF1+9mSQoGY8wG23AtDyM0OEgm+zFCTibFOgieixjrv5OHAIB+akOahMWoyt/qAGnK9ZsLsv20apyzlH0llafbfQ0MkurU/c8O3Xj3m0VL1GOjHk14='))
# Decrypted plaintext is written to ./flag and echoed to stdout.
with open('flag', 'w') as f:
    f.write(data)
print data
|
from flask import Flask,render_template
from flask import request
from twilio.rest import Client
import requests
import requests_cache
# SECURITY: live-looking Twilio credentials are hard-coded below; anyone
# with this source can send messages from the account.  Move them to
# environment variables and rotate the auth token.
account_sid="AC51caf0ac9cf2c1e199213c4d81661e7b"
auth_token='a3d4fe0af9392aae0e2f1b3e349951ac'
client=Client(account_sid,auth_token)
# Flask app serving static assets from /static.
app=Flask(__name__, static_url_path='/static')
@app.route('/')
def registration_form():
    """Serve the travel-pass registration/login page."""
    return render_template('login_page.html')
@app.route('/login_page',methods=['POST','GET'])
def login_registration_dtls():
    """Process the travel-pass form: look up destination-district COVID
    stats, confirm or reject the trip, notify via WhatsApp, and render a
    summary page.

    NOTE(review): form fields are read without validation (KeyError/500 on
    missing fields), the 30% threshold and WhatsApp numbers are hard-coded,
    and the else branch still sends a message on plain GET requests.
    """
    # Raw form fields.
    first_name=request.form['fname']
    last_name=request.form['lname']
    email_id=request.form['eid']
    source_st=request.form['source_st']
    source_dt=request.form['source']
    destination_st=request.form['destination_st']
    destination_dt=request.form['destination']
    phone_number=request.form['phno']
    id_proof=request.form['idcard']
    date=request.form['trip']
    full_name=first_name+"."+last_name
    # District-level stats from the public covid19india API.
    r=requests.get('https://api.covid19india.org/v4/data.json')
    json_data=r.json()
    cnt=json_data[destination_st]['districts'][destination_dt]['total']['confirmed']
    pop=json_data[destination_st]['districts'][destination_dt]['meta']['population']
    # Percent of the destination district's population with confirmed cases.
    travel_pass=(cnt/pop)*100
    if travel_pass<30 and request.method =='POST':
        status='CONFIRMED'
        # WhatsApp confirmation via Twilio sandbox numbers.
        client.messages.create(to="whatsapp:+916303399473",
                               from_="whatsapp:+14155238886",
                               body="Hello" + " " + full_name + " " + "Your travel from" + " " + source_dt + " " + "to" + " " + destination_dt + " " + "Has" + " " + status + " " + "on" + " " + date + " ")
        return render_template('user_registration_dtls.html',var=full_name,var1=email_id,var2=id_proof,
                               var3=source_st,var4=source_dt,var5=destination_st,var6=destination_dt,
                               var7=phone_number,var8=date,var9=status)
    else:
        status='Not Confirmed'
        client.messages.create(to="whatsapp:+916303399473",
                               from_="whatsapp:+14155238886",
                               body="Hello " + " " + full_name + " " + "your travel from" + source_dt + " " + "To" + " " + destination_dt + " "
                               + "Has" + " " + status + " " + " On" + " " + date + " " + ",Please Apply later")
        return render_template('user_registration_dtls.html',var=full_name,var1=email_id,var2=id_proof,
                               var3=source_st,var4=source_dt,var5=destination_st,var6=destination_dt,
                               var7=phone_number,var8=date,var9=status)
if __name__ == "__main__":
    # Dev server only; debug=True must not be used in production.
    app.run(port=9000,debug=True)
import random
from random import choice
# Monte Carlo percolation estimate: each of `npart` particles random-walks
# from the centre of a freshly generated side x side obstacle grid; a
# particle that steps off any edge counts as having percolated.
density=float(input("density"))
maxsteps=10000   # cap on walk length per particle
npart=500        # number of independent particles
perc=0           # particles that escaped the grid
side=41
steps = [(1,0),(-1,0),(0,1),(0,-1)]  # 4-neighbour moves
grid=[[0 for x in range(side)] for y in range(side)]
for ipart in range(npart):
    x,y=side//2,side//2
    # Regenerate the obstacle grid for every particle: a cell is blocked
    # (1) with probability ~`density` (randint(0,100) <= density*100).
    # NOTE(review): x and y are reused as grid indices here before the
    # walk re-centres them below -- confusing but harmless.
    for x in range(side):
        for y in range(side):
            grid[x][y]=random.randint(0,100)
            if grid[x][y] <= (density*100):
                grid[x][y]=1
            else:
                grid[x][y]=0
    # Start the walk from the centre cell.
    x,y=side//2,side//2
    for i in range(maxsteps):
        grid[x][y] = 0   # clear the current cell so the walker never blocks itself
        sx,sy=choice(steps)
        y += sy
        x += sx
        if x<0 or y<0 or x==side or y==side:
            # Walked off the edge: percolated; move to the next particle.
            perc += 1
            break
        if grid[x][y] == 1 :
            # Blocked: undo the move and draw another random step.
            y -= sy
            x -= sx
            continue
print("Probability of the Particle Percolating=%5.2f"%(perc/npart))
#!/usr/bin/python
"""A simple script used to alter a systems MAC address. User provides the interface, and can supply a MAC address or
choose to use a randomly generated one. Script uses Python 2.7.16"""
import subprocess
import optparse
import random
import re
def generate_mac():
"""Generates a random MAC address"""
mac = ':'.join(("%012x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2))
return mac
def get_arguments():
    """Parse command-line options for the MAC changer.

    Returns the parsed options with ``interface`` and ``new_mac`` set.
    Exits via ``parser.error`` when either option is missing.

    Bug fix: the -m option is stored under dest='new_mac', so the old
    ``options.mac`` access raised AttributeError; it now reads and writes
    ``options.new_mac``.
    """
    parser = optparse.OptionParser()
    # arguments
    parser.add_option('-i', '--interface', dest='interface', help='Interface to change MAC address')
    parser.add_option('-m', '--mac', dest='new_mac', help='Specify new MAC address. Type "random" for random MAC.')
    (options, arguments) = parser.parse_args()
    # substitute a randomly generated MAC when requested
    if options.new_mac == 'random':
        options.new_mac = generate_mac()
    if not options.interface:
        parser.error('[-] Please specify an interface, use --help for more info.')
    elif not options.new_mac:
        parser.error('[-] Please specify a new MAC, use --help for more info.')
    return options
def change_mac(interface, new_mac):
    """Change the MAC address of *interface* to *new_mac* via ifconfig.

    Brings the interface down, rewrites the hardware address, then brings
    it back up.  Requires root privileges and the ifconfig tool.
    """
    # fix: the original message lacked a space ("eth0to aa:bb:...")
    print('[+] Changing MAC address for interface ' + interface + ' to ' + new_mac)
    # interface down
    subprocess.call(['ifconfig', interface, 'down'])
    # change MAC
    subprocess.call(['ifconfig', interface, 'hw', 'ether', new_mac])
    # interface up
    subprocess.call(['ifconfig', interface, 'up'])
def get_current_mac(interface):
    """Return the MAC address currently assigned to *interface*.

    Prints a warning and returns None when no address can be parsed from
    the ifconfig output.
    """
    output = subprocess.check_output(["ifconfig", interface]).decode()
    match = re.search(r"([0-9A-F]{2}[:-]){5}([0-9A-F]{2})", output, re.IGNORECASE)
    if not match:
        print('[-] Could not read MAC address.')
        return None
    return match.group(0)
# --- script entry: read options, change the MAC, then verify the change ---
options = get_arguments()
current_mac = get_current_mac(options.interface)  # gets original MAC address
print('Current MAC: ' + str(current_mac))
change_mac(options.interface, options.new_mac)  # changes MAC address
current_mac = get_current_mac(options.interface)  # gets MAC address after change
# NOTE(review): comparison is case-sensitive; ifconfig may report the address
# in a different letter case than the one supplied — confirm before relying on it.
if current_mac == options.new_mac:
    print('[+] MAC address was successfully changed to ' + current_mac)
else:
    print('[-] MAC address was not changed')
|
import socket

# Minimal UDP client: send one datagram to localhost:80 and print the reply.
server_host = '127.0.0.1'
server_port = 80
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
message = "AAABBBCCC"
client.sendto(message.encode('utf-8'), (server_host, server_port))
data, addr = client.recvfrom(4096)  # blocks until a datagram arrives
print(data)
print("\n")
print(addr)
# Prompt for an account balance and report whether it is sufficient.
balance = int(input("Balance:"))
if balance < 10:
    print("Balance Low")
else:
    print("Sufficient Balance")
def partition(arr, low, high):
    """Lomuto partition: place arr[high] (the pivot) at its sorted position.

    Elements <= pivot end up to its left, larger elements to its right.
    Works in place on arr[low..high] and returns the pivot's final index.
    """
    pivot = arr[high]
    boundary = low - 1  # last index of the "<= pivot" region
    for cur in range(low, high):
        if arr[cur] <= pivot:
            # grow the <= region and pull the current element into it
            boundary += 1
            arr[boundary], arr[cur] = arr[cur], arr[boundary]
    # drop the pivot just after the <= region
    arr[boundary + 1], arr[high] = arr[high], arr[boundary + 1]
    return boundary + 1
def quickSort(arr, low, high):
    """Sort arr[low..high] in place using recursive quicksort.

    arr  -- list to be sorted
    low  -- first index of the range
    high -- last index of the range
    """
    if low >= high:
        return  # zero or one element: nothing to do
    # partition() fixes one element at its final position and returns it
    split = partition(arr, low, high)
    quickSort(arr, low, split - 1)   # sort the left part
    quickSort(arr, split + 1, high)  # sort the right part
# Driver code to test above: sort a fixed list of 99 integers in place
# and print the sorted result.
arr = [291, 80, 43, 226, 206, 204, 36, 112, 17, 137, 284, 298, 133, 300, 38, 107, 299, 183, 8, 152, 56, 217, 179, 110, 61, 73, 232, 228, 221, 119, 285, 202, 192,
       195, 57, 225, 239, 86, 66, 247, 144, 58, 91, 120, 169, 41, 82, 2, 76, 173, 39, 102, 234, 34, 97, 29, 264, 159, 10, 65, 109, 68, 191, 171, 213, 15, 210, 16,
       209, 50, 111, 178, 3, 49, 260, 62, 263, 242, 261, 227, 31, 168, 200, 219, 128, 193, 254, 160, 24, 129, 170, 12, 106, 187, 166, 124, 126, 81, 79, 296]
n = len(arr)
quickSort(arr,0,n-1)
print (arr)
|
import unittest
from katas.kyu_7.switcheroo import switcheroo
class SwitcherooTestCase(unittest.TestCase):
    """Tests for switcheroo, which (per the expected values below) swaps
    every 'a' with 'b' and vice versa, leaving other characters alone."""
    def test_equal_1(self):
        # simple mixed string: a<->b swapped, c untouched
        self.assertEqual(switcheroo('abc'), 'bac')
    def test_equal_2(self):
        # longer mixed string
        self.assertEqual(switcheroo('aaabcccbaaa'), 'bbbacccabbb')
    def test_equal_3(self):
        # no 'a' or 'b': string is unchanged
        self.assertEqual(switcheroo('ccccc'), 'ccccc')
    def test_equal_4(self):
        # alternating pattern flips completely
        self.assertEqual(switcheroo('abababababababab'), 'babababababababa')
    def test_equal_5(self):
        # all 'a' becomes all 'b'
        self.assertEqual(switcheroo('aaaaa'), 'bbbbb')
|
#!/usr/bin/env python
import sys, os
sys.path.append(os.path.join(sys.path[0], "vendor/levpasha/instabot/src"))
from instabot import InstaBot
from check_status import check_status
from feed_scanner import feed_scanner
from unfollow_protocol import unfollow_protocol
from follow_protocol import follow_protocol
import time
import json
# Load bot configuration from settings.json and run the selected mode forever.
with open("settings.json") as settings_json:
    settings = json.load(settings_json)
# All InstaBot tuning knobs come straight from settings.json.
bot = InstaBot(login = settings["login"],
               password = settings["password"],
               like_per_day = settings["like_per_day"],
               comments_per_day = settings["comments_per_day"],
               tag_list = settings["tag_list"],
               tag_blacklist = settings["tag_blacklist"],
               user_blacklist = settings["user_blacklist"],
               max_like_for_one_tag = settings["max_like_for_one_tag"],
               follow_per_day = settings["follow_per_day"],
               follow_time = settings["follow_time"],
               unfollow_per_day = settings["unfollow_per_day"],
               unfollow_break_min = settings["unfollow_break_min"],
               unfollow_break_max = settings["unfollow_break_max"],
               log_mod = settings["log_mod"],
               proxy = settings["proxy"],
               unwanted_username_list = settings["unwanted_username_list"],
               unfollow_whitelist = settings["unfollow_whitelist"])
# Dispatch loop.  NOTE(review): settings["mode"] is re-read each pass but the
# dict is never reloaded from disk, so changing the mode requires a restart.
while True:
    mode = settings["mode"]
    if mode == 0 :
        # fully automatic mode
        bot.new_auto_mod()
    elif mode == 1 :
        # keep (following - followers) between 200 and 400:
        # unfollow while above 200, scan+follow while below 400
        check_status(bot)
        while bot.self_following - bot.self_follower > 200:
            unfollow_protocol(bot)
            time.sleep(10 * 60)
            check_status(bot)
        while bot.self_following - bot.self_follower < 400:
            while len(bot.user_info_list) < 50 :
                feed_scanner(bot)
                time.sleep(5 * 60)
                follow_protocol(bot)
                time.sleep(10 * 60)
            check_status(bot)
    elif mode == 2 :
        # automatic mode variant (bot_mode 1)
        bot.bot_mode = 1
        bot.new_auto_mod()
    elif mode == 3 :
        # unfollow-only mode
        unfollow_protocol(bot)
        time.sleep(10 * 60)
    elif mode == 4 :
        # scan the feed, then follow
        feed_scanner(bot)
        time.sleep(60)
        follow_protocol(bot)
        time.sleep(10 * 60)
    elif mode == 5 :
        # unfollow variant (bot_mode 2)
        bot.bot_mode = 2
        unfollow_protocol(bot)
    else :
        print ("Wrong mode!")
|
from django.urls import path
from django.contrib.auth.views import LogoutView
from .views import *
# URL routes for the lawyer-booking app.
# NOTE(review): the name 'dashboard' is registered twice below (for 'admin/'
# and for 'dashboard/'); Django's reverse() will resolve only one of them —
# confirm which URL templates are expected to get.
urlpatterns = [
    # lawyer profile, booking and reviews
    path('lawyer/<int:pk>/', LawyerDetailView.as_view(), name='lawyer-profile'),
    path('lawyer/<int:pk>/book/', BookingCreateView.as_view(), name='lawyer-booking'),
    path('lawyer/book/<int:pk>/update/', BookingUpdateView.as_view(), name='booking-update'),
    path('book/<int:pk>/review/', ReviewCreateView.as_view(), name='review-create'),
    path('expert-area/create/', ExpertAreaCreateView.as_view(), name='expert-area-create'),
    # public pages
    path('', home, name='home'),
    path('about/', about, name='about'),
    # admin dashboard pages
    path('admin/', dashboard, name='dashboard'),
    path('dashboard/lawyer/', dlawyers, name='d-lawyer'),
    path('dashboard/client/', dclients, name='d-client'),
    path('dashboard/client/<int:pk>/update/', CleintUpdateView.as_view(), name='dclient-update'),
    path('dashboard/lawyer/<int:pk>/update/', LawyerUpdateView.as_view(), name='dlawyer-update'),
    path('dashboard/user-reg/', RegistrationView.as_view(), name='user-reg'),
    path('dashboard/booking/all/', AdminBookingListView.as_view(), name='all-bookings'),
    path('dashboard/booking/create/', AdminBookingCreateView.as_view(), name='abooking-create'),
    path('dashboard/booking/update/<int:pk>', AdminBookingUpdateView.as_view(), name='abooking-update'),
    # auth and registration
    path('logout/', LogoutView.as_view(), name='logout'),
    path('dashboard/', dashboard, name='dashboard'),
    path('lawyer/', LawyerRegistrationView.as_view(), name='lawyer-reg'),
    path('client/', ClientRegistrationView.as_view(), name='client-reg'),
    path('profile/<int:pk>/', ProfileView.as_view(), name='user-profile'),
    path('profile/<int:pk>/delete/', UserDelete.as_view(), name='user-delete'),
    path('search/', search, name='search'),
]
from primer3plus import Design
from primer3plus.design.interfaces import ParameterDescriptor
from primer3plus.params import _load_default_boulderio
def test_descriptor():
    """ParameterDescriptor should proxy reads and writes to self.params."""
    class Foo:
        SEQUENCE_ID = ParameterDescriptor("SEQUENCE_ID")

        def __init__(self):
            self.params = _load_default_boulderio()

    instance = Foo()
    # writing through params is visible via the descriptor...
    instance.params["SEQUENCE_ID"] = "bar"
    assert instance.SEQUENCE_ID.value == "bar"
    # ...and writing through the descriptor is visible via params
    instance.SEQUENCE_ID.value = "baz"
    assert instance.SEQUENCE_ID.value == "baz"
    assert instance.params["SEQUENCE_ID"] == "baz"
def test_design_accessor():
    """Setting an attribute on Design should write through to its params."""
    design_obj = Design()
    design_obj.SEQUENCE_ID = "baz"
    assert design_obj.params["SEQUENCE_ID"] == "baz"
    # assert design.P.SEQUENCE_ID == 'baz'
|
from nltk.corpus import dependency_treebank
import random
import mst
from sparse_vector import sparse_vector
# read parsed data ('gold' parsed sentences) and add 'ROOT' node with 'ROOT' tag
parsed_sents=dependency_treebank.parsed_sents()
for sent in parsed_sents:
    sent.nodes[0].update({'word': 'ROOT','tag': 'ROOT','ctag': 'ROOT'})
# read taged data and add the word 'ROOT'
tagged_sents_orig=dependency_treebank.tagged_sents()
tagged_sents = []
for sent in tagged_sents_orig:
    tagged_sents.append([('ROOT', 'ROOT')] + sent)
# split train and test, from the parsed and frome the tagged-only sentences
# (90/10 split; the parsed and tagged lists are index-aligned)
train_tagged= tagged_sents[:int(len(parsed_sents) * 0.9)]
train_parsed= parsed_sents[:int(len(parsed_sents) * 0.9)]
test_parsed= parsed_sents[int(len(parsed_sents) * 0.9):]
test_tagged= tagged_sents[int(len(tagged_sents) * 0.9):]
# create set of all possible tags and words.
def get_all_possible_tags_and_words(data_set):
    """Collect every word and POS tag occurring in *data_set*, plus 'ROOT'.

    data_set -- iterable of sentences, each a list of (word, tag) pairs
    Returns two sorted lists: (all_words, all_tags).
    """
    words = {'ROOT'}
    tags = {'ROOT'}
    for sentence in data_set:
        for tagged in sentence:
            words.add(tagged[0])
            tags.add(tagged[1])
    return sorted(words), sorted(tags)
# Global vocabularies and index maps used by the feature functions below.
all_words,all_tags = get_all_possible_tags_and_words(tagged_sents)
tag2i = {pos: i for (i, pos) in enumerate(all_tags)}  # tag -> index
word2i = {word: i for (i, word) in enumerate(all_words)}  # word -> index
N = len(all_words)  # vocabulary size
T = len(all_tags)  # tag-set size
# return the index of the '1' in the words feature vector
def word_bigram_feature(w1, w2):
    """Index of the (w1, w2) word bigram in the flattened N*N feature space."""
    head_idx = word2i[w1]
    dep_idx = word2i[w2]
    return head_idx * N + dep_idx
# return the index of the '1' in the POS tags feature vector
def tag_bigram_feature(w1, w2):
    """Index of the (w1, w2) tag bigram in the flattened T*T feature space."""
    head_idx = tag2i[w1]
    dep_idx = tag2i[w2]
    return head_idx * T + dep_idx
# return the index of the '1' in the distance feature vector, or None if this feature vector should not contain '1'
def distance_feature(i, j):
    """Bucketed distance feature for an arc from position i to position j.

    Returns min(j - i - 1, 3), i.e. one of the buckets 0..3 for growing
    forward distance, or None when j <= i (the distance vector stays zero).
    """
    bucket = min(j - i - 1, 3)
    return None if bucket < 0 else bucket
# feature function, contain only word-bigram and POS-bigram tags
def feature_function(w1, t1, w2, t2, i, j):
    """Sparse feature vector for the arc (w1/t1) -> (w2/t2).

    The result is the word-bigram one-hot (length N**2) concatenated with
    the tag-bigram one-hot (length T**2).  Positions i, j are unused here.
    """
    word_part = sparse_vector([word_bigram_feature(w1, w2)], N ** 2)
    tag_part = sparse_vector([tag_bigram_feature(t1, t2)], T ** 2)
    word_part.concatenate(tag_part)
    return word_part
# full feature function, contain word-bigram feature, POS-bigram feature, and distance feature
def feature_function_w_dist(w1, t1, w2, t2, i, j):
    """Like feature_function, with a 4-bucket distance feature appended.

    When distance_feature returns None (backward or zero-length arc) an
    all-zero distance vector is appended instead.
    """
    feature_vec = feature_function(w1, t1, w2, t2, i, j)
    d_feature = distance_feature(i, j)
    # fix: compare to None with `is` (PEP 8); the old `temp3 = 0`
    # placeholder assignment was dead and has been removed
    if d_feature is None:
        dist_part = sparse_vector([], 4)
    else:
        dist_part = sparse_vector([d_feature], 4)
    feature_vec.concatenate(dist_part)
    return feature_vec
# the result is graph as dict of dict as the mst algorithem gets.
# a key in the dictionary are the index of the word in the sentence.
# graph[i][j] = -weight of the w_i ->w_j arch in the graph, the minus is because the mst search for minimum and we want maximum
def sentence_to_full_graph(feature_function, w, sentence):
    """Build the complete weighted arc graph for *sentence*.

    feature_function -- callable producing a sparse feature vector per arc
    w                -- current (sparse) weight vector
    sentence         -- list of (word, tag) pairs

    Returns {i: {j: -score}} over all ordered pairs i != j; scores are
    negated because the MST solver minimises while we want the maximum.
    """
    length = len(sentence)
    graph = {}
    for src in range(length):
        edges = {}
        for dst in range(length):
            if src == dst:
                continue
            fv = feature_function(sentence[src][0], sentence[src][1],
                                  sentence[dst][0], sentence[dst][1], src, dst)
            edges[dst] = -fv.sparse_dot_by_sparse(w)
        graph[src] = edges
    return graph
# compute and sum all the feature-vectors of all the archs in a graph
def sum_tree(tree, tagged_sent, feature_function):
    """Sum the feature vectors of every arc in *tree*.

    tree -- {head: {dependent: weight}} over word indices of tagged_sent
    Returns a sparse vector of length N**2 + T**2.
    """
    total = sparse_vector([], N ** 2 + T ** 2)
    for head, dependents in tree.items():
        for dep in dependents:
            arc_features = feature_function(tagged_sent[head][0], tagged_sent[head][1],
                                            tagged_sent[dep][0], tagged_sent[dep][1],
                                            head, dep)
            total.add(arc_features)
    return total
# convert dependency-tree to tree of the type dict-in-dict, as the mst algo' return
def to_tree(prs_sent):
    """Convert a parsed dependency graph into {head: {dependent: 0}} form.

    Collects the dependents listed under the '' and 'ROOT' relation labels
    of each node.  Fix: uses dict.get so nodes that lack either label are
    simply skipped instead of raising KeyError (and, when 'deps' is a
    defaultdict, without inserting empty entries as the old d[''] indexing
    silently did).
    """
    tree = {}
    for head in prs_sent.nodes:
        tree[head] = {}
        deps = prs_sent.nodes[head]['deps']
        for dep in deps.get('', []):
            tree[head][dep] = 0
        for dep in deps.get('ROOT', []):
            tree[head][dep] = 0
    return tree
# the perceptron learning algorithem
def perceptron(learning_rate=1,itertations=2, dist_feature=False):
    """Train arc weights with the averaged structured perceptron.

    learning_rate -- scale applied to each (gold - predicted) update
    itertations   -- number of passes over the (shuffled) training set
    dist_feature  -- when True, use the feature set with the distance bucket
    Returns the averaged weight vector (a sparse_vector).
    """
    # the weight vector is sparse, for convinient and running time reasons.
    # NOTE(review): with dist_feature=True the feature vectors are length
    # N**2 + T**2 + 4 while `weight` is created with N**2 + T**2 — confirm
    # sparse_vector tolerates the length mismatch on add().
    weight=sparse_vector([],N**2+T**2)
    # one fixed random order, reused for every pass
    rand_iter = list(range(len(train_parsed)))
    random.shuffle(rand_iter)
    if not dist_feature:
        feature_function_to_use = feature_function
    else:
        feature_function_to_use = feature_function_w_dist
    # running sum of weight vectors, for averaging at the end
    w_sum = sparse_vector([],N**2+T**2)
    for i in range(itertations):
        for j in rand_iter:
            # decode with the current weights: full arc graph -> MST
            G = sentence_to_full_graph(feature_function_to_use, weight, train_tagged[j])
            T_opt=mst.mst(0,G)
            t = to_tree(train_parsed[j])  # gold tree
            # perceptron update: features(gold) - features(predicted)
            temp=sum_tree(t, train_tagged[j], feature_function_to_use)
            temp.sub(sum_tree(T_opt, train_tagged[j],feature_function_to_use))
            temp.mult_by_scalar(learning_rate)
            weight.add(temp)
            w_sum.add(weight)
    # average over all updates (averaged perceptron)
    w_sum.mult_by_scalar(1.0 / (len(train_tagged) * itertations))
    return w_sum
# score a single sentence
def score_sent(w_train, tagged_sent, feature_function, gold_tree):
    """Fraction of gold arcs recovered by the MST parse of one sentence.

    w_train   -- trained weight vector
    tagged_sent -- (word, tag) pairs for the sentence
    gold_tree -- the gold parsed sentence (converted via to_tree)
    """
    graph = sentence_to_full_graph(feature_function, w_train, tagged_sent)
    predicted = mst.mst(0, graph)
    gold = to_tree(gold_tree)
    correct = 0
    for head, dependents in gold.items():
        if head not in predicted:
            continue
        for dep in dependents:
            if dep in predicted[head]:
                correct += 1
    return correct / len(tagged_sent)
# score the whole test set
def score(w_train, feature_function, test_gold, test_tag):
    """Average per-sentence attachment score over the whole test set."""
    total = sum(
        score_sent(w_train, test_tag[idx], feature_function, test_gold[idx])
        for idx in range(len(test_gold))
    )
    return total / len(test_gold)
# Driver: time the training run and the evaluation run separately.
import time
start_time = time.time()
w = perceptron(dist_feature=True)
print("--- %s seconds for learning ---" % (time.time() - start_time))
start_time = time.time()
print("score for learning with the distance feature function is ", score(w, feature_function_w_dist, test_parsed, test_tagged))
print("--- %s seconds for evaluation ---" % (time.time() - start_time))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 30 17:34:28 2019
@author: amie
"""
import picdata as pic
import numpy as np
from numba import jit
import time
import math
import matplotlib.pyplot as plt
#from pca import PCA_face_example
import pickle
import cv2
import os
class picprocess():
    """SOM / Hebbian-learning pipeline for corner-motion prediction.

    Hyper-parameters are read from config_FGV.txt (see readconfig).  The
    class either trains two coupled self-organising populations on saved
    gradient/flow data (speed_up_som) or runs a previously saved model to
    predict corner motion over a video (predict_corner).
    """
    def __init__(self):
        # defaults, all overwritten below by the values from config_FGV.txt
        self.strain = 1
        self.sN_NEURONS = 100
        self.sN_NEURONS_1 = 100
        self.sETA = 1.0
        self.sXI = 1.0
        self.strain_round = 100
        self.strainpath = 'trainpicpath.txt'
        self.stestpath = 'testpicpath.txt'
        # config values are positional: train flag, two neuron counts,
        # ETA, XI, training rounds, train/test picture path files
        value = self.readconfig('config_FGV.txt')
        self.strain = int(value[0])
        self.sN_NEURONS = int(value[1])
        self.sN_NEURONS_1 = int(value[2])
        self.sETA = float(value[3])
        self.sXI = float(value[4])
        self.strain_round = int(value[5])
        self.strainpath =value[6]
        self.stestpath = value[7]
    def generateGxGyVF(self):
        """Load the pickled Gx/Gy gradient, F flow and V intensity arrays
        from the pic_Gx / pic_Gy / pic_F / pic_V directories.

        Returns four lists of arrays (one entry per file found).
        """
        Gx = []
        Gy = []
        F = []
        V = []
        for fpath,dirname,file in os.walk('pic_Gx'):
            for f in file:
                f = 'pic_Gx/' + f
                with open(f,'rb') as outfile:
                    data = pickle.load(outfile)
                    Gx.append(data)
        for fpath,dirname,file in os.walk('pic_Gy'):
            for f in file:
                f = 'pic_Gy/' + f
                with open(f,'rb') as outfile:
                    data = pickle.load(outfile)
                    Gy.append(data)
        for fpath,dirname,file in os.walk('pic_F'):
            for f in file:
                f = 'pic_F/' + f
                with open(f,'rb') as outfile:
                    data = pickle.load(outfile)
                    F.append(data)
        for fpath,dirname,file in os.walk('pic_V'):
            for f in file:
                f = 'pic_V/' + f
                with open(f,'rb') as outfile:
                    data = pickle.load(outfile)
                    V.append(data)
        return Gx,Gy,F,V
    def generatetraindata(self,Gx,Gy,F,V):
        """Flatten per-pixel gradients/flow into training pairs.

        Returns (Gxyv, Fxy): Gxyv rows are [Gx, Gy, V] inputs, Fxy rows
        are the corresponding [Fx, Fy] flow targets.
        """
        Gxyv = []
        Fxy = []
        for i in range(len(Gx)):
            print('Gxshape:',Gx[i].shape)
            print('Fshape:',F[i].shape)
            for m in range(Gx[i].shape[0]):
                for n in range(Gx[i].shape[1]):
                    gxyv = [Gx[i][m,n],Gy[i][m,n],V[i][m,n]]
                    fxy = [F[i][m,n,0],F[i][m,n,1]]
                    Gxyv.append(gxyv)
                    Fxy.append(fxy)
        Gxyv = np.array(Gxyv)
        Fxy = np.array(Fxy)
        return Gxyv,Fxy
    def readconfig(self,configpath):
        """Read a key=value config file and return the values, in file
        order, with trailing newlines stripped."""
        value = []
        with open(configpath,'r') as config:
            for line in config.readlines():
                line = line.split('=')
                value.append(line[1].split('\n')[0])
        return value
    def cosine_dis(self, x, y):
        """Row-wise cosine similarity between vector x and each row of y."""
        num = (x*y).sum(axis=1)
        denom = np.linalg.norm(x) * np.linalg.norm(y,axis=1)
        return num/denom
    def predict_corner(self):
        """Run the saved model on a video: track ShiTomasi corners and
        predict their per-frame motion from the learned populations,
        drawing the resulting tracks on screen."""
        # initialize the parameters
        population_a = np.zeros((self.sN_NEURONS,1))
        population_s = np.ones((self.sN_NEURONS,1))*0.045
        wcross = np.random.uniform(0,1,(self.sN_NEURONS,self.sN_NEURONS_1))
        population_Wcross = wcross / wcross.sum()
        population_Winput = np.random.random((self.sN_NEURONS,3))/10.0
        population_Winput_1 = np.random.random((self.sN_NEURONS_1,2))/10.0
        # load the model (overwrites the random initialisation above)
        with open('Weight data/train_vgf_300_1000/populations_Wcross799.pkl','rb') as file:
            population_Wcross = pickle.load(file)
        with open('Weight data/train_vgf_300_1000/populations_Winput799.pkl','rb') as file1:
            population_Winput = pickle.load(file1)
        with open('Weight data/train_vgf_300_1000/population_Winput_1799.pkl','rb') as file2:
            population_Winput_1 = pickle.load(file2)
        with open('Weight data/train_vgf_300_1000/populations_s799.pkl','rb') as file3:
            population_s = pickle.load(file3)
        # show the HL matrix
        plt.imshow(population_Wcross)
        cap = cv2.VideoCapture('slow_traffic_small.mp4')
        # params for ShiTomasi corner detection
        feature_params = dict( maxCorners = 100,
                               qualityLevel = 0.3,
                               minDistance = 7,
                               blockSize = 7 )
        color = np.random.randint(0,255,(100,3))
        # Take first frame and find corners in it
        ret, old_frame = cap.read()
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
        p_time = p0[:,0,:]
        # Create a mask image for drawing purposes
        mask = np.zeros_like(old_frame)
        count = 1
        while(1):
            sensory_x = []
            count+=1
            ret,frame = cap.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # spatial gradients of the previous frame and temporal difference
            dx = cv2.Sobel(old_gray,cv2.CV_16S,1,0)
            dy = cv2.Sobel(old_gray,cv2.CV_16S,0,1)
            VI = frame_gray - old_gray
            # upsample x10 so sub-pixel corner positions index a cell
            dx = cv2.resize(dx,(6400,3600))
            dy = cv2.resize(dy,(6400,3600))
            VI = cv2.resize(VI,(6400,3600))
            p0 = p0[:,0,:]
            good_old_around = (p0 * 10).astype(np.int64)
            for i in range(len(good_old_around)):
                # clamp coordinates to the upsampled image bounds
                if good_old_around[i][1] >= 3600:
                    good_old_around[i][1] = 3599
                if good_old_around[i][0] >= 6400:
                    # NOTE(review): this writes index [1] but the test checks
                    # index [0] — looks like it should be [i][0] = 6399; confirm.
                    good_old_around[i][1] = 6399
                x = dx[(good_old_around[i][1]),(good_old_around[i][0])]
                y = dy[(good_old_around[i][1]),(good_old_around[i][0])]
                vi = VI[(good_old_around[i][1]),(good_old_around[i][0])]*4
                sensory_x.append(np.array([x,y,vi])/1020.0)
            sensory_x = np.array(sensory_x)
            act_cur1 = np.zeros((100,1))
            x_drection = []
            for i in range(sensory_x.shape[0]):
                input_sample = sensory_x[i].reshape(1,-1)
                # Gaussian tuning-curve activation of population 1
                temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/100).reshape(-1,1)
                act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2)))
                act_cur_sum = act_cur1.sum()
                if act_cur_sum == 0 :
                    print('act_cur.sum() is less than 1e-323,ignore the update!')
                act_cur1 = act_cur1 / act_cur_sum
                population_a = act_cur1
                # get the position of winner neuron
                win_pos = population_a[:,0].argmax()
                pre_pos = population_Wcross[win_pos,:].argmax()
                # decode the HL matrix
                a1 = population_Winput_1[pre_pos]
                x_drection.append(a1)
            x_drection = np.array(x_drection)
            good_old = p_time
            good_new = p_time + x_drection
            # draw the predicted motion of every tracked corner
            for i,(new,old) in enumerate(zip(good_new,good_old)):
                a,b = new.ravel().astype(np.float32)
                c,d = old.ravel().astype(np.float32)
                mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
                frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)
            img = cv2.add(frame,mask)
            # rebuild the optical flow picture
            cv2.imshow('rebuild flow',img)
            cv2.waitKey(100)
            p_time = good_new
            p0 = good_new[:,np.newaxis,:]
    def calmove(self,coordiante,coordinate_new):# [x1,y1],[float1,float2]
        """Snap a fractional new position to the nearest of the 9 integer
        cells around *coordiante* (3x3 neighbourhood, squared distance)."""
        co = []
        x0y0 = [coordiante[0] - 1, coordiante[1] - 1]
        x0y1 = [coordiante[0] - 1, coordiante[1]]
        x0y2 = [coordiante[0] - 1, coordiante[1] + 1]
        x1y0 = [coordiante[0] , coordiante[1] - 1]
        x1y1 = [coordiante[0] , coordiante[1]]
        x1y2 = [coordiante[0] , coordiante[1] + 1]
        x2y0 = [coordiante[0] + 1, coordiante[1] - 1]
        x2y1 = [coordiante[0] + 1, coordiante[1]]
        x2y2 = [coordiante[0] + 1, coordiante[1] + 1]
        co.append(x0y0)
        co.append(x0y1)
        co.append(x0y2)
        co.append(x1y0)
        co.append(x1y1)
        co.append(x1y2)
        co.append(x2y0)
        co.append(x2y1)
        co.append(x2y2)
        co = np.array(co)
        x1y1 = np.array(coordinate_new)
        co_x1y1 = co -x1y1
        co_x1y1 = np.power(co_x1y1,2)
        co_x1y1_sum = np.sum(co_x1y1,axis=1)
        posmax = co_x1y1_sum.argmin()
        print(co[posmax])
        return co[posmax]
    def parametrize_learning_law(self, v0, vf, t0, tf):
        """Return a decaying schedule y(t) = A/(t+B) over t = 1..tf that
        interpolates from v0 (at t0) towards vf (at tf)."""
        # NOTE(review): the zeros array below is immediately overwritten
        # by the list comprehension — it has no effect.
        y = np.zeros((tf-t0,1))
        t = [i for i in range(1,tf+1)]
        B = (vf*tf - v0*t0)/(v0 - vf)
        A = v0*t0 + B*v0
        y = [A/(t[i]+B) for i in range(len(t))]
        return y
    def speed_up_som(self):
        """Train the two coupled SOM populations (inputs and flow targets)
        plus their Hebbian cross-weight matrix, saving checkpoints."""
        # get the training data
        # NOTE(review): uses the module-level global `a` instead of `self` —
        # works only when run via the __main__ block below; confirm intent.
        Gx,Gy,F,V = a.generateGxGyVF()
        gxyv,fxy = a.generatetraindata(Gx,Gy,F,V)
        DxyvUxy = np.zeros((8720,5))
        with open('DxyvUxy.pkl','rb') as file:
            load = pickle.load(file)
            DxyvUxy = np.array(load)
        # Normalize the data
        sensory_x = DxyvUxy[:,0:3] / 1020.0
        sensory_y = DxyvUxy[:,3:5]
        # initialize the parameters
        N_NEURONS = self.sN_NEURONS # sensor1
        N_NEURONS_1 = self.sN_NEURONS_1 # sensor2
        population_s = np.ones((N_NEURONS,1))*0.045 # sensor1 tuning curve
        population_a = np.zeros((N_NEURONS,1)) # sensor1 activation value
        wcross = np.random.uniform(0,1,(N_NEURONS,N_NEURONS_1))
        population_Wcross = wcross / wcross.sum() # sensor1 HL matrix
        train_round = self.strain_round
        population_Winput = np.random.random((N_NEURONS,sensory_x.shape[1]))/100.0 # sensor1 weight
        sample_num = sensory_x.shape[0]
        sample_demension = sensory_x.shape[1]
        # decaying neighbourhood width and learning rate schedules
        learning_sigmat = self.parametrize_learning_law(50,1,1,train_round)
        learning_alphat = self.parametrize_learning_law(0.1,0.001,1,train_round)
        ETA = 1.0
        XI = 1e-3
        hwi = np.zeros((N_NEURONS,1))
        population_s_1 = np.ones((N_NEURONS_1,1))*0.045 # sensor2 tuning curve
        population_a_1 = np.zeros((N_NEURONS_1,1)) # sensor1 activation value
        wcross_1 = np.random.uniform(0,1,(N_NEURONS_1,N_NEURONS))
        population_Wcross_1 = wcross_1 / wcross_1.sum() # sensor2 HL matrix
        print(sensory_y.shape)
        population_Winput_1 = np.random.random((N_NEURONS_1,sensory_y.shape[1]))/100.0 # sensor1 weight
        sample_num_1 = sensory_y.shape[0]
        sample_demension_1 = sensory_y.shape[1]
        ETA = 1.0
        XI = 1e-3
        hwi_1 = np.zeros((N_NEURONS_1,1))
        hl_trainround = 100
        avg_act = np.zeros((N_NEURONS,1))
        avg_act_1 = np.zeros((N_NEURONS_1,1))
        # training: SOM phase for the first train_round epochs, then the
        # Hebbian-link (HL) phase continues for hl_trainround more epochs
        for t in range(hl_trainround + train_round):
            if t < train_round:
                for sample_index in range(sample_num):
                    act_cur1 = np.zeros((N_NEURONS,1))
                    act_cur2 = np.zeros((N_NEURONS_1,1))
                    input_sample = sensory_x[sample_index].reshape(1,-1)
                    input_sample_2 = sensory_y[sample_index].reshape(1,-1)
                    temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1)
                    temp1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1)
                    # matrix calculate.All activation values are updated together
                    act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2)))
                    act_cur2 = (1/(np.sqrt(2*np.pi)*population_s_1))*np.exp(-temp1/(2*np.power(population_s_1,2)))
                    act_cur_sum = act_cur1.sum()
                    act_cur_sum1 = act_cur2.sum()
                    if act_cur_sum == 0 or act_cur_sum1 == 0:
                        print('act_cur.sum() is less than 1e-323,ignore the update!')
                        continue
                    act_cur1 = act_cur1 / act_cur_sum
                    act_cur2 = act_cur2 / act_cur_sum1
                    population_a = (1-ETA)*population_a + ETA * act_cur1
                    population_a_1 = (1-ETA)*population_a_1 + ETA * act_cur2
                    win_pos = population_a[:,0].argmax()
                    win_pos1 = population_a_1[:,0].argmax()
                    pos_list = np.arange(0,N_NEURONS,1)
                    pos_list_1 = np.arange(0,N_NEURONS_1,1)
                    # Gaussian neighbourhood around each winner neuron
                    hwi = (np.exp(-np.power(pos_list - win_pos, 2) / (2 * np.power(learning_sigmat[t],2)))).reshape(N_NEURONS,1)
                    hwi_1 = (np.exp(-np.power(pos_list_1 - win_pos1, 2) / (2 * np.power(learning_sigmat[t],2)))).reshape(N_NEURONS_1,1)
                    # matrix calculate.All population_Winput values are updated together
                    population_Winput = population_Winput+ \
                    learning_alphat[t] * hwi * (input_sample - population_Winput)
                    population_Winput_1 = population_Winput_1+ \
                    learning_alphat[t] * hwi_1 * (input_sample_2 - population_Winput_1)
                    # matrix calculate.All population_s values are updated together
                    temp_s = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1)
                    population_s = population_s + \
                    learning_alphat[t] * (1/(np.sqrt(2*np.pi)*learning_sigmat[t])) * \
                    hwi * (temp_s - np.power(population_s,2))
                    temp_s_1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1)
                    population_s_1 = population_s_1 + \
                    learning_alphat[t] * (1/(np.sqrt(2*np.pi)*learning_sigmat[t])) * \
                    hwi_1 * (temp_s_1 - np.power(population_s_1,2))
            print('training:',t/(train_round+hl_trainround))
            # HL matrix training
            for sample_index in range(sample_num):
                act_cur1 = np.zeros((N_NEURONS,1))
                act_cur2 = np.zeros((N_NEURONS_1,1))
                input_sample = sensory_x[sample_index].reshape(1,-1)
                input_sample_2 = sensory_y[sample_index].reshape(1,-1)
                temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1)
                temp1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1)
                # matrix calculate. All activation values are updated together
                act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2)))
                act_cur2 = (1/(np.sqrt(2*np.pi)*population_s_1))*np.exp(-temp1/(2*np.power(population_s_1,2)))
                act_cur_sum = act_cur1.sum()
                act_cur_sum1 = act_cur2.sum()
                if act_cur_sum == 0 or act_cur_sum1 == 0:
                    print('act_cur.sum() is less than 1e-323,ignore the update!')
                    continue
                act_cur1 = act_cur1 / act_cur_sum
                act_cur2 = act_cur2 / act_cur_sum1
                population_a = (1-ETA)*population_a + ETA * act_cur1
                population_a_1 = (1-ETA)*population_a_1 + ETA * act_cur2
                # covariance-style Hebbian update with a decaying average
                OMEGA = 0.002 + 0.998/(t+2)
                avg_act[:,0] = (1-OMEGA)*avg_act[:, 0] + OMEGA*population_a[:,0]
                avg_act_1[:,0] = (1-OMEGA)*avg_act_1[:, 0] + OMEGA*population_a_1[:,0]
                population_Wcross = (1-XI)*population_Wcross + XI*(population_a - avg_act[:, 0].reshape(N_NEURONS,1))*(population_a_1 - avg_act_1[:, 0].reshape(N_NEURONS_1,1)).T
            if t%200 == 199:
                # save the model
                with open('populations_Wcross{}.pkl'.format(t),'wb') as output:
                    pickle.dump(population_Wcross,output)
                with open('populations_Winput{}.pkl'.format(t),'wb') as output1:
                    pickle.dump(population_Winput,output1)
                with open('population_Winput_1{}.pkl'.format(t),'wb') as output2:
                    pickle.dump(population_Winput_1,output2)
                with open('populations_s{}.pkl'.format(t),'wb') as output3:
                    pickle.dump(population_s,output3)
                with open('populations_s_1{}.pkl'.format(t),'wb') as output4:
                    pickle.dump(population_s_1,output4)
# Entry point: train when the config's strain flag is 1, otherwise predict.
# The elapsed wall-clock time is printed at the end.
if __name__ == '__main__':
    a = picprocess()
    start = time.time()
    if a.strain == 1:
        a.speed_up_som()
    else:
        a.predict_corner()
    print(time.time() - start)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-06 14:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the 'host' field from the app1 Info model."""
    dependencies = [
        ('app1', '0007_auto_20161006_1446'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='info',
            name='host',
        ),
    ]
|
from Jumpscale import j
def chat(bot):
    """
    Deploy and register a 3Bot for the logged-in user, driving the chat UI.

    to call http://localhost:5050/chat/session/threebot_deploy

    Flow: check the name is unused, ask for a description, create the
    container on an available machine, start its servers, register it with
    the explorer phonebook and set its identity.  On any failure the
    container is destroyed and the exception re-raised.
    """
    # Markdown progress-bar template; {0} = percentage, {1} = step label.
    progress = """\
# Deploying your 3Bot
{1}
<div class="progress">
  <div class="progress-bar active" role="progressbar" aria-valuenow="{0}"
  aria-valuemin="0" aria-valuemax="100" style="width:{0}%">
    {0}%
  </div>
</div>
"""
    explorer = j.clients.gedis.get(name="explorer", port=8901, host="explorer.testnet.grid.tf")
    # NOTE(review): `token` is never used below — confirm whether reading
    # the digitalocean token has a required side effect or can be removed.
    token = j.clients.digitalocean.provisioning.token_
    user_info = bot.user_info()
    name = user_info["username"]
    email = user_info["email"]
    if not name or not email:
        # NOTE(review): execution continues after this message (no return) —
        # confirm md_show aborts the session, otherwise deployment proceeds
        # with empty credentials.
        bot.md_show("Username or email not found in session. Please log in properly")
    url = f"https://{name}.3bot.testnet.grid.tf"
    deployer = j.tools.threebot_deploy.get()
    try:
        # A container with this name already existing means either a deploy
        # in progress (not yet in the phonebook) or a finished deployment.
        container = deployer.get_by_double_name(name)
        try:
            explorer.actors.phonebook.get(name=name)
        except j.exceptions.RemoteException:
            bot.md_show(
                "There seems to be a deployment in progress. Please check again later or contact support at support@threefold.tech"
            )
        else:
            bot.md_show(
                f"Doublename {name} has already been used to deploy a 3bot. You can find it here [{url}]({url})"
            )
    except j.exceptions.NotFound:
        # no existing container: free to deploy
        pass
    question = "Please enter a description for your 3BOT:"
    description = bot.string_ask(question)
    bot.md_show("Press next to start the deployment. This might take several minutes")
    bot.md_show_update(progress.format(0, "Creating 3Bot"))
    machine = deployer.machines.get_available()
    bot.md_show_update(progress.format(10, "Configuring 3Bot"))
    try:
        container = machine.threebot_deploy(name, start=False)
        bot.md_show_update(progress.format(70, "Starting 3Bot"))
        container.start_servers_threebot_zdb_sonic()
        print("Finished installing threebot")
        print("Start registering threebot")
        bot.md_show_update(progress.format(90, "Registering 3Bot"))
        client = container.threebot_client
        client.actors.registration.register(name, email, description)
        print("Setting the identity of the threebotserver")
        record = explorer.actors.phonebook.get(name=name)
        client.actors.registration.set_identity(record.id, record.name, record.email, record.pubkey)
        bot.md_show_update(progress.format(100, "Registering 3Bot completed"))
        bot.md_show(f"# Your 3bot has been registered successfully you can find it here [{url}]({url})")
    except:
        # cleanup on ANY failure (bare except is deliberate here: the
        # container is destroyed and the exception immediately re-raised)
        machine.destroy_container(name)
        raise
|
import matplotlib.pyplot as plt
import numpy as np
from .FluidQuantity import FluidQuantity
from .OutputException import OutputException
class RunawayElectronDensity(FluidQuantity):
    """Fluid quantity for the runaway electron density, with a helper for
    plotting the individual runaway generation rates."""

    def __init__(self, name, data, grid, output, attr=None):
        """
        Constructor.

        attr: optional list of attributes.  Fix: the previous mutable
        default ``attr=list()`` is replaced with None, converted to a
        fresh list per call (backward-compatible).
        """
        if attr is None:
            attr = []
        super().__init__(name=name, data=data, grid=grid, output=output, attr=attr)

    def plotRates(self, r=None, t=None, ax=None, show=True):
        """
        Plot all runaway rates in the same figure.

        r, t: radius/time selection (at least one must be given).
        ax:   existing matplotlib axes to draw into, or None for new axes.
        show: display the figure (non-blocking) when True.

        Returns the axes drawn into.  Raises OutputException when neither
        r nor t is given, or when no 'other' fluid quantities were saved.
        """
        if t is None and r is None:
            raise OutputException("When plotting all runaway rates, at least one of 'r' and 't' must be specified.")
        labels = []
        if 'fluid' not in self.output.other:
            raise OutputException("No 'other' fluid quantities saved in output. Cannot plot runaway rates.")
        # Plot total runaway rate
        if 'runawayRate' in self.output.other.fluid:
            ax = self.output.other.fluid.runawayRate.plot(r=r, t=t, ax=ax, show=False)
            labels.append('Total')
        # Plot avalanche
        if 'GammaAva' in self.output.other.fluid:
            ax = self.output.other.fluid.GammaAva.plotRunawayRate(r=r, t=t, ax=ax, show=False)
            labels.append('Avalanche')
        # Plot every individual 'gamma*' generation rate that is non-zero
        for o in self.output.other.fluid.keys():
            if not o.startswith('gamma'):
                continue
            q = self.output.other.fluid[o]
            # Ignore if empty
            if np.sum(np.abs(q[:])) == 0:
                continue
            ax = q.plot(r=r, t=t, ax=ax, show=False)
            # legend label: strip the 'gamma' prefix, escape underscores
            labels.append(o[5:].replace(r'_', r'\_'))
        plt.ylabel('Runaway rates')
        plt.legend(labels)
        if show:
            plt.show(block=False)
        return ax
|
# -*- coding: utf8 -*-
import io
import collections
from jiendia.io.manipulator import BinaryReader
from jiendia.io.archive.base import BaseArchive, ArchiveMode
# A single sprite rectangle in a TBL group: pattern id, position, rotation
# axis, bounding box and the backing image filename.
Rectangle = collections.namedtuple('Rectangle',
    'pattern, x, y, axis_x, axis_y, left, top, right, bottom, filename')
# One image entry: item id, layer depth and its tuple of Rectangles.
Image = collections.namedtuple('Image',
    'item_id, depth, rectangles')
class TblArchive(BaseArchive):
    u"""Archive holding image information for character bodies, equipment, etc."""
    _DEFAULT_ENCODING = 'utf-8'
    def __init__(self, file, mode = BaseArchive._DEFAULT_MODE, encoding = _DEFAULT_ENCODING):
        # TBL archives are read-only; any other mode is rejected
        if mode != ArchiveMode.READ:
            raise RuntimeError('TBLアーカイブは読み取り専用です')
        BaseArchive.__init__(self, file, mode)
        self._encoding = encoding
        self._images = {}
        if mode in (ArchiveMode.READ, ArchiveMode.UPDATE):
            self._load()
    @property
    def images(self):
        # shallow copy so callers cannot mutate the internal mapping
        return dict(self._images)
    def get_image(self, item_id):
        u"""Return the image information for the given item ID.

        The image information is a three-element tuple of
        {item ID, layer depth, rectangle list}."""
        return self._images[item_id]
    def _load(self):
        # Parse the binary stream: a 4-byte header is skipped, then a group
        # count followed by that many groups of rectangle records.
        reader = BinaryReader(self._stream)
        self._stream.seek(4, io.SEEK_SET)
        group_count = reader.read_int32()
        for _ in range(group_count):
            rectangle_count = reader.read_int32()
            group_id_str = reader.read_string(16, self._encoding)
            self._stream.seek(116, io.SEEK_CUR)  # skip padding after the group id
            # For a group id like 12_3456_78_90: the leading 12 is the layer
            # depth, the remaining digits 34567890 form the item ID.
            item_id = int(''.join(group_id_str.split('_')[1:]))
            depth = int(group_id_str.split('_')[0])
            rectangles = []
            for __ in range(rectangle_count):
                pattern = reader.read_int32()
                x = reader.read_int32()
                y = reader.read_int32()
                axis_x = reader.read_float()
                axis_y = reader.read_float()
                left = reader.read_int32()
                top = reader.read_int32()
                right = reader.read_int32()
                bottom = reader.read_int32()
                filename = reader.read_string(24, self._encoding)
                self._stream.seek(104, io.SEEK_CUR)  # skip padding after each record
                rectangle = Rectangle._make((pattern, x, y, axis_x, axis_y, left, top, right, bottom, filename))
                rectangles.append(rectangle)
            image = Image._make((item_id, depth, tuple(rectangles)))
            self._images[item_id] = image
|
# Count how many of the n input strings are anagrams of 'kabali'.
n = int(input())
s = 'kabali'
# Hoist the loop-invariant sorted target out of the loop.
target_key = sorted(s)
c = sum(1 for _ in range(n) if sorted(input()) == target_key)
print(c)
|
from art import logo
from art import vs
from game_data import data
from random import randint
from replit import clear
def winner(a, b):
    """Return 'a' when a is strictly greater than b, otherwise 'b' (ties go to b)."""
    return 'a' if a > b else 'b'
def game():
    """Run the follower-count guessing game until the player guesses wrong.

    Each round shows two accounts, A and B; the player guesses which has more
    followers. On a correct guess, B carries over as the next round's A.
    """
    print(logo)
    score = 0
    game_end = False

    def _pick():
        # Draw a random account entry from the game data.
        return data[randint(1, 50)]

    def _describe(entry):
        # Human-readable summary of one account entry.
        return f"{entry['name']}, a {entry['description']}, from {entry['country']}."

    selectionA = _pick()
    print(f"Compare A: {_describe(selectionA)}")
    print(vs)
    selectionB = _pick()
    print(f"Against B: {_describe(selectionB)}")
    # Bug fix: the original printed winner(...) here, leaking the correct
    # answer to the player before they guessed. Removed.
    guess = input("Who has more followers? Type 'A' or 'B':").lower()
    while not game_end:
        if guess == winner(selectionA['follower_count'], selectionB['follower_count']):
            clear()
            score += 1
            print(logo)
            print(f"You're right! Current score: {score}.")
            # Bug fix: previously `selectionB = selectionA` was a no-op that
            # was immediately overwritten, so A (name, country, follower
            # count) never changed between rounds. The previous round's B now
            # becomes the new A, as the carry-over comparison intends.
            selectionA = selectionB
            print(f"Compare A: {_describe(selectionA)}")
            print(vs)
            selectionB = _pick()
            print(f"Against B: {_describe(selectionB)}")
            guess = input("Who has more followers? Type 'A' or 'B':").lower()
        else:
            clear()
            game_end = True
            print(logo)
            print(f"Sorry, that's wrong. Final score: {score}")
game()
|
def yashadd(a, b):
    """Print the sum of ``a`` and ``b``."""
    print(a + b)
def yashdictionary(a):
    """Print the programming language recorded for the person named ``a``.

    Raises KeyError when the name is unknown.
    """
    languages = {
        'Yash': 'Python',
        'Satyen': 'C',
        'Akash': 'C++',
        'Bhavesh': 'Java',
        'Sarthak': 'Vanila',
    }
    print(languages[a])
r"""!\file
\ingroup meas
In QCD the Polyakov loop is a Wilson line that wraps the temporal direction,
\f[
\mathcal{P}(\vec{x}) = \textrm{tr}\left[ \prod_{j=0}^{N_t-1} U_{t}(\vec{x},j)\right]
\f]
where the product is time-ordered and the trace yields a color singlet.
The correlation function of Polyakov loops yields access to the static quark potential,
\f[
\left\langle \mathcal{P}(\vec{m}) \mathcal{P}^{\dagger}(\vec{n}) \right\rangle
\propto
\exp\left(-N_t a V(\vec{m},\vec{n})\right) \left[1+\mathcal{O}\left(\exp(-N_t a \Delta E)\right)\right]
\f]
where \f$V\f$ is the potential between static sources on sites \f$\vec{m}\f$ and \f$\vec{n}\f$,
\f$a\f$ is the lattice spacing,
and \f$\Delta E\f$ is the difference between \f$V\f$ and the first excited energy level of a quark-antiquark pair.
For an enlightening derivation and discussion, see Gattringer \& Lang, *Quantum Chromodynamics on the Lattice* (2010), sections 3.3, 4.5.4, 12.1.
Whether the Polyakov loop correlator is related to a static potential in our case is not *a priori* obvious, as there is no equivalent quantity to the plaquette, which encodes the energy of the gauge fields in QCD.
In our case we can calculate the analogous
\f[
\mathcal{P}_{xy} = \prod_{j=0}^{N_t-1} F_j = \exp\left(i\sum_j \phi_{xj}\right) \delta_{xy}
\quad\quad\quad
(\alpha=1)
\f]
a diagonal matrix whose entries are the Polyakov loops for the spatial sites.
(The trace is trivial because the "gauge group" is \f$U(1)\f$).
We define the argument
\f[
\Phi_x = \sum_t \phi_{xt}
\f]
and store it as `Phi_x`. The Polyakov loops are stored as `Polyakov`.
The formation of potentials and correlators are left for analysis.
"""
import numpy as np
import isle
from .measurement import Measurement
from ..h5io import createH5Group
from logging import getLogger
class Polyakov(Measurement):
    r"""!
    \ingroup meas
    Tabulate the Polyakov loop.
    """

    def __init__(self, basis, nt, savePath, configSlice=slice(None, None, None)):
        r"""!
        \param basis Basis of the action (isle.action.HFABasis member).
        \param nt Number of time slices the configuration is reshaped to.
        \param savePath Path in the output file where results are stored.
        \param configSlice Slice of the configurations to measure on.
        """
        super().__init__(savePath, configSlice)
        self.basis = basis
        self.nt = nt
        self.Phi_x = []  # per-configuration sum_t phi_{xt}
        self.P = []      # per-configuration Polyakov loops
        # The direction of the complex phase depends on the basis.
        # Bug fix: the original wrapped these checks in try/except, but an
        # unrecognised basis raises nothing inside the if/elif, so the logging
        # branch never ran and self.forward was silently left unset (causing a
        # confusing AttributeError later). Fail fast instead.
        if self.basis == isle.action.HFABasis.PARTICLE_HOLE:
            self.forward = 1j
        elif self.basis == isle.action.HFABasis.SPIN:
            self.forward = 1
        else:
            getLogger(__name__).error(f"Unknown basis, {self.basis}.")
            raise ValueError(f"Unknown basis, {self.basis}.")

    def __call__(self, stage, itr):
        """!Record the sum_t phi_xt and the Polyakov loops."""
        self.Phi_x.append(np.sum(np.reshape(stage.phi, (self.nt, -1)), axis=0))
        self.P.append(
            np.exp(self.forward*self.Phi_x[-1])
        )

    def save(self, h5group):
        r"""!
        Write both Phi_x and P.
        \param h5group Base HDF5 group. Data is stored in subgroup `h5group/self.savePath`.
        """
        subGroup = createH5Group(h5group, self.savePath)
        subGroup["Phi_x"] = self.Phi_x
        subGroup["Polyakov"] = self.P

    @staticmethod
    def read(h5group):
        r"""!
        Read Phi_x and the Polyakov loops from a file.
        (Marked @staticmethod: the original took no self, so it only worked
        when called on the class; this keeps that working and fixes instance
        calls too.)
        \param h5group HDF5 group which contains the data of this measurement.
        """
        return h5group["Phi_x"][()], h5group["Polyakov"][()]
|
#!/usr/bin/env python
import exercise1
"""
File name : test_exercise1.py
Script in charge of testing the different use cases
W1: Write a regular expression that only matches the string (wherever it occurs): ABC
W2: Write a regular expression that matches any of the following, and no others: ABC ACC ADC
AXC
W3: Write a regular expression that will only match the following string when it is on a line entirely
by itself: ABC
W4: Write a regular expression that matches only the string that starts with 'A' and ends with 'B',
with anything in-between. Make a note of any assumptions you make.
W5: Write a regular expression that matches only the string that starts with 'A' and ends with 'B',
with a run of one or more of either 'XO' or 'OX' in-between. Make a note of any assumptions
you make.
"""
__author__ = 'Rebeca Perez Lainez'
__email__ = 'rebeca.perez.lainez@gmail.com'
def test_find_abc():
    """
    Test use case related to task W1.
    Evaluates texts against a regex that only matches the string ABC
    (wherever it occurs).
    """
    # Positive test cases
    for text in ("Hola ABC me llamo ACBC y leo ABC \n mi ABC ",
                 "ABC",
                 "abc",
                 "abcABC"):
        exercise1.find_abc(text)
    # Negative test cases
    for text in ("Bye", "My name", "123"):
        exercise1.find_abc(text)
def test_match_abc_acc_adc_axc_no_others():
    """
    Test use case related to task W2.
    Evaluates texts against a regex that matches any of ABC ACC ADC AXC
    and no others.
    """
    # Positive test cases
    for text in ("ABC",
                 "ACC",
                 "ADC",
                 "AXC",
                 "ABCACCADCAXC",
                 "ABCACCADCAXC\nabc",
                 "ABCACCADCAXC\tabc"):
        exercise1.match_abc_acc_adc_axc_no_others(text)
    # Negative test cases
    for text in (".", ".ABC", "123"):
        exercise1.match_abc_acc_adc_axc_no_others(text)
def test_match_abc_entire_line():
    """
    Test use case related to task W3.
    Evaluates texts against a regex that only matches ABC when it is on a
    line entirely by itself.
    """
    # Positive test cases
    for text in ("ABC", "abc"):
        exercise1.match_abc_entire_line(text)
    # Negative test cases
    for text in ("\tABC", "123", "abc\n"):
        exercise1.match_abc_entire_line(text)
def test_matches_starts_with_a_ends_with_b():
    """
    Test use case related to task W4.
    Evaluates texts against a regex that matches only strings starting with
    'A' and ending with 'B', with anything in-between.
    """
    # Positive test cases
    for text in ("AB",
                 "ab",
                 "a1b",
                 "aHere this expression is validb",
                 "ABAB"):
        exercise1.matches_starts_with_a_ends_with_b(text)
    # Negative test cases
    for text in ("a\n\tb",
                 "a*b",
                 "Hello",
                 ".",
                 "1",
                 "A\nB\n",
                 "A\nBAB"):
        exercise1.matches_starts_with_a_ends_with_b(text)
def test_matches_starts_with_a_ends_with_b_xo_ox_in_between():
    """
    Test use case related to task W5.
    Evaluates texts against a regex that matches only strings starting with
    'A' and ending with 'B', with a run of one or more 'XO' or 'OX'
    in-between.
    """
    # Positive test cases
    for text in ("AOXB", "AXOB", "axoB"):
        exercise1.matches_starts_with_a_ends_with_b_xo_ox_in_between(text)
    # Negative test cases
    for text in ("ax0B", "aB", "axo\nB", "hello"):
        exercise1.matches_starts_with_a_ends_with_b_xo_ox_in_between(text)
def main():
    """Run every exercise-1 regression check, W1 through W5, in order."""
    for check in (test_find_abc,
                  test_match_abc_acc_adc_axc_no_others,
                  test_match_abc_entire_line,
                  test_matches_starts_with_a_ends_with_b,
                  test_matches_starts_with_a_ends_with_b_xo_ox_in_between):
        check()
if __name__ == '__main__':
main()
|
import time
import sacn
import numpy as np
from pprint import pprint
# sender.manual_flush = False # keep maunal flush off as long as possible, because if it is on, the automatic
# sending of packets is turned off and that is not recommended
# sender.stop() # stop sending out
class SacnSink(object):
    """sACN/E1.31 sink that averages batches of frames into DMX output.

    Frames passed to send() are queued; every ``reduction_rate`` frames the
    queue is averaged, scaled to 8-bit DMX values, and flushed to universe 1.
    """

    def __init__(self, dest_ip="localhost", reduction_rate=3):
        """
        :param dest_ip: unicast destination for the sACN packets.
        :param reduction_rate: number of frames averaged per DMX packet.
        """
        self.reduction_rate = reduction_rate
        self.data_queue = []
        self.sender = sacn.sACNsender(fps=40)
        self.sender.start()
        # Manual flush: packets are only sent when flush() is called in send().
        self.sender.manual_flush = True
        self.sender.activate_output(1)
        self.sender[1].destination = dest_ip

    def send(self, data):
        """Queue one frame; flush the averaged batch when the queue is full.

        :param data: array-like frame of channel values in [0, 1), or None.
        """
        # Bug fix: the original tested `type(data) is None`, which is always
        # False (type() never returns None), so None frames were enqueued and
        # later broke np.mean. Test the value itself.
        if data is None:
            print("Warning: none data")
            return
        self.data_queue.append(data)
        if len(self.data_queue) == self.reduction_rate:
            # Average the queued frames and scale to the 0-255 DMX range.
            # (fixed the `fame_average` typo from the original)
            frame_average = 256 * np.mean(self.data_queue, axis=0)
            frame_average = frame_average.astype(np.uint8)
            self.sender[1].dmx_data = frame_average
            self.sender.flush()
            self.data_queue = []
|
# Linear regression with two input features, written against the
# TensorFlow 1.x API (tf.Session / tf.train; incompatible with TF 2.x eager).
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Features: [hours studied, hours attended]; targets y: scores.
x = [[1., 0], [0, 2.], [3., 0], [0, 4.], [5., 0]]
y = [1, 2, 3, 4, 5]
# Model parameters: weight vector (2x1) and bias, randomly initialised.
w = tf.Variable(tf.random_uniform([2, 1]))
b = tf.Variable(tf.random_uniform([1]))
# Hypothesis: hx = x @ w + b (shape (5, 1)).
hx = tf.matmul(x, w) + b
# Mean squared error loss.
# NOTE(review): hx has shape (5, 1) while y broadcasts as (5,), so hx - y
# broadcasts to (5, 5) — likely unintended; consider reshaping y to (5, 1).
cost = tf.reduce_mean(tf.square(hx - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Run 100 gradient-descent steps, printing the loss each iteration.
for i in range(100):
    sess.run(train)
    print(i, sess.run(cost))
print(sess.run(w))
print(sess.run(b))
# Predict the score for 5 hours of study and 3 hours of attendance.
print(sess.run(tf.matmul([[5., 3.]], w) + b))
|
'''
Created on Jul 2, 2013
@author: emma
'''
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.action_chains import ActionChains
from UnitTesting.page_objects.modals.base_modal import base_modal
class recommend_modal(base_modal):
    """Page object for the 'recommend a book' fancybox modal.

    All interactions go through execute_script because the underlying
    elements are styled/overlaid widgets that reject native clicks.
    """
    def __init__(self, webd_wrap):
        base_modal.__init__(self, webd_wrap)
    def _confirm_modal(self):
        # Wait until the fancybox container is present; fail with a clear message.
        self._webd_wrap.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'fancybox-inner')), 'User modal not present')
    def close_modal(self):
        """Close the modal via its close anchor and wait until it disappears."""
        self._confirm_modal()
        _close = self._webd_wrap._driver.find_element_by_class_name('fancybox-skin').find_element_by_xpath('a')
        # JS click: the anchor is not reliably clickable through the overlay.
        self._webd_wrap._driver.execute_script("(arguments[0]).click()", _close)
        # confirms the modal is gone
        self._webd_wrap.wait.until(EC.invisibility_of_element_located((By.CLASS_NAME, 'fancybox-inner')))
    ########################################################################
    def submit_recommend(self):
        """Fill out and submit the recommend form end-to-end."""
        self._confirm_modal()
        self.enter_email()
        self.click_checkbox()
        self.enter_message()
        self.submit()
    def enter_email(self):
        # Set the recipient email via jQuery val(); the textboxlist widget
        # does not accept plain send_keys input.
        _recommend_email_form = self._webd_wrap._driver.find_element_by_id('recommend-modal').find_element_by_id('recommend-email-form')
        self._webd_wrap._driver.execute_script('$(arguments[0]).val(arguments[1])', _recommend_email_form.find_elements_by_class_name('textboxlist-bit-editable-input')[0], '1@zolabooks.com')
    def click_checkbox(self):
        # Toggle the 'share with followers' checkbox (uniform.js-wrapped input).
        checkbox = self._webd_wrap._driver.find_element_by_id('uniform-share-with-followers').find_element_by_xpath('span/input')
        self._webd_wrap._driver.execute_script("$(arguments[0]).click()", checkbox)
    def enter_message(self):
        # Fill the free-text message field.
        _recommend_email_form = self._webd_wrap._driver.find_element_by_id('recommend-email-form')
        self._webd_wrap._driver.execute_script('$(arguments[0]).val(arguments[1])', _recommend_email_form.find_elements_by_name('message')[0], 'test')
    def submit(self):
        # Submit via the form's footer input, again with a JS click.
        send = self._webd_wrap._driver.find_element_by_id('recommend-modal').find_element_by_id("recommend-email-form").find_element_by_xpath("footer/input")
        self._webd_wrap._driver.execute_script("(arguments[0]).click()", send)
|
from django.db import models
from django.core.exceptions import ValidationError
class TrimCharField(models.CharField):
    """CharField whose values come back from the DB with trailing spaces removed."""
    description = "CharField that ignores trailing spaces in data"

    def from_db_value(self, value, expression, connection, *ignore):
        """Strip trailing whitespace from truthy values loaded from the database."""
        if not value:
            return value
        return value.rstrip()
class CharToBooleanField(models.CharField):
    """Maps a legacy one-character 'Y'/'N' column to Python booleans.

    Reads: 'Y' -> True, anything else -> False, NULL -> None (when nullable).
    Writes: True -> 'Y', False -> 'N', None -> NULL (when nullable).
    """
    def __init__(self, *args, null=True, **kwargs):
        # The backing column is always a single character.
        kwargs['max_length'] = 1
        super().__init__(*args, null=null, **kwargs)
    def from_db_value(self, value, expression, connection, *ignore):
        if self.null and value is None:
            return None
        # Any non-'Y' value (including 'N') reads as False.
        return value == 'Y'
    def get_db_prep_value(self, value, connection, prepared=False):
        if value is True:
            return 'Y'
        elif value is False:
            return 'N'
        elif value is None and self.null:
            return None
        # Reached for non-boolean values, or None on a non-nullable field.
        # NOTE(review): unsure whether raising here (vs. in validate()) is the
        # right layer — confirm against Django's field-validation flow.
        raise ValidationError(
            self.error_messages['invalid_nullable' if self.null else 'null'],
            code='invalid',
            params={'value': value},
        )
    def to_python(self, value):
        # Deserialization passthrough; conversion happens in from_db_value.
        return value
    def get_prep_value(self, value):
        # Lookup-value passthrough; DB formatting happens in get_db_prep_value.
        return value
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-06-07 18:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Video.videos upload field and retargets
    Course.teacher at organizations.Teacher."""
    dependencies = [
        ('course', '0004_course_teacher'),
    ]
    operations = [
        # New file-upload field; files land under MEDIA_ROOT/videos/<year>/<month>.
        migrations.AddField(
            model_name='video',
            name='videos',
            field=models.FileField(default='', upload_to='videos/%Y/%m', verbose_name='视频'),
        ),
        # Teacher FK now cascades deletes from organizations.Teacher.
        migrations.AlterField(
            model_name='course',
            name='teacher',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='organizations.Teacher', verbose_name='所教教师'),
        ),
    ]
|
from core.vectors import PhpCode
from core.module import Module
from core import messages
from core.loggers import log
import random
class Cd(Module):
    """Change current working directory."""
    # Shell alias so `cd` typed at the prompt routes to this module.
    aliases = [ 'cd' ]
    def init(self):
        # Register module metadata and the single optional argument.
        self.register_info(
            {
                'author': [
                    'Emilio Pinna'
                ],
                'license': 'GPLv3'
            }
        )
        self.register_arguments([
            { 'name' : 'dir', 'help' : 'Target folder', 'default' : '.', 'nargs' : '?' }
        ])
    def run(self, args):
        """Change directory on the remote side and record the new cwd.

        '.' is a no-op (just re-reads the cwd); any other target is chdir'd
        to first, and getcwd() only runs if the chdir succeeded (&&).
        """
        chdir = '' if args['dir'] == '.' else "@chdir('%s')&&" % args['dir']
        folder = PhpCode("""${chdir}print(@getcwd());""", "chdir").run({ 'chdir' : chdir })
        if folder:
            # Store cwd used by other modules
            self._store_result('cwd', folder)
        else:
            # Empty result means the remote chdir failed; warn, don't raise.
            log.warning(
                messages.module_file_cd.failed_directory_change_to_s %
                (args['dir']))
    def run_alias(self, line, cmd):
        # Run this alias independently from the shell_sh status
        return self.run_cmdline(line)
|
"""Launch transfer attacks for poison ids 0..49 on one GPU.

Usage: python <script> <gpu 0-3> <convex|mean> <poison_num>
"""
import sys
import time
import subprocess

gpu = int(sys.argv[1])
mode = sys.argv[2]
net_repeat = 1  # formerly taken from sys.argv[3]
poison_num = sys.argv[3]

# Validate CLI arguments explicitly: `assert` is stripped under python -O.
if mode not in ('convex', 'mean'):
    raise SystemExit("mode must be 'convex' or 'mean', got {!r}".format(mode))
if not 0 <= gpu <= 3:
    raise SystemExit("gpu must be in 0..3, got {}".format(gpu))

print("--net-repeat set to {}".format(net_repeat))
# Grace period to abort before the long-running jobs start.
time.sleep(5)

start_id = 0
end_id = 49
# Plain counted loop (replaces the original while + `if True:` scaffolding).
for i in range(start_id, end_id + 1):
    cmd = 'bash launch/attack-transfer-18.sh {} {} {} {} {}'.format(gpu, mode, i, net_repeat, poison_num)
    print(cmd)
    subprocess.run(cmd.split())
|
import unittest
from katas.kyu_8.vowel_remover import shortcut
class ShortcutTestCase(unittest.TestCase):
    """Unit test for the kyu-8 vowel-remover kata's shortcut()."""
    def test_equals(self):
        # shortcut() should drop all vowels from the input string.
        self.assertEqual(shortcut('hello'), 'hll')
|
from sqlalchemy import create_engine, String, Integer, Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Connection URL for the demo database.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or configuration before sharing/deploying this script.
db_name = "mysql+pymysql://root:123456@localhost:3306/sqlalchemy_study"
# Declarative base class that all ORM models derive from.
Base = declarative_base()
class User(Base):
    """ORM model for the 'user' table: integer primary key plus a name."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
# Create the engine, materialise any missing tables, and open a session.
engine = create_engine(db_name)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
|
# Product Sum
# Input is an array which can contain another arrays inside it.
# Return the sum of the products within the passed array list.
# Example: [x, [y,z]], output should be x + 2*(y + z)
def product_sum(array, multiplier=1):
    """Return the "product sum" of a possibly nested list of numbers.

    Each element contributes its value multiplied by its nesting depth:
    for [x, [y, z]] the result is x + 2*(y + z).

    :param array: list containing numbers and/or nested lists.
    :param multiplier: current nesting depth (1 for the outermost call).
    :return: the depth-weighted sum.
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    for element in array:
        # isinstance is the idiomatic type check (replaces `type(...) is list`).
        if isinstance(element, list):
            total += product_sum(element, multiplier + 1)
        else:
            total += element
    return total * multiplier
# def product_sum(array, multiplier = 1):
# sum = 0
# for element in array:
# if type(element) is list:
# sum += product_sum(element, multiplier + 1)
# else:
# sum += element
# return sum * multiplier
# Smoke test: the expected product sum of this nested example is 12.
output = product_sum([5,2,[7,-1], 3,[6,[-13,8], 4]])
print(output)
|
#!/usr/bin/env python3
import sys
from util.aoc import file_to_day
from util.input import load_data
INC = 8
def main(test=False):
    """Solve AoC 2021 day 20: run 50 image-enhancement steps, reporting the
    lit-pixel count after 2 steps (part 1) and after 50 (part 2).

    :param test: load the sample input instead of the real one.
    """
    data = load_data(file_to_day(__file__), test)
    # First input line is the 512-character enhancement lookup string.
    filter_ = [c for c in data[0]]
    # The image is stored sparsely as a set of lit (x, y) coordinates.
    image = set()
    for y, row in enumerate(data[2:]):
        for x, cell in enumerate(row):
            if cell == "#":
                image.add((x, y))
    p1 = 0
    for x in range(50):
        if x == 2:
            p1 = len(image)
        # With '#' at filter index 0 the infinite background flips every step;
        # `on` tracks which polarity the stored set currently represents.
        on = x % 2 == 0
        image = transpose(image, on, filter_)
    print("2021:20:1 =", p1)
    print("2021:20:2 =", len(image))
def transpose(image, default, noisefilter):
    """Apply one enhancement step to the sparse image.

    ``default`` is the polarity the incoming set represents; the returned set
    holds pixels of the opposite polarity, scanned over a padded bounding box.
    """
    ys = [y for _, y in image]
    xs = [x for x, _ in image]
    result = set()
    for y in range(min(ys) - 5, max(ys) + 10):
        for x in range(min(xs) - 5, max(xs) + 10):
            # Fold the 3x3 neighbourhood into a 9-bit filter index, MSB first.
            index = 0
            for coord in get_coords(x, y):
                match = (coord[0], coord[1]) in image
                index = (index << 1) | (1 if match == default else 0)
            if (noisefilter[index] == "#") != default:
                result.add((x, y))
    return result
def get_coords(x, y):
    """Return the 3x3 neighbourhood of (x, y) row by row, centre included."""
    neighbourhood = []
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            neighbourhood.append((x + dx, y + dy))
    return neighbourhood
def print_image(image):
    """Render the sparse pixel set to stdout as '#'/'.' rows."""
    xs = [x for x, _ in image]
    ys = [y for _, y in image]
    for y in range(min(ys), max(ys) + 1):
        row = "".join("#" if (x, y) in image else "."
                      for x in range(min(xs), max(xs) + 1))
        print(row)
if __name__ == "__main__":
    # Pass "test" as the first CLI argument to run against the sample input.
    test = len(sys.argv) > 1 and sys.argv[1] == "test"
    main(test)
|
from datetime import datetime
import random
import pymysql
from Django_shop.settings import *
from django.http import HttpResponse
class Customer:
    """Point-of-sale cart: scans products by barcode, totals them, and
    submits the completed sale to the database."""
    # Constructor
    def __init__(self):
        self.serialnum = ""  # generated receipt serial number
        self.buy_list = []  # products scanned so far
        self.total_number = 0  # total item count
        self.total_money = 0.0  # total amount due
        self.receive_money = 0.0  # amount received from the customer
        self.return_money = 0.0  # change to return
    def get_serialnum(self):
        """
        Generate the receipt serial number (timestamp + 4 random digits).
        :return:
        """
        # Current system time
        dt = datetime.now()
        # Timestamp portion of the serial number
        self.serialnum = '%04d%02d%02d%02d%02d%02d' % (dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second)
        # 4-digit random number
        temp = random.randint(0, 9999)
        # Appended to the end of the serial number
        self.serialnum += '%04d' % (temp)
    def get_procduct_by_barcode(self, barcode):
        """
        Find a product in the database by its barcode and add it to the cart.
        (Method name keeps the historical 'procduct' typo; callers rely on it.)
        :param barcode:
        :return:
        """
        # Open the database connection
        mysql_db = pymysql.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
        # Create a cursor
        cursor = mysql_db.cursor()
        # Build the SQL statement
        # NOTE(review): SQL is assembled by %-formatting user-supplied input;
        # use a parameterised query (cursor.execute(sql, (barcode,))) to
        # prevent SQL injection.
        sql = " Select ProductId,BarCode,ProductName,Unit,UnitPrice from Product " \
              " where BarCode = '%s' " % (barcode)
        try:
            # Execute the SQL
            cursor.execute(sql)
            # Fetch the single matching row
            product = cursor.fetchone()  # tuple
            # Add it to the cart
            self.add_product_to_buylist(product)
        except Exception as e:
            # Record the failure reason (message text kept as-is, in Chinese).
            self.error_info = "联系数据库出现异常,具体原因:" + str(e)
        finally:
            mysql_db.close()
    def add_product_to_buylist(self, product):
        """
        Add a product row to buy_list, incrementing its count if present.
        :return:
        """
        # Build the cart entry for this scan (one unit)
        temp_dict = {
            "ProductId": product[0],
            "ProductName": product[2],
            "Unit": product[3],
            "UnitPrice": product[4],
            "Number":1,
            "Money": product[4]
        }
        # Append to the list
        if len(self.buy_list) == 0:
            self.buy_list.append(temp_dict)
        else:
            # Walk the current buy_list
            for index in range(len(self.buy_list)):
                # Is the product already in the cart?
                if temp_dict['ProductId'] == self.buy_list[index]['ProductId']:
                    self.buy_list[index]["Number"] += 1
                    self.buy_list[index]["Money"] = self.buy_list[index]["UnitPrice"] * self.buy_list[index]["Number"]
                    break
                # Not found by the last entry: append as a new line item
                if index == len(self.buy_list)-1:
                    self.buy_list.append(temp_dict)
    def delete_product_from_buylist(self,productId):
        """
        Remove a product from the cart.
        :param productId:
        :return:
        """
        # Walk the buy_list
        for index in range(len(self.buy_list)):
            # Match on product id
            if self.buy_list[index]["ProductId"] == productId:
                self.buy_list.pop(index)
                break
    def get_total_info(self):
        """
        Recompute the total item count and total amount for the cart.
        :return:
        """
        self.total_number = 0
        self.total_money = 0.0
        # Sum over all line items
        for product in self.buy_list:
            self.total_number += product["Number"]
            self.total_money += product["Money"]
        # Round the amount to two decimal places
        self.total_money = round(self.total_money,2)
    def get_receive_return_money(self, receive):
        """
        Record the received amount and compute the change.
        :param receive:
        :return:
        """
        self.receive_money = round(float(receive), 2)
        self.return_money = round(self.receive_money - self.total_money,2)
    def submit(self, loginId):
        """
        Persist the sale: insert into SalesList and SalesListDetail, and
        decrement Product inventory, via a list of SQL statements.
        :return:
        """
        # Open the database connection
        mysql_db = pymysql.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
        # Create a cursor
        cursor = mysql_db.cursor()
        # Collect all SQL statements: one SalesList insert, then per product
        # a SalesListDetail insert and a Product inventory update.
        # NOTE(review): all statements below interpolate values with
        # %-formatting — parameterised queries would be safer.
        sql_list = []
        # Insert into SalesList --- one row
        sql = "Insert Into SalesList (SerialNumber,TotalNumber,TotalPrice,ReceiveMoney,ReturnMoney,LoginId,BuyTime) " \
              "Values('%s',%d,%.2f,%.2f,%.2f,'%s','%s')" %(self.serialnum,self.total_number,self.total_money,
                                                           self.receive_money,self.return_money,loginId,datetime.now())
        sql_list.append(sql)
        # Insert into SalesListDetail ---- one row per product
        # Update inventory --- one Product update per product
        for product in self.buy_list:
            # Insert into SalesListDetail
            sql01 = "Insert Into SalesListDetail(SerialNumber, ProductId, ProductName, Unit, UnitPrice, Number, Money) " \
                    "Values('%s','%s','%s','%s',%.2f,%d,%.2f)" %(self.serialnum, product['ProductId'],product['ProductName'],
                                                                 product['Unit'],product['UnitPrice'],product['Number'],product['Money'])
            sql_list.append(sql01)
            # Update Product inventory
            sql02 = "Update Product Set Inventory=Inventory-%d where ProductId='%s'" % (product['Number'],product['ProductId'])
            sql_list.append(sql02)
        try:
            # Execute every collected statement
            for sql in sql_list:
                # Execute the SQL
                cursor.execute(sql)
                # Commit the change
                # NOTE(review): committing per statement forfeits atomicity of
                # the whole sale — confirm whether a single commit after the
                # loop was intended.
                mysql_db.commit()
        except Exception as e:
            # rollback the transaction
            mysql_db.rollback()
            # Report the error (message text kept as-is, in Chinese)
            return HttpResponse("提交出现异常,具体原因:" + str(e))
        finally:
            mysql_db.close()
if __name__ == '__main__':
    # Smoke test: scan one barcode and show the resulting cart.
    obj = Customer()
    obj.get_procduct_by_barcode("6907992500133" )
    # Bug fix: the original wrapped print() in another print(), which printed
    # the cart and then a spurious "None" (print returns None).
    print(obj.buy_list)
import urllib.request
import json
from urllib import parse
def download5(url, free_proxy=None, user_agent='test', num_retries=2, data=None):
    """Download `url`, optionally through an HTTP proxy, retrying on 5xx.

    :param url: address to fetch.
    :param free_proxy: optional proxy address, e.g. "1.2.3.4:8080".
    :param user_agent: kept for interface compatibility; the fixed mobile
        User-Agent below deliberately overrides it.
    :param num_retries: remaining retry attempts for HTTP 5xx responses.
    :param data: optional POST body (bytes); None issues a GET.
    :return: raw response bytes, or None on failure.
    """
    # Bug fix: the header key was "User_agent" (underscore), which is not the
    # standard User-Agent header and is ignored by servers.
    headers = {"User-Agent": "Mozilla/5.0 (Linux; U; Android 2.3.6; zh-cn; GT-S5660 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1 MicroMessenger/4.5.255"}
    # Attach the headers (and optional POST body) to the request
    request = urllib.request.Request(url, data, headers=headers)
    # Build an opener we can attach a proxy handler to
    opener = urllib.request.build_opener()
    if free_proxy:
        # Map the URL's scheme to the proxy address
        proxy_params = {urllib.request.urlparse(url).scheme: free_proxy}
        opener.add_handler(urllib.request.ProxyHandler(proxy_params))
    try:
        # Open the page and read its content
        html5 = opener.open(request).read()
    except urllib.request.URLError as e:
        print("download error", e.reason)
        html5 = None
        if num_retries > 0:
            # Retry only on server errors (HTTP 5xx)
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # Bug fix: the retry previously dropped `data`, silently
                # turning a retried POST into a GET.
                html5 = download5(url, free_proxy, user_agent, num_retries - 1, data)
    return html5
# Module-level smoke test: fetch a store-locator page (requires network).
html = download5("http://wx.sephora.cn/index.php?m=Nearby&a=index&provinceid=6&cityid=76")
print(html)
# province =['http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=北京市&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=天津市&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=河北省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=山西省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=内蒙古自治区&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=辽宁省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=吉林省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=黑龙江省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=上海市&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=江苏省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=浙江省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=安徽省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=福建省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=江西省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=山东省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=河南省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=湖北省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=湖南省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=广东省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=广西壮族自治区&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=海南省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=重庆市&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=四川省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=贵州省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=云南省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=西藏自治区&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=陕西省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=甘肃省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=青海省&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=宁夏回族自治区&city=&_=1516174273975',
# 'http://location.chcedo.com/interface/index.php?c=api&m=get_store_list&callback=jQuery110109367562047222278_1516174273973&province=新疆维吾尔自治区&city=&_=1516174273975'
# ]
# f = open('Chando-s180117.csv', "w+", encoding="utf-8")
# try:
# for url in province:
# url = parse.quote(url, safe='/:?=&_.')
# html = download5(url)
# html=str(html).replace('jQuery110109367562047222278_1516174273973(','')
# html =str(html).replace(')', '')
# data = json.loads(html)
# re = data["data"]["store_list"]
# print(html)
#
# for store in re:
# for k, v in store.items():
# f.write(v)
# f.write(',')
# f.write('\n')
# f.close()
# except:
# print('erro')
|
"""
Week 5, Day 2: K Closest Points to Origin
We have a list of points on the plane. Find the K closest points to the origin (0, 0).
(Here, the distance between two points on a plane is the Euclidean distance.)
You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is in.)
Example 1:
Input: points = [[1,3],[-2,2]], K = 1
Output: [[-2,2]]
Explanation:
The distance between (1, 3) and the origin is sqrt(10).
The distance between (-2, 2) and the origin is sqrt(8).
Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.
We only want the closest K = 1 points from the origin, so the answer is just [[-2,2]].
Example 2:
Input: points = [[3,3],[5,-1],[-2,4]], K = 2
Output: [[3,3],[-2,4]]
(The answer [[-2,4],[3,3]] would also be accepted.)
Note:
1 <= K <= points.length <= 10000
-10000 < points[i][0] < 10000
-10000 < points[i][1] < 10000
"""
import json
from typing import List
class Solution:
    def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
        """Return the K points closest to the origin, nearest first.

        Sorting by squared distance avoids the sqrt while giving the same order.
        """
        def squared_distance(point: List[int]) -> int:
            return point[0] * point[0] + point[1] * point[1]

        ordered = sorted(points, key=squared_distance)
        return ordered[:K]
def equals(A: List[List[int]], B: List[List[int]]) -> bool:
    """Order-insensitive comparison of two lists of 2-element points."""
    return set(map(tuple, A)) == set(map(tuple, B))
if __name__ == '__main__':
    # Ad-hoc test driver: each print should output True.
    o = Solution()

    points = [[1, 3], [-2, 2]]
    K = 1
    expected = [[-2, 2]]
    print(equals(o.kClosest(points, K), expected))

    points = [[3, 3], [5, -1], [-2, 4]]
    K = 2
    expected = [[3, 3], [-2, 4]]
    print(equals(o.kClosest(points, K), expected))

    points = [[3, 3], [5, -1], [2, 4], [-2, 4]]
    K = 2
    expected = [[3, 3], [2, 4]]
    print(equals(o.kClosest(points, K), expected))

    points = [[0, 1], [1, 0]]
    K = 2
    expected = [[0, 1], [1, 0]]
    print(equals(o.kClosest(points, K), expected))

    points = [[1, 3], [-2, 2], [2, -2]]
    K = 2
    expected = [[-2, 2], [2, -2]]
    print(equals(o.kClosest(points, K), expected))

    # Large fixture case.  Use context managers instead of the original
    # json.load(open(...)) pattern, which leaked both file handles.
    with open('points.json') as f:
        points = json.load(f)
    K = 5313
    with open('points_expected.json') as f:
        expected = json.load(f)
    print(equals(o.kClosest(points, K), expected))
# last line of code
|
from gpiozero import MotionSensor, LED
from signal import pause

# PIR motion sensor wired to GPIO pin 25.
pir = MotionSensor(25)

# Busy-poll the sensor forever, reporting its current state.
# (`while True` replaces the non-idiomatic `while 1==1`.)
while True:
    if pir.motion_detected:
        print("Hello")
    else:
        print("Still")

# NOTE(review): unreachable — the loop above never exits.  Either switch to
# event callbacks (pir.when_motion) with pause(), or delete this call.
pause()
|
def the_contact_exist(contact_to_check, contacts_list):
    """Return True when the contact is already present in the list."""
    return any(existing == contact_to_check for existing in contacts_list)
def is_valid_index(number_index, contacts_list):
    """Return True when number_index addresses an existing element."""
    return number_index in range(len(contacts_list))
# Interactive contact list: process Add/Remove/Export commands until a
# terminating Print command.
contacts = input().split()
while True:
    command_input = input().split()
    command = command_input[0]
    if command == "Add":
        contact = command_input[1]
        index = int(command_input[2])
        # New contacts are appended; known ones are re-inserted at `index`
        # (only when that index is currently valid).
        if not the_contact_exist(contact, contacts):
            contacts.append(contact)
        else:
            if is_valid_index(index, contacts):
                contacts.insert(index, contact)
    elif command == "Remove":
        index = int(command_input[1])
        if is_valid_index(index, contacts):
            del contacts[index]
    elif command == "Export":
        start_index = int(command_input[1])
        count = int(command_input[2])
        # Python slices clamp to the list length, so the original
        # end-past-the-end special case was redundant.
        print(" ".join(contacts[start_index:start_index + count]))
    elif command == "Print":
        # Only "Normal"/"Reversed" terminate the loop; anything else is ignored.
        normal_or_reversed = command_input[1]
        if normal_or_reversed == "Normal":
            print("Contacts:", end=" ")
            print(" ".join(contacts))
            break
        elif normal_or_reversed == "Reversed":
            contacts = contacts[::-1]
            print("Contacts:", end=" ")
            print(" ".join(contacts))
            break
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import requests
from jsonpath import jsonpath
class TestJsonPath:
    """Compare plain dict-indexing JSON parsing with jsonpath-based parsing."""

    # Traditional approach: walk the decoded JSON by keys and indexes.
    def test_hogwarts_json(self):
        response = requests.get('https://home.testing-studio.com/categories.json')
        assert response.status_code == 200
        categories = response.json()['category_list']['categories']
        assert categories[0]['name'] == '霍格沃兹测试学院公众号'

    # jsonpath approach: '$..name' collects every "name" field recursively.
    def test_hogwarts_jsonpath(self):
        url = "https://home.testing-studio.com/categories.json"
        response = requests.get(url)
        assert jsonpath(response.json(), '$..name')[0] == '霍格沃兹测试学院公众号'
if __name__ == '__main__':
    # BUG FIX: pytest.main() takes the CLI arguments as ONE list; passing
    # them as separate positional arguments is wrong (the second positional
    # parameter of pytest.main is `plugins`).
    pytest.main(['-v', '-s', "test_request_jsonpath.py"])
|
# Generated by Django 3.2 on 2021-04-26 05:44
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the blog app: creates the Blog table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('slug', models.CharField(max_length=100)),
                # Rich HTML body edited through CKEditor.
                ('content', ckeditor.fields.RichTextField()),
                ('author', models.CharField(max_length=100)),
                ('image', models.ImageField(default='static/images/image_1.jpg', upload_to='blogs')),
                # Stored as a string flag: '1' = Publish, '0' = Draft.
                ('status', models.CharField(choices=[('1', 'Publish'), ('0', 'Draft')], max_length=20)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
from __future__ import absolute_import
# The MIT License (MIT)
#
# Copyright (c) 2015 Fabrizio Guglielmino <guglielmino@gumino.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import future
import paho.mqtt.client as mqtt
import re
import json
import uuid
from .exceptions import PushettaException, TokenValidationError, ChannelNotFoundError
try:
from urllib2 import urlopen as urlopen
import urllib2 as urllib
from urllib2 import Request
from urllib2 import HTTPError
from urllib2 import URLError
except:
import urllib
from urllib.request import Request
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.request import urlopen as urlopen
class Pushetta(object):
    """Minimal client for the pushetta.com push-notification service.

    Pushes plain-text messages over HTTP and subscribes to channels over
    MQTT (paho).
    """

    # MQTT broker host used for channel subscriptions.
    iot_url = "iot.pushetta.com"
    # MQTT topic template; formatted with the channel name.
    sub_pattern = "/pushetta.com/channels/{0}"
    # User-supplied callback invoked for every received message.
    message_callback = None

    def __init__(self, apiKey):
        # Token used for both the REST Authorization header and as MQTT username.
        self._apiKey = apiKey
        self.mqtt_client = None  # created lazily on first subscribe()

    def pushMessage(self, channel, body, expire=None):
        """POST a plain-text message to `channel`.

        Raises:
            TokenValidationError: HTTP 401 (bad API token).
            ChannelNotFoundError: HTTP 404 (unknown channel).
            PushettaException: any other HTTP/URL/unexpected failure.
        """
        try:
            req = Request('http://api.pushetta.com/api/pushes/{0}/'.format(channel))
            req.add_header('Content-Type', 'application/json')
            req.add_header('Authorization', 'Token {0}'.format(self._apiKey))
            payload = dict()
            payload["body"] = body
            payload["message_type"] = "text/plain"
            if expire is not None:
                payload["expire"] = expire
            response = urlopen(req, json.dumps(payload).encode('utf8'))
        except HTTPError as e:
            if e.code == 401:
                raise TokenValidationError("Invalid token")
            elif e.code == 404:
                raise ChannelNotFoundError("Channel name not found")
            else:
                raise PushettaException(e.reason)
        except URLError as e:
            raise PushettaException(e.reason)
        except Exception:
            import traceback
            raise PushettaException(traceback.format_exc())

    def subscribe(self, channel, callback):
        """Subscribe to `channel`; `callback` receives a notification dict."""
        topic = Pushetta.sub_pattern.format(channel)
        self.message_callback = callback
        if self.mqtt_client is None:
            # First subscription: create and connect the client.  The topic is
            # stashed as userdata and actually subscribed in __connect_callback.
            self.mqtt_client = mqtt.Client(client_id="pushetta-" + str(uuid.uuid4()))
            self.mqtt_client.on_message = self.__message_callback
            self.mqtt_client.on_connect = self.__connect_callback
            self.mqtt_client.username_pw_set(self._apiKey, password="pushetta")
            self.mqtt_client.connect(Pushetta.iot_url, 1883, 60)
            self.mqtt_client.user_data_set(topic)
            self.mqtt_client.loop_start()
        else:
            # Client already running: just add the new topic.
            self.mqtt_client.subscribe(topic)

    def unsubscribe(self, channel):
        """Stop receiving messages for `channel` (requires a prior subscribe)."""
        topic = Pushetta.sub_pattern.format(channel)
        self.mqtt_client.unsubscribe(topic)

    def __connect_callback(self, client, userdata, flags, rc):
        # userdata carries the topic chosen at connect time.
        client.subscribe(userdata)

    def __message_callback(self, client, userdata, message):
        if self.message_callback is not None:
            # The channel name is the last path segment of the MQTT topic.
            notification = {'channel' : message.topic.split('/')[-1], 'message' : message.payload, 'timestamp' : message.timestamp}
            self.message_callback(notification)
|
# Read the Caesar shift; allow one retry on bad input, then fall back to 1.
try:
    a = int(input('Введите значение сдвига (целое число) '))
except ValueError:  # invalid input: give the user one more attempt
    try:
        a = int(input('Не верные данные. Введите целое число '))
    except ValueError:
        print('Были введены некорректные данные. Шифровка производится со сдвигом 1')
        a = 1
# Text to encrypt.
text = input('Введите текст ')
# The 33 letters of the Russian alphabet, lower- then upper-case.
letters = ['а', 'б', 'в', 'г', 'д', 'е', 'ё' ,'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х',
           'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
b_letters = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ё', 'Ж', 'З', 'И', 'Й', 'К', 'Л', 'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У',
             'Ф', 'Х', 'Ц', 'Ч', 'Ш', 'Щ', 'Ъ', 'Ы', 'Ь', 'Э', 'Ю', 'Я']
def Cipher(text, move=a):
    """Encrypt `text` with a Caesar shift of `move` over the Russian alphabet.

    Characters outside the Russian alphabet pass through unchanged.
    """
    # BUG FIX: the accumulator started as ' ' (a single space), so every
    # result carried a spurious leading space and Cipher/DeCipher did not
    # round-trip cleanly.
    stroka = ''
    # Normalize the shift to the 33-letter alphabet; `move % 33` is exactly
    # the original `move - (move // 33) * 33`.
    d = move % 33
    for b in text:
        if b in letters:
            # Shift within the lower-case alphabet, wrapping past 'я'.
            stroka += letters[(letters.index(b) + d) % 33]
        elif b in b_letters:
            # Same shift for upper-case letters.
            stroka += b_letters[(b_letters.index(b) + d) % 33]
        else:
            stroka += b  # not a Russian letter: keep as-is
    return stroka
print('Шифровка: ', Cipher(text))  # show the encrypted text
def DeCipher(text, move=a):
    """Decrypt text produced by Cipher() with the same shift `move`."""
    # BUG FIX: accumulator was ' ' (leading space); see Cipher().
    stroka = ''
    d = move % 33  # same normalization as in Cipher()
    for b in text:
        if b in letters:
            # Explicit wrap-around.  The original used letters[num - d]
            # and relied on Python's negative indexing to wrap, which was
            # correct only by accident; the modulo makes the intent clear.
            stroka += letters[(letters.index(b) - d) % 33]
        elif b in b_letters:
            stroka += b_letters[(b_letters.index(b) - d) % 33]
        else:
            stroka += b  # non-letters pass through unchanged
    return stroka
print('Расшифровка: ', DeCipher(text))  # round-trip check: decrypt the input
|
from flask import Flask
from flask_restful import Api
from flaskext.mysql import MySQL
class ConfigDatabaseC:
    """Singleton holder for the MySQL connection of database C."""

    _mysql = None  # shared MySQL extension instance, created once

    @staticmethod
    def getMysql():
        """Return the shared MySQL instance (None until first construction)."""
        return ConfigDatabaseC._mysql

    def __init__(self, app):
        """Configure `app` for database C; may run only once.

        Raises:
            Exception: if the singleton has already been configured.
        """
        # PEP 8: compare to None with `is`/`is not`, never `!=`.
        if ConfigDatabaseC._mysql is not None:
            raise Exception("This class is already configured!")
        ConfigDatabaseC._mysql = MySQL()
        # MySQL configurations
        app.config['MYSQL_DATABASE_USER'] = 'userC'
        app.config['MYSQL_DATABASE_PASSWORD'] = 'userC'
        app.config['MYSQL_DATABASE_DB'] = 'base_C'
        app.config['MYSQL_DATABASE_HOST'] = 'localhost'
        ConfigDatabaseC._mysql.init_app(app)
|
from django.db import models
class TimeStamp(models.Model):
    """Abstract base model adding created/updated timestamps."""

    # Set once when the row is first inserted.
    created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    # Refreshed on every save().
    updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
    # is_active = models.BooleanField(default=True)

    class Meta:
        abstract = True  # no table is created for this model itself
class WebPa(TimeStamp):
    """Editable site copy: navbar, footer, header and info texts."""

    # Navbar brand text.
    navbar_eder = models.CharField(
        max_length=100, default="Daru Wheel", blank=True, null=True
    )
    footer1 = models.CharField(
        max_length=100, default="Darius & Co.", blank=True, null=True
    )
    # Link target shown in the footer.
    footer2_url = models.CharField(
        max_length=100,
        default="https://www.github.com/gibeongideon",
        blank=True,
        null=True,
    )
    footer3 = models.CharField(max_length=100, blank=True, null=True)
    header1 = models.CharField(
        max_length=100, default="Welcome to Daruwheel", blank=True, null=True
    )
    header2 = models.CharField(
        max_length=100, default="Play and Win Real Cash", blank=True, null=True
    )
    header3 = models.CharField(max_length=100, blank=True, null=True)
    header4 = models.CharField(max_length=100, blank=True, null=True)
    copyright_text = models.CharField(max_length=30, blank=True, null=True)
    # Instructional text shown on the M-Pesa deposit form.
    mpesa_header_depo_msg = models.TextField(
        max_length=300,
        default="Enter amount and click send.Check M-pesa SMS send to your mobile NO you register with to confirm transaction.",
        blank=True,
        null=True,
    )
    # Referral-program explanation text.
    share_info = models.TextField(
        max_length=300,
        default="Share the code to other people to get credit whenever they bet.Once someone register you will always get some credit whenever they place stake.Make sure they entered the code correctly when they signup.",
        blank=True,
        null=True,
    )
|
# Generated by Django 3.2.4 on 2021-06-19 03:13
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make chatmessage.time default to the current time.

    BUG FIX: the previous default was the literal format string
    '%m/%d/%Y %H:%M:%S'.  A DateTimeField default must be a datetime (or a
    callable returning one); a format string is not a parseable datetime
    value and fails validation on insert.
    """

    dependencies = [
        ('chat', '0002_chatmessage_time'),
    ]

    operations = [
        migrations.AlterField(
            model_name='chatmessage',
            name='time',
            # Callable default: evaluated at save time.
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
from django.urls import path, re_path
from .views import AudioView
# All audio endpoints are served by the single class-based AudioView.
urlpatterns = [
    # Creation endpoint (per the route name).
    path("", AudioView.as_view(), name="audioCreate"),
    # Single file lookup by type and numeric id.
    path("<str:audioFileType>/<int:audioFileID>", AudioView.as_view(), name="audioFileFetch"),
    # All files of one type.
    path("<str:audioFileType>/", AudioView.as_view(), name="audioFiles"),
]
# -*- coding:utf-8 -*-
# Linux32 module: build syscalls for linux 32 bits
from ropgenerator.exploit.syscalls.SyscallDef import Syscall
from ropgenerator.exploit.Utils import popMultiple
from ropgenerator.IO import verbose, string_bold, string_ropg, string_payload, error
from ropgenerator.semantic.Engine import search
from ropgenerator.Database import QueryType
from ropgenerator.Constraints import Constraint, Assertion
import ropgenerator.Architecture as Arch
SYSCALL_LMAX = 500
# mprotect
def build_mprotect32(addr, size, prot=7, constraint=None, assertion = None, clmax=SYSCALL_LMAX, optimizeLen=False):
    """
    Call mprotect from X86 arch
    Args must be on the stack:
        int mprotect(void *addr, size_t len, int prot)
    args must be in registers (ebx, ecx, edx)
    eax must be 0x7d = 125

    Returns a ROP chain on success, None on failure.
    """
    # Check args
    if not isinstance(addr, int):
        error("Argument error. Expected integer, got " + str(type(addr)))
        return None
    elif not isinstance(size, int):
        error("Argument error. Expected integer, got " + str(type(size)))
        return None
    elif not isinstance(prot, int):
        error("Argument error. Expected integer, got " + str(type(prot)))
        return None
    if( constraint is None ):
        constraint = Constraint()
    if( assertion is None ):
        assertion = Assertion()
    # Check if we have the function !
    # NOTE(review): build_call is not among this module's visible imports —
    # confirm it is provided elsewhere, otherwise this line raises NameError.
    verbose("Trying to call mprotect() function directly")
    func_call = build_call('mprotect', [addr, size, prot], constraint, assertion, clmax=clmax, optimizeLen=optimizeLen)
    if( not isinstance(func_call, str) ):
        # Presumably build_call returns a chain on success and an error
        # string on failure — verify against build_call's contract.
        verbose("Success")
        return func_call
    else:
        if( not constraint.chainable.ret ):
            verbose("Coudn't call mprotect(), try direct syscall")
        else:
            verbose("Couldn't call mprotect() and return to ROPChain")
            return None
    # Otherwise do syscall directly
    # Set the registers for int 0x80: eax=0x7d (mprotect syscall number),
    # ebx=addr, ecx=size, edx=prot.
    args = [[Arch.n2r('eax'),0x7d],[Arch.n2r('ebx'), addr],[Arch.n2r('ecx'),size], [Arch.n2r('edx'),prot]]
    chain = popMultiple(args, constraint, assertion, clmax-1, optimizeLen)
    if( not chain ):
        verbose("Failed to set registers for the mprotect syscall")
        return None
    # Int 0x80
    int80_gadgets = search(QueryType.INT80, None, None, constraint, assertion)
    if( not int80_gadgets ):
        verbose("Failed to find an 'int 80' gadget")
        return None
    else:
        chain.addChain(int80_gadgets[0])
    verbose("Success")
    return chain
# Syscall descriptor for mprotect(void* addr, size_t len, int prot).
mprotect = Syscall('int', 'mprotect', \
    [('void*', 'addr'),('size_t','len'),('int','prot')], build_mprotect32)
## All available syscalls
# Maps "<name>32" -> Syscall descriptor.
available = dict()
available[mprotect.name+"32"] = mprotect
####################
# Useful functions #
####################
def print_available():
    """Print every supported Linux 32-bit syscall with its signature."""
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError
    # under Python 3; .items() behaves identically on both.  The `global`
    # declaration was also dropped: reading a module-level name needs none.
    print(string_bold("\n\n\tSupported Linux 32-bits syscalls"))
    for name, syscall in available.items():
        print("\n\t"+string_payload(name)+": "+str(syscall))
|
import world
import utils
from world import cprint
import torch
import numpy as np
from tensorboardX import SummaryWriter
import time
import Procedure
from os.path import join
# ==============================
utils.set_seed(world.seed)
print(">>SEED:", world.seed)
# ==============================
# NOTE(review): register is imported after seeding — presumably so that
# dataset/model construction is reproducible; confirm.
import register
from register import dataset

# Build the recommender model named in the config and move it to the device.
Recmodel = register.MODELS[world.model_name](world.config, dataset)
Recmodel = Recmodel.to(world.device)
bpr = utils.BPRLoss(Recmodel, world.config)

# Checkpoint path used for both loading and saving.
weight_file = utils.getFileName()
print(f"load and save to {weight_file}")
if world.LOAD:
    try:
        # Load onto CPU first; the .to(device) above handles placement.
        Recmodel.load_state_dict(torch.load(weight_file,map_location=torch.device('cpu')))
        world.cprint(f"loaded model weights from {weight_file}")
    except FileNotFoundError:
        print(f"{weight_file} not exists, start from beginning")
Neg_k = 1  # number of negative samples per positive in BPR training

# init tensorboard
if world.tensorboard:
    w : SummaryWriter = SummaryWriter(
        join(world.BOARD_PATH, time.strftime("%m-%d-%Hh%Mm%Ss-") + "-" + world.comment)
    )
else:
    w = None
    world.cprint("not enable tensorflowboard")

try:
    for epoch in range(world.TRAIN_epochs):
        start = time.time()
        # Evaluate every 10 epochs (including epoch 0).
        if epoch %10 == 0:
            cprint("[TEST]")
            Procedure.Test(dataset, Recmodel, epoch, w, world.config['multicore'])
        output_information = Procedure.BPR_train_original(dataset, Recmodel, bpr, epoch, neg_k=Neg_k,w=w)
        print(f'EPOCH[{epoch+1}/{world.TRAIN_epochs}] {output_information}')
        # Checkpoint after every epoch.
        torch.save(Recmodel.state_dict(), weight_file)
finally:
    # Flush and close the tensorboard writer even if training aborts.
    if world.tensorboard:
        w.close()
# coin_change.py
# You are given coins of different denominations and a total amount of money.
# Write a function to compute the fewest number of coins that you need to make
# that amount. If that amount cannot be made by any combination of the coins,
# return -1.
#
# Example 1:
# * coins = [1, 2, 5], amount = 11
# return 3 (11 = 5 + 5 + 1)
#
# Example 2:
# * coins = [2], amount = 3
# return -1.
#
# Note: You may assume that you have an infinite number of each kind of coin.
class Solution(object):
    """Classic coin-change DP: fewest coins adding up to a target amount."""

    def coin_change(self, coins, amount):
        """Return the minimum number of coins summing to `amount`, or -1.

        Bottom-up DP over all sub-amounts; O(amount * len(coins)) time.
        """
        INFEASIBLE = amount + 1  # sentinel larger than any possible answer
        best = [0] + [INFEASIBLE] * amount
        for sub_amount in range(1, amount + 1):
            reachable = [best[sub_amount - coin] + 1
                         for coin in coins if coin <= sub_amount]
            if reachable:
                best[sub_amount] = min(reachable)
        return best[amount] if best[amount] < INFEASIBLE else -1

    def main(self):
        """Read judge input (coin count, coins, amount) and print the answer."""
        _ = int(input())  # declared coin count; redundant with the list length
        coins = list(map(int, input().split()))
        amount = int(input())
        print(self.coin_change(coins, amount))
if __name__ == "__main__":
    # Script entry point: only runs on direct execution, not on import.
    Solution().main()
|
import matplotlib
def transform(samples):
    """Placeholder: transform raw samples into plottable values (unimplemented)."""
    pass
def draw(values):
    """Placeholder: render the given values (unimplemented)."""
    pass
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 22:36:32 2017
@author: Anirban Das
"""
import csv
from textwrap import TextWrapper
#==============================================================================
# from itertools import product
# stringReq = 'ABCDEFGH' #valid letters string
# stringLength = 4
# all_combinations = [''.join(i) for i in product(stringReq, repeat = stringLength)]
# print(all_combinations)
#==============================================================================
def writeToTextFile(combinations, filename): #if you want text file
    """Write each combination as a FASTA record under Outputs/<filename>.fasta.

    Record headers are ">sequence_combination#<index>"; sequence lines are
    wrapped to 80 columns.
    """
    wrapper = TextWrapper(width=80)
    filepath = 'Outputs/' + filename + '.fasta'
    with open(filepath, 'w') as myfile:
        # enumerate() replaces the range(len(...)) indexing anti-pattern.
        for index, combination in enumerate(combinations):
            myfile.write(">sequence_combination#{}\n".format(index))
            myfile.write("\n".join(wrapper.wrap(combination)))
            myfile.write("\n")
    print("Generated text file :", filename+'.fasta')
#==============================================================================
def writeToCSV(combinations, filename): #if you want csv
    """Write all combinations as one comma-separated row under Outputs/."""
    filepath = 'Outputs/' + filename + '.csv'
    delimeter = ","  # change here to emit a different separator
    with open(filepath, 'w') as myfile:
        writer = csv.writer(myfile, delimiter=delimeter, quoting=csv.QUOTE_NONE)
        writer.writerow(combinations)
    print("Generated CSV file :", filename + '.csv')
if __name__ == '__main__':
    # The 20 standard amino-acid one-letter codes.
    validAlphabet = 'GALMFWKQESPVICYHRNDT'

    # Each config line is "<name>=<sequence>".  Use a context manager (the
    # original leaked the handle) and avoid shadowing the `file` builtin.
    with open('sequenceFile.conf', 'r') as conf:
        proteinSeq = conf.read().strip().split('\n')

    for sequence in proteinSeq:
        sequenceName, actualSequence = sequence.split('=')
        allCombinations = list()
        # Generate every single-point mutation of the sequence.
        for i in range(len(actualSequence.strip())):
            for letter in validAlphabet:
                dummySeq = list(actualSequence.strip())
                dummySeq[i] = letter
                dummySeq = ''.join(dummySeq)
                allCombinations.append(dummySeq)
        uniqueCombinations = sorted(set(allCombinations))  # unique, sorted
        totalUniqueGenerated = len(uniqueCombinations)  # number of unique combinations
        print("\nNumber of entries generated for sequence: {} = {}".format(sequenceName,totalUniqueGenerated))
        name_of_generated_File = 'proteinSequence_'+ sequenceName.strip() + '_'+ str(totalUniqueGenerated)
        #generate text file
        writeToTextFile(uniqueCombinations, name_of_generated_File)
        #generate CSV file
        writeToCSV(uniqueCombinations, name_of_generated_File)
|
#! /usr/bin/env python
"""
Deprecated. Please see Simulation.py and Bag2Video.py
USING:
As a command line utility:
./Simulator.py [-h] [-n NUM] [-fps FPS] [-d DROP_DIR] [-s SAND_DIR]
[-dn DROP_NUM] [-dvm DROP_VEL_MEAN] [-dvv DROP_VEL_VAR]
[-dsm DROP_SIZE_MEAN] [-dsv DROP_SIZE_VAR] [-sn SAND_NUM]
[-svm SAND_VEL_MEAN] [-svv SAND_VEL_VAR]
[-ssm SAND_SIZE_MEAN] [-ssv SAND_SIZE_VAR] [-b BAG] [-v]
[-p]
bg output_video
As a module:
Untested
Author: Martin Humphreys
"""
import cv2
from argparse import ArgumentParser
from math import floor, ceil
import random
import os
import re
import pickle
import scipy.io
from DataBag import DataBag
import numpy as np
# This will be system specific, depending on OpenCV config
# ... Maybe not actually, I need 0 for .avi, and 33 for .mp4
# may have always been a codec difference ...
FOURCC = 0
class Particle:
    """One falling sprite (drop or sand) with wrap-around vertical motion."""

    def __init__(self, sim, id_gen, vm, vv, sprite):
        # id_gen: zero-argument callable yielding fresh integer ids.
        self.id_gen = id_gen
        self.id = self.id_gen()
        self.sim = sim
        # Random start position inside the frame; x stays fixed at sx.
        self.sx = random.randint(0, sim.width-1)
        self.sy = random.randint(0, sim.height-1)
        # Vertical velocity drawn from a normal distribution N(vm, vv).
        self.vy = np.random.normal(vm, vv)
        self.sprite = self.augment(sprite)

    def tick(self, n):
        # Position is derived from the start position and the frame number,
        # so ticks are stateless with respect to previous frames.
        self.x = self.sx
        _y = self.sy + (self.vy * n)
        y = _y % self.sim.height
        if _y != y:
            # Wrapped off-screen: treat as a new particle with a fresh id.
            # NOTE(review): once _y has wrapped, this condition holds on every
            # later tick, allocating a new id per frame — confirm intended.
            self.id = self.id_gen()
        self.y = y

    def augment(self, sprite):
        # Random rotations/flips are currently disabled (kept below, inert).
        """
        sprite = np.rot90(sprite, random.randint(0,3))
        if random.randint(0,1):
            sprite = cv2.flip(sprite, 0)
        if random.randint(0,1):
            sprite = cv2.flip(sprite, 1)
        """
        return sprite

    def rect(self):
        # Clip the sprite to the frame.  Returns frame-rect (fy/fx) and the
        # matching sub-rect within the sprite (oy/ox).
        h, w, c = self.sprite.shape
        h2, w2 = floor(h / 2), floor(w / 2)
        mw, mh = self.sim.width - 1, self.sim.height - 1
        # (f)rame coordinates, clamped to the frame bounds.
        fy1, fy2 = int(max(0, self.y - h2)), int(min(mh, self.y + h2))
        fx1, fx2 = int(max(0, self.x - w2)), int(min(mw, self.x + w2))
        # Odd sprite dimensions: widen the frame rect by one pixel.
        if h2 * 2 < h:
            if fy2 == mh:
                fy1 -= 1
            else:
                fy2 += 1
        if w2 * 2 < w:
            if fx2 == mw:
                fx1 -= 1
            else:
                fx2 += 1
        # (o)bject coordinates within the sprite, trimmed at frame edges.
        oy1, oy2 = 0, h
        ox1, ox2 = 0, w
        if fy1 == 0:
            oy1 += h - (fy2 - fy1)
        if fy2 == mh:
            oy2 -= h - (fy2 - fy1)
        if fx1 == 0:
            ox1 += w - (fx2 - fx1)
        if fx2 == mw:
            ox2 -= w - (fx2 - fx1)
        return fy1, fy2, fx1, fx2, oy1, oy2, ox1, ox2

    def paint(self, frame):
        # Working withing "transmission space" allows the
        # blending to be performed as an attenuation of
        # any intensity values behind the object being painted.
        # Get sprite coordinates in the (f)rame and the (o)bject
        fy1, fy2, fx1, fx2, oy1, oy2, ox1, ox2 = self.rect()
        # We'll use the alpha channel as a mask for the blending
        mask = np.float32(self.sprite[oy1:oy2, ox1:ox2, 3] / 255.0)
        # ... first we'll expand the mask a little, then blur for smoother edges
        mask = cv2.dilate(mask, (8,8))
        mask = cv2.blur(mask, (8,8))
        # Per-channel alpha blend of sprite over background.
        for c in range(0,3):
            bg = frame[fy1:fy2, fx1:fx2, c] * (1-mask)
            fg = frame[fy1:fy2, fx1:fx2, c] * (self.sprite[oy1:oy2, ox1:ox2, c]/255.0) * mask
            frame[fy1:fy2, fx1:fx2, c] = bg + fg

    def paint_binary(self, frame):
        # Set the sprite's alpha footprint to 1 in a single-channel mask.
        # Get sprite coordinates in the (f)rame and the (o)bject
        fy1, fy2, fx1, fx2, oy1, oy2, ox1, ox2 = self.rect()
        # We'll use the alpha channel as a mask for the blending
        mask = np.bool8(self.sprite[oy1:oy2, ox1:ox2, 3])
        # ... first we'll expand the mask a little, then blur for smoother edges
        # mask = cv2.dilate(mask, (8,8))
        # mask = cv2.blur(mask, (8,8))
        crop = frame[fy1:fy2, fx1:fx2]
        crop[mask>0] = 1
        # for c in range(0,3):
        #     bg = frame[fy1:fy2, fx1:fx2, c] * (1-mask)
        #     fg = frame[fy1:fy2, fx1:fx2, c] * (self.sprite[oy1:oy2, ox1:ox2, c]/255.0) * mask
        #     frame[fy1:fy2, fx1:fx2, c] = bg + fg
class Simulator:
def __init__(self, opts):
global FOURCC
self.opts = opts
self.save_bitmask = self.opts.p
self.verbose = self.opts.v
if self.verbose:
print "initializing..."
if isinstance(opts.bg, str):
self.bg = cv2.imread(opts.bg)
else:
self.bg = opts.bg
self.height, self.width, self.channels = self.bg.shape
self.vw = cv2.VideoWriter(opts.output_video, FOURCC , opts.fps, (self.width, self.height), False)
self.bag = DataBag(opts.bag, verbose = self.verbose)
self.particles = []
self.drop_sprites = self.loadSprites(self.opts.drop_dir)
self.sand_sprites = self.drop_sprites
# self.sand_sprites = self.loadSprites(self.opts.sand_dir)
def counter(first = 1):
current = [first - 1]
def next():
current[0] += 1
return current[0]
return next
id_gen = counter()
if self.verbose:
print "sprites loaded..."
# If we do not specify a mean drop size, we instead randomly sample the
# loaded sprites
if not self.opts.drop_size_mean:
l = []
for k,v in self.drop_sprites.iteritems():
l.extend(len(v)*[int(k)])
count = 0
# for n in range(opts.drop_num):
while (count < opts.drop_num):
size = random.choice(l)
if size < 150:
continue
sprite = random.choice(self.drop_sprites[size])
p = Particle(self, id_gen, opts.drop_vel_mean, opts.drop_vel_var, sprite)
self.particles.append(p)
self.bag.insertParticle(size, p.id)
count +=1
else:
strkeys = self.drop_sprites.keys()
intkeys = map(int, strkeys)
for n in range(opts.drop_num):
while True:
size = str(int(round(np.random.normal(opts.drop_size_mean, opts.drop_size_var))))
if size in strkeys:
break;
sprite = random.choice(self.drop_sprites[size])
p = Particle(self, id_gen, opts.drop_vel_mean, opts.drop_vel_var, sprite)
self.particles.append(p)
self.bag.insertParticle(3.14159 * ((float(size)/2.0)**2), 1, p.id)
if self.verbose:
print "drop selection complete..."
# If we do not specify a mean sand size, we instead randomly sample the
# loaded sprites
if not self.opts.sand_size_mean:
l = []
for k,v in self.sand_sprites.iteritems():
l.extend(len(v)*[int(k)])
count = 0
# for n in range(opts.drop_num):
while (count < opts.sand_num):
size = random.choice(l)
# if size < 50:
# continue
sprite = random.choice(self.sand_sprites[size])
p = Particle(self, count, size, opts.sand_vel_mean, opts.sand_vel_var, sprite)
self.particles.append(p)
self.bag.insertParticle(size, p.id)
count +=1
else:
strkeys = self.sand_sprites.keys()
intkeys = map(int, strkeys)
for n in range(opts.sand_num):
while True:
size = str(int(round(np.random.normal(opts.sand_size_mean, opts.sand_size_var))))
if size in strkeys:
break;
sprite = random.choice(self.sand_sprites[size])
self.particles.append(Particle(self, id_gen, opts.sand_vel_mean, opts.sand_vel_var, sprite))
if self.verbose:
print "sand selection complete..."
# sprite = cv2.cvtColor(random.choice(self.sand_sprites[size]), cv2.COLOR_GRAY2RGBA)
random.shuffle(self.particles)
def loadSprites(self, folder):
# Are we loading sprite files from a directory, or a pickled sprites file
if os.path.isdir(folder):
sprites = {}
files = os.listdir(folder)
num_re = re.compile("\d+")
for sfile in files:
nums = num_re.findall(sfile)
sprite = cv2.imread(folder + "/" + sfile, -1)
if nums[0] in sprites.keys():
sprites[nums[0]].append(sprite)
else:
sprites[nums[0]] = [sprite]
else:
sprites = pickle.load(open(folder, "rb"))
return sprites
def blackbar(self, frame):
w = 16
frame[:, 0:15, :] = 0
def frame(self, n):
frame = self.bg.copy()
binary_mask = np.zeros((self.height, self.width), dtype=np.bool8)
for p in self.particles:
p.tick(n)
self.bag.insertAssoc(n-1, p.id, p.x, p.y)
p.paint(frame)
self.blackbar(frame)
p.paint_binary(binary_mask)
if self.save_bitmask:
self.bag.insertFrame(n-1, binary_mask)
else:
self.bag.insertFrame(n-1)
return frame
def fines(self, image):
# Gaussian Noise (only attenuates background intensity)
# intended to simulate fines
row,col= image.shape
mean = 0
var = 16
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col))
gauss = (100.0 - gauss)/100.0
gauss[gauss>1]=1
gauss = gauss.reshape(row,col)
frame = image * gauss
return frame
def generate(self, outfile, n=10):
if self.verbose:
print "generating frames..."
for n in range(1, n+1):
if self.verbose:
print "frame ", n, ' ...'
frame = self.frame(n)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = self.fines(frame)
frame = np.uint8(frame)
self.vw.write(frame)
def build_parser():
    """Build the command-line parser for the simulator."""
    arg_parser = ArgumentParser()
    # Positional arguments.
    arg_parser.add_argument('bg', help='The background image to use')
    arg_parser.add_argument('output_video', help='file to save simulated video to')
    # Global video options.
    arg_parser.add_argument("-n", "--num", help="number of frames to generate", type=int, default=10)
    arg_parser.add_argument("-fps", "--fps", help="frames per second", type=int, default=300)
    # Sprite sources.
    arg_parser.add_argument("-d", "--drop_dir", help="dir or path to drop sprites", default="drop")
    arg_parser.add_argument("-s", "--sand_dir", help="dir or path to sand sprites", default="sand")
    # Drop population parameters.
    arg_parser.add_argument("-dn", "--drop_num", help="number of drops", type=int, default=250)
    arg_parser.add_argument("-dvm", "--drop_vel_mean", help="drop velocity mean", type=float, default=-2.5)
    arg_parser.add_argument("-dvv", "--drop_vel_var", help="drop velocity variance", type=float, default=3)
    arg_parser.add_argument("-dsm", "--drop_size_mean", help="drop size mean; 0 to randomly sample loaded drops", type=float, default=8)
    arg_parser.add_argument("-dsv", "--drop_size_var", help="drop size variance", type=float, default=10)
    # Sand population parameters.
    arg_parser.add_argument("-sn", "--sand_num", help="number of sand particles", type=int, default=300)
    arg_parser.add_argument("-svm", "--sand_vel_mean", help="sand velocity mean", type=float, default=1)
    arg_parser.add_argument("-svv", "--sand_vel_var", help="sand velocity variance", type=float, default=1)
    arg_parser.add_argument("-ssm", "--sand_size_mean", help="sand size mean; 0 to randomly sample loaded sand", type=float, default=8)
    arg_parser.add_argument("-ssv", "--sand_size_var", help="sand size variance", type=float, default=6)
    # Output/diagnostic options.
    arg_parser.add_argument("-b", "--bag", help="bag file to save to", type=str, default=":memory:")
    arg_parser.add_argument('-v', help='print verbose statements while executing',
                            action = 'store_true')
    arg_parser.add_argument('-p', help='Store the png bitmask image for detection validation; slows frame generation', action = 'store_true')
    return arg_parser
def main(opts):
    """Validate the background path and run the simulation."""
    if not os.path.isfile(opts.bg):
        # BUG FIX: `parser` was referenced here but only ever existed as a
        # local inside build_parser(), so this path raised NameError instead
        # of reporting the missing file.
        build_parser().error("Background image file %s does not exist." % opts.bg)
    sim = Simulator(opts)
    sim.generate(opts.output_video, opts.num)
if __name__ == '__main__':
    # Parse CLI arguments and run the simulator.
    main(build_parser().parse_args())
|
# -*- coding: utf-8 -*-
import inject
import json
import re
import logging
import psycopg2
from dateutil.parser import parse
import pytz
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import threads
import autobahn
import wamp
from model.assistance.logs import Log
from model.assistance.utils import Utils
from model.assistance.assistance import AssistanceModel
######## para activar exportación a LibreOffice #############
## from actions.systems.assistance.exportOO import ExportModel
####### para activar la exportación a google spreadsheets #############
from actions.systems.assistance.exportG import ExportModel
logging.getLogger().setLevel(logging.INFO)
class Assistance(wamp.SystemComponentSession):
#exportModel = None
exportModel = ExportModel
assistanceModel = inject.attr(AssistanceModel)
timezone = pytz.timezone('America/Argentina/Buenos_Aires')
conn = wamp.getConnectionManager()
def getRegisterOptions(self):
return autobahn.wamp.RegisterOptions(details_arg='details')
    def _parseDate(self, strDate):
        """Parse a date string and convert it from UTC to the local timezone."""
        ########################
        # TODO: see how to fix this so WAMP handles dates natively.
        # The docker image also seems not to set the timezone.
        date = parse(strDate)
        ldate = Utils._localizeUtc(date).astimezone(self.timezone)
        return ldate
    def _getLogs(self, initDate, endDate, initHours, endHours, details):
        """Fetch Log rows in the date range, filtered by the daily hour window."""
        iDate = self._parseDate(initDate)
        eDate = self._parseDate(endDate)
        iHours = self._parseDate(initHours)
        eHours = self._parseDate(endHours)
        con = self.conn.get()
        try:
            logs = Log.findByDateRange(con, iDate, eDate, iHours, eHours)
            return logs
        finally:
            # Always return the connection to the pool.
            self.conn.put(con)
    @autobahn.wamp.register('assistance.get_logs')
    @inlineCallbacks
    def getLogs(self, initDate, endDate, initHours, endHours, details):
        """WAMP endpoint: run the blocking _getLogs in a worker thread."""
        r = yield threads.deferToThread(self._getLogs, initDate, endDate, initHours, endHours, details)
        returnValue(r)
    def _getStatistics(self, initDate, endDate, userIds, officeIds, initTime, endTime, details):
        """Compute attendance statistics; a None bound means 'unbounded'."""
        iDate = None if initDate is None else self._parseDate(initDate)
        eDate = None if endDate is None else self._parseDate(endDate)
        iTime = None if initTime is None else self._parseDate(initTime)
        eTime = None if endTime is None else self._parseDate(endTime)
        con = self.conn.get()
        try:
            logging.info('calculando estadisticas')
            statistics = self.assistanceModel.getStatisticsData(con, userIds, iDate, eDate, officeIds, iTime, eTime)
            logging.info(statistics)
            return statistics
        finally:
            self.conn.put(con)
@autobahn.wamp.register('assistance.get_statistics')
@inlineCallbacks
def getStatistics(self, initDate, endDate, userIds, officeIds, initTime, endTime, details):
r = yield threads.deferToThread(self._getStatistics, initDate, endDate, userIds, officeIds, initTime, endTime, details)
returnValue(r)
def _setWorkedNote(self, userId, date, text, details):
if (userId is None or date is None):
return None
con = self.conn.get()
try:
date = self._parseDate(date).date()
wp = self.assistanceModel.setWorkedNote(con, userId, date, text)
con.commit()
return wp
finally:
self.conn.put(con)
@autobahn.wamp.register('assistance.set_worked_note')
@inlineCallbacks
def setWorkedNote(self, userId, date, text, details):
r = yield threads.deferToThread(self._setWorkedNote,userId, date, text, details)
returnValue(r)
############################# EXPORTACIONES #######################################
@inlineCallbacks
def _exportLogs(self, initDate, endDate, initHours, endHours, details):
###### HACK HORRIBLE!!! ver como se mejora de una forma eficiente #################
ownerId = details.caller_authid
###################################################
logs = self._getLogs(initDate, endDate, initHours, endHours, details)
userIds = [l.userId for l in logs if l.userId is not None]
usersData = yield self.call('users.find_by_id', userIds)
r = self.exportModel.exportLogs(ownerId, logs, usersData)
returnValue(r)
@autobahn.wamp.register('assistance.export_logs')
@inlineCallbacks
def exportLogs(self, initDate, endDate, initHours, endHours, details):
r = yield self._exportLogs( initDate, endDate, initHours, endHours, details)
#r = yield threads.deferToThread(self._exportStatistics, initDate, endDate, userIds, officeIds, initTime, endTime, details)
returnValue(r)
@inlineCallbacks
def _exportStatistics(self, initDate, endDate, userIds, officeIds, initTime, endTime, details):
###### HACK HORRIBLE!!! ver como se mejora de una forma eficiente #################
ownerId = details.caller_authid
###################################################
stats = self._getStatistics( initDate, endDate, userIds, officeIds, initTime, endTime, details)
userIds = [s.userId for s in stats if s.userId is not None]
usersData = yield self.call('users.find_by_id', userIds)
r = self.exportModel.exportStatistics(ownerId, stats, usersData)
returnValue(r)
@autobahn.wamp.register('assistance.export_statistics')
@inlineCallbacks
def exportStatistics(self, initDate, endDate, userIds, officeIds, initTime, endTime, details):
r = yield self._exportStatistics(initDate, endDate, userIds, officeIds, initTime, endTime, details)
#r = yield threads.deferToThread(self._exportStatistics, initDate, endDate, userIds, officeIds, initTime, endTime, details)
returnValue(r)
|
from mybib.bibtext.reader import load_from_string
def test_load_from_string_dblp_format(bibtex_dblp_format, json_dblp_format):
    """A DBLP-style BibTeX string parses into the expected record dict."""
    parsed = load_from_string(bibtex_dblp_format)
    assert parsed == json_dblp_format
def test_load_from_string_multiple_authors(bibtex_json_multiple_authors):
    """An entry with several authors parses into the expected record dict."""
    source, expected = bibtex_json_multiple_authors
    parsed = load_from_string(source)
    assert parsed == expected
def test_load_from_string_single_author(bibtex_json_single_author):
    """A single-author entry parses into the expected record dict."""
    source, expected = bibtex_json_single_author
    parsed = load_from_string(source)
    assert parsed == expected
def test_load_from_string_no_keywords(bibtex_json_no_keywords):
    """An entry without a keywords field still parses correctly."""
    source, expected = bibtex_json_no_keywords
    parsed = load_from_string(source)
    assert parsed == expected
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import urllib
""" generate readme.md """
__author__ = 'kuoTed'
paper_class_map = {}
paper_map = {}
file_object = open('./README.md')
all_lines = file_object.readlines()
file_object.close()
out_file = open('./README.md', 'w')
paper_class_flag = 0
paper_class_name = ""
paper_flag = 0
paper_name = ""
catalog_flag = 0
for line in all_lines:
if catalog_flag != 1:
out_file.write(line)
if line.startswith("##"):
catalog_flag = 1
if paper_class_flag == 1 and not line.startswith("*") and not line.startswith("#"):
paper_class_map[paper_class_name] = line.strip()
print paper_class_name, line.strip()
paper_class_flag = 0
if paper_flag == 1 and not line.startswith("*") and not line.startswith("#"):
paper_map[paper_name] = line.strip()
print "\t", paper_name, line.strip()
paper_flag = 0
if catalog_flag == 1:
if line.startswith("*"):
paper_flag = 1
paper_name = line[line.find("[")+1:line.find("]")].strip()
if line.startswith("###"):
paper_class_flag = 1
paper_class_name = line[3:].strip()
# github_root = "https://github.com/wzhe06/Reco-papers/blob/master/"
gitee_root = "https://gitee.com/cvsuser/DeepLearningPapers/blob/master/"
all_dir = os.listdir("./")
for one_dir in all_dir:
if os.path.isdir(one_dir) and not one_dir.startswith('.'):
out_file.write("\n### " + one_dir+"\n")
if one_dir.strip() in paper_class_map:
out_file.write(paper_class_map[one_dir.strip()] + "\n")
all_sub_files = os.listdir(one_dir)
for one_file in all_sub_files:
if not os.path.isdir(one_file) and not one_file.startswith('.'):
out_file.write("* [" + ('.').join(one_file.split('.')[:-1]) + "]("+gitee_root + urllib.quote(one_dir.strip())+"/"
+ urllib.quote(one_file.strip())+") <br />\n")
if one_file.strip() in paper_map:
out_file.write(paper_map[one_file.strip()] + "\n")
out_file.close()
|
# Evaluate the impact of regulatory somatic mutations on the co-expression network.
|
print("w key = paddle up x key = paddle down")
print("| < w/x | up/down > |")
import turtle

# Game window
wn = turtle.Screen()
wn.title("Pong")
wn.bgcolor("blue")
wn.setup(width=800, height=600)
wn.tracer(0)  # disable auto-refresh; the main loop calls wn.update() itself

# paddle A (left side, keys w/x)
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.color("white")
paddle_a.shape("square")
paddle_a.shapesize(stretch_wid=5, stretch_len=2)  # 100x40 px paddle
paddle_a.penup()
paddle_a.goto(-350, 0)

# paddle B (right side, arrow keys)
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid=5, stretch_len=2)
paddle_b.penup()
paddle_b.goto(350, 0)

# Optional background-colour prompt before the game starts.
zak = input('BG color change?(Change to black) y/n')
if zak == 'y':
    wn.bgcolor('black')
elif zak == 'n':
    wn.bgcolor('blue')
else:
    print('AUTO COLOR BLUE')
    wn.bgcolor('blue')

# Start confirmation; any answer other than yes/no waits for enter/close.
vara = input("Start? ")
if vara == 'yes':
    print("GO!")
elif vara == 'no':
    input('Well then click [x]')
else:
    input("Click [x] if you do not want to play click enter to play")

# Ball: starts at the centre with a diagonal velocity.
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0)
ball.dx = 0.5  # per-frame x velocity
ball.dy = 0.5  # per-frame y velocity
# Keyboard handlers: each key press nudges a paddle 20 px along the y-axis.
def paddle_a_up():
    """Move the left paddle up one step."""
    paddle_a.sety(paddle_a.ycor() + 20)

def paddle_a_down():
    """Move the left paddle down one step."""
    paddle_a.sety(paddle_a.ycor() - 20)

def paddle_b_up():
    """Move the right paddle up one step."""
    paddle_b.sety(paddle_b.ycor() + 20)

def paddle_b_down():
    """Move the right paddle down one step."""
    paddle_b.sety(paddle_b.ycor() - 20)
def reset():
    """Place the ball at (200, 200).

    BUG FIX: Turtle.xcor() is a getter and takes no arguments, so the original
    ball.xcor(200, 200) raised TypeError; goto() is the position setter.
    NOTE(review): this function is never bound to a key; the (200, 200) target
    is inferred from the original arguments — confirm intended behavior.
    """
    ball.goto(200, 200)
def colorChange():
    """Switch the background to black (bound to the 'b' key below).

    BUG FIX: `Black` was an undefined name, so pressing 'b' raised NameError;
    bgcolor expects a colour string.
    """
    wn.bgcolor("black")
##
# Key bindings
wn.listen()
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "x")
wn.onkeypress(paddle_a_up, "W")
wn.onkeypress(paddle_a_down, "X")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
wn.onkeypress(colorChange, "b")

# mainloop
while True:
    wn.update()
    # Move ball
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Top/bottom walls: bounce
    if ball.ycor() > 290:
        ball.sety(290)
        ball.dy *= -1
    if ball.ycor() < -290:
        ball.sety(-290)
        ball.dy *= -1
    # Left/right walls: ball got past a paddle -> re-serve from the centre.
    # BUG FIX: the right wall was tested at 290, which reset the ball before
    # it could even reach paddle B at x=350; use 390 to mirror the left wall.
    if ball.xcor() > 390:
        ball.goto(0, 0)
        ball.dx *= -1
    if ball.xcor() < -390:
        ball.goto(0, 0)
        ball.dx *= -1
    # Paddle/ball collisions: deflect when the ball is level with the paddle.
    if ball.xcor() < -340 and paddle_a.ycor() - 50 < ball.ycor() < paddle_a.ycor() + 50:
        ball.dx *= -1
    # BUG FIX: the lower bound used '>' for both comparisons, which made the
    # condition almost always true/false regardless of the paddle position;
    # mirror paddle A's check.
    if ball.xcor() > 340 and paddle_b.ycor() - 50 < ball.ycor() < paddle_b.ycor() + 50:
        ball.dx *= -1
######
|
from leetcode import TreeNode, test, new_tree
def convert_bst(root: TreeNode) -> TreeNode:
    """LeetCode 538/1038: replace each node's value with the sum of itself
    and all greater values in the BST (in place); returns the same root."""
    running_total = 0

    def visit_descending(node: TreeNode) -> None:
        # Reverse in-order (right, self, left) visits keys in decreasing
        # order, so the running total is exactly the "greater sum".
        nonlocal running_total
        if node is None:
            return
        visit_descending(node.right)
        running_total += node.val
        node.val = running_total
        visit_descending(node.left)

    visit_descending(root)
    return root
# Smoke test via the local leetcode harness: the BST 5/(2,13) becomes
# 18/(20,13) after adding greater-value sums.
test(
    convert_bst,
    [
        (new_tree(5, 2, 13), new_tree(18, 20, 13)),
    ],
)
|
# Generated by Django 2.1.2 on 2019-01-14 19:51
import django.core.files.storage
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
    """Point ProductFile.file at a protected (non-public) media storage.

    NOTE(review): the storage location is a hard-coded absolute Windows path;
    this migration will break on any other machine — consider deriving the
    location from settings instead.
    """

    dependencies = [
        ('products', '0011_productfile'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productfile',
            name='file',
            field=models.FileField(storage=django.core.files.storage.FileSystemStorage(location='C:\\Users\\iarocha\\django-projects\\learning-ecommerce\\static_dev\\protected_media'), upload_to=products.models.upload_product_file_loc),
        ),
    ]
|
import urllib.request
import cv2
import numpy as np
# Snapshot endpoint of an IP-webcam phone app on the local network.
url_id = 'http://25.13.177.28:8080/shot.jpg'
# BUG FIX: the frame counter must live outside the loop — it was reset to 1
# on every iteration, so each detection overwrote files/grey1.jpg.
i = 1
while True:
    # Fetch one JPEG snapshot and decode it into a BGR image.
    img_phone = urllib.request.urlopen(url_id)
    image_array = np.array(bytearray(img_phone.read()), dtype=np.uint8)
    image = cv2.imdecode(image_array, -1)
    # Grey background, converted back to 3 channels so it can be blended.
    grey_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grey_frame = cv2.cvtColor(grey_frame, cv2.COLOR_GRAY2BGR)
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)  # HSV frame of the decoded image
    red_lower_1 = np.array([0, 130, 140])   # colour range (min), Hue/Saturation/Value order
    red_upper_1 = np.array([40, 255, 255])  # colour range (max)
    red_mask_0 = cv2.inRange(hsv_image, red_lower_1, red_upper_1)  # mask of in-range pixels
    red_colour_1 = cv2.bitwise_and(image, image, mask=red_mask_0)  # keep only the matched colour
    grey = cv2.add(red_colour_1, grey_frame)  # colour-pop: matched regions over grey background
    cv2.imshow('grey', grey)
    cv2.imshow('image viewer', image)
    if red_mask_0.any():  # was `== True` — truthiness is enough
        # NOTE(review): assumes a 'files/' directory already exists — confirm.
        cv2.imwrite(f'files/grey{i}.jpg', grey)
        i += 1
    k = cv2.waitKey(1)
    if k % 256 == 27 or k == ord('q'):  # ESC or q: quit
        cv2.destroyAllWindows()
        break
    elif k % 256 == 32 or k == ord('s'):  # SPACE or s: save a snapshot (loop continues)
        cv2.imwrite('new capture.png', image)
        cv2.destroyAllWindows()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
from pants.core.util_rules.asdf import AsdfPathString
from pants.option.option_types import BoolOption, StrListOption, StrOption
from pants.option.subsystem import Subsystem
from pants.util.memo import memoized_property
from pants.util.ordered_set import OrderedSet
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
_DEFAULT_COMPILER_FLAGS = ("-g", "-O2")
class GolangSubsystem(Subsystem):
    """Options controlling Pants's Golang support (scope ``[golang]``)."""

    options_scope = "golang"
    help = "Options for Golang support."

    class EnvironmentAware(Subsystem.EnvironmentAware):
        """Options whose resolution depends on the target environment (PATH)."""

        env_vars_used_by_options = ("PATH",)

        _go_search_paths = StrListOption(
            default=["<PATH>"],
            help=softwrap(
                f"""
                A list of paths to search for Go.
                Specify absolute paths to directories with the `go` binary, e.g. `/usr/bin`.
                Earlier entries will be searched first.
                The following special strings are supported:
                * `<PATH>`, the contents of the PATH environment variable
                * `{AsdfPathString.STANDARD}`, {AsdfPathString.STANDARD.description("Go")}
                * `{AsdfPathString.LOCAL}`, {AsdfPathString.LOCAL.description("binary")}
                """
            ),
        )
        _subprocess_env_vars = StrListOption(
            default=["LANG", "LC_CTYPE", "LC_ALL", "PATH"],
            help=softwrap(
                """
                Environment variables to set when invoking the `go` tool.
                Entries are either strings in the form `ENV_VAR=value` to set an explicit value;
                or just `ENV_VAR` to copy the value from Pants's own environment.
                """
            ),
            advanced=True,
        )
        _cgo_tool_search_paths = StrListOption(
            default=["<PATH>"],
            help=softwrap(
                """
                A list of paths to search for tools needed by CGo (e.g., gcc, g++).
                Specify absolute paths to directories with tools needed by CGo , e.g. `/usr/bin`.
                Earlier entries will be searched first.
                The following special strings are supported:
                * `<PATH>`, the contents of the PATH environment variable
                """
            ),
        )
        cgo_gcc_binary_name = StrOption(
            default="gcc",
            advanced=True,
            help=softwrap(
                """
                Name of the tool to use to compile C code included via CGo in a Go package.
                Pants will search for the tool using the paths specified by the
                `[golang].cgo_tool_search_paths` option.
                """
            ),
        )
        cgo_gxx_binary_name = StrOption(
            default="g++",
            advanced=True,
            help=softwrap(
                """
                Name of the tool to use to compile C++ code included via CGo in a Go package.
                Pants will search for the tool using the paths specified by the
                `[golang].cgo_tool_search_paths` option.
                """
            ),
        )
        cgo_fortran_binary_name = StrOption(
            default="gfortran",
            advanced=True,
            help=softwrap(
                """
                Name of the tool to use to compile fortran code included via CGo in a Go package.
                Pants will search for the tool using the paths specified by the
                `[golang].cgo_tool_search_paths` option.
                """
            ),
        )
        external_linker_binary_name = StrOption(
            default="gcc",
            advanced=True,
            help=softwrap(
                """
                Name of the tool to use as the "external linker" when invoking `go tool link`.
                Pants will search for the tool using the paths specified by the
                `[golang].cgo_tool_search_paths` option.
                """
            ),
        )
        cgo_c_flags = StrListOption(
            default=lambda _: list(_DEFAULT_COMPILER_FLAGS),
            advanced=True,
            help=softwrap(
                """
                Compiler options used when compiling C code when Cgo is enabled. Equivalent to setting the
                CGO_CFLAGS environment variable when invoking `go`.
                """
            ),
        )
        cgo_cxx_flags = StrListOption(
            default=lambda _: list(_DEFAULT_COMPILER_FLAGS),
            advanced=True,
            help=softwrap(
                """
                Compiler options used when compiling C++ code when Cgo is enabled. Equivalent to setting the
                CGO_CXXFLAGS environment variable when invoking `go`.
                """
            ),
        )
        cgo_fortran_flags = StrListOption(
            default=lambda _: list(_DEFAULT_COMPILER_FLAGS),
            advanced=True,
            help=softwrap(
                """
                Compiler options used when compiling Fortran code when Cgo is enabled. Equivalent to setting the
                CGO_FFLAGS environment variable when invoking `go`.
                """
            ),
        )
        cgo_linker_flags = StrListOption(
            default=lambda _: list(_DEFAULT_COMPILER_FLAGS),
            advanced=True,
            help=softwrap(
                """
                Compiler options used when linking native code when Cgo is enabled. Equivalent to setting the
                CGO_LDFLAGS environment variable when invoking `go`.
                """
            ),
        )

        @property
        def raw_go_search_paths(self) -> tuple[str, ...]:
            # Search-path entries exactly as configured (special strings
            # like `<PATH>` are NOT expanded here).
            return tuple(self._go_search_paths)

        @property
        def env_vars_to_pass_to_subprocesses(self) -> tuple[str, ...]:
            # Deduplicated and sorted for a deterministic subprocess env.
            return tuple(sorted(set(self._subprocess_env_vars)))

        @memoized_property
        def cgo_tool_search_paths(self) -> tuple[str, ...]:
            # Expand the `<PATH>` sentinel against the environment's PATH,
            # preserving first-wins ordering via OrderedSet.
            def iter_path_entries():
                for entry in self._cgo_tool_search_paths:
                    if entry == "<PATH>":
                        path = self._options_env.get("PATH")
                        if path:
                            yield from path.split(os.pathsep)
                    else:
                        yield entry

            return tuple(OrderedSet(iter_path_entries()))

    minimum_expected_version = StrOption(
        default="1.17",
        help=softwrap(
            """
            The minimum Go version the distribution discovered by Pants must support.
            For example, if you set `'1.17'`, then Pants will look for a Go binary that is 1.17+,
            e.g. 1.17 or 1.18.
            You should still set the Go version for each module in your `go.mod` with the `go`
            directive.
            Do not include the patch version.
            """
        ),
    )
    tailor_go_mod_targets = BoolOption(
        default=True,
        help=softwrap(
            """
            If true, add a `go_mod` target with the `tailor` goal wherever there is a
            `go.mod` file.
            """
        ),
        advanced=True,
    )
    tailor_package_targets = BoolOption(
        default=True,
        help=softwrap(
            """
            If true, add a `go_package` target with the `tailor` goal in every directory with a
            `.go` file.
            """
        ),
        advanced=True,
    )
    tailor_binary_targets = BoolOption(
        default=True,
        help=softwrap(
            """
            If true, add a `go_binary` target with the `tailor` goal in every directory with a
            `.go` file with `package main`.
            """
        ),
        advanced=True,
    )
    cgo_enabled = BoolOption(
        default=True,
        help=softwrap(
            """\
            Enable Cgo support, which allows Go and C code to interact. This option must be enabled for any
            packages making use of Cgo to actually be compiled with Cgo support.
            See https://go.dev/blog/cgo and https://pkg.go.dev/cmd/cgo for additional information about Cgo.
            """
        ),
    )
    asdf_tool_name = StrOption(
        default="go-sdk",
        help=softwrap(
            """
            The ASDF tool name to use when searching for installed Go distributions using the ASDF tool
            manager (https://asdf-vm.com/). The default value for this option is for the `go-sdk` ASDF plugin
            (https://github.com/yacchi/asdf-go-sdk.git). There are other plugins. If you wish to use one of them,
            then set this option to the ASDF tool name under which that other plugin was installed into ASDF.
            """
        ),
        advanced=True,
    )
    asdf_bin_relpath = StrOption(
        default="bin",
        help=softwrap(
            """
            The path relative to an ASDF install directory to use to find the `bin` directory within an installed
            Go distribution. The default value for this option works for the `go-sdk` ASDF plugin. Other ASDF
            plugins that install Go may have a different relative path to use.
            """
        ),
        advanced=True,
    )
|
from django.contrib import admin
from django.urls import path, include
from . import views
# URL routes for the admission application: applicant-facing form steps,
# a success page, and admin review/login views.
urlpatterns = [
    path('', views.AdmissionDetails, name = "AdmissionDetails"),
    path('admissionDetails/', views.AdmissionDetails, name = "AdmissionDetails"),
    path('personalDetails/', views.PersonalDetails, name = "PersonalDetails"),
    path('educationalQualifications/', views.EducationalQualifications, name = "EducationalQualifications"),
    path('workExperience/', views.WorkExperience, name = "WorkExperience"),
    path('attachments/', views.Attachments, name = "Attachments"),
    path('success/', views.Success, name = "Success"),
    path('adminFilterView/', views.AdminFilterView, name = "AdminFilterView"),
    path('adminResultView/', views.AdminResultView, name = "AdminResultView"),
    path('adminPrimaryView/', views.AdminPrimaryView, name = "AdminPrimaryView"),
    path('adminLogin/', views.AdminLogin, name = "AdminLogin"),
]
|
import math


def nth_prime(n):
    """Return the n-th prime, 1-indexed (nth_prime(1) == 2).

    Uses trial division up to floor(sqrt(candidate)) — fine for small n.
    Generalizes the original hard-coded search for the 10001st prime.

    Raises:
        ValueError: if n < 1.
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    count = 0
    candidate = 1
    while count < n:
        candidate += 1
        # math.isqrt avoids the float-precision risk of int(math.sqrt(i)).
        if all(candidate % d for d in range(2, math.isqrt(candidate) + 1)):
            count += 1
    return candidate


if __name__ == "__main__":
    # Project Euler #7: print the 10001st prime (guarded so importing this
    # module no longer runs the whole search as a side effect).
    print(nth_prime(10001))
|
import os
class pdb2sql_base(object):

    def __init__(
            self,
            pdbfile,
            sqlfile=None,
            fix_chainID=False,
            verbose=False):
        """Base class for the definition of sql database.

        Args:
            pdbfile (str, list(str/bytes), ndarray) : name of pdbfile or
                list or ndarray containing the pdb data
            sqlfile (str, optional): name of the sqlfile.
                By default it is created in memory only.
            fix_chainID (bool, optional): check if the name of the chains
                are A,B,C, .... and fix it if not.
            verbose (bool): probably print stuff
        """
        self.pdbfile = pdbfile
        self.sqlfile = sqlfile
        self.fix_chainID = fix_chainID
        self.is_valid = True
        self.verbose = verbose
        self.backbone_atoms = ['CA', 'C', 'N', 'O']

        # hard limit for the number of SQL variables
        self.SQLITE_LIMIT_VARIABLE_NUMBER = 999
        self.max_sql_values = 950

        # column names and types
        self.col = {'serial': 'INT',
                    'name': 'TEXT',
                    'altLoc': 'TEXT',
                    'resName': 'TEXT',
                    'chainID': 'TEXT',
                    'resSeq': 'INT',
                    'iCode': 'TEXT',
                    'x': 'REAL',
                    'y': 'REAL',
                    'z': 'REAL',
                    'occ': 'REAL',
                    'temp': 'REAL',
                    'element': 'TEXT',
                    'model': 'INT'}

        # delimiter of the column format (start/end character positions)
        # taken from
        # http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM
        self.delimiter = {
            'serial': [6, 11],
            'name': [12, 16],
            'altLoc': [16, 17],
            'resName': [17, 20],
            'chainID': [21, 22],
            'resSeq': [22, 26],
            'iCode': [26, 27],
            'x': [30, 38],
            'y': [38, 46],
            'z': [46, 54],
            'occ': [54, 60],
            'temp': [60, 66],
            'element': [76, 78]}

    ##########################################################################
    #
    #   CREATION AND PRINTING
    #
    ##########################################################################

    '''
    Main function to create the SQL data base
    '''

    def _create_sql(self):
        # Subclasses build the actual SQLite database here.
        raise NotImplementedError()

    def _get_table_names(self):
        """Return the names of all tables present in the database."""
        names = self.conn.execute(
            "SELECT name from sqlite_master WHERE type='table';")
        return [n[0] for n in names]

    # get the properties
    def get(self, atnames, **kwargs):
        # Subclasses implement attribute selection with keyword filters.
        raise NotImplementedError()

    def get_xyz(self, tablename='atom', **kwargs):
        """Shortcut to get the xyz coordinates."""
        return self.get('x,y,z', tablename=tablename, **kwargs)

    def get_residues(self, tablename='atom', **kwargs):
        """Get the residue sequence.

        Returns:
            list : residue sequence

        Examples:
            >>> db.get_residues()
        """
        res = [tuple(x) for x in self.get(
            'chainID,resName,resSeq', tablename=tablename, **kwargs)]
        # deduplicate while preserving first-occurrence order
        return sorted(set(res), key=res.index)

    def get_chains(self, tablename='atom', **kwargs):
        """Get the chain IDs.

        Returns:
            list : chain IDs in alphabetical order.

        Examples:
            >>> db.get_chains()
        """
        chains = self.get('chainID', tablename=tablename, **kwargs)
        return sorted(set(chains))

    def update(self, attribute, values, **kwargs):
        # Subclasses implement attribute updates with keyword filters.
        raise NotImplementedError()

    def update_xyz(self, xyz, tablename='atom', **kwargs):
        """Update the xyz coordinates."""
        # NOTE(review): `tablename` is accepted but not forwarded to
        # self.update — confirm whether subclass `update` expects it.
        self.update('x,y,z', xyz, **kwargs)

    def update_column(self, colname, values, index=None):
        """Update a single column."""
        raise NotImplementedError()

    def add_column(self, colname, coltype='FLOAT', default=0):
        """Add a new column to the ATOM table."""
        raise NotImplementedError()

    def exportpdb(self, fname, append=False, tablename='atom', **kwargs):
        """Export a PDB file.

        Args:
            fname(str): output filename
            append(bool): append exported data to file or not
            kwargs: argument to select atoms, dict value must be list,
                e.g.:
                    - name = ['CA', 'O']
                    - no_name = ['CA', 'C']
                    - chainID = ['A']
                    - no_chainID = ['A']
        """
        if append:
            f = open(fname, 'a')
        else:
            f = open(fname, 'w')
        lines = self.sql2pdb(tablename=tablename, **kwargs)
        for i in lines:
            f.write(i + '\n')
        f.close()

    def sql2pdb(self, tablename='atom', **kwargs):
        """Convert SQL data to PDB formatted lines.

        Args:
            kwargs: argument to select atoms, dict value must be list,
                e.g.:
                    - name = ['CA', 'O']
                    - no_name = ['CA', 'C']
                    - chainID = ['A']
                    - no_chainID = ['A']

        Returns:
            list: pdb-format lines
        """
        cols = ','.join(self.col.keys())
        data = self.get(cols, tablename=tablename, **kwargs)
        return self.data2pdb(data)

    def data2pdb(self, data):
        """Convert data from a get method to a pdb.

        Args:
            data (list): data from a get statement

        Returns:
            list: the formatted pdb data
        """
        pdb = []
        # the PDB format is pretty strict
        # http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM
        for d in data:
            line = 'ATOM  '
            line += '{:>5}'.format(d[0])    # serial
            line += ' '
            line += self._format_atomname(d)    # name
            line += '{:>1}'.format(d[2])    # altLoc
            line += '{:>3}'.format(d[3])    # resname
            line += ' '
            line += '{:>1}'.format(d[4])    # chainID
            line += '{:>4}'.format(d[5])    # resSeq
            line += '{:>1}'.format(d[6])    # iCODE
            line += '   '
            line += pdb2sql_base._format_xyz(d[7])    # x
            line += pdb2sql_base._format_xyz(d[8])    # y
            line += pdb2sql_base._format_xyz(d[9])    # z
            line += '{:>6.2f}'.format(d[10])    # occ
            line += '{:>6.2f}'.format(d[11])    # temp
            line += ' ' * 10
            line += '{:>2}'.format(d[12])    # element
            line += ' ' * 2  # charge, keep it blank
            pdb.append(line)
        return pdb

    def _format_atomname(self, data):
        """Format atom name to align with PDB requirements.

        - alignment of one-letter atom name starts at column 14,
        - while two-letter atom name such as FE starts at column 13.

        Args:
            data(list): sql output for one pdb line

        Returns:
            str: formatted atom name
        """
        name = data[1]
        lname = len(name)
        if lname in (1, 4):
            name = '{:^4}'.format(name)
        elif lname == 2:
            if name == data[12]:  # name == element
                name = '{:<4}'.format(name)
            else:
                name = '{:^4}'.format(name)
        else:
            if name[0] in '0123456789':
                name = '{:<4}'.format(name)
            else:
                name = '{:>4}'.format(name)
        return name

    @staticmethod
    def _format_xyz(i):
        """Format PDB coordinate x, y or z value.

        Note: PDB has a fixed 8-column space for x, y or z value.
        Thus the value should be in the range of (-1e7, 1e8); the number of
        decimals is reduced as the magnitude grows so the field still fits.

        Args:
            (float): PDB coordinate x, y or z.

        Raises:
            ValueError: Exceed the range of (-1e7, 1e8)

        Returns:
            str: formatted x, y or z value.
        """
        if i >= 1e8 - 0.5 or i <= -1e7 + 0.5:
            raise ValueError(
                f'PDB coordination {i} exceeds the range of (-1e7, 1e8) '
                f'after rounding.')
        elif i >= 1e6 - 0.5 or i <= -1e5 + 0.5:
            i = '{:>8.0f}'.format(i)
        elif i >= 1e5 - 0.5 or i <= -1e4 + 0.5:
            i = '{:>8.1f}'.format(i)
        elif i >= 1e4 - 0.5 or i <= -1e3 + 0.5:
            i = '{:>8.2f}'.format(i)
        else:
            i = '{:>8.3f}'.format(i)
        return i

    def _close(self, rmdb=True):
        # In-memory DB: just close. File-backed: either remove the file
        # (rmdb=True) or commit pending changes and close.
        if self.sqlfile is None:
            self.conn.close()
        else:
            if rmdb:
                self.conn.close()
                # NOTE(review): shells out to `rm`, which is not portable
                # (fails on Windows); os.remove would be the portable choice.
                os.system('rm %s' % (self.sqlfile))
            else:
                self._commit()
                self.conn.close()
|
def ones_counter(input):
    """Return the lengths of the maximal runs of consecutive 1s in *input*.

    Any value other than 1 ends the current run.
    Example: [1, 1, 1, 0, 1] -> [3, 1]
    """
    runs = []
    current = 0
    for value in input:
        if value == 1:
            current += 1
        elif current:
            runs.append(current)
            current = 0
    if current:
        runs.append(current)
    return runs
'''
Transforms an input array of zeros and ones into an array
that lists the length of each run of consecutive ones:
[1, 1, 1, 0, 1] -> [3, 1]
'''
|
"""
Dash port of Shiny iris k-means example:
https://shiny.rstudio.com/gallery/kmeans-example.html
"""
import dash
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.graph_objs as go
from dash import Input, Output, dcc, html
from sklearn import datasets
from sklearn.cluster import KMeans
# Load the iris measurements into a DataFrame keyed by feature name.
iris_raw = datasets.load_iris()
iris = pd.DataFrame(iris_raw["data"], columns=iris_raw["feature_names"])

app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])

# Control panel: two axis-variable dropdowns plus a cluster-count input.
controls = dbc.Card(
    [
        html.Div(
            [
                dbc.Label("X variable"),
                dcc.Dropdown(
                    id="x-variable",
                    options=[
                        {"label": col, "value": col} for col in iris.columns
                    ],
                    value="sepal length (cm)",
                ),
            ]
        ),
        html.Div(
            [
                dbc.Label("Y variable"),
                dcc.Dropdown(
                    id="y-variable",
                    options=[
                        {"label": col, "value": col} for col in iris.columns
                    ],
                    value="sepal width (cm)",
                ),
            ]
        ),
        html.Div(
            [
                dbc.Label("Cluster count"),
                dbc.Input(id="cluster-count", type="number", value=3),
            ]
        ),
    ],
    body=True,
)

# Page layout: controls on the left, cluster scatter plot on the right.
app.layout = dbc.Container(
    [
        html.H1("Iris k-means clustering"),
        html.Hr(),
        dbc.Row(
            [
                dbc.Col(controls, md=4),
                dbc.Col(dcc.Graph(id="cluster-graph"), md=8),
            ],
            align="center",
        ),
    ],
    fluid=True,
)
@app.callback(
    Output("cluster-graph", "figure"),
    [
        Input("x-variable", "value"),
        Input("y-variable", "value"),
        Input("cluster-count", "value"),
    ],
)
def make_graph(x, y, n_clusters):
    """Re-cluster the iris data on columns (x, y) and return a scatter figure
    with one trace per cluster plus a trace for the cluster centers."""
    # minimal input validation, make sure there's at least one cluster.
    # BUG FIX: a cleared numeric input arrives as None, and max(None, 1)
    # raised TypeError; normalize once and use the clamped value everywhere
    # (the trace loop previously used the raw, unclamped n_clusters).
    n_clusters = max(n_clusters or 1, 1)
    km = KMeans(n_clusters=n_clusters)
    df = iris.loc[:, [x, y]]
    km.fit(df.values)
    df["cluster"] = km.labels_
    centers = km.cluster_centers_

    data = [
        go.Scatter(
            x=df.loc[df.cluster == c, x],
            y=df.loc[df.cluster == c, y],
            mode="markers",
            marker={"size": 8},
            name="Cluster {}".format(c),
        )
        for c in range(n_clusters)
    ]
    data.append(
        go.Scatter(
            x=centers[:, 0],
            y=centers[:, 1],
            mode="markers",
            marker={"color": "#000", "size": 12, "symbol": "diamond"},
            name="Cluster centers",
        )
    )
    layout = {"xaxis": {"title": x}, "yaxis": {"title": y}}
    return go.Figure(data=data, layout=layout)
# make sure that x and y values can't be the same variable
def filter_options(v):
    """Return dropdown options for every iris column, disabling *v* so the
    other axis cannot select the same variable."""
    return [
        dict(label=name, value=name, disabled=(name == v))
        for name in iris.columns
    ]
# functionality is the same for both dropdowns, so we reuse filter_options,
# registering it imperatively instead of with the decorator syntax.
app.callback(Output("x-variable", "options"), [Input("y-variable", "value")])(
    filter_options
)
app.callback(Output("y-variable", "options"), [Input("x-variable", "value")])(
    filter_options
)

if __name__ == "__main__":
    app.run_server(debug=True, port=8888)
|
# -*-coding:utf-8-*-
"""
二叉树的创建与遍历
以'#'表示子树终结
1
2 3
   4   5   #   6
  # # # #     7 #
其列表形式书写为[1, 2, 4, '#', '#', 5, '#', '#', 3, '#', 6, 7, '#', '#', '#']
=============================
isEmpty() 判断是否为空,空则返回True,不空则返回False
build(list) 生成二叉树的结构,返回根节点
build_tree(list) 生成二叉树,返回二叉树
preOrder_traversal() 先序遍历,返回遍历结果列表
inOrder_traversal() 中序遍历,返回遍历结果列表
postOrder_traversal() 后序遍历,返回遍历结果列表
bfs(node_root) 广义优先遍历,返回遍历结果列表
dfs(node_root) 未定义
=============================
example:
=============================
arr = [1, 2, 4, '#', '#', 5, '#', '#', 3, '#', 6, 7, '#', '#', '#']
tree = Tree()
print(tree.isEmpty())
my_tree = tree.build_tree(arr)
result_pre = my_tree.preOrder_traversal(my_tree.root)
result_in = my_tree.inOrder_traversal(my_tree.root)
result_post = my_tree.postOrder_traversal(my_tree.root)
result_bfs = my_tree.bfs(my_tree.root)
print('先序:', result_pre)
print('中序:', result_in)
print('后序:', result_post)
print('广义:', result_bfs)
============================
"""
from queue import Queue
class BTNode(object):
    """A binary-tree node: payload plus optional left/right children."""

    def __init__(self, data=None, left=None, right=None):
        self.data = data    # node payload
        self.left = left    # left child (BTNode or None)
        self.right = right  # right child (BTNode or None)
class Tree(object):
    """Binary tree built from a pre-order list where '#' marks an absent child.

    (A previously commented-out iterator-based constructor was removed as
    dead code.)
    """

    def __init__(self):
        self.root = None
        # Shared traversal accumulators: 0 pre-order, 1 in-order,
        # 2 post-order, 3 BFS (remaining slots unused).
        # NOTE(review): these persist across calls, so traversing twice
        # appends duplicate results; kept for backward compatibility.
        self.ls = [[] for _ in range(10)]

    def isEmpty(self):
        """Return True when the tree has no root."""
        return self.root is None

    def build(self, list):
        """Consume the next subtree from *list* (pre-order, '#' = None) and
        return its root node (or None)."""
        key = list.pop(0)
        if key == '#':
            node = None
        else:
            node = BTNode(key)
            node.left = self.build(list)
            node.right = self.build(list)
        return node

    def build_tree(self, list):
        """Return a new Tree whose structure is consumed from *list*."""
        tree = Tree()
        tree.root = self.build(list)
        return tree

    def preOrder_traversal(self, node):
        """Pre-order (self, left, right); returns the accumulated visit list."""
        if node is not None:
            self.ls[0].append(node.data)
            self.preOrder_traversal(node.left)
            self.preOrder_traversal(node.right)
        return self.ls[0]

    def inOrder_traversal(self, node):
        """In-order (left, self, right); returns the accumulated visit list."""
        if node is not None:
            self.inOrder_traversal(node.left)
            self.ls[1].append(node.data)
            self.inOrder_traversal(node.right)
        return self.ls[1]

    def postOrder_traversal(self, node):
        """Post-order (left, right, self); returns the accumulated visit list."""
        if node is not None:
            self.postOrder_traversal(node.left)
            self.postOrder_traversal(node.right)
            self.ls[2].append(node.data)
        return self.ls[2]

    def bfs(self, node):
        """Breadth-first (level-order) traversal from *node* using a queue.

        BUG FIX: the original returned None for an empty tree (and compared
        `not queue.empty() > 0`); now the (possibly empty) accumulator list
        is always returned, consistent with the other traversals.
        """
        if node is None:
            return self.ls[3]
        queue = Queue()
        queue.put(node)
        while not queue.empty():
            tree_node = queue.get()
            self.ls[3].append(tree_node.data)
            if tree_node.left is not None:
                queue.put(tree_node.left)
            if tree_node.right is not None:
                queue.put(tree_node.right)
        return self.ls[3]

    def dfs(self, node):
        """Not implemented: the pre-order traversal above already performs a
        depth-first walk of the tree."""
        pass
# Demo: build the sample tree from its pre-order list and print all four
# traversal orders.
arr = [1, 2, 4, '#', '#', 5, '#', '#', 3, '#', 6, 7, '#', '#', '#']
tree = Tree()
print(tree.isEmpty())
my_tree = tree.build_tree(arr)
result_pre = my_tree.preOrder_traversal(my_tree.root)
result_in = my_tree.inOrder_traversal(my_tree.root)
result_post = my_tree.postOrder_traversal(my_tree.root)
result_bfs = my_tree.bfs(my_tree.root)
print('先序:', result_pre)
print('中序:', result_in)
print('后序:', result_post)
print('广义:', result_bfs)
|
#!/usr/bin/python3
import numpy as np
# Centroid Recovery - uses truncated CD to impute all missing values in the matrix (designated with NaN)
def centroid_recovery(matrix, truncation = 0, maxIterations = 100, threshold = 1E-6):
    """Impute every NaN entry of *matrix* via iterative truncated Centroid
    Decomposition (CD).

    Args:
        matrix: 2-D array-like with missing entries marked as NaN.
        truncation: CD truncation k in [1, m-1]; 0 or m picks one automatically.
        maxIterations: hard cap on refinement iterations (at least 1).
        threshold: stop when the RMS change of the imputed entries is below this.

    Returns:
        np.float64 copy of the matrix with NaNs imputed, or None when the
        truncation parameter is invalid (or the decomposition fails).
    """
    # input processing
    matrix = np.asarray(matrix, dtype=np.float64).copy()
    n = len(matrix)
    m = len(matrix[0])
    if truncation > m:  # strictly bigger
        print("[Centroid Recovery] Error: provided truncation parameter k=" + str(truncation) + " is larger than the number of columns m=" + str(m))
        print("[Centroid Recovery] Aborting recovery. Please provide a valid truncation parameter 1 <= k <= m - 1.")
        print("[Centroid Recovery] Alternatively, providing k = 0 or k = m will choose one automatically.")
        return None
    if truncation == 0 or truncation == m:
        truncation = 3
    truncation = min(truncation, m - 1)
    truncation = max(truncation, 1)
    maxIterations = max(maxIterations, 1)
    miss_mask = np.isnan(matrix)
    # was sum(sum(miss_mask)): a Python-level O(n*m) double loop
    miss_count = int(np.count_nonzero(miss_mask))
    if miss_count == 0:
        print("[Centroid Recovery] Warning: provided matrix doesn't contain any missing values.")
        print("[Centroid Recovery] The algorithm will run, but will return an unchanged matrix.")
    # seed the missing values by linear interpolation so CD sees a full matrix
    matrix = interpolate(matrix, miss_mask)
    # init persistent values
    SV = default_SV(n, truncation)
    iteration = 0  # renamed from `iter`, which shadowed the builtin
    last_diff = threshold + 1.0  # dummy to ensure it doesn't terminate in 1 hop
    # main loop: stop at the iteration cap or once the imputed entries settle
    while iteration < maxIterations and last_diff >= threshold:
        iteration += 1
        # perform truncated decomposition
        res = centroid_decomposition(matrix, truncation, SV)
        if res is None:  # was `res == None`; propagate failure as-is
            return None
        (L, R, SV) = res
        # low-rank reconstruction of the original matrix
        recon = np.dot(L, R.T)
        # change metric: ||X[mis] - Xrec[mis]||_F / sqrt(|mis|)
        diff_vector = matrix[miss_mask] - recon[miss_mask]
        last_diff = np.linalg.norm(diff_vector) / np.sqrt(miss_count)
        # substitute the missing entries with the truncated-CD reconstruction
        matrix[miss_mask] = recon[miss_mask]
    # end while
    return matrix
# end function
# simple linear interpolation function
# interpolates segments which are marked as NaN
# if the segments start (or ends) at the start (or end) of the column - uses 1NN instead
def interpolate(matrix, mask):
    """Fill NaN runs in every column of `matrix` (in place) by linear
    interpolation between the neighbouring observed values; a run touching
    the top or bottom of a column is filled with its single neighbour (1NN).
    `mask` is the boolean missing-value mask. Returns `matrix`.
    """
    n = len(matrix)
    m = len(matrix[0])
    for col in range(m):
        row = 0
        while row < n:
            if not mask[row][col]:
                row += 1
                continue
            # found the start of a missing run: locate its end [start, stop)
            start = row
            stop = start + 1
            while stop < n and np.isnan(matrix[stop][col]):
                stop += 1
            left = matrix[start - 1][col] if start > 0 else np.nan
            right = matrix[stop][col] if stop < n else np.nan
            if start == 0:   # run touches the top: no left neighbour -> use right
                left = right
            if stop == n:    # run touches the bottom: no right neighbour -> use left
                right = left
            slope = (right - left) / (stop - start + 1)
            for k in range(start, stop):
                matrix[k][col] = left + slope * (k - start + 1)
            row = stop
        #end while
    #end for
    return matrix
#end function
##
## decomposition functions
##
# Centroid Decomposition, with the optional possibility of specifying truncation or usage of initial sign vectors
def centroid_decomposition(matrix, truncation = 0, SV = None):
    """Truncated Centroid Decomposition of `matrix`.

    Parameters
    ----------
    matrix : (n, m) array-like
    truncation : int, number of dimensions to keep (0 means all m)
    SV : optional list of `truncation` sign vectors (length n) to warm-start

    Returns (L, R, SV) with L shaped (n, truncation) and R shaped
    (m, truncation), or None on invalid arguments.
    """
    # input processing
    matrix = np.asarray(matrix, dtype=np.float64).copy()
    n = len(matrix)
    m = len(matrix[0])
    if truncation == 0:
        truncation = m
    if truncation < 1 or truncation > m:
        print("[Centroid Decomposition] Error: invalid truncation parameter k=" + str(truncation))
        print("[Centroid Decomposition] Aborting decomposition")  # typo fix: was "Aboritng"
        return None
    if SV is None:
        SV = default_SV(n, truncation)
    if len(SV) != truncation:
        print("[Centroid Decomposition] Error: provided list of Sign Vectors doesn't match in size with the truncation parameter k=" + str(truncation))
        print("[Centroid Decomposition] Aborting decomposition")
        return None
    L = np.zeros((truncation, n))
    R = np.zeros((truncation, m))
    # main loop - goes up till the truncation param (maximum of which is the # of columns)
    for j in range(0, truncation):
        # locally optimal sign vector for the current residual
        Z = local_sign_vector(matrix, SV[j])
        # calculate the column of R by X^T * Z / ||X^T * Z||
        R_i = matrix.T @ Z
        R_i = R_i / np.linalg.norm(R_i)
        R[j] = R_i
        # calculate the column of L by X * R_i
        L_i = matrix @ R_i
        L[j] = L_i
        # subtract the rank-1 component spanned by L_i and R_i from the matrix
        matrix = matrix - np.outer(L_i, R_i)
        # remember the sign vector so callers can warm-start the next call
        SV[j] = Z
    #end for
    return (L.T, R.T, SV)
#end function
# Algorithm: LSV (Local Sign Vector). Finds locally optimal sign vector Z, i.e.:
# Z being locally optimal means: for all Z' sign vectors s.t. Z' is one sign flip away from Z at some index j,
# we have that ||X^T * Z|| >= ||X^T * Z'||
def local_sign_vector(matrix, Z):
    """Greedy local search for a sign vector Z that locally maximizes ||X^T Z||.

    Z is locally optimal when no single sign flip at any index increases
    ||X^T Z||. Starts from the greedy initialization in
    local_sign_vector_init, then keeps flipping any profitable sign until a
    full pass makes no flip. Z is modified in place and returned.
    """
    n = len(matrix)
    m = len(matrix[0])
    # machine epsilon; added to the cached norm so exact ties never flip
    eps = np.finfo(np.float64).eps
    Z = local_sign_vector_init(matrix, Z)
    # cached D = X^T * Z with the current version of Z
    direction = matrix.T @ Z
    # cached ||X^T * Z||^2
    lastNorm = np.linalg.norm(direction) ** 2 + eps
    flipped = True
    while flipped:
        # we terminate the loop if during the last pass we didn't flip a single sign
        flipped = False
        for i in range(0, n):
            signDouble = Z[i] * 2
            gradFlip = 0.0
            # calculate how ||X^T * Z||^2 would change if we flipped the sign at i:
            # each D_j becomes D_j - 2 * Z_i * M_ij
            for j in range(0, m):
                localMod = direction[j] - signDouble * matrix[i][j]
                gradFlip += localMod * localMod
            # if flipping augments ||X^T * Z||, commit the flip and
            # update the cached direction and norm
            if gradFlip > lastNorm:
                flipped = True
                Z[i] = Z[i] * -1
                lastNorm = gradFlip + eps
                # signDouble still holds the pre-flip value, which is what the
                # update formula requires
                for j in range(0, m):
                    direction[j] -= signDouble * matrix[i][j]
                #end for
            #end if
        #end for
    #end while
    return Z
#end function
# Auxiliary function for LSV:
# Z is initialized sequentiually where at each step we see which sign would give a larger increase to ||X^T * Z||
def local_sign_vector_init(matrix, Z):
    """Greedy initialization for the sign vector Z.

    Walks the rows in order; for each row picks the sign (+1/-1) that gives
    the larger increase of ||X^T Z||, updating Z in place. Returns Z.
    """
    n = len(matrix)
    m = len(matrix[0])
    # bug fix: `matrix[0]` is a *view* of the first row; accumulating into it
    # mutated the caller's matrix. Work on an explicit copy instead.
    direction = matrix[0].copy()
    for i in range(1, n):
        gradPlus = 0.0
        gradMinus = 0.0
        # compare ||direction + row_i||^2 against ||direction - row_i||^2
        for j in range(0, m):
            localModPlus = direction[j] + matrix[i][j]
            gradPlus += localModPlus * localModPlus
            localModMinus = direction[j] - matrix[i][j]
            gradMinus += localModMinus * localModMinus
        if gradMinus > gradPlus:
            Z[i] = -1
        for j in range(0, m):
            direction[j] += Z[i] * matrix[i][j]
    return Z
#end function
#end function
#initialize sign vector array with default values
def default_SV(n, k):
    """Return k default sign vectors, each an independent all-ones vector
    of length n (the default sign vector is (1, 1, ..., 1)^T)."""
    return [np.ones(n) for _ in range(k)]
#end function
#end function
def main():
    """Demo driver: recover data_miss.txt with k=1, report the Frobenius
    error against data_full.txt, and write the result to data_recov.txt."""
    incomplete = np.loadtxt("data_miss.txt")
    recovered = centroid_recovery(incomplete, 1)
    reference = np.loadtxt("data_full.txt")
    print("Recovery error:")
    print(np.linalg.norm(recovered - reference))
    np.savetxt("data_recov.txt", recovered, fmt="%10.5f")
# Script entry point: run the demo recovery when executed directly.
if __name__ == "__main__":
    main()
|
"""
2021年5月21日
重新调整束线
前段一个 QS
后段漂移段 2.5 米
优化识别码 202105210001
"""
# Append ancestor directories to sys.path so that cctpy (located in a parent directory) can be imported
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
sys.path.append(path.dirname(path.dirname(path.abspath(path.dirname(__file__)))))
from cctpy import *
from work.optim.A01run import *
from work.optim.A01geatpy_problem import *
# NSGA-III optimization driver for the beamline problem (geatpy framework).
if __name__ == '__main__':
    # multiprocessing.Process(target=runviz).start() # Start Visualization Server
    # time.sleep(15)
    BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()
    problem = Myproblem()
    Encoding = 'RI'  # real-integer chromosome encoding
    NIND = 24*5  # population size
    Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders)
    population = ea.Population(Encoding, Field, NIND)
    # NSGA-III multi-objective template
    myAlgorithm = ea.moea_NSGA3_templet(problem, population)
    myAlgorithm.MAXGEN = 500000  # generation cap
    myAlgorithm.drawing = 0  # disable built-in plotting
    [NDset, population] = myAlgorithm.run()
    NDset.save()
    print('time: %f seconds' % (myAlgorithm.passTime))
    print('evaluation times: %d times' % (myAlgorithm.evalsNum))
    print('NDnum: %d' % (NDset.sizes))
    print('ParetoNum: %d' % (int(NDset.sizes / myAlgorithm.passTime)))
# t = (
# Trajectory.set_start_point(P2.origin())
# .first_line(direct=P2.x_direct(),length=1.592)
# .add_arc_line(radius=0.95,clockwise=False,angle_deg=22.5)
# .add_strait_line(length=0.5+0.27+0.5)
# .add_arc_line(radius=0.95,clockwise=False,angle_deg=22.5)
# .add_strait_line(length=1.592)
# .add_strait_line(2.5)
# .add_arc_line(radius=0.95,clockwise=True,angle_deg=67.5)
# .add_strait_line(length=0.5+0.27+0.5)
# .add_arc_line(radius=0.95,clockwise=True,angle_deg=67.5)
# .add_strait_line(2.5)
# )
# print(t.point_at_end())
# Plot2.plot(t)
# Plot2.show()
# if __name__ == "__main__":
# d = create_gantry_beamline()
# print(d.point_at_end())
# Plot2.plot(d)
# Plot2.show() |
import random
class Player(object):
    """Base card player: identity plus a hand of cards and a book count."""

    def __init__(self, isAI, number, name):
        self.ai = isAI        # True for computer-controlled players
        self.name = name
        self.number = number
        self.hand = []        # cards currently held
        self.num_books = 0    # completed four-of-a-kind sets

    def __eq__(self, other):
        # players compare equal when every attribute matches
        # NOTE: defining __eq__ without __hash__ makes instances unhashable
        return self.__dict__ == other.__dict__
class GoFishPlayer(Player):
    """Go Fish player with helpers for AI decisions and hand management."""

    def __init__(self, isAI, number, name):
        super(GoFishPlayer, self).__init__(isAI, number, name)

    def computerPickAPlayer(self, env):
        """Pick a random opponent (never self) from env.players."""
        player = None
        while player is None or player == self:
            player = env.players[random.randrange(0, len(env.players))]
        return player

    def computerPickACard(self):
        """Return a card of the rank this player holds the most of.

        Bug fix: the original compared each card's rank against an
        uninitialized `rank` variable (always None), so no rank ever
        matched and the fallback `self.hand[0]` was always returned.
        """
        card = None
        maxCount = 0
        for rank in range(1, 14):
            matching = [c for c in self.hand if c.value.value == rank]
            if len(matching) > maxCount:
                maxCount = len(matching)
                card = matching[0]
        if card is not None:
            return card
        # raises IndexError on an empty hand, matching the original behavior
        return self.hand[0]

    def sortHandByRank(self):
        """Sort the hand in place (cards must define ordering)."""
        self.hand.sort()

    def checkForCard(self, card):
        """True if the exact card (same rank and suit) is in the hand."""
        for c in self.hand:
            if c.value == card.value and c.suit == card.suit:
                return True
        return False

    def checkForCardByRank(self, cardRank):
        """True if any held card has the given rank.

        NOTE(review): compares c.value directly, while checkForBook uses
        c.value.value — confirm the type callers pass for cardRank.
        """
        for c in self.hand:
            if c.value == cardRank:
                return True
        return False

    def giveUpAllCardsByRank(self, cardRank):
        """Remove and return every held card of the given rank."""
        cardsThatMatch = [c for c in self.hand if c.value == cardRank]
        # keep only the cards that were not surrendered
        self.hand = [c for c in self.hand if c not in cardsThatMatch]
        return cardsThatMatch

    def checkForBook(self):
        """Remove the first complete book (four cards of one rank), if any.

        Increments num_books and returns True when a book was removed;
        returns False otherwise.
        """
        for i in range(1, 14):
            count = 0
            for c in self.hand:
                if c.value.value == i:
                    count += 1
            if count == 4:
                # iterate a reversed view so removal doesn't skip elements
                for ca in reversed(self.hand):
                    if ca.value.value == i:
                        self.hand.remove(ca)
                self.num_books += 1
                return True
        return False

    def isEmptyHand(self):
        """True when the hand holds no cards."""
        return len(self.hand) == 0
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
# Static template views for the public site pages.
class HomeView(TemplateView):
    # Landing page.
    template_name = 'index.html'
class AboutUsView(TemplateView):
    template_name = 'about-us.html'
class NewsView(TemplateView):
    template_name = 'news.html'
class CoursesView(TemplateView):
    template_name = 'courses.html'
class TeacherView(TemplateView):
    template_name = 'teachers.html'
class HardwareView(TemplateView):
    # NOTE(review): reuses 'index.html' — looks like a placeholder; confirm
    # whether a dedicated hardware template exists.
    template_name = 'index.html'
class ContactUsView(TemplateView):
    template_name = 'contact-us.html'
import sys
import argparse
from misc.Logger import logger
from core.awsrequests import awsrequests
from misc import Misc
from core.base import base
class stack(base):
    """CLI handler for the `stack` command group (overflow/deploy/snappy).

    start() reads the second-level command from sys.argv and dispatches to
    the method of the same name; each sub-command builds its own argparse
    parser over the remaining argv.
    """
    def __init__(self, global_options, account_information):
        # NOTE(review): base.__init__ is not called — confirm `base` needs no setup.
        self.global_options = global_options
        self.account_information = account_information
    def start(self):
        """Dispatch sys.argv[2] to the matching sub-command method."""
        logger.info('Invoked starting point for stack')
        # NOTE(review): usage text lists overflow/deploy but not snappy.
        parser = argparse.ArgumentParser(description='ec2 tool for devops', usage='''kerrigan.py stack <command> [<args>]
    Second level options are:
    overflow
    deploy
    ''' + self.global_options)
        parser.add_argument('command', help='Command to run')
        # only parse the command token; sub-commands parse the rest themselves
        args = parser.parse_args(sys.argv[2:3])
        if not hasattr(self, args.command):
            logger.error('Unrecognized command')
            parser.print_help()
            exit(1)
        getattr(self, args.command)()
    def overflow(self):
        """Provision a stack from a JSON description into an environment."""
        logger.info("Started stack provision command")
        a = awsrequests(session=self.account_information['session'])
        parser = argparse.ArgumentParser(description='stack provision', usage='''kerrigan.py stack overflow [<args>]]
    ''' + self.global_options, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="kerrigan")
        parser.add_argument('--json', action='store', required=True, help="Stack json to provision")
        parser.add_argument('--env', action='store', required=True, help="Which env to deploy to")
        parser.add_argument('--dryrun', action='store_true', default=False, help="No changes should be done")
        args = parser.parse_args(sys.argv[3:])
        a.deploy_stack_to_env(env=args.env, file=args.json, dryrun=args.dryrun)
    def deploy(self):
        """Deploy instances of a puppet_role into an environment."""
        logger.info("Started deployment command")
        a = awsrequests(session=self.account_information['session'])
        parser = argparse.ArgumentParser(description='deployment for stack in env', usage='''kerrigan.py stack deploy [<args>]]
    ''' + self.global_options, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="kerrigan")
        parser.add_argument('--env', action='store', required=True, help="Which env to deploy to")
        parser.add_argument('--puppet_role', action='store', required=True, help="Puppet_role to deploy")
        parser.add_argument('--dry_run', action='store_true', default=False, help="No changes should be done")
        parser.add_argument('--xively_service', action='store', default=None, help="Xively_service to deploy")
        parser.add_argument('--num', action='store', default=1, help="Number of instances to spin up")
        parser.add_argument('--instance_type', action='store', help="Requested instance size")
        # NOTE(review): get_ami_stacks() runs while building the parser, i.e.
        # on every invocation of this sub-command, even for --help.
        parser.add_argument('--base_ami', action='store', default="", choices=a.get_ami_stacks(),
                            help="Base Ami to use")
        parser.add_argument('--iam', action='store', help="Custom Iam role to use")
        parser.add_argument('--requester', action='store', default="", help="The person requesting the machine")
        parser.add_argument('--customer', action='store', default="", help="Customer associated with the gateway")
        args = parser.parse_args(sys.argv[3:])
        # forward all CLI options as keyword arguments
        a.prepare_deployment(**vars(args))
    def snappy(self):
        """Deploy snappy devices into an environment."""
        logger.info("Started snappy command")
        a = awsrequests(session=self.account_information['session'])
        parser = argparse.ArgumentParser(description='deployment for snappy', usage='''kerrigan.py stack snappy [<args>]]
    ''' + self.global_options, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="kerrigan")
        parser.add_argument('--num', action='store', default=1, help="Number of instances to spin up")
        parser.add_argument('--env', action='store', required=True, help="Which env to deploy to")
        parser.add_argument('--dryrun', action='store_true', default=False, help="No changes should be done")
        parser.add_argument('--accountid', action='store', required=True, help="The account id for snappy")
        parser.add_argument('--channelname', action='store', required=True, help="The channelname for snappy")
        parser.add_argument('--newrelic', action='store', required=True, help="The newrelic environment to use")
        parser.add_argument('--devicestring', action='store', required=True, help="The deviec string like deploy/nots")
        parser.add_argument('--branch', action='store', default="None", help="The branch to checkout from")
        args = parser.parse_args(sys.argv[3:])
        a.deploy_snappy(**vars(args))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 18:50:01 2020
@author: ritambasu
"""
import numpy as np
# Power method: estimate the dominant eigenvalue of a symmetric matrix by the
# ratio < A^(i+1) x, y > / < A^i x, y >, iterating until the relative error
# against numpy's eigensolver drops below 1%.
a = np.array([[2.0, -1, 0], [-1, 2.0, -1], [0, -1, 2.0]])  # symmetric test matrix
x = np.array([1, 1.0, 1.0])  # starting vector (has a component along the dominant eigenvector)
y = np.array([1, 1.0, 1.0])  # probe vector for the inner products
# reference answer from numpy's symmetric eigensolver
maximum_eigenvalue = max(np.linalg.eigh(a)[0])
i = 0
ax = np.dot(a, x)
dominant_eigvalue = np.dot(ax, x) / np.dot(x, x)  # initial Rayleigh-quotient estimate (not used below)
# arbitrary starting error > 1 so the while loop runs at least once
# (emulates a do-while loop, which Python lacks)
accuracy = 1.2
while accuracy > 1.0:
    m = np.dot(np.dot(np.linalg.matrix_power(a, (i+1)), x), y)  # m = < A^(i+1) * x , y >
    n = np.dot(np.dot(np.linalg.matrix_power(a, i), x), y)      # n = < A^(i) * x , y >
    eigenvalue = float(m) / float(n)
    # relative error vs. the eigensolver answer, in percent
    accuracy = (abs(maximum_eigenvalue - eigenvalue) / maximum_eigenvalue) * 100
    i = i + 1
# eigenvector estimate: normalized A^i x
z = np.dot(np.linalg.matrix_power(a, i), x)
eigen_vector = z / np.linalg.norm(z)
print("linalg.eigh eigenvalue:", maximum_eigenvalue, "\neigenvalue using power method:", eigenvalue,
      "\nnormalized eigen vector by power method:", eigen_vector)
|
import sys
from pyspark.sql import SparkSession
def multiply(r, d, b):
    """One sparse matrix-vector contribution for the PageRank step.

    r is an edge row (dst, src, weight) of strings; d maps vertex id to its
    current rank; b is the damping factor. Returns a one-element list
    [(dst, b * weight * rank[src])], or [] when src has no rank yet.
    """
    src = int(r[1])
    if src not in d:
        return []
    return [(int(r[0]), b * (float(r[2]) * d[src]))]
# PageRank-style driver: expects one CLI argument, the edge-list file path.
if len(sys.argv) != 2:
    print("Zle argumenty")
    exit(1)
sp = SparkSession.builder.appName("zad1").getOrCreate()
file = sys.argv[1]
v_num = 4   # number of vertices in the graph
beta = 0.8  # damping factor
dane = sp.read.csv(file, header=False, sep=' ').rdd
res = {}
for i in range(v_num):
    res[i] = float(1.0 / v_num)  # uniform initial rank
# 50 fixed power iterations with teleport term (1-beta)/v_num
for i in range(50):
    # NOTE(review): the lambda closes over `res` late-bound; this works because
    # collectAsMap() forces evaluation before res is rebound each iteration.
    res = dane.flatMap(lambda row: multiply(row, res, beta))\
        .reduceByKey(lambda x, y: x + y)\
        .map(lambda x: (x[0], x[1] + (1-beta) * 1/v_num)).collectAsMap()
    print("Result at iteration ", i+1, ": ", res)
|
from __future__ import absolute_import, division, print_function
import tflearn
import tensorflow as tf
def not_operation():
    """Train a small network on the logical NOT truth table and print a prediction."""
    # truth table for NOT
    inputs = [[0.], [1.]]
    targets = [[1.], [0.]]
    # TF graph
    with tf.Graph().as_default():
        net = tflearn.input_data(shape=[None,1])
        net = tflearn.fully_connected(net, 128, activation='linear')
        net = tflearn.fully_connected(net, 128, activation='linear')
        net = tflearn.fully_connected(net, 1, activation='sigmoid')
        net = tflearn.regression(net, optimizer='adam', learning_rate=0.1, loss='mean_square')
        # Model training
        model = tflearn.DNN(net)
        model.fit(inputs, targets, n_epoch=5000, snapshot_epoch=False)
        prediction = model.predict([[1.]])
        print("Prediction: ", prediction)
def or_operation():
    """Train a small network on the logical OR truth table and print a prediction."""
    inputs = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    targets = [[0.], [1.], [1.], [1.]]
    # Graph definition
    with tf.Graph().as_default():
        net = tflearn.input_data(shape=[None, 2])
        net = tflearn.fully_connected(net, 128, activation='linear')
        net = tflearn.fully_connected(net, 1, activation='sigmoid')
        net = tflearn.regression(net, optimizer='adam', learning_rate=2.,
                                 loss='mean_square')
        # Model training
        model = tflearn.DNN(net)
        model.fit(inputs, targets, n_epoch=100, snapshot_epoch=False)
        prediction = model.predict([[0., 1.]])
        print("Prediction: ", prediction)
def xor_operation():
    """Simulate XOR by training NAND and OR heads jointly and multiplying
    their outputs element-wise (XOR(x) = NAND(x) * OR(x))."""
    inputs = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    nand_targets = [[1.], [1.], [1.], [0.]]
    or_targets = [[0.], [1.], [1.], [1.]]
    with tf.Graph().as_default():
        shared_input = tflearn.input_data(shape=[None, 2])
        # NAND branch
        nand_net = tflearn.fully_connected(shared_input, 32, activation='linear')
        nand_net = tflearn.fully_connected(nand_net, 32, activation='linear')
        nand_net = tflearn.fully_connected(nand_net, 1, activation='sigmoid')
        nand_net = tflearn.regression(nand_net, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')
        # OR branch
        or_net = tflearn.fully_connected(shared_input, 32, activation='linear')
        or_net = tflearn.fully_connected(or_net, 32, activation='linear')
        or_net = tflearn.fully_connected(or_net, 1, activation='sigmoid')
        or_net = tflearn.regression(or_net, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')
        # combine the two heads into the XOR output
        merged = tflearn.merge([nand_net, or_net], mode='elemwise_mul')
        # Model training
        model = tflearn.DNN(merged)
        model.fit(inputs, [nand_targets, or_targets], n_epoch=100, snapshot_epoch=False)
        prediction = model.predict([[0., 1.]])
        print("Prediction: ", prediction)
# Run the XOR demo when executed as a script.
if __name__ == '__main__':
    xor_operation()
|
import math
class MarkovModel(object):
def __init__(self):
self.mapping = {} # Map from NGrams to tags to words to the number of times that word occurs
self.allNGrams = {} # A map from NGrams to the number of times a word has been categorized under that NGram
self.allTags = {} # A map from tag to the number of times a word has been categorized under that tag
self.allWords = {} # A map from word to the number of times it has occured in the text
self.totalCount = 0; # The total amount of things (words = tags) that have been added to the Markov Model
# Adds an N-Gram, a tag, and a word to the Hidden Markov Model. Also adds tag to a tag list and word to a word list.
def addTagCombination(self, tagList, tag, word):
ngram = self.condenseNGram(tagList)
if ngram in self.mapping:
if tag in self.mapping[ngram]:
if word in self.mapping[ngram][tag]:
self.mapping[ngram][tag][word] += 1
else:
self.mapping[ngram][tag][word] = 1
else:
self.mapping[ngram][tag] = {}
self.mapping[ngram][tag][word] = 1
else:
self.mapping[ngram] = {}
self.mapping[ngram][tag] = {}
self.mapping[ngram][tag][word] = 1
if ngram in self.allNGrams:
self.allNGrams[ngram] += 1
else:
self.allNGrams[ngram] = 1
if tag in self.allTags:
self.allTags[tag] += 1
else:
self.allTags[tag] = 1
if word in self.allWords:
self.allWords[word] += 1
else:
self.allWords[word] = 1
self.totalCount += 1;
# Converts a list of tags into a single string [Python can't use a list of strings as a dictionary key]
def condenseNGram(self, tagList):
if len(tagList) > 1:
total = ""
for tag in tagList:
total += tag + "~"
return total[:-1]
else:
return tagList[0]
# Returns the tag with the highest value for the given word
def getMostProbableTagForWord(self, word):
if word in self.allWords:
highestValue = 0
mostProbableTag = ""
for ngram in self.mapping:
for tag in self.mapping[ngram]:
if word in self.mapping[ngram][tag]:
if self.mapping[ngram][tag][word] > highestValue:
highestValue = self.mapping[ngram][tag][word]
mostProbableTag = tag
return mostProbableTag
else:
return None
# START NNP VBZ NN NNS CD NN .
# Fed raises interest rates 0.5 percent.
# P(Fed|NNP), Emission Probability # Actually not sure about the logic in this method at all yet
def getProbabilityOfWordGivenTag(self, word, tag):
if tag not in self.allTags:
return 0
if word not in self.allWords:
return math.log( 1 / float(len(self.allWords) + self.allTags[tag]) )#1 / (self.allWords[word] + self.allTags[tag]) # This literally cannot be the case
totalValueForWordGivenTag = 0
for ngram in self.mapping:
if tag in self.mapping[ngram]:
if word in self.mapping[ngram][tag]:
totalValueForWordGivenTag += self.mapping[ngram][tag][word]
return math.log( (totalValueForWordGivenTag + 1) / float(self.allWords[word] + self.allTags[tag]) )
# P(NNP|START, .), Transition Probability
def getProbabilityOfTagGivenNGram(self, tag, tagList):
ngram = self.condenseNGram(tagList)
if ngram in self.mapping:
if tag not in self.mapping[ngram]:
return math.log( 1 / (self.mapping[ngram]) )
return math.log( len(self.mapping[ngram][tag]) / float(len(self.mapping[ngram])) )
print 'ah'
return 1
# P(Fed)
def getProbabilityOfWord(self, word):
if word in self.allWords:
return math.log( self.allWords[word] / float(self.totalCount) )
return math.log( 1 / float(len(self.allWords)) )#self.totalCount
# P(NNP)
def getProbabilityOfTag(self, tag):
if tag in self.allTags:
return math.log( self.allTags[tag] / float(self.totalCount) )
return math.log( 1 / float(len(self.allTags)) )
# P(VBZ, NNP) # Does not find individual probabilities and multiply them.
def getProbabilityOfNGram(self, tagList):
ngram = self.condenseNGram(tagList)
if ngram in self.allNGrams:
return math.log( self.allNGrams[ngram] / self.totalCount )
return math.log( 1 / float(self.totalCount) )
# Prints the HMM
def printMap(self):
for ngram in sorted(self.mapping):
print ngram + ": ",
for tag in sorted(self.mapping[ngram]):
print "[" + tag + ": ",
for word in sorted(self.mapping[ngram][tag]):
print "(" + word + "=" + str(self.mapping[ngram][tag][word]) + ")",
print "]",
else:
print
print 'This is the total number of word types: ' + str(len(self.allWords))
print 'This is the total number of tag types: ' + str(len(self.allTags))
print 'This is the total number of NGram types: ' + str(len(self.allNGrams))
|
from _typeshed import Incomplete
from collections.abc import Generator
# Type stubs for networkx-style multiline adjacency-list I/O helpers.
# Yields the adjacency list of G line by line.
def generate_multiline_adjlist(
    G, delimiter: str = " "
) -> Generator[Incomplete, None, None]: ...
# Writes G's multiline adjacency list to `path`.
def write_multiline_adjlist(
    G, path, delimiter: str = " ", comments: str = "#", encoding: str = "utf-8"
) -> None: ...
# Builds a graph from an iterable of multiline-adjlist lines.
def parse_multiline_adjlist(
    lines,
    comments: str = "#",
    delimiter: Incomplete | None = None,
    create_using: Incomplete | None = None,
    nodetype: Incomplete | None = None,
    edgetype: Incomplete | None = None,
): ...
# Reads a graph from a multiline-adjlist file at `path`.
def read_multiline_adjlist(
    path,
    comments: str = "#",
    delimiter: Incomplete | None = None,
    create_using: Incomplete | None = None,
    nodetype: Incomplete | None = None,
    edgetype: Incomplete | None = None,
    encoding: str = "utf-8",
): ...
|
import ee

ee.Initialize()  # requires prior Earth Engine authentication
# point of interest (lon, lat) with a 300 m buffer
coords = [0.32153, 28.69916]
newFil = ee.Geometry.Point(coords).buffer(300)
data = ee.ImageCollection('LANDSAT/LC8_L1T_32DAY_TOA').select('B8').filterDate('2016-1-1', '2016-5-30')
# bug fix: Earth Engine collections are immutable — filterBounds and map
# return new collections; the original discarded both results, so no
# spatial filtering or clipping was actually applied.
data = data.filterBounds(newFil)
def clipper(image):
    # clip each image to the buffered point's bounding box
    return image.clip(newFil.bounds())
data = data.map(clipper)
req = data.getInfo()
boundary = ee.Geometry(newFil.bounds().getInfo()).toGeoJSONString()
def downloader(image):
    """Print a download URL for `image` clipped to the shared `boundary`."""
    url = ee.data.makeDownloadUrl(
        ee.data.getDownloadId({
            'image': image.serialize(),
            'scale': 30,
            'filePerBand': 'false',
            'name': 'test',
            'region': boundary,
        }))
    print(url)
# download the second image of the filtered collection
downloader(ee.Image(data.getInfo()['features'][1]['id']))
|
# Bootstrap Django so standalone scripts can use the project's ORM/settings.
import sys
import os
import django
sys.path.append('../../../djangoscrapy') # relative path to the Django project root
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoscrapy.settings'
django.setup()
from statistics import mean
def game_score_analysis(scores):
    """Return the overall score trend as a percentage (rounded to 2 places).

    Scores are min-max normalized to [0, 1]; the signed step-to-step changes
    are accumulated (which telescopes to last - first) and scaled by 100.
    Returns 0 when every score is identical.
    """
    # Stage 1: normalize the data
    lo, hi = min(scores), max(scores)
    if hi == lo:
        return 0
    norm = [(s - lo) / (hi - lo) for s in scores]
    # Stage 2: trend analysis over consecutive pairs
    trend = 0
    for cur, nxt in zip(norm, norm[1:]):
        if cur > nxt:
            trend -= (cur - nxt)
        else:
            trend += (nxt - cur)
    return round(trend * 100, 2)
# Question pool keyed by topic, then by session number.
# bug fix: session 4 of "addiction" was missing a comma after its first
# question, so Python concatenated the first two strings into one and the
# session silently had 4 questions instead of 5.
pool = {
    "intro": {
        1: [
            "What is your age?",
            "What is your gender?",
            "How are you feeling today?",
            "Have u ever consulted a psychologist before?"],
        2: [
            "How active do you feel today?",
            "How dependant are you on substances?",
            "How much pressure are you under?",
            "How many hours can you work at a time?",
            "How motivated are you to do work?"]
    },
    "depression": {
        3: [
            "In the past two weeks, how often have you felt, down, depressed, or hopeless?",
            "Have you had any thoughts of suicide?",
            "How is your sleep?",
            "How is your energy?",
            "Do you prefer to stay at home rather than going out and doing new things?"
        ],
        4: [
            "Little interest or pleasure in doing things?",
            "Feeling down, depressed, or hopeless?",
            "Trouble falling or staying asleep, or sleeping too much?",
            "Feeling tired or having little energy",
            "Poor appetite or overeating?"
        ],
        5: [
            "How much of a change have u seen in yourself after our sessions?",
            "Feeling bad about yourself -- or feel that your a failure or have let yourself of your family down?",
            "Trouble concentrating or things such as reading the newspaper or watching television?",
            "Moving or speaking so slowly that other people could have noticed? Or the opposite -- being so fidgety or restless that you have been moving around a lot more than usual",
            "Thoughts that you would be better off dead of of hurting yourself in some way or the other"
        ]
    },
    "addiction": {
        3: ["Have you used drugs other than those required for medical reasons?",
            "Do you abuse more than one drug at a time?",
            "Are you unable to stop using drugs when you want to?",
            "Have you ever had blackouts or flashback as a result of drug use?",
            "Do you ever feel bad or guilty about your involvement with drugs?"
        ],
        4: ["How much in control are you of yourself after the last session?",
            "Have you neglected your family because of your use of drugs?",
            "Have you ever experienced withdrawal symptoms when you stopped taking drugs?",
            "Have you had medical problems as a result of your drug use <eg. memory loss, hepatitis, convulsions, bleeding?",
            "Have you engaged in illegal activities in order to obtain drugs?"
        ],
        5: [
            "How much of a change have u seen in yourself after our sessions?",
            "Feeling bad about yourself -- or feel that your a failure or have let yourself of your family down?",
            "Trouble concentrating or things such as reading the newspaper or watching television?",
            "Moving or speaking so slowly that other people could have noticed? Or the opposite -- being so fidgety or restless that you have been moving around a lot more than usual",
            "Thoughts that you would be better off dead of of hurting yourself in some way or the other"
        ]
    },
    "stress": {
        3: [
            "Do you experience excessive worry?",
            "Is your worry excessive in intensity, frequency, or amount of distress it causes?",
            "Do you find it difficult ot control the worry < or stop worrying > once it starts?",
            "Do you worry excessively or uncontrollably about minor things such as being late for an appointment, minor repairs, homework, etc.?",
            "Do you ever feel the need to take sleeping pills?"
        ],
        4: [
            "I’ve had trouble on the job because of my temper",
            "People tell me that I become too angry, too quickly",
            "After arguing with someone, I often hate myself for losing my temper",
            "I find it very hard to forgive someone who has done me wrong.",
            "I hate lines, and I especially hate waiting in lines"
        ],
        5: [
            "In the past two weeks, how often have you felt, down, depressed, or hopeless?",
            "Have you had any thoughts of suicide?",
            "How is your sleep?",
            "How is your energy?",
            "Do you prefer to stay at home rather than going out and doing new things?"
        ]
    },
    "burnout": {
        3: ["I’ve had trouble on the job because of my temper",
            "People tell me that I become too angry, too quickly",
            "After arguing with someone, I often hate myself for losing my temper",
            "I find it very hard to forgive someone who has done me wrong.",
            "I hate lines, and I especially hate waiting in lines"
        ],
        4: ["Do you feel any change in you after the last session",
            "At times I have gotten so angry that I have slammed doors, thrown things, broken items or punched walls",
            "I am apt to take frustration so badly, I cannot get it out of my mind.",
            "I still get angry when I think of the bad things people did to me in the past",
            "I often make critical, judgmental comments to others, even if they do not ask me for advice or help"
        ],
        5: ["After the last session, how often do u nend up in arguments?",
            "When riled, I often blurt out things I later regret saying",
            "People I’ve trusted have often let me down, leaving me feeling angry or betrayed",
            "I use abusive language, such as name-calling, insults, sarcasm or swearing",
            "I’m an angry person. My temper has already caused lots of problems, and I need help changing it"
        ]
    }
}
# depression, burnout, anxiety, addiction
def get_session_question_count(keyword, session):
    """Number of questions in the pool for the given topic and session."""
    return len(pool[keyword][session])
def get_next_question(keyword, session, count):
    """Question at index `count` for the given topic and session."""
    return pool[keyword][session][count]
def analysis_per_session(answers):
    """
    [decision, percent rating]
    Decisions:
    2 = Still needs sessions
    1 = Improved after previous session
    0 = Significant improvement
    """
    count = len(answers)
    total = sum(answers)
    # thresholds are per-question averages of 3.5 and 2.52 (out of 5)
    if total >= 3.5 * count:
        decision = 2
    elif total >= 2.52 * count:
        decision = 1
    else:
        decision = 0
    # percent of the maximum possible score (5 per question)
    return [decision, (total / (5 * count)) * 100]
def overall_analysis(session_scores):
    """
    Expects a list of all session scores
    [decision, percent rating]
    Decision:
    2 = Patient needs consultation
    1 = Patient might still face some issue (conduct expert survey)
    0 = Patient is healthy
    """
    p = sum(session_scores)
    # bug fix: the original used strict comparisons on every branch, so a
    # total of exactly 52.5 or 37.82 matched no branch and `result` was
    # never assigned (UnboundLocalError); the boundaries now fall into the
    # lower category.
    if p > 52.5:
        decision = 2
    elif p > 37.82:
        decision = 1
    else:
        decision = 0
    # percent of the maximum possible total (75)
    return [decision, (p / 75) * 100]
#print(analysis_per_session([3,4,3,4,4,3]))
#print(overall_analysis([17,18,18]))
|
import pprint
class TreeNode(object):
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x        # payload
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
def test(root):
if not root:
return []
row = [root]
res = []
while row:
res.append(max([r.val for r in row]))
a = [s for r in row for s in [r.left,r.right] if s!=None]
row = a
pprint.pprint([r.val for r in row])
return res
import collections
def findFrequentTreeSum(root):
    """Return the subtree sum(s) that occur most frequently in the tree.

    A subtree sum is the sum of all node values in the subtree rooted at a
    node. Returns a list of the most frequent sums ([] for an empty tree).
    """
    if not root:
        return []
    val = collections.Counter()
    def Treesum(node):
        # post-order: subtree sum = left subtree + right subtree + own value
        if not node:
            return 0
        total = Treesum(node.left) + Treesum(node.right) + node.val
        val[total] += 1
        return total
    Treesum(root)
    # bug fix: itervalues()/iteritems() are Python-2-only; values()/items()
    # behave identically there and also work on Python 3
    max_count = max(val.values())
    return [k for k, v in val.items() if v == max_count]
# Build the sample tree and print its most frequent subtree sum.
root = TreeNode(3)
root.left = TreeNode(1)
root.left.left = TreeNode(0)
root.left.right = TreeNode(2)
root.left.right.right = TreeNode(3)
root.right = TreeNode(5)
root.right.left = TreeNode(4)
root.right.right = TreeNode(6)
# print root.val
# compat fix: print with call syntax — identical output on Python 2 for a
# single argument, and also valid Python 3
print(findFrequentTreeSum(root))
# Variables
# Variables are used to store data values in Python.
# They are created the moment you assign a value to them, meaning you do not need to declare them with any particular type.
x = 4 # x is of type int
x = "Sally" # x is now of type str
print(x)
# String variables can be declared either by using single or double quotes
x = "John"
# is the same as
x = 'John'
# Variable name rules in Python
"""
A variable name must start with a letter or the underscore character
A variable name cannot start with a number
A variable name can only contain alpha-numeric characters and underscores (A-z, 0-9, and _ )
Variable names are case-sensitive (age, Age and AGE are three different variables)
"""
# Python allows you to assign multiple variables in one line
x, y, z = "Orange", "Banana", "Cherry"
print(x)
print(y)
print(z)
# Global Variables
"""
a variable created outside a function or block of code is a global variable
"""
x = "awesome"
def myfunc():
    print("Python is " + x)
"""
unlike java if you create a variable inside a function with the same name as a global variable you wont get and error instead it will focus on the varibale in the block of code
"""
def myfunc2():
    x="fun"
    print("python is",x)
myfunc()
myfunc2()
print("Python is ",x)
# Global Keyword
# You can make a global variable inside a function by using the global keyword
def myfunc3():
    global g
    g = "amazing"
myfunc3()
print(g)
## To change the value of an existing global variable inside a function, declare it with the global keyword first.
def service(score):
    """Return which player ('first' or 'second') serves next.

    *score* is a colon-separated string of non-negative integers
    (e.g. "10:7"); the values are summed to get the total points played.
    Service alternates every 5 points while the total is below 40 and
    every 2 points from 40 onward.

    Raises ValueError if any colon-separated field is not an integer.
    """
    total = sum(int(a) for a in score.split(':'))
    # Floor division is required here: with true division ("/") the index
    # is a float and indexing the tuple raises TypeError on Python 3.
    return ('first', 'second')[(total // (5 if total < 40 else 2)) % 2]
|
#-*- coding: utf-8 -*-
"""
1. https://www.cnblogs.com/mengqingjian/p/8530994.html # python-socket讲解ws连接过程
2. https://www.jb51.net/article/172239.html
"""
import websocket
import websockets
import flask_socketio
import asyncio
import pywss
import tornado
# print(dir(websockets))
# print(help(websockets.connect))
import asyncio
import websockets
# Authenticate the client: loop until a valid "user:password" message arrives.
async def check_permit(websocket):
    """Read credential messages from *websocket* until they match the
    hard-coded admin credentials, then acknowledge and return True.

    Each message is expected in "username:password" form.  On a mismatch
    the client is told to retry and the loop continues; the function only
    returns once authentication succeeds.
    """
    while True:
        recv_str = await websocket.recv()
        cred_dict = recv_str.split(":")
        # Check the length first: a message without ":" used to raise
        # IndexError on cred_dict[1] and kill the connection handler.
        if len(cred_dict) >= 2 and cred_dict[0] == "admin" and cred_dict[1] == "123456":
            response_str = "congratulation, you have connect with server\r\nnow, you can do something else"
            await websocket.send(response_str)
            return True
        else:
            response_str = "sorry, the username or password is wrong, please submit again"
            await websocket.send(response_str)
# Echo handler: whatever the client sends is wrapped and sent straight back.
async def recv_msg(websocket):
    """Endless echo loop over *websocket*.

    Receives one message at a time and replies with the same text wrapped
    in a short acknowledgement prefix.  Never returns; it runs until the
    connection raises.
    """
    while True:
        incoming = await websocket.recv()
        reply = "your submit context: {recv_text}".format(recv_text=incoming)
        await websocket.send(reply)
# Main server-side connection handler.
# `websocket` and `path` are injected by websockets.serve when a client
# connects -- do not pass them yourself.
async def main_logic(websocket, path):
    # check_permit only returns once valid credentials have been received.
    await check_permit(websocket)
    # recv_msg loops forever, echoing messages until the connection drops.
    await recv_msg(websocket)
# Bind to the loopback address; replace with your host's IP to expose the server.
start_server = websockets.serve(main_logic, '127.0.0.1', 7005)
# To pass custom parameters to the main_logic callback:
# 1. wrap the handler with functools.partial:
# import functools
# start_server = websockets.serve(functools.partial(main_logic, other_param="test_value"), '10.10.6.91', 5678)
# 2. and extend the callback signature accordingly:
# async def main_logic(websocket, path, other_param)
# NOTE(review): removed the leftover "from ipdb import set_trace;set_trace()"
# debugger breakpoint -- it pulled in a third-party package and halted server
# startup on every run.
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
"""
GET / HTTP/1.0
Host: 127.0.0.1:7005
User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0
Accept: */*
Accept-Language: en-US,en;q=0.5
Accept-Encoding: gzip, deflate
Sec-WebSocket-Version: 13
Origin: null
Sec-WebSocket-Extensions: permessage-deflate
Sec-WebSocket-Key: dMfuo7Tmw4iqJ13YYJdZUg==
Connection: keep-alive, Upgrade
Pragma: no-cache
Cache-Control: no-cache
Upgrade: websocket
""" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.