blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f3c7087617984089152d4cc6b9c5fafc46b3f17 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano200.py | 73b13cf0c88c6b8338ab73ca1e913d4a70757784 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# NANO process for 2018 data, using the Run2_2018 era plus the
# run2_nanoAOD_102Xv1 modifier (matches the cmsDriver command recorded above).
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# -1 => process every event in the input file(s)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/A2A03ED2-C2C7-D446-B850-478F84233086.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
# LZMA level 9 trades CPU for the smallest NanoAOD output file.
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest200.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
# Conditions: 2018 Sep2018 re-reco data global tag.
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
# Disable implicit multi-threading in ROOT (as requested on the command line).
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
d4c8bd1b9a8bce6b448f64fc215674c63f47f37e | ca77e9e45d666771c7b0897e7e3093b3d3c12f65 | /graphs/graphs.py | 918a04980fe52256e43ef0951a6fea0dfcaf64e8 | [] | no_license | 2gDigitalPost/custom | 46175d3a3fc4c3be21dc20203ff0a48fb93b5639 | 6a3a804ef4ef6178044b70ad1e4bc5c56ab42d8d | refs/heads/master | 2020-04-04T07:40:17.962611 | 2016-12-28T18:35:28 | 2016-12-28T18:35:28 | 39,648,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,650 | py | ll__ = ["GraphHoursWdg"]
import tacticenv, os
from datetime import date, timedelta as td
from pyasm.biz import *
from pyasm.web import Table, DivWdg, HtmlElement
from pyasm.common import jsonloads, jsondumps, Environment
from tactic.ui.common import BaseTableElementWdg
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.widget import CalendarInputWdg
class GraphHoursWdg(BaseTableElementWdg):
    """Tactic table widget that charts TimeVantage vs Tactic work hours
    per user, per login group, and company-wide, using AmCharts.

    Note: this codebase follows the Tactic convention of naming the
    instance parameter ``my`` instead of ``self``.
    """

    def init(my):
        # Tactic widget lifecycle hook (not __init__): grab a server
        # stub used later for sobject queries and snapshot lookups.
        from tactic_client_lib import TacticServerStub
        my.server = TacticServerStub.get()

    def get_title(my):
        # The title cell carries the on-load behavior that pulls in the
        # AmCharts JS library.
        div = DivWdg()
        div.add_behavior(my.get_load_behavior())
        return div
def kill_mul_spaces(my, origstrg):
    """Collapse runs of whitespace in *origstrg* into single spaces.

    Mirrors the historical behavior exactly: when the input contains at
    least one word the result carries one leading space (callers strip
    it afterwards); an all-whitespace or empty input yields ''.
    """
    words = origstrg.split()
    if not words:
        return ''
    return ' ' + ' '.join(words)
def get_dates(my):
    """Return the default chart range as ``[start, end]`` ISO date strings.

    The range covers the 31 days up to and including today.

    Fix: a previous revision computed this range and then clobbered it
    with hard-coded April-2013 debug literals ('2013-04-01'/'2013-04-30'),
    making the computation dead code; that leftover override is removed.
    """
    end = date.today()
    start = end - td(days=31)
    return [str(start), str(end)]
def make_TV_data_dict(my, file_path):
    """Parse a TimeVantage CSV export into a per-person hours dict.

    Returns ``{ 'first last': {'first_name', 'last_name', 'name',
    'days': {'YYYY-MM-DD': hours, ...}}, ... }`` with hours summed per
    person per day.  The export starts with 6 preamble lines; the line
    at index 6 holds the quoted column headers and data rows follow
    until the first blank line.
    """
    the_file = open(file_path, 'r')
    fields = []
    data_dict = {}
    count = 0
    boolio = True          # flips to False at the first blank line (end of data)
    line_count = 0
    flen = 0
    for line in the_file:
        first_name = ''
        last_name = ''
        name = ''
        fixed_date = ''
        if line_count > 5:
            line = line.rstrip('\r\n')
            if line in [None,'',' ']:
                boolio = False
            if boolio:
                # Cells look like: "a","b","c" -- split on '","' and
                # trim the stray quote off the first and last cells.
                data = line.split('","')
                if line_count == 6:
                    # Header row: collect normalized field names.
                    dc = 0
                    for field in data:
                        if dc == 0:
                            field = field[1:]
                        field = my.kill_mul_spaces(field)
                        field = field.strip(' ')
                        fields.append(field)
                        dc = dc + 1
                    flen = len(fields)
                    fields[flen - 1] = fields[flen - 1][:-1]
                elif line_count > 6:
                    # Data row: keep only the columns we chart.
                    data_count = 0
                    this_code = ''
                    this_data = {}
                    this_time = 0
                    for val in data:
                        field = fields[data_count]
                        if data_count == 0:
                            val = val[1:]
                        val = my.kill_mul_spaces(val)
                        val = val.strip(' ')
                        if data_count == flen - 1:
                            val = val[:-1]
                        if field in ['First Name', 'Last Name', 'Date', 'Total Work Hours']:
                            if field == 'Total Work Hours':
                                # '-' / blank means no hours recorded that day.
                                if val in ['-','',' ',None]:
                                    val = 0
                            this_data[field] = val
                            if field == 'First Name':
                                first_name = val
                            elif field == 'Last Name':
                                last_name = val
                            elif field == 'Date':
                                # Convert MM/DD/YYYY -> YYYY-MM-DD.
                                date_s = val.split('/')
                                fixed_date = '%s-%s-%s' % (date_s[2], date_s[0], date_s[1])
                        data_count = data_count + 1
                    this_data['fixed_date'] = fixed_date
                    # Key people by lower-cased "first last".
                    name = '%s %s' % (first_name.lower(), last_name.lower())
                    if name not in data_dict.keys():
                        data_dict[name] = {'first_name': first_name, 'last_name': last_name, 'name': name, 'days': {}}
                    if fixed_date not in data_dict[name]['days'].keys():
                        data_dict[name]['days'][fixed_date] = float(this_data['Total Work Hours'])
                    else:
                        data_dict[name]['days'][fixed_date] = float(data_dict[name]['days'][fixed_date]) + float(this_data['Total Work Hours'])
                    count = count + 1
        line_count = line_count + 1
    the_file.close()
    return data_dict
def make_data_dict(my, file_name, mode):
    """Parse a psql pipe-delimited dump into ``[data_dict, hours]``.

    *mode* selects which column keys each row: 'group' -> 'id',
    'hours' -> 'login' (also accumulating per-login time entries into
    *hours*), anything else -> 'code'.  The dump layout is: header row,
    a separator row, data rows, then a trailing "(N rows)" line which
    stops parsing.
    """
    the_file = open(file_name, 'r')
    fields = []
    data_dict = {}
    count = 0
    boolio = True          # flips False at the "(N rows)" footer
    code_index = 0         # column index of the key field for this mode
    hours = {}             # mode == 'hours' only: login -> list of entries
    for line in the_file:
        line = line.rstrip('\r\n')
        #data = line.split('\t')
        data = line.split('|')
        if boolio:
            if count == 0:
                # Header row: record field names and find the key column.
                field_counter = 0
                for field in data:
                    field = my.kill_mul_spaces(field)
                    field = field.strip(' ')
                    fields.append(field)
                    if mode == 'group':
                        if field == 'id':
                            code_index = field_counter
                    elif mode == 'hours':
                        if field == 'login':
                            code_index = field_counter
                    else:
                        if field == 'code':
                            code_index = field_counter
                    field_counter = field_counter + 1
            elif count == 1:
                # psql's '----+----' separator row: skip it.
                nothing = True
            elif data[0][0] == '(':
                # "(N rows)" footer: stop consuming data.
                boolio = False
            else:
                data_count = 0
                this_code = ''
                this_data = {}
                hour_data = {}
                for val in data:
                    field = fields[data_count]
                    val = my.kill_mul_spaces(val)
                    val = val.strip(' ')
                    if data_count == code_index:
                        this_code = val
                        if mode == 'hours':
                            if this_code not in hours.keys():
                                hours[this_code] = []
                    elif mode == 'hours':
                        if field == 'straight_time':
                            if val in [None,'']:
                                val = 0
                            hour_data['straight_time'] = float(val)
                        elif field == 'day':
                            # Keep only the date part of a timestamp.
                            hour_data['day'] = val.split(' ')[0]
                    this_data[field] = val
                    data_count = data_count + 1
                if mode == 'hours':
                    hours[this_code].append(hour_data)
                data_dict[this_code] = this_data
        count = count + 1
    the_file.close()
    return [data_dict, hours]
def make_string_dict(my, data_arr):
    """Serialize a list of dicts into the widget's string wire format.

    Each dict becomes '|||' + one 'WvWXsKEYsX:<k>XsVALsX:<v>WvW' chunk
    per key/value pair + '|||' (so consecutive dicts are separated by
    '||||||'); the JS decoder in draw_chart3 splits on '|||' and skips
    empty chunks.  Returns '' for an empty list.

    Fixes: replaced quadratic '+='-in-a-loop string building with
    str.join, and the Python-2-only dict.iteritems() with .items().
    """
    chunks = []
    for record in data_arr:
        pairs = ''.join('WvWXsKEYsX:%sXsVALsX:%sWvW' % (k, v)
                        for k, v in record.items())
        chunks.append('|||%s|||' % pairs)
    return ''.join(chunks)
def get_toggle_row_behavior(my, group):
    """Return a click behavior that shows/hides *group*'s per-user row.

    The JS finds the 'graphs_<group>_row' element under the nearest
    '.graph_top' ancestor, toggles its display, and flips the link text
    between 'Show Users' and 'Hide Users'.
    """
    behavior = {'css_class': 'clickme', 'type': 'click_up', 'cbjs_action': '''
try{
var group = '%s';
var top_el = spt.api.get_parent(bvr.src_el, '.graph_top');
row = top_el.getElementById('graphs_' + group + '_row');
if(row.style.display == 'none'){
row.style.display = 'table-row';
bvr.src_el.innerHTML = '<b><u>Hide Users</u></b>';
}else{
row.style.display = 'none';
bvr.src_el.innerHTML = '<b><u>Show Users</u></b>';
}
}
catch(err){
spt.app_busy.hide();
spt.alert(spt.exception.handler(err));
//alert(err);
}
''' % (group)}
    return behavior
def get_load_again(my):
    """Return the 'Load Graph' button behavior.

    The JS reads the two calendar inputs (wh_graph_date1/2) from the
    enclosing '.graph_surrounder' table and reloads this widget with
    the chosen date range as kwargs.
    """
    behavior = {'css_class': 'clickme', 'type': 'click_up', 'cbjs_action': '''
try{
var top_el = spt.api.get_parent(bvr.src_el, '.graph_surrounder');
var inputs = top_el.getElementsByTagName('input');
date1 = '';
date2 = '';
for(var r = 0; r < inputs.length; r++){
if(inputs[r].getAttribute('name') == 'wh_graph_date1'){
date1 = inputs[r].value;
}else if(inputs[r].getAttribute('name') == 'wh_graph_date2'){
date2 = inputs[r].value;
}
}
alert(date1 + ' ||||| ' + date2);
spt.api.load_panel(top_el, 'graphs.GraphHoursWdg', {'date1': date1.split(' ')[0], 'date2': date2.split(' ')[0]});
}
catch(err){
spt.app_busy.hide();
spt.alert(spt.exception.handler(err));
//alert(err);
}
'''}
    return behavior
def draw_chart3(my, div, idx, title):
    """Attach an on-load behavior to *div* that renders one AmCharts chart.

    The chart reads its data from the 'datastr' attribute of the
    'chartdiv_<idx>' element (encoded by make_string_dict, decoded by
    the embedded decode_string_dict), and draws TimeVantage vs Tactic
    hours as 3D columns with an efficiency-percentage line on a second
    axis.  *title* labels the hours axis.
    """
    behavior = {'type': 'load', 'cbjs_action':
    '''
function decode_string_dict(data)
{
ret_arr = [];
pts = data.split('|||');
for(var r = 0; r < pts.length; r++){
chunk = pts[r];
if(chunk != '' && chunk != null){
dict = {};
corrs = chunk.split('WvW');
for(var t = 0; t < corrs.length; t++){
corr = corrs[t];
if(corr != '' && corr != null){
rightmost = corr.split('XsKEYsX:')[1];
segged = rightmost.split('XsVALsX:');
key = segged[0];
val = segged[1];
dict[key] = val;
}
}
ret_arr.push(dict);
}
}
return ret_arr;
}
var clicking = function(idx) {
title = '%s';
idx_data_el = document.getElementById('chartdiv_' + idx);
idx_data = idx_data_el.getAttribute('datastr');
var chartData = decode_string_dict(idx_data);
var chart;
chart = new AmCharts.AmSerialChart();
chart.dataProvider = chartData;
chart.categoryField = "cat";
chart.marginTop = 5;
chart.plotAreaFillAlphas = 0.2;
//chart.rotate = true;
// the following two lines makes chart 3D
chart.depth3D = 30;
chart.angle = 20;
// AXES
// category axis
var dateAxis = chart.categoryAxis;
dateAxis.parseDates = false; // as our data is date-based, we set parseDates to true
dateAxis.minPeriod = "DD"; // our data is daily, so we set minPeriod to DD
dateAxis.autoGridCount = false;
dateAxis.gridCount = 50;
dateAxis.gridAlpha = 0.2;
dateAxis.gridColor = "#000000";
dateAxis.axisColor = "#555555";
dateAxis.labelRotation = 30;
// we want custom date formatting, so we change it in next line
var hoursAxis = new AmCharts.ValueAxis();
hoursAxis.title = title;
hoursAxis.gridAlpha = 0.2;
hoursAxis.dashLength = 5;
hoursAxis.axisAlpha = 0.5;
hoursAxis.inside = false;
hoursAxis.position = "left";
chart.addValueAxis(hoursAxis);
var pctAxis = new AmCharts.ValueAxis();
pctAxis.title = 'Efficiency %%';
//pctAxis.stackType = "100%%";
pctAxis.gridAlpha = 0.2;
pctAxis.axisAlpha = 0.5;
//pctAxis.labelsEnabled = false;
pctAxis.position = "right";
pctAxis.min = 0;
pctAxis.max = 100;
chart.addValueAxis(pctAxis);
// GRAPHS
// duration graph
var timevantageGraph = new AmCharts.AmGraph();
timevantageGraph.title = "TimeVantage:";
timevantageGraph.valueField = "tv";
timevantageGraph.type = "column";
timevantageGraph.valueAxis = hoursAxis; // indicate which axis should be used
timevantageGraph.lineColor = "#CC0000";
timevantageGraph.balloonText = "TimeVantage: [[value]] hrs";
timevantageGraph.fillAlphas = 1;
timevantageGraph.lineThickness = 1;
timevantageGraph.legendValueText = " [[value]] Hrs";
//timevantageGraph.bullet = "square";
chart.addGraph(timevantageGraph);
// distance graph
var tacticGraph = new AmCharts.AmGraph();
tacticGraph.valueField = "tactic";
tacticGraph.title = "Tactic:";
tacticGraph.type = "column";
tacticGraph.fillAlphas = 1;
//tacticGraph.valueAxis = distanceAxis; // indicate which axis should be used
tacticGraph.valueAxis = hoursAxis; // indicate which axis should be used
tacticGraph.balloonText = "Tactic: [[value]] hrs";
tacticGraph.legendValueText = "[[value]] Hrs";
//tacticGraph.lineColor = "#ffe0e0";
tacticGraph.lineColor = "#2e0854";
tacticGraph.lineThickness = 1;
tacticGraph.lineAlpha = 0;
chart.addGraph(tacticGraph);
var pctGraph = new AmCharts.AmGraph();
pctGraph.title = "Efficiency:";
pctGraph.valueField = "percentage";
pctGraph.type = "line";
pctGraph.valueAxis = pctAxis; // indicate which axis should be used
//pctGraph.valueAxis = hoursAxis; // indicate which axis should be used
pctGraph.lineColor = "#00b200";
pctGraph.balloonText = "Efficiency: [[value]]%%";
pctGraph.fillAlphas = 0;
pctGraph.lineThickness = .5;
pctGraph.legendValueText = " Efficiency [[value]]%%";
pctGraph.bullet = "square";
chart.addGraph(pctGraph);
// CURSOR
var chartCursor = new AmCharts.ChartCursor();
chartCursor.zoomable = false;
chartCursor.categoryBalloonDateFormat = "DD";
chartCursor.cursorAlpha = 0;
chart.addChartCursor(chartCursor);
// LEGEND
var legend = new AmCharts.AmLegend();
legend.bulletType = "round";
legend.equalWidths = false;
legend.valueWidth = 40;
legend.color = "#000000";
chart.addLegend(legend);
// WRITE
chart.write("chartdiv_" + idx);
}
var js_files = ["amcharts/amcharts/amcharts.js"];
spt.dom.load_js(js_files, clicking);
clicking(%s);
'''% (title, idx)
    }
    div.add_behavior(behavior)
def get_load_behavior(my):
    """Return an on-load behavior meant to bootstrap the AmCharts library.

    NOTE(review): the embedded JS looks malformed -- the
    spt.dom.load_js(...) call sits inside the 'clicking' function body,
    so nothing outside ever invokes it and the demo chart is never
    drawn; the real charts are drawn by draw_chart3.  Confirm whether
    this hook is dead code before changing it.
    """
    idx = my.get_current_index()
    behavior = {'type': 'load', 'cbjs_action': '''
//spt.graph = {};
clicking = function(idx) {
var chartData = [{ country: 'USA29', visits: 4252 },
{ country: 'China', visits: 1882 },
{ country: 'Japan', visits: 1809 },
{ country: 'Poland', visits: 328}];
var chart = new AmCharts.AmSerialChart();
console.log(chart);
chart.dataProvider = chartData;
chart.categoryField = 'country';
chart.marginTop = 15;
chart.marginLeft = 55;
chart.marginRight = 15;
chart.marginBottom = 80;
chart.angle = 30;
chart.depth3D = 15;
var catAxis = chart.categoryAxis;
catAxis.gridCount = chartData.length;
catAxis.labelRotation = 90;
var graph = new AmCharts.AmGraph();
graph.balloonText = '[[category]]: [[value]]';
graph.valueField = 'visits'
graph.type = 'column';
graph.lineAlpha = 0;
graph.fillAlphas = 0.8;
chart.addGraph(graph);
chart.invalidateSize()
chart.write('chartdiv_' + idx);
chart.validateData();
chart.animateAgain();
console.log("finished")
var js_files = ["amcharts/amcharts/amcharts.js"];
spt.dom.load_js(js_files, clicking);
}
console.log("done onload");
'''
    }
    return behavior
def get_snapshot_file_path(my, snapshot_code):
    """Return the first local-repo file path for *snapshot_code*, or ''.

    Fix: removed dead locals ('splits'/'file_only' were computed from
    the path and never used) and the redundant accumulator variable.
    """
    rel_paths = my.server.get_all_paths_from_snapshot(snapshot_code, mode='local_repo')
    if len(rel_paths) > 0:
        return rel_paths[0]
    return ''
def get_display(my):
    """Build the hours-comparison dashboard (TimeVantage vs Tactic).

    Pipeline:
      1. Fetch the latest TimeVantage CSV snapshot and parse it.
      2. Shell out to psql to dump login / work-hour / login-in-group
         tables, then parse those dumps.
      3. Aggregate 'tv' and 'tactic' hours per user, per login group,
         and company-wide over the selected date range, padding every
         missing day with zeros so the charts have no gaps.
      4. Emit nested Tactic tables: a date-filter row, one company-wide
         chart, one chart per group, and a toggleable per-user row
         (two charts per row) under each group.
    """
    logine = Environment.get_login()
    user_name = logine.get_login()
    all_days = {}
    group_days = {}
    user_days = {}
    tv_all_days = {}
    tv_group_days = {}
    tv_user_days = {}
    # Latest checked-in TimeVantage export (a twog/global_resource sobject).
    tv_obj = my.server.eval("@SOBJECT(twog/global_resource['name','TimeVantage'])")[0]
    snaps = my.server.eval("@SOBJECT(sthpw/snapshot['search_type','twog/global_resource?project=twog']['search_id','%s']['is_latest','true'])" % tv_obj.get('id'))
    #print "SNAPS = %s" % snaps
    file_path = my.get_snapshot_file_path(snaps[0].get('code'))
    date1, date2 = my.get_dates()
    # Widget kwargs (set by the 'Load Graph' button reload) override defaults.
    if 'date1' in my.kwargs.keys():
        date1 = my.kwargs.get('date1')
    if 'date2' in my.kwargs.keys():
        date2 = my.kwargs.get('date2')
    #print "DATE1 = %s, DATE2 = %s" % (date1, date2)
    #file_path = '/opt/spt/custom/graphs/tv.csv'
    tv_data = my.make_TV_data_dict(file_path)
    login_file = '/opt/spt/custom/graphs/login_file'
    work_hour_file = '/opt/spt/custom/graphs/work_hour_file'
    login_in_group_file = '/opt/spt/custom/graphs/login_in_group_file'
    login_query = '/opt/spt/custom/graphs/login_query'
    login_in_group_query = '/opt/spt/custom/graphs/login_in_group_query'
    work_hour_query = '/opt/spt/custom/graphs/work_hour_query'
    # Dump the three sthpw tables to pipe-delimited files via psql.
    os.system('''psql -U postgres sthpw < %s > %s''' % (login_query, login_file))
    os.system('''psql -U postgres sthpw < %s > %s''' % (work_hour_query, work_hour_file))
    os.system('''psql -U postgres sthpw < %s > %s''' % (login_in_group_query, login_in_group_file))
    login_data = my.make_data_dict(login_file, '')[0]
    work_hour_data = my.make_data_dict(work_hour_file, 'hours')[1]
    lig_data = my.make_data_dict(login_in_group_file, 'group')[0]
    login_groups = {}
    # Create login_group lookup by login name
    for key, val in login_data.iteritems():
        if key not in login_groups.keys():
            login_groups[key] = []
        for id, data in lig_data.iteritems():
            if data.get('login') == key:
                login_groups[key].append(data.get('login_group'))
    # Match up TimeVantage names with tactic logins
    # Fill user_dates dict with all matched logins
    user_dates = {}
    name_to_login = {}
    for name, data in tv_data.iteritems():
        for ld, ldata in login_data.iteritems():
            lname = '%s %s' % (ldata.get('first_name').lower(), ldata.get('last_name').lower())
            if name == lname:
                if name not in user_dates.keys():
                    user_dates[name] = {'login': ldata.get('login'), 'dates': {}}
                if name not in name_to_login.keys():
                    name_to_login[name] = ldata.get('login')
    #print "TV-DATA = %s" % tv_data
    group_dates = {}
    all_dates = {}
    # Aggregate hours per user / group / company.  Each bucket is a
    # chart point: {'cat': date, 'tv': hours, 'tactic': hours}.
    # NOTE(review): the string comparisons below exclude both endpoint
    # dates from the range -- confirm that is intended.
    for name, data in user_dates.iteritems():
        tdata = tv_data[name]
        tlogin = data.get('login')
        ugroups = []
        if tlogin in login_groups.keys():
            ugroups = login_groups[tlogin]
        print "TLOGIN = %s, UGROUPS = %s" % (tlogin, ugroups)
        for tdate, ttime in tdata['days'].iteritems():
            if tdate < date2 and tdate > date1:
                if tdate not in user_dates[name]['dates'].keys():
                    user_dates[name]['dates'][tdate] = {'cat': tdate, 'tv': ttime, 'tactic': 0}
                else:
                    user_dates[name]['dates'][tdate]['tv'] = user_dates[name]['dates'][tdate]['tv'] + ttime
                for g in ugroups:
                    if g not in group_dates.keys():
                        group_dates[g] = {}
                    if tdate not in group_dates[g].keys():
                        group_dates[g][tdate] = {'cat': tdate, 'tv': ttime, 'tactic': 0}
                    else:
                        group_dates[g][tdate]['tv'] = group_dates[g][tdate]['tv'] + ttime
                if tdate not in all_dates.keys():
                    all_dates[tdate] = {'cat': tdate, 'tv': ttime, 'tactic': 0}
                else:
                    all_dates[tdate]['tv'] = all_dates[tdate]['tv'] + ttime
        # Fold in Tactic work_hour entries for the same login.
        if tlogin in work_hour_data.keys():
            for tdict in work_hour_data[tlogin]:
                day = tdict.get('day')
                amt = tdict.get('straight_time')
                if day < date2 and day > date1:
                    if day not in user_dates[name]['dates'].keys():
                        user_dates[name]['dates'][day] = {'cat': day, 'tv': 0, 'tactic': amt}
                    else:
                        user_dates[name]['dates'][day]['tactic'] = user_dates[name]['dates'][day]['tactic'] + amt
                    for g in ugroups:
                        if g not in group_dates.keys():
                            group_dates[g] = {}
                        if day not in group_dates[g].keys():
                            print "DAY = %s, Group Dates Keys = %s" % (day, group_dates[g].keys())
                            group_dates[g][day] = {'cat': day, 'tv': 0, 'tactic': amt}
                            print "GROUP DATES KEYS = %s" % group_dates[g].keys()
                        else:
                            print "GROUP_DATES[%s][%s]['tactic'] = %s, amt = %s" % (g, day, group_dates[g][day]['tactic'], amt)
                            group_dates[g][day]['tactic'] = group_dates[g][day]['tactic'] + amt
                            print "GROUP_DATES[%s][%s]['tactic'] = %s" % (g, day, group_dates[g][day]['tactic'])
                    if day not in all_dates.keys():
                        all_dates[day] = {'cat': day, 'tv': 0, 'tactic': amt}
                    else:
                        all_dates[day]['tactic'] = all_dates[day]['tactic'] + amt
    print "GROUP DATES = %s" % group_dates
    # Enumerate every calendar day between date1 and date2 (inclusive).
    d1s = date1.split('-')
    d2s = date2.split('-')
    d1 = date(int(d1s[0]),int(d1s[1]),int(d1s[2]))
    d2 = date(int(d2s[0]),int(d2s[1]),int(d2s[2]))
    delta = d2 - d1
    dates_to_fill = []
    for i in range(delta.days + 1):
        dates_to_fill.append('%s' % (d1 + td(days=i)))
    users = user_dates.keys()
    idx = 0
    # Zero-fill missing days so the charts show a continuous axis.
    for user in users:
        udkeys = user_dates[user]['dates'].keys()
        if len(udkeys) > 0:
            for dtf in dates_to_fill:
                found = False
                for d, l in user_dates[user]['dates'].iteritems():
                    if d == dtf:
                        found = True
                if not found:
                    user_dates[user]['dates'][dtf] = {'cat': dtf, 'tactic': 0, 'tv': 0}
    for grp, gdata in group_dates.iteritems():
        for dtf in dates_to_fill:
            found = False
            for d, l in group_dates[grp].iteritems():
                if d == dtf:
                    found = True
            if not found:
                group_dates[grp][dtf] = {'cat': dtf, 'tactic': 0, 'tv': 0}
    for dtf in dates_to_fill:
        found = False
        for d, l in all_dates.iteritems():
            if d == dtf:
                found = True
        if not found:
            all_dates[dtf] = {'cat': dtf, 'tactic': 0, 'tv': 0}
    #print "LOGIN GROUPS = %s" % login_groups
    # Date-range filter row: two calendar inputs plus a reload button.
    filtbl = Table()
    filtbl.add_row()
    date1_el = CalendarInputWdg("wh_graph_date1")
    date1_el.set_option('show_activator',True)
    date1_el.set_option('show_confirm', False)
    date1_el.set_option('show_text', True)
    date1_el.set_option('show_today', False)
    date1_el.set_option('show_value', True)
    date1_el.set_option('read_only', False)
    if date1 not in [None,'']:
        date1_el.set_option('default', date1)
    date1_el.get_top().add_style('width: 150px')
    date1_el.set_persist_on_submit()
    date2_el = CalendarInputWdg("wh_graph_date2")
    date2_el.set_option('show_activator',True)
    date2_el.set_option('show_confirm', False)
    date2_el.set_option('show_text', True)
    date2_el.set_option('show_today', False)
    date2_el.set_option('show_value', True)
    date2_el.set_option('read_only', False)
    if date2 not in [None,'']:
        date2_el.set_option('default', date2)
    date2_el.get_top().add_style('width: 150px')
    date2_el.set_persist_on_submit()
    f1 = filtbl.add_cell(' ')
    f11 = filtbl.add_cell(' Date 1: ')
    f2 = filtbl.add_cell(date1_el)
    f21 = filtbl.add_cell(' Date 2: ')
    f3 = filtbl.add_cell(date2_el)
    f4 = filtbl.add_cell('<input type="button" value="Load Graph" name="not_yo_date"/>')
    f4.add_style('cursor: pointer;')
    f4.add_behavior(my.get_load_again())
    f1.add_attr('width','40%%')
    f4.add_attr('width','40%%')
    surrounder = Table()
    surrounder.add_attr('width','100%%')
    surrounder.add_attr('class','graph_surrounder')
    surrounder.add_row()
    surrounder.add_cell(filtbl)
    table = Table()
    table.add_attr('width','100%%')
    table.add_attr('class','graph_top')
    table.add_style('background-color: #60ca9d;')
    lgroupkeys = login_groups.keys()
    arr = []
    # Need to show this one for elites only
    # Show supervisors their department's
    # Show individuals their own
    # Try to implement drop-downs
    # Company-wide chart: compute efficiency %% (tactic/tv, capped at
    # 100) per day and hand the encoded series to draw_chart3.
    for d, l in all_dates.iteritems():
        arr.append(l)
    if len(arr) > 0:
        arr2 = sorted(arr, key=lambda k: k['cat'])
        acount = 0
        for a1 in arr2:
            percentage = 0
            tv = float(a1.get('tv'))
            tc = float(a1.get('tactic'))
            if tv != 0:
                percentage = tc/tv * 100
                pps = '%.2f' % percentage
                percentage = float(pps)
                if percentage > 100:
                    percentage = 100
            a1['percentage'] = percentage
            arr2[acount] = a1
            acount = acount + 1
        widget = DivWdg("Chart area 2")
        widget.add_attr('id','chartdiv_%s'%idx)
        str_data = my.make_string_dict(arr2)
        widget.add_attr('datastr', str_data)
        widget.add_styles('width: 100%%;height: 200px;')
        my.draw_chart3(widget, idx, 'All')
        table.add_row()
        tc = table.add_cell(widget)
        tc.add_attr('width','100%%')
        tc.add_attr('title','ALL')
        idx = idx + 1
    # One chart per login group, plus a hidden per-user row.
    groups = group_dates.keys()
    for group in groups:
        grptbl = Table()
        grptbl.add_attr('width','100%%')
        grptbl.add_style('background-color: #a1b3e6;')
        #print "GROUP = %s" % group
        arr = []
        for d, l in group_dates[group].iteritems():
            arr.append(l)
        if len(arr) > 0:
            arr2 = sorted(arr, key=lambda k: k['cat'])
            acount = 0
            for a1 in arr2:
                percentage = 0
                tv = float(a1.get('tv'))
                tc = float(a1.get('tactic'))
                if tv != 0:
                    percentage = tc/tv * 100
                    pps = '%.2f' % percentage
                    percentage = float(pps)
                    if percentage > 100:
                        percentage = 100
                a1['percentage'] = percentage
                arr2[acount] = a1
                acount = acount + 1
            widget = DivWdg("Chart area 2")
            widget.add_attr('id','chartdiv_%s'%idx)
            str_data = my.make_string_dict(arr2)
            widget.add_attr('datastr', str_data)
            widget.add_styles('width: 100%%;height: 200px;')
            my.draw_chart3(widget, idx, group)
            grptbl.add_row()
            tc = grptbl.add_cell(widget)
            tc.add_attr('width','100%%')
            tc.add_attr('title',group)
            grptbl.add_row()
            # 'Show Users' link toggles the per-user charts row below.
            opener = grptbl.add_cell('<b><u>Show Users</u></b>')
            opener.add_style('cursor: pointer;')
            toggle_row_behavior = my.get_toggle_row_behavior(group)
            opener.add_behavior(toggle_row_behavior)
            idx = idx + 1
            grpusers = 0
            usertbl = Table()
            usertbl.add_attr('width','100%%')
            usertbl.add_style('background-color: #c8d0e7;')
            # Per-user charts, two per row, for members of this group.
            for user in users:
                if user in name_to_login.keys():
                    login_name = name_to_login[user]
                    #print "USER = %s, LOGIN NAME = %s" % (user, login_name)
                    if login_name in lgroupkeys:
                        lgroups = []
                        lgroups = login_groups[login_name]
                        #print "GROUP = %s, USER = %s, LGROUPS = %s" % (group, user, lgroups)
                        #print "LOGIN GROUPS = %s" % login_groups
                        if group in lgroups:
                            arr3 = []
                            for d, l in user_dates[user]['dates'].iteritems():
                                arr3.append(l)
                            # NOTE(review): this checks len(arr) (the
                            # group-level list) -- likely meant arr3;
                            # confirm before changing.
                            if len(arr) > 0:
                                arr4 = sorted(arr3, key=lambda k: k['cat'])
                                acount = 0
                                for a1 in arr4:
                                    percentage = 0
                                    tv = float(a1.get('tv'))
                                    tc = float(a1.get('tactic'))
                                    if tv != 0:
                                        percentage = tc/tv * 100
                                        pps = '%.2f' % percentage
                                        percentage = float(pps)
                                        if percentage > 100:
                                            percentage = 100
                                    a1['percentage'] = percentage
                                    arr4[acount] = a1
                                    acount = acount + 1
                                widget = DivWdg("Chart area 2")
                                widget.add_attr('id','chartdiv_%s'%idx)
                                str_data = my.make_string_dict(arr4)
                                widget.add_attr('datastr', str_data)
                                widget.add_styles('width: 100%%;height: 200px;')
                                my.draw_chart3(widget, idx, user)
                                if grpusers % 2 == 0:
                                    usertbl.add_row()
                                tc = usertbl.add_cell(widget)
                                tc.add_attr('width','50%%')
                                tc.add_attr('title',user)
                                idx = idx + 1
                                grpusers = grpusers + 1
            # Pad an odd final row with an empty cell.
            if grpusers % 2 == 1:
                te = usertbl.add_cell(' ')
                te.add_attr('width','50%%')
            grprow = grptbl.add_row()
            grprow.add_attr('id','graphs_%s_row' % group)
            grprow.add_style('display: table-row;')
            grptbl.add_cell(usertbl)
        table.add_row()
        table.add_cell(grptbl)
    surrounder.add_row()
    surrounder.add_cell(table)
    return surrounder
| [
"topher.hughes@2gdigital.com"
] | topher.hughes@2gdigital.com |
1f0050636b553377350ef958e53062abe0a0aec4 | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/String/266. 回文排列.py | 2dba117a4cfd0caece5666e521229f85abe7fe4f | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | '''
奇数个的char最多只能有一个
'''
from collections import defaultdict
class Solution:
def canPermutePalindrome(self, s: str) -> bool:
if s == '':
return True
info = defaultdict(int)
for i in s:
info[i] += 1
count = 0
for v in info.values():
if v % 2 == 1:
count += 1
if count >= 2:
return False
return True | [
"leahxuliu@gmail.com"
] | leahxuliu@gmail.com |
e5d1427da5952429304f092fff6d772d00a430d1 | 2865d34e84abea09836c9a84e1aa02ba262b8f6d | /Distances/superior.py | f02f8ccc65fe2fee530e93820de28977d1106921 | [] | no_license | magabydelgado/numpy-formulas | f52119ef1387f078e1527c80343ca0de2336bc9f | 093657d4a23dfe82685595254aae50e0c6e46afb | refs/heads/main | 2023-05-08T14:06:48.142258 | 2021-05-25T06:16:41 | 2021-05-25T06:16:41 | 379,125,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | import numpy as np
'''
In mathematics, Chebyshev distance (or Tchebychev distance), maximum metric,
or L∞ metric is a metric defined on a vector space where
the distance between two vectors is the greatest of their differences
along any coordinate dimension.[2] It is named after Pafnuty Chebyshev.
It is also known as chessboard distance, since in the game of chess the minimum number of
moves needed by a king to go from one square on a chessboard to another equals the
Chebyshev distance between the centers of the squares, if the squares have side length one,
as represented in 2-D spatial coordinates with axes aligned to the edges of the board.
'''
objA = [22, 1, 42, 10]
objB = [20, 0, 36, 8]
npA = np.array(objA)
npB = np.array(objB)
chebyshev = np.abs(npA - npB).max()
# chebyshev = np.linalg.norm(npA -npB, ord=np.inf)
print(chebyshev) | [
"mangelladen@gmail.com"
] | mangelladen@gmail.com |
161c51566a4e0d910527636a2197e923a1518102 | 84239d0809dca1c88a33d42e1cda225ae5512f0f | /models/models_3_2.py | dbb8dd0b87933e755fa9ddfed094e529d0f03ca4 | [] | no_license | siebeniris/Understanding-NN | 92e2e9662d9d56e2946dec151d9d8f13bb3ae776 | a6d1553aea8e137827a7b909461664c87f1db238 | refs/heads/master | 2021-05-10T22:43:29.609052 | 2018-01-20T06:05:20 | 2018-01-20T06:05:20 | 118,264,703 | 1 | 0 | null | 2018-01-20T17:25:00 | 2018-01-20T17:25:00 | null | UTF-8 | Python | false | false | 7,861 | py | from tensorflow.python.ops import nn_ops, gen_nn_ops
import tensorflow as tf
class MNIST_CNN:
    """Plain CNN classifier for MNIST (flat 784 input -> 10 logits).

    Architecture: three conv(3x3, SAME, ReLU) + max-pool(2x2) stages
    with 32/64/128 filters, a 625-unit dense ReLU layer, and a linear
    10-way output.  All layers are built without bias terms, and every
    intermediate activation is returned so downstream relevance-
    propagation code (e.g. the Taylor class) can walk the network.
    """

    def __init__(self, name):
        # Name of the tf.variable_scope that owns all of this network's
        # variables; also used by the `vars` property to collect them.
        self.name = name

    def __call__(self, X, reuse=False):
        """Build the graph (sharing weights when reuse=True).

        Args:
            X: flattened input batch; reshaped to [-1, 28, 28, 1].
            reuse: reuse variables from a previous call under the same scope.

        Returns:
            (activations, logits) where `activations` lists every
            intermediate tensor from the input image through the softmax
            prediction, and `logits` is the pre-softmax output.
        """
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            with tf.variable_scope('layer0'):
                X_img = tf.reshape(X, [-1, 28, 28, 1])
            # Convolutional Layer #1 and Pooling Layer #1
            with tf.variable_scope('layer1'):
                conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu, use_bias=False)
                pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], padding="SAME", strides=2)
            # Convolutional Layer #2 and Pooling Layer #2
            with tf.variable_scope('layer2'):
                conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu, use_bias=False)
                pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], padding="SAME", strides=2)
            # Convolutional Layer #3 and Pooling Layer #3
            with tf.variable_scope('layer3'):
                conv3 = tf.layers.conv2d(inputs=pool2, filters=128, kernel_size=[3, 3], padding="SAME", activation=tf.nn.relu, use_bias=False)
                pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], padding="SAME", strides=2)
            # Dense Layer with Relu
            with tf.variable_scope('layer4'):
                # After three SAME 2x2 pools: 28 -> 14 -> 7 -> 4 spatial size.
                flat = tf.reshape(pool3, [-1, 128 * 4 * 4])
                dense4 = tf.layers.dense(inputs=flat, units=625, activation=tf.nn.relu, use_bias=False)
            # Logits (no activation) Layer: L5 Final FC 625 inputs -> 10 outputs
            with tf.variable_scope('layer5'):
                logits = tf.layers.dense(inputs=dense4, units=10, use_bias=False)
                prediction = tf.nn.softmax(logits)
        return [X_img, conv1, pool1, conv2, pool2, conv3, pool3, flat, dense4, prediction], logits

    @property
    def vars(self):
        # All trainable variables created under this network's scope.
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
class Taylor:
    """Layer-wise relevance propagation (deep Taylor decomposition) through a
    stack of conv / pooling / dense activations.

    `activations` and `weights` are given in forward order and reversed in
    place by the constructor. Calling the instance with a logit index builds
    the graph that redistributes that logit's relevance back to the input
    (z+ rule for hidden layers, z^B rule for the input layer).
    """
    def __init__(self, activations, weights, conv_ksize, pool_ksize, conv_strides, pool_strides, name):
        # Position, within the reversed activation list, of the earliest
        # forward layer whose op name mentions conv/pooling/dense — i.e. the
        # layer that propagates relevance into the input domain.
        self.last_ind = len(activations)
        for op in activations:
            self.last_ind -= 1
            if any(word in op.name for word in ['conv', 'pooling', 'dense']):
                break
        self.activations = activations
        self.activations.reverse()
        self.weights = weights
        self.weights.reverse()
        self.conv_ksize = conv_ksize
        self.pool_ksize = pool_ksize
        self.conv_strides = conv_strides
        self.pool_strides = pool_strides
        self.name = name
    def __call__(self, logit):
        """Return the relevance map for output unit *logit* at the input."""
        with tf.name_scope(self.name):
            Rs = []
            j = 0
            for i in range(len(self.activations) - 1):
                # BUGFIX: these index comparisons used `is`, which only works
                # for small ints by CPython implementation accident; use `==`.
                if i == self.last_ind:
                    # First network layer: propagate into the bounded input
                    # domain with the z^B rule.
                    if 'conv' in self.activations[i].name.lower():
                        Rs.append(self.backprop_conv_input(self.activations[i + 1], self.weights[j], Rs[-1], self.conv_strides))
                    else:
                        Rs.append(self.backprop_dense_input(self.activations[i + 1], self.weights[j], Rs[-1]))
                    continue
                if i == 0:
                    # Seed: relevance starts as the chosen logit's activation;
                    # the first backprop uses only that logit's weight column.
                    Rs.append(self.activations[i][:,logit,None])
                    Rs.append(self.backprop_dense(self.activations[i + 1], self.weights[j][:,logit,None], Rs[-1]))
                    j += 1
                    continue
                elif 'dense' in self.activations[i].name.lower():
                    Rs.append(self.backprop_dense(self.activations[i + 1], self.weights[j], Rs[-1]))
                    j += 1
                elif 'reshape' in self.activations[i].name.lower():
                    # Flatten/unflatten boundary: just reshape the relevance.
                    shape = self.activations[i + 1].get_shape().as_list()
                    shape[0] = -1
                    Rs.append(tf.reshape(Rs[-1], shape))
                elif 'conv' in self.activations[i].name.lower():
                    Rs.append(self.backprop_conv(self.activations[i + 1], self.weights[j], Rs[-1], self.conv_strides))
                    j += 1
                elif 'pooling' in self.activations[i].name.lower():
                    # Apply average pooling backprop regardless of type of pooling layer used, following recommendations by Montavon et al.
                    # Uncomment code below if you want to apply the winner-take-all redistribution policy suggested by Bach et al.
                    #
                    # if 'max' in self.activations[i].name.lower():
                    #     pooling_type = 'max'
                    # else:
                    #     pooling_type = 'avg'
                    # Rs.append(self.backprop_pool(self.activations[i + 1], Rs[-1], self.pool_ksize, self.pool_strides, pooling_type))
                    Rs.append(self.backprop_pool(self.activations[i + 1], Rs[-1], self.pool_ksize, self.pool_strides, 'avg'))
                else:
                    # BUGFIX: previously `raise Error(...)` with an undefined
                    # name, which would have surfaced as a NameError.
                    raise ValueError('Unknown operation.')
            return Rs[-1]
    def backprop_conv(self, activation, kernel, relevance, strides, padding='SAME'):
        """z+ rule through a conv layer: redistribute along positive weights."""
        W_p = tf.maximum(0., kernel)
        z = nn_ops.conv2d(activation, W_p, strides, padding) + 1e-10
        s = relevance / z
        c = nn_ops.conv2d_backprop_input(tf.shape(activation), W_p, s, strides, padding)
        return activation * c
    def backprop_pool(self, activation, relevance, ksize, strides, pooling_type, padding='SAME'):
        """Redistribute relevance through a pooling layer ('avg' or 'max')."""
        # BUGFIX: was `pooling_type.lower() in 'avg'` — a substring test that
        # also matched 'a', 'v' and ''. Equality expresses the intent and
        # behaves identically for the 'avg'/'max' values used by __call__.
        if pooling_type.lower() == 'avg':
            z = nn_ops.avg_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides, padding)
            return activation * c
        else:
            z = nn_ops.max_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._max_pool_grad(activation, z, s, ksize, strides, padding)
            return activation * c
    def backprop_dense(self, activation, kernel, relevance):
        """z+ rule through a dense layer."""
        W_p = tf.maximum(0., kernel)
        z = tf.matmul(activation, W_p) + 1e-10
        s = relevance / z
        c = tf.matmul(s, tf.transpose(W_p))
        return activation * c
    def backprop_conv_input(self, X, kernel, relevance, strides, padding='SAME', lowest=0., highest=1.):
        """z^B rule for the first conv layer; input bounded to [lowest, highest]."""
        W_p = tf.maximum(0., kernel)
        W_n = tf.minimum(0., kernel)
        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest
        z_o = nn_ops.conv2d(X, kernel, strides, padding)
        z_p = nn_ops.conv2d(L, W_p, strides, padding)
        z_n = nn_ops.conv2d(H, W_n, strides, padding)
        z = z_o - z_p - z_n + 1e-10
        s = relevance / z
        c_o = nn_ops.conv2d_backprop_input(tf.shape(X), kernel, s, strides, padding)
        c_p = nn_ops.conv2d_backprop_input(tf.shape(X), W_p, s, strides, padding)
        c_n = nn_ops.conv2d_backprop_input(tf.shape(X), W_n, s, strides, padding)
        return X * c_o - L * c_p - H * c_n
    def backprop_dense_input(self, X, kernel, relevance, lowest=0., highest=1.):
        """z^B rule for a first dense layer; input bounded to [lowest, highest]."""
        W_p = tf.maximum(0., kernel)
        W_n = tf.minimum(0., kernel)
        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest
        z_o = tf.matmul(X, kernel)
        z_p = tf.matmul(L, W_p)
        z_n = tf.matmul(H, W_n)
        z = z_o - z_p - z_n + 1e-10
        s = relevance / z
        c_o = tf.matmul(s, tf.transpose(kernel))
        c_p = tf.matmul(s, tf.transpose(W_p))
        c_n = tf.matmul(s, tf.transpose(W_n))
        return X * c_o - L * c_p - H * c_n
| [
"1202kbs@gmail.com"
] | 1202kbs@gmail.com |
6947929c742bc0792eea07204e55f54a00bbcc60 | 32df7046ccf6ef2dd9b3148c390149f7557101f6 | /Porthole_Detection/Data_to_Image.py | 92b33630fe62b40cc422c8cfec351cef1c485aa5 | [] | no_license | MLJejuCamp2017/Pothole_Detection_using_CNN | 06f849bf9b78b11acf0ef1ec7a75bd9db559e6f5 | 33a6b58837fc36a2d4e04a14d28376a3a456a790 | refs/heads/master | 2021-01-01T18:36:04.602634 | 2017-07-25T06:23:44 | 2017-07-25T06:23:44 | 98,374,940 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | # 파일 하나만 바꿔줌
'''
import numpy as np
from scipy.misc import toimage
x = np.loadtxt("/Users/User/PycharmProjects/network/ML_Camp/Porthole_Detection/all.csv", delimiter=',')
# toimage(x).show()
toimage(x).save('all(grayscale).jpg')
'''
'''
# 디렉토리 내의 파일들을 한번에 일괄 변환 와 개쩐다 난 노가다했는데 ㅅㅂ 진작에 할걸
import os
import numpy as np
from scipy.misc import toimage
path = "/Users/User/OneDrive/센서로그/자전거/포트홀/csv/다듬다듬/"
dirs = os.listdir(path)
def convert():
for item in dirs:
if os.path.isfile(path+item):
print(path+item)
x = np.loadtxt(path+item, delimiter=',')
f, e = os.path.splitext(path+item)
toimage(x).save(f + '.jpg')
convert()
'''
# 스펙트럼 이미지로 일괄 변환
# '''
import matplotlib.pyplot as plt
import stft
import os
import numpy as np
# Source directory of the trimmed pothole CSV sensor logs (the Korean folder
# names read roughly "sensor log / bicycle / pothole / csv / trimmed").
path = "/Users/User/OneDrive/센서로그/자전거/포트홀/csv/다듬다듬/"
dirs = os.listdir(path)
def convert():
    """Batch-convert every CSV under `path` into an STFT spectrogram JPEG.

    Each CSV is loaded column-wise; column index 2 (presumably the z-axis
    accelerometer channel — confirm against the logger format) is turned into
    a spectrogram and saved next to the CSV with a .jpg extension.
    """
    for item in dirs:
        if os.path.isfile(path+item):
            print(path+item)
            x = np.loadtxt(path+item, delimiter=',', unpack=True, dtype='float32')
            f, e = os.path.splitext(path+item)
            z_data = np.transpose(x[2])
            # specgram_z = stft.spectrogram(z_data)
            specgram_z = stft.spectrogram(z_data, window=0.4)
            plt._imsave(f + '.jpg', abs(specgram_z), vmin=-40, vmax=40, cmap=plt.get_cmap('coolwarm'), format='jpg') # other cmaps tried: 'gray', 'Wistia'
convert()
# '''
# 파일 하나만 스펙트럼 이미지로 바꿔줌
'''
import matplotlib.pyplot as plt
import stft
import numpy as np
x = np.loadtxt("/Users/User/PycharmProjects/network/ML_Camp/Porthole_Detection/all.csv", delimiter=',', unpack=True)
# toimage(x).show()
z_data = np.transpose(x[2])
specgram_z = stft.spectrogram(z_data, window=0.4)
plt._imsave('all(test).jpg', abs(specgram_z), vmin=-40, vmax=40, cmap=plt.get_cmap('coolwarm'), format='jpg')
# '''
| [
"chzhqk1994@gmail.com"
] | chzhqk1994@gmail.com |
9bee68782c0e527d2e9b4643372f1a7d9de2807e | 8844cf13ea4a61aea6fafc285883f173aa3c46f4 | /venv/Scripts/django-admin.py | 03e0e6e44f5412c5a1efc294cefd8e6e4a9bac0e | [] | no_license | ImOkay-Ms/bookmark | bf9c0ba3b0d5192abcee65c3b04601d9eed1b67b | 1e2512cd9be85067bf20c17a5661dbaadf3c01b8 | refs/heads/main | 2023-04-19T22:44:53.748534 | 2021-05-07T08:27:32 | 2021-05-07T08:27:32 | 365,162,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | #!c:\users\wallm\pycharmprojects\04_bookmark\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"56255367+ImOkay-Ms@users.noreply.github.com"
] | 56255367+ImOkay-Ms@users.noreply.github.com |
fafedd086eb52ca3a26667cd17b01a87c8ca5b04 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/791.py | f4c857964fa46a84265cc71f3b483d20abda438d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | __author__ = 'rrampage'
t = int(input())
def input_format():
    """Read one 'Smax digits' line from stdin and return the digit string as
    a list of ints (one per shyness level)."""
    digits = input().split()[1]
    return [int(d) for d in digits]
def ovation(aud):
    """Minimum number of extra friends needed for a full standing ovation.

    aud[i] is how many audience members have shyness level i; such a person
    stands only once at least i others are already standing.
    """
    friends = 0
    standing = 0
    for level, count in enumerate(aud):
        if count == 0:
            continue
        if standing < level:
            # Invite just enough friends to reach this shyness threshold.
            friends += level - standing
            standing = level
        standing += count
    return friends
# One test case per input line: parse the audience vector and print the
# minimum number of extra friends needed.
for x in range(t):
    print("Case #%d: %d" % (x+1, ovation(input_format())))
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c439e8bc4823a5c6fc7da35db3637314de577c9c | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /say_big_person_of_fact/hand_and_case/try_able_company_up_week.py | dfd64790159e034f5c52cd28b6e4a81e19f11920 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py |
#! /usr/bin/env python
def different_place(str_arg):
    # Echo the argument (via way) and then a fixed trailer line.
    way(str_arg)
    print('thing')
def way(str_arg):
    # Print the argument verbatim.
    print(str_arg)
if __name__ == '__main__':
    # Demo invocation with an auto-generated phrase.
    different_place('know_right_world_over_year')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
21cf2eb653fc11c07a6ebf96569ea5090c294c25 | a42d240a05ddb7e77f9cd517451fde2c82d5156b | /Problem-089.py | f624636ca0bd14d29e71b74d507211f619c0500b | [] | no_license | spirosrap/Project-Euler | 3d7edc05c677a5edfa084308380839e2c018157e | 83c2a2467b15426216483bfa34aeeb7a21728a16 | refs/heads/master | 2016-09-06T08:07:18.383648 | 2013-11-08T13:19:15 | 2013-11-08T13:19:15 | 3,463,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,053 | py | import math
import re
#Define exceptions
class RomanError(Exception): pass                   # base class for all errors below
class OutOfRangeError(RomanError): pass             # integer outside 1..4999
class NotIntegerError(RomanError): pass             # non-integral number given
class InvalidRomanNumeralError(RomanError): pass    # malformed numeral string
#Define digit mapping: (numeral, value) pairs in descending value order,
#including the two-character subtractive forms (CM, CD, XC, XL, IX, IV)
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))
def toRoman(n):
    """convert integer to Roman numeral"""
    # NOTE: this file is Python 2 (comma-style raise, print statements below).
    if not (0 < n < 5000):
        raise OutOfRangeError, "number out of range (must be 1..4999)"
    if int(n) != n:
        raise NotIntegerError, "decimals can not be converted"
    result = ""
    # Greedy conversion: repeatedly emit the largest numeral that still fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result
#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
^ # beginning of string
M{0,4} # thousands - 0 to 4 M's
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
# or 500-800 (D, followed by 0 to 3 C's)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
# or 50-80 (L, followed by 0 to 3 X's)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
# or 5-8 (V, followed by 0 to 3 I's)
$ # end of string
""" ,re.VERBOSE)
def fromRoman(s):
    """convert Roman numeral to integer"""
    if not s:
        raise InvalidRomanNumeralError, 'Input can not be blank'
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s
    result = 0
    index = 0
    # Consume the string left to right: each (numeral, value) pair is matched
    # as many times as it repeats at the current position.
    for numeral, integer in romanNumeralMap:
        while s[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
def int_to_roman(input):
    """
    Convert an integer to Roman numerals.
    Examples:
    >>> int_to_roman(0)
    Traceback (most recent call last):
    ValueError: Argument must be between 1 and 3999
    >>> int_to_roman(-1)
    Traceback (most recent call last):
    ValueError: Argument must be between 1 and 3999
    >>> int_to_roman(1.5)
    Traceback (most recent call last):
    TypeError: expected integer, got <type 'float'>
    >>> for i in range(1, 21): print int_to_roman(i)
    ...
    I
    II
    III
    IV
    V
    VI
    VII
    VIII
    IX
    X
    XI
    XII
    XIII
    XIV
    XV
    XVI
    XVII
    XVIII
    XIX
    XX
    >>> print int_to_roman(2000)
    MM
    >>> print int_to_roman(1999)
    MCMXCIX
    """
    # NOTE: the parameter name `input` shadows the builtin; kept as-is for
    # API compatibility.
    if type(input) != type(1):
        raise TypeError, "expected integer, got %s" % type(input)
    ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
    result = ""
    # Greedy subtraction over the value table (includes subtractive forms).
    for i in range(len(ints)):
        count = int(input / ints[i])
        result += nums[i] * count
        input -= ints[i] * count
    return result
def number(char):
    """Map a single Roman numeral character to its integer value.

    Returns the string 'error' for any unrecognized character (lookup is
    case-sensitive).
    """
    values = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    return values.get(char, 'error')
def romanToNumber(s):
    """Convert a Roman numeral string to its integer value.

    Subtractive notation is handled by tracking the previous character: when
    a numeral is larger than its predecessor, the predecessor was already
    added once, so it is subtracted twice (e.g. IX = 1 + 10 - 2*1 = 9).
    """
    total = 0
    previous = 10**7  # sentinel larger than any numeral value
    for char in s:
        # Evaluate number() once per character (the original called it up to
        # three times per loop iteration); also avoids shadowing builtin sum.
        value = number(char)
        if previous < value:
            total += value - 2*previous
        else:
            total += value
        previous = value
    return total
# Spot checks (Python 2 print statements).
print romanToNumber('MMMMCCCXIV')
print romanToNumber('MMDCCLXIX')
print romanToNumber('CMLXXXVII')
print toRoman(romanToNumber('MMMMCCCLXXXXVII'))
# Project Euler 89: total characters saved by rewriting each numeral from
# roman.txt in its minimal form.
lines = [line.strip() for line in open('roman.txt')]
print lines
sum=0   # NOTE: shadows the builtin sum()
for s in lines:
    sum+=len(s)-len(toRoman(romanToNumber(s)))
print sum
| [
"spirosrap@gmail.com"
] | spirosrap@gmail.com |
3614b892b438862adb7730b5927fba103d610fdd | 2fdb9f2b2f3ffc13a04de7a13e3f177d88e85798 | /likes/templatetags/likes_tags.py | e4631e6e8eda65d969f91b1b3a7714083e2f1232 | [] | no_license | dyr201500800475/web_novels | 4d0eca0dbe7b1eba75bfc203361caa324d43eaad | df5daafd4661ede64554f19a074bd0581113f4b9 | refs/heads/master | 2020-05-09T15:59:55.266787 | 2019-04-16T01:58:34 | 2019-04-16T01:58:34 | 181,253,766 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | from django import template
from django.contrib.contenttypes.models import ContentType
from ..models import LikeCount, LikeRecord
register = template.Library()
# Number of likes on an object (the counter row is created on first access).
@register.simple_tag
def get_like_count(obj):
    content_type = ContentType.objects.get_for_model(obj)
    like_count, created = LikeCount.objects.get_or_create(content_type=content_type, object_id=obj.pk)
    return like_count.liked_num
# Like status for the current user: 'active' when the logged-in user has
# liked the object, '' otherwise (including anonymous users).
@register.simple_tag(takes_context=True)
def get_like_status(context, obj):
    content_type = ContentType.objects.get_for_model(obj)
    user=context['user']
    if not user.is_authenticated:
        return ''
    if LikeRecord.objects.filter(content_type=content_type, object_id=obj.pk, user=user).exists():
        return 'active'
    else:
        return ''
# Content-type (model) label of the liked object.
@register.simple_tag
def get_content_type(obj):
    content_type = ContentType.objects.get_for_model(obj)
    return content_type.model
| [
"870850834@qq.com"
] | 870850834@qq.com |
b51914fd7b3e6ca960cf28e6f04ff6f317fe58a5 | 66865b7ed119f42c8490bf3f8821602e1201eb0b | /tests/performance/time_mean.py | f6149a4c0aef131f24928bd33fcd8962974edd8b | [
"MIT"
] | permissive | chanedwin/pandas-profiling | 1a8a35f6d985a93f02a25af6e1c650b24e11218a | d9ee4a8a589e075cfced9fc71ca500a20e2a3e73 | refs/heads/develop_spark_profiling | 2023-08-01T19:53:31.340751 | 2021-01-07T15:59:22 | 2021-01-07T15:59:22 | 288,504,610 | 1 | 3 | MIT | 2021-04-26T14:09:43 | 2020-08-18T16:14:57 | Jupyter Notebook | UTF-8 | Python | false | false | 726 | py | import timeit
testcode = """
import numpy as np
import pandas as pd
np.random.seed(12)
vals = np.random.random(1000)
series = pd.Series(vals)
series[series < 0.2] = pd.NA
def f1(series):
arr = series.values
arr_without_nan = arr[~np.isnan(arr)]
return np.mean(arr_without_nan)
def f2(series):
arr = series.values
return np.nanmean(arr)
def f3(series):
return series.mean()
def f4(series):
return series[series.notna()].mean()
"""
print(timeit.timeit("f1(series)", number=10, setup=testcode))
print(timeit.timeit("f2(series)", number=10, setup=testcode))
print(timeit.timeit("f3(series)", number=10, setup=testcode))
print(timeit.timeit("f4(series)", number=10, setup=testcode))
| [
"sfbbrugman@gmail.com"
] | sfbbrugman@gmail.com |
e432f4e76d689a36074aaa8adfdda869d6809a85 | 491c298283c3af8ca5188e7191758512b758fdc7 | /examples/ex_pyside.py | 4f9f43350bb17fa137396aac3a0d85c186502fe3 | [
"BSD-3-Clause"
] | permissive | merydwin/idascrtipt | 0bda6f2253dd94698a82cb09a7a1855cbced6221 | 431e04847e55adbb1d263aa2aadc2d489d068f50 | refs/heads/master | 2021-01-22T04:48:40.353877 | 2015-03-02T15:54:20 | 2015-03-02T15:54:20 | 38,051,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from idaapi import PluginForm
from PySide import QtGui, QtCore
class MyPluginFormClass(PluginForm):
    """Minimal IDAPython PluginForm example rendered with PySide widgets."""
    def OnCreate(self, form):
        """
        Called when the plugin form is created
        """
        # Get parent widget
        self.parent = self.FormToPySideWidget(form)
        self.PopulateForm()
    def PopulateForm(self):
        """Fill the form with two colored 'hello' labels."""
        # Create layout
        layout = QtGui.QVBoxLayout()
        layout.addWidget(
            QtGui.QLabel("Hello from <font color=red>PySide</font>"))
        layout.addWidget(
            QtGui.QLabel("Hello from <font color=blue>IDAPython</font>"))
        self.parent.setLayout(layout)
    def OnClose(self, form):
        """
        Called when the plugin form is closed
        """
        pass
# Instantiate the form and display it inside IDA.
plg = MyPluginFormClass()
plg.Show("PySide hello world")
| [
"elias.bachaalany@fccdda4b-c33c-0410-84de-61e1e3e5f415"
] | elias.bachaalany@fccdda4b-c33c-0410-84de-61e1e3e5f415 |
6ede21e09928afe06f7ca4e59017bb4b664f3da2 | afaef7fda04c6b9a72b9567dcb991037710ebab3 | /dopy/action.py | da7f33b153fd6c3de1aec25846ceb24f1069d922 | [] | no_license | B-Rich/DOPY | 8ecce140dbc8ee1781bbfec1f48835087d7c2b47 | c8a02b3ffefa7164515c4dc35ba0ed3bd2ff0c85 | refs/heads/master | 2016-09-06T09:22:08.123432 | 2014-08-02T00:26:31 | 2014-08-02T00:26:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | class Action(object):
def __init__(self, data):
self.id = data["id"]
self.status = data["status"]
self.type = data["type"]
self.started_at = data["started_at"]
self.completed_at = data["completed_at"]
self.resource_id = data["resource_id"]
self.resource_type = data["resource_type"]
self.region = data["region"]
| [
"trvrmay@yahoo.com"
] | trvrmay@yahoo.com |
69bed29a7ff68e4bc1e38f20eff1032b0709cdc7 | ce9593eb4ec109b86f3f75ac161a372e6d99f067 | /Problems/Beautify both output and code/main.py | cd06ffb22a2bed0cad2a1cdff2cf0c609f3bb1b4 | [] | no_license | wangpengda1210/Rock-Paper-Scissors | 0b2e5ef9b946dd209a85fa7440a7e40acfd83923 | 05c558ddfdf69eb4170185a158ded8a3a063359c | refs/heads/main | 2023-02-20T08:35:09.379752 | 2021-01-23T06:31:48 | 2021-01-23T06:31:48 | 332,143,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | print("http://example.com/{}/desirable/{}/profile".format(input(), input()))
| [
"515484505@qq.com"
] | 515484505@qq.com |
0a94b3a8f252d9c3cf1e6285b13b2ee7925ddcd3 | 6be41ba7ade71d84fd116d6eb132e6b30c34634a | /day25/day25.py | c28b774689f595a4021cff90a15b8aeabe039d71 | [] | no_license | brianjp93/aoc2019 | efaa8fc46430c3f8d94f8e28999c8c5a4548ab9d | 63baebae9ce3c9c7fc2dfaff61f0f2f4e30f77c6 | refs/heads/master | 2021-07-15T00:19:42.692789 | 2021-06-25T00:42:18 | 2021-06-25T00:42:18 | 225,127,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,473 | py | """day25.py
"""
from computer import Computer
from itertools import combinations
CHECKPOINT_ITEMS = ['asterisk','antenna','easter egg','space heater','jam','tambourine','festive hat','fixed point']
class Droid(Computer):
    """Intcode computer driving the AoC 2019 day-25 text-adventure droid."""
    def __init__(self, *args, **kwargs):
        # Number of commands issued so far, for periodic progress output.
        self.sent = 0
        super().__init__(*args, **kwargs)
    def send(self, command):
        """Queue one ASCII command line and run the machine; returns self."""
        command = command + '\n'
        command = [ord(x) for x in command]
        self.inputs += command
        self.run()
        self.sent += 1
        if self.sent % 50 == 0:
            print(f'Sent {self.sent} commands to droid.')
        return self
    def send_all(self, commands):
        """Drain pending output, then send each command in turn."""
        for c in commands:
            self.read()
            self.send(c)
    def take(self, item):
        # Pick up *item*. NOTE(review): send() appends another newline, so
        # the command goes out followed by a blank line — confirm intended.
        command = f'take {item}\n'
        self.send(command)
    def drop(self, item):
        # Drop *item* (same trailing-newline caveat as take()).
        command = f'drop {item}\n'
        self.send(command)
    def drop_all(self, items):
        """Drop every item in *items*; returns self for chaining."""
        for item in items:
            self.read()
            self.drop(item)
        return self
    def take_all(self, items):
        """Take every item in *items*; returns self for chaining."""
        for item in items:
            self.read()
            self.take(item)
        return self
    def try_all(self):
        """Brute-force the pressure plate: try subsets of CHECKPOINT_ITEMS of
        size 0..7, walking west after each inventory change, until the output
        no longer contains 'Alert'; returns that output."""
        print('Trying item combos.')
        tried = 0
        have = []
        for i in range(8):
            combos = combinations(CHECKPOINT_ITEMS, i)
            for combo in combos:
                if tried % 20 == 0:
                    print(f'Tried {tried} item combinations.')
                # Only move the items that differ from the current inventory.
                self.drop_all(set(have) - set(combo))
                self.take_all(set(combo) - set(have)).read()
                have = combo
                out = self.send('west').read()
                tried += 1
                if not 'Alert' in out:
                    return out
if __name__ == '__main__':
    # Puzzle input: the Intcode program as comma-separated integers.
    with open('data.txt', 'r') as f:
        data = list(map(int, f.read().split(',')))
    d = Droid(data)
    d.run()
    # Hand-scripted walkthrough: collect the eight safe items while
    # navigating the ship, ending next to the security checkpoint.
    commands = [
        'south', 'south', 'south',
        'take fixed point', 'south', 'take festive hat',
        'west', 'west', 'take jam',
        'south', 'take easter egg', 'north',
        'east', 'east', 'north',
        'west', 'take asterisk', 'east',
        'north', 'west', 'north',
        'north', 'take tambourine', 'south',
        'south', 'east', 'north',
        'west', 'south', 'take antenna',
        'north', 'west', 'west',
        'take space heater','west',
    ]
    d.send_all(commands)
    # Brute-force the weight check and print the final game output.
    out = d.try_all()
    print(out)
| [
"perrettbrian@gmail.com"
] | perrettbrian@gmail.com |
8a8eb30d68328005ec249519efc1016a86616c7f | 45bfeba3abab88eeb08b54946a8729d0152a22cc | /src/python/codechef/JAN19B/DPAIRS.py | 0e5f5b1b7ae76609189e196aac939bfee444999f | [
"MIT"
] | permissive | duke79/compro | c32ee2aca9b5adf2d62e18fa8822736821148b0b | a5577e043888df513a78a94a93ed5d08bc6ad2cd | refs/heads/master | 2022-06-13T22:09:52.451149 | 2022-06-12T05:15:28 | 2022-06-12T05:21:44 | 165,487,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | N, M = input().split(" ")
N = int(N)
M = int(M)
A = input().split(" ")
A = [int(elem) for elem in A]
B = input().split(" ")
B = [int(elem) for elem in B]
A_sorted = [i[0] for i in sorted(enumerate(A), key=lambda x: x[1])]
B_sorted = [i[0] for i in sorted(enumerate(B), key=lambda x: x[1])]
A_i = 0
B_i = 0
As_turn = True
# print(A_sorted)
# print(B_sorted)
while len(A_sorted) > A_i and len(B_sorted) > B_i:
print("%s %s" % (A_sorted[A_i], B_sorted[B_i]))
if As_turn:
# print("As_turn")
As_turn = False
A_i += 1
else:
# print("Bs_turn")
As_turn = True
B_i += 1
| [
"pulkitsingh01@gmail.com"
] | pulkitsingh01@gmail.com |
2bdbc779fc636376e7fdf31a9eb638963d99f2c7 | ea023a0478c89e8f566d967321e2d0d645499f86 | /hw05_01_persons.py | 5988098836efc39ca1773a1a338e6cb6d9f13664 | [] | no_license | NVMarchuk/go_QA | 8ee8f75e9f7b37f0343393eba24b4d28e61fa122 | 4e14501ac6bb931a1a6b5703bf8f4b2f9695847b | refs/heads/master | 2020-04-09T04:45:55.134388 | 2018-12-02T12:42:26 | 2018-12-02T12:42:26 | 160,035,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,808 | py | import datetime
class Person:
    """A person described by a two-word full name and an optional birth year.

    full_name is a single string of the form 'first surname'; there are no
    separate name/surname fields.
    """
    def __init__(self, full_name=None, birth_year=None):
        """Store the name and birth year after validating both.

        Raises ValueError when full_name is not exactly two space-separated
        words, or when a truthy birth_year lies outside 1900..current year.
        """
        if len(full_name.split(' ')) != 2:
            raise ValueError('Incorrect full_name. Required format: two words.')
        self.full_name = full_name
        this_year = datetime.datetime.now().year
        if birth_year and not (1900 <= birth_year <= this_year):
            raise ValueError('Incorrect birth_year. Required value between 1900 and ' + str(this_year))
        self.birth_year = birth_year
    def first_name(self):
        """First word of the full name."""
        return self.full_name.split(' ')[0]
    def sur_name(self):
        """Second word of the full name."""
        return self.full_name.split(' ')[1]
    def age_in(self, year=0):
        """Age the person is/turns in *year* (current year when omitted);
        0 for years before birth."""
        target = year if year else datetime.datetime.now().year
        return max(0, target - self.birth_year)
    def __str__(self):
        """Readable representation of the object."""
        return "<PERSON object:: full_name:{}, birth_year:{}>".format(self.full_name, self.birth_year)
class Employee(Person):
    """A Person with a job: adds position, salary and experience fields."""
    def __init__(self, full_name='', birth_year=0, position='', salary=0, experience=0):
        """Create an employee; name/birth-year validation is inherited."""
        super().__init__(full_name=full_name, birth_year=birth_year)
        self.position = position
        self.salary = salary
        self.experience = experience
    def increment(self, value):
        """Adjust the salary by *value* (may be negative), clamping at zero."""
        self.salary += value
        if self.salary < 0:
            self.salary = 0
    def exp_pos(self):
        """Position title prefixed by seniority derived from experience."""
        if self.experience >= 6:
            prefix = 'Senior'
        elif self.experience >= 3:
            prefix = 'Middle'
        else:
            prefix = 'Junior'
        return prefix + ' ' + self.position
    def __str__(self):
        """Readable representation of the object."""
        template = ("<EMPLOYEE object:: full_name:{} birth_year:{}\n "
                    "position:{} salary:{} experince:{}>")
        return template.format(self.full_name, self.birth_year,
                               self.exp_pos(), self.salary, self.experience)
class ITEmployee(Employee):
    """An Employee with a list of technical skills."""
    def __init__(self, full_name='', birth_year=0, position='', salary=0, experience=0, *skills):
        """Create an IT employee.

        At most one extra positional argument may follow *experience*: an
        iterable whose items become the initial skills list.
        """
        super().__init__(full_name=full_name, birth_year=birth_year,
                         position=position, salary=salary, experience=experience)
        # list(*skills): [] when nothing extra is passed, list(iterable) for
        # one extra argument, TypeError for more than one.
        self.skills = list(*skills)
    def add_skill(self, new_skill=''):
        """Append *new_skill* unless it is empty or already present."""
        if not new_skill:
            return
        if new_skill not in self.skills:
            self.skills.append(new_skill)
    def add_skills(self, *new_skills):
        """Append every given skill (duplicates are not filtered out)."""
        self.skills.extend(new_skills)
    def __str__(self):
        """Readable representation of the object."""
        template = ("<ITEMPLOYEE object:: full_name:{} birth_year:{}\n"
                    "position:{} salary:{} experince:{}\n"
                    "skills:{}>")
        return template.format(self.full_name, self.birth_year,
                               self.exp_pos(), self.salary, self.experience, self.skills)
if __name__ == '__main__':
    # Manual smoke test of the three classes.
    neo = Person(full_name='Neo Anderson', birth_year=1999)
    print(neo.first_name())
    print(neo.sur_name())
    neo.age_in()  # result discarded; exercises the current-year default
    print(neo.birth_year)
    print(str(neo))
    #
    roger = Employee(full_name='Roger Wilco', birth_year=1986, position='Janitor', experience=12)
    print(str(roger))
    #
    neo = ITEmployee(full_name='Neo Anderson', birth_year=1999, position='Chosen', experience=1)
    neo.add_skills('cool', 'smart')
    print(str(neo))
    # 
"qamarchuk@gmail.com"
] | qamarchuk@gmail.com |
9bf54e5c86c4b04d3adf8d7b9ae99b50fe0cb1dd | 7b6b64795d3aa8c1e72c7a87cfb9d3efd59da39a | /fosfile/decrypt.py | aa290d576869ea9ea0da50d796cfc70f8017d1ea | [
"MIT"
] | permissive | CleyFaye/FOS_View | 9277ebc6a667740ac96982a99bc8f8cb04b64b19 | 9314a3a6aec24eec2211e3a5ddc532c1f4befd20 | refs/heads/master | 2021-01-10T17:39:19.174845 | 2015-10-20T18:48:31 | 2015-10-20T18:48:31 | 44,626,005 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
from base64 import b64encode, b64decode
# Informations taken from FOSDecrypt.cs
# (original author: superkhung@vnsecurity.net)
class cryptoInfo(object):
    # Constants of the FOS save-file cipher (AES-CBC with a PBKDF2 key).
    initVector = 'tu89geji340t89u2'  # 16-byte IV, also used as the PBKDF2 salt
    passPhrase = 'UGxheWVy'          # key-derivation passphrase ('Player' in base64)
    keySize = 256                    # AES key size in bits
def decrypt(srcFile):
    """Decrypt a base64-encoded, AES-CBC-encrypted FOS save file.

    srcFile may be an open file-like object or a path string. Returns the
    plaintext truncated just after its last '}' (discarding trailing padding).
    """
    # cipher = BASE64(decode, srcFile)
    # key = PBKDF2(passPhrase,IV)
    # ctx = CBC(AES,key,IV)
    # plain = ctx.decipher(cipher)
    try:
        # Assume a file-like object first ...
        rawData = srcFile.read()
    except Exception:
        # ... and fall back to treating srcFile as a filesystem path.
        with open(srcFile, 'r') as srcStream:
            rawData = srcStream.read()
    cipher = b64decode(rawData)
    # Derive the AES key from the hard-coded passphrase, salted with the IV.
    # NOTE(review): keySize / 8 is a float on Python 3 — confirm PBKDF2
    # accepts it (otherwise use // for the dkLen argument).
    key = PBKDF2(cryptoInfo.passPhrase, cryptoInfo.initVector, cryptoInfo.keySize / 8)
    ctx = AES.new(key, AES.MODE_CBC, cryptoInfo.initVector)
    result = ctx.decrypt(cipher)
    # Keep everything up to the last '}' — assumes JSON-like plaintext.
    # NOTE(review): on Python 3 `result` is bytes, so rfind('}') would need
    # b'}' — this code appears to target Python 2.
    lastClose = result.rfind('}')
    return result[:lastClose+1]
if __name__ == '__main__':
    # Deliberate guard: this module is import-only, so direct execution fails
    # (the message parodies the DOS-stub text found in PE executables).
    raise Exception('This program cannot be run in DOS mode')
| [
"github@cleyfaye.net"
] | github@cleyfaye.net |
cacd7937cdc425dcc72f8b26713976bf92e3d667 | 6e33f9472a6369a2c5219ef358e98f72ce9fe463 | /load_data.py | 18ffc9b777eed7e3a213a1e92165d17b5174d047 | [] | no_license | harshkn/NeuralNetworkWithLasagne | 599369f43e44a9463b5d53193f24b1983825106e | 4a5c1770cca69029b3b0cc8b704a80396c3181af | refs/heads/master | 2021-01-21T16:15:10.146334 | 2016-08-16T11:10:51 | 2016-08-16T11:10:51 | 65,447,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | import sys
import os
import numpy as np
def load_dataset():
    """Download (if needed) and load the MNIST dataset.

    Returns (X_train, y_train, X_val, y_val, X_test, y_test): the X arrays
    are float32 in [0, 255/256], flattened to shape (examples, 784); the y
    arrays are uint8 label vectors. The last 10000 training examples are
    split off as the validation set.
    """
    # We first define a download function, supporting both Python 2 and 3.
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve
    def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)
    # We then define functions for loading MNIST images and labels.
    # For convenience, they also download the requested files if needed.
    import gzip
    def load_mnist_images(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the inputs in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        # The inputs are vectors now, we reshape them to monochrome 2D images,
        # following the shape convention: (examples, channels, rows, columns)
        data = data.reshape(-1, 1, 28, 28)
        # The inputs come as bytes, we convert them to float32 in range [0,1].
        # (Actually to range [0, 255/256], for compatibility to the version
        # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
        return data / np.float32(256)
    def load_mnist_labels(filename):
        if not os.path.exists(filename):
            download(filename)
        # Read the labels in Yann LeCun's binary format.
        with gzip.open(filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, that's exactly what we want.
        return data
    # We can now download and read the training and test set images and labels.
    X_train = load_mnist_images('train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]
    # Flatten each 1x28x28 image into a 784-dimensional row vector.
    X_train = X_train.reshape((X_train.shape[0], X_train.shape[2] * X_train.shape[3]))
    X_test = X_test.reshape((X_test.shape[0], X_test.shape[2] * X_test.shape[3]))
    X_val = X_val.reshape((X_val.shape[0], X_val.shape[2] * X_val.shape[3]))
    # X_train_ = np.reshape((X_train,50000, 784))
    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    return X_train, y_train, X_val, y_val, X_test, y_test
| [
"harshkn@gmail.com"
] | harshkn@gmail.com |
a4d637f4f3bdbf6475cb8ac0d01ca134d5e91a3e | fd2920a8ca609ea9f08c655104ac0a5e88f61fbf | /Struct/action.py | 8a7200ef5c4bc0b7109ec1dc5ae406be6650fae2 | [] | no_license | gtello79/Alg_Ford_Fulkerson | 5a6dde5f921f83bc3d5204a7cd43973f6c3d43a2 | 0ada5311b4e1ce549363f5aeff350cb11b43a507 | refs/heads/main | 2023-01-30T14:31:45.023007 | 2020-12-13T19:54:48 | 2020-12-13T19:54:48 | 320,720,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import copy as cp
class action:
    """A search-space move to ``newPoint`` carrying a traversal cost."""

    def __init__(self, newPoint, value):
        # Public attribute names (`actual`, `distance`) are part of the
        # interface and are kept unchanged.
        self.actual = newPoint
        self.distance = value

    def getState(self, state):
        """Return a deep copy of ``state`` extended by this action.

        The copy gets ``self.actual`` appended to its ``node`` list and
        ``self.distance`` added to its ``eval``; ``state`` itself is
        left untouched.
        """
        successor = cp.deepcopy(state)
        successor.node.append(self.actual)
        successor.eval += self.distance
        return successor
| [
"gonzalotello79@gmail.com"
] | gonzalotello79@gmail.com |
a4b07117ea536c0e1ad360a226b7b83d8cdc76f4 | bd8fd0c735daeb93ae10dbdd58a224204790e05d | /stock_picking_invoicing_journal_type/wizard/stock_invoice_onshipping.py | 9ba6e273d3bb7f5ce21671210be26bb76009a758 | [] | no_license | Liyben/vertical-instaladores | 87f3906240d2802c90b24e4402d48f33f468311b | 623a4ee3745c84cff383fa52f65edf7e8806435e | refs/heads/master | 2023-08-30T14:55:39.681612 | 2021-05-28T18:39:43 | 2021-05-28T18:39:43 | 235,099,352 | 0 | 0 | null | 2021-05-28T18:39:45 | 2020-01-20T12:44:53 | Python | UTF-8 | Python | false | false | 5,396 | py | # © 2020 Liyben
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class StockInvoiceOnshipping(models.TransientModel):
_inherit = 'stock.invoice.onshipping'
@api.model
def _default_default_debit_account_id(self):
default_debit_account = False
#Obtenemos el albaran actual
active_ids = self.env.context.get('active_ids', [])
if active_ids:
active_ids = active_ids[0]
pick_obj = self.env['stock.picking']
picking = pick_obj.browse(active_ids)
#Obtenemos el id de la cuenta deudora por defecto del Pedido de venta
if (self._get_journal_type() == 'sale') and picking and picking.sale_id.type_id.default_debit_account_id:
default_debit_account = picking.sale_id.type_id.default_debit_account_id.id
return default_debit_account
@api.model
def _default_account_id(self):
default_account = False
#Obtenemos el albaran actual
active_ids = self.env.context.get('active_ids', [])
if active_ids:
active_ids = active_ids[0]
pick_obj = self.env['stock.picking']
picking = pick_obj.browse(active_ids)
#Obtenemos el id de la cuenta a cobrar por defecto del Pedido de venta
if (self._get_journal_type() == 'sale') and picking and picking.sale_id.type_id.account_id:
default_account = picking.sale_id.type_id.account_id.id
return default_account
@api.model
def _default_tax_id(self):
default_tax = False
#Obtenemos el albaran actual
active_ids = self.env.context.get('active_ids', [])
if active_ids:
active_ids = active_ids[0]
pick_obj = self.env['stock.picking']
picking = pick_obj.browse(active_ids)
#Obtenemos los ids de los impuestos del Pedido de venta
if (self._get_journal_type() == 'sale') and picking and picking.sale_id.type_id.tax_id:
company_id = self.env.context.get('force_company', self.env.user.company_id.id)
default_tax = picking.sale_id.type_id.tax_id.filtered(lambda r: r.company_id.id == company_id)
return default_tax
@api.model
def _default_order_type(self):
default_type = False
#Obtenemos el albaran actual
active_ids = self.env.context.get('active_ids', [])
if active_ids:
active_ids = active_ids[0]
pick_obj = self.env['stock.picking']
picking = pick_obj.browse(active_ids)
#Obtenemos el id de la cuenta a cobrar por defecto del Pedido de venta
if (self._get_journal_type() == 'sale') and picking and picking.sale_id.type_id:
default_type = picking.sale_id.type_id.id
return default_type
default_debit_account_id = fields.Many2one('account.account', string='Cuenta deudora por defecto', domain=[('deprecated', '=', False)],
help="Actúa como una cuenta por defecto para importes en el debe", default=_default_default_debit_account_id)
account_id = fields.Many2one('account.account', string='Cuenta a cobrar por defecto',
domain=[('deprecated', '=', False)], help="La cuenta que será usada para la factura",
default=_default_account_id)
tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)],
default=_default_tax_id)
type_id = fields.Many2one('sale.order.type', string='Sale Type', default=_default_order_type)
@api.model
def _default_journal(self, journal_type):
"""
Get the default journal based on the given type
:param journal_type: str
:return: account.journal recordset
"""
default_journal = super()._default_journal(journal_type)
#Obtenemos el albaran actual
active_ids = self.env.context.get('active_ids', [])
if active_ids:
active_ids = active_ids[0]
pick_obj = self.env['stock.picking']
picking = pick_obj.browse(active_ids)
#Obtenemos el id del diario del Pedido de venta
if journal_type == 'sale' and picking and picking.sale_id and picking.sale_id.type_id and picking.sale_id.type_id.journal_id:
default_journal = picking.sale_id.type_id.journal_id.id
return default_journal
@api.multi
def _get_invoice_line_values(self, moves, invoice_values, invoice):
values = super()._get_invoice_line_values(moves, invoice_values, invoice)
if self.default_debit_account_id:
values.update({
'account_id': self.default_debit_account_id.id,
})
if self.tax_id:
values.update({
'invoice_line_tax_ids': [(6, 0, self.tax_id.ids)],
})
return values
@api.multi
def _build_invoice_values_from_pickings(self, pickings):
invoice, values = super()._build_invoice_values_from_pickings(pickings)
if self.account_id:
values.update({
'account_id': self.account_id.id,
})
invoice.update({
'account_id': self.account_id.id,
})
if self.type_id:
values.update({
'sale_type_id': self.type_id.id,
})
invoice.update({
'sale_type_id': self.type_id.id,
})
return invoice, values
@api.constrains('sale_journal')
def check_sale_journal(self):
#Obtenemos el albaran actual
active_ids = self.env.context.get('active_ids', [])
if active_ids:
active_ids = active_ids[0]
pick_obj = self.env['stock.picking']
picking = pick_obj.browse(active_ids)
if picking and picking.sale_id and picking.sale_id.type_id and (picking.sale_id.type_id.journal_id != self.sale_journal):
raise UserError(_('El diario seleccionado es distinto al configurado en su Pedido de Venta. Si desea crear la factura ambos diarios deben '
'coincidir.'))
| [
"soporte@liyben.com"
] | soporte@liyben.com |
7f88fd1dedc0c43a5b4fb5c5c46af45d748f9898 | 16746b76421c9ebe834547dedb9e558f1f981c0a | /Yahoo_fianance_api_package/yahoo_finance_master/test/test_yahoo.py | 1a05159abee1aa22f32b7fe7f4112d35e50ed106 | [] | no_license | clementcole/Project_Pythia | 39f49330d241538d5175772262b2edd3afd67cc6 | 23fef0a9f0368855f4b90c086e7b3571c81a8333 | refs/heads/master | 2021-03-27T13:47:28.039331 | 2017-03-05T19:10:17 | 2017-03-05T19:10:17 | 68,121,457 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,491 | py | import datetime
import sys
if sys.version_info < (2, 7):
from unittest2 import main as test_main, SkipTest, TestCase
else:
from unittest import main as test_main, SkipTest, TestCase
from yahoo_finance import Currency, Share, edt_to_utc, get_date_range
class TestShare(TestCase):
def setUp(self):
self.yahoo = Share('YHOO')
def test_yhoo(self):
# assert that these are float-like
float(self.yahoo.get_open())
float(self.yahoo.get_price())
def test_get_info(self):
info = self.yahoo.get_info()
self.assertEqual(info['start'], '1996-04-12')
self.assertEqual(info['symbol'], 'YHOO')
def test_get_historical(self):
history = self.yahoo.get_historical('2014-04-25', '2014-04-29')
self.assertEqual(len(history), 3)
expected = {
'Adj_Close': '35.830002',
'Close': '35.830002',
'Date': '2014-04-29',
'High': '35.889999',
'Low': '34.119999',
'Open': '34.369999',
'Symbol': 'YHOO',
'Volume': '28736000'
}
self.assertDictEqual(history[0], expected)
def test_get_historical_longer_than_1y(self):
# issue #2
history = self.yahoo.get_historical('2012-04-25', '2014-04-29')
self.assertEqual(history[-1]['Date'], '2012-04-25')
self.assertEqual(history[0]['Date'], '2014-04-29')
self.assertEqual(len(history), 505)
def test_get_historical_1d(self):
# issue #7
history = self.yahoo.get_historical('2014-04-29', '2014-04-29')
self.assertEqual(len(history), 1)
expected = {
'Adj_Close': '35.830002',
'Close': '35.830002',
'Date': '2014-04-29',
'High': '35.889999',
'Low': '34.119999',
'Open': '34.369999',
'Symbol': 'YHOO',
'Volume': '28736000'
}
self.assertDictEqual(history[0], expected)
def test_edt_to_utc(self):
edt = '5/26/2014 4:00pm'
utc = '2014-05-26 20:00:00 UTC+0000'
self.assertEqual(edt_to_utc(edt), utc)
def test_edt_to_utc_issue15(self):
# date string for yahoo can contains 0 rather than 12.
# This means that it cannot be parsed with %I see GH issue #15.
edt = '4/21/2015 0:13am'
utc = '2015-04-21 04:13:00 UTC+0000'
self.assertEqual(edt_to_utc(edt), utc)
def test_get_date_range(self):
result = [i for i in get_date_range('2012-04-25', '2014-04-29')]
expected = [
('2013-04-29', '2014-04-29'),
('2012-04-28', '2013-04-28'),
('2012-04-25', '2012-04-27'),
]
self.assertEqual(result, expected)
class TestCurrency(TestCase):
    """Tests for yahoo_finance.Currency (live EUR/PLN quotes).

    NOTE(review): like TestShare, these hit the remote Yahoo service and
    require network access — confirm before relying on them in CI.
    """
    def setUp(self):
        self.eur_pln = Currency('EURPLN')
    def test_eurpln(self):
        # assert these are float-like (float() raises on non-numeric)
        float(self.eur_pln.get_bid())
        float(self.eur_pln.get_ask())
        float(self.eur_pln.get_rate())
    def test_eurpln_date(self):
        eur_pln = Currency('EURPLN')
        try:
            # The %z directive is not available on every platform/Python.
            datetime.datetime.strptime(eur_pln.get_trade_datetime(),
                                       "%Y-%m-%d %H:%M:%S %Z%z")
        except ValueError as v:
            if "bad directive" in str(v):
                # Platform lacks %z support — skip rather than fail.
                raise SkipTest("datetime format checking requires the %z directive.")
            else:
                raise
if __name__ == "__main__":
    # test_main is unittest.main (or unittest2.main on Python < 2.7).
    test_main()
| [
"cmr0263@unt.edu"
] | cmr0263@unt.edu |
d8327625f3951b94827154fcd1efc3bb31fd7e6a | a4e59c4f47873daf440374367a4fb0383194d2ce | /Python/987.py | 071ba61e1dee050a891b2d02116afb3a3671fc25 | [] | no_license | maxjing/LeetCode | e37cbe3d276e15775ae028f99cf246150cb5d898 | 48cb625f5e68307390d0ec17b1054b10cc87d498 | refs/heads/master | 2021-05-23T17:50:18.613438 | 2021-04-02T17:14:55 | 2021-04-02T17:14:55 | 253,406,966 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def verticalTraversal(self, root: "TreeNode") -> "List[List[int]]":
        """Return the vertical-order traversal of a binary tree (LeetCode 987).

        Nodes are grouped by column (root at column 0, left child col-1,
        right child col+1).  Within a column, nodes appear top-to-bottom;
        nodes sharing both row and column are ordered by value.

        The annotations are string literals so this module can be imported
        outside LeetCode, where ``TreeNode`` and ``List`` are not defined
        (unquoted annotations raised NameError at class-definition time).
        """
        # Local import keeps the file self-contained outside LeetCode,
        # which normally injects these names for you.
        from collections import defaultdict, deque

        if not root:
            return []
        q = deque([(root, 0)])           # BFS queue of (node, column)
        res = defaultdict(list)          # column -> values, in row order
        while q:
            level = defaultdict(list)    # values per column on the current row
            for _ in range(len(q)):
                node, col = q.popleft()
                level[col].append(node.val)
                if node.left:
                    q.append((node.left, col - 1))
                if node.right:
                    q.append((node.right, col + 1))
            # Ties (same row, same column) are ordered by value before being
            # appended, so each column list stays in row order overall.
            for col in level:
                res[col].extend(sorted(level[col]))
        return [res[i] for i in sorted(res)]
"tvandcc@gmail.com"
] | tvandcc@gmail.com |
9f8aaad6b22ea7ecc6945c8288570a353c7d7b8f | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2/async_samples/sample_classify_document_from_url_async.py | 9e4775d42c58ae924f0d55dc072fb01011589d59 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 5,413 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_classify_document_from_url_async.py
DESCRIPTION:
This sample demonstrates how to classify a document from a URL using a trained document classifier.
To learn how to build your custom classifier, see sample_build_classifier.py.
More details on building a classifier and labeling your data can be found here:
https://aka.ms/azsdk/formrecognizer/buildclassifiermodel
USAGE:
python sample_classify_document_from_url_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CLASSIFIER_ID - the ID of your trained document classifier
-OR-
CLASSIFIER_CONTAINER_SAS_URL - The shared access signature (SAS) Url of your Azure Blob Storage container with your training files.
A document classifier will be built and used to run the sample.
"""
import os
import asyncio
async def classify_document_from_url_async(classifier_id):
    """Classify a remote PDF with a trained custom document classifier.

    Reads the Form Recognizer endpoint/key from the environment, lets the
    CLASSIFIER_ID environment variable override the passed-in id, and
    prints one line per document found in the classification result.
    """
    # [START classify_document_from_url_async]
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.formrecognizer.aio import DocumentAnalysisClient

    endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
    key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
    classifier_id = os.getenv("CLASSIFIER_ID", classifier_id)

    url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/forms/IRS-1040.pdf"
    credential = AzureKeyCredential(key)
    async with DocumentAnalysisClient(endpoint=endpoint, credential=credential) as client:
        poller = await client.begin_classify_document_from_url(classifier_id, document_url=url)
        result = await poller.result()

        print("----Classified documents----")
        for doc in result.documents:
            pages = [region.page_number for region in doc.bounding_regions]
            print(
                f"Found document of type '{doc.doc_type or 'N/A'}' with a confidence of {doc.confidence} contained on "
                f"the following pages: {pages}"
            )
    # [END classify_document_from_url_async]
async def main():
    """Optionally build a classifier from blob storage, then classify a doc.

    A classifier is built only when CLASSIFIER_CONTAINER_SAS_URL is set and
    no CLASSIFIER_ID is provided; otherwise the sample relies on the
    CLASSIFIER_ID environment variable picked up downstream.
    """
    classifier_id = None
    if os.getenv("CLASSIFIER_CONTAINER_SAS_URL") and not os.getenv("CLASSIFIER_ID"):
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
        from azure.ai.formrecognizer import (
            ClassifierDocumentTypeDetails,
            AzureBlobContentSource,
        )

        endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
        key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
        blob_container_sas_url = os.environ["CLASSIFIER_CONTAINER_SAS_URL"]

        # One document type per training-data prefix in the blob container.
        doc_types = {
            name: ClassifierDocumentTypeDetails(
                azure_blob_source=AzureBlobContentSource(
                    container_url=blob_container_sas_url,
                    prefix=f"{name}/train",
                )
            )
            for name in ("IRS-1040-A", "IRS-1040-D")
        }

        admin_client = DocumentModelAdministrationClient(
            endpoint=endpoint, credential=AzureKeyCredential(key)
        )
        async with admin_client:
            poller = await admin_client.begin_build_document_classifier(doc_types=doc_types)
            classifier = await poller.result()
        classifier_id = classifier.classifier_id

    await classify_document_from_url_async(classifier_id)
if __name__ == "__main__":
    from azure.core.exceptions import HttpResponseError
    try:
        asyncio.run(main())
    except HttpResponseError as error:
        print(
            "For more information about troubleshooting errors, see the following guide: "
            "https://aka.ms/azsdk/python/formrecognizer/troubleshooting"
        )
        # Demonstrates the two ways to inspect an HttpResponseError:
        # 1) a structured inner error with a service error code, or
        # 2) free-text matching on the message when no inner error exists.
        if error.error is not None:
            if error.error.code == "InvalidImage":
                print(f"Received an invalid image error: {error.error}")
            if error.error.code == "InvalidRequest":
                print(f"Received an invalid request error: {error.error}")
            # Re-raise the error after printing the diagnostic.
            raise
        # Inner error is None: fall back to checking the message text.
        if "Invalid request".casefold() in error.message.casefold():
            print(f"Uh-oh! Seems there was an invalid request: {error}")
            # Re-raise the error after printing the diagnostic.
            raise
        # NOTE(review): an HttpResponseError that matches neither branch is
        # swallowed here (no final raise) — confirm that is intended.
| [
"noreply@github.com"
] | noreply@github.com |
12437e1f025519cb508d5e60a98bb09581bceda3 | e7f87113f8118b4e7879ee3b9c6e1fe2ebc8dfe6 | /syntax_service/web_service.py | ac8c9dfb153d47754e7d6405aae6dcfdc1144094 | [] | no_license | tarasen/question_generator | 779d5d9a19d4c189a1184d6f70b4365fbcd04691 | e46b01a5a3c022a17dc7f88ef29f9d117916e4ea | refs/heads/master | 2021-06-17T16:40:58.009005 | 2017-06-15T16:49:07 | 2017-06-15T16:49:07 | 94,459,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | import subprocess
from itertools import islice
from logging import getLogger, ERROR
from time import sleep

import rapidjson
from cytoolz import take
from flask import Flask, request
from werkzeug.serving import WSGIRequestHandler

from syntax_service.syntax_analyzer import SyntaxAnalyzer
# Silence werkzeug's per-request access log; only errors are reported.
log = getLogger('werkzeug')
log.setLevel(ERROR)
app = Flask(__name__)
def run_win_cmd(cmd, top=2):
    """Start *cmd* through the shell and echo its first *top* stdout lines.

    The command is expected to keep running in the background (it hosts the
    MaltParser Java gateway), so this only fails if the process has already
    exited with a non-zero status by the time it is checked.

    :param cmd: shell command line to execute.
    :param top: number of initial stdout lines to echo for diagnostics.
    :raises Exception: if the process terminated with a failure exit code.
    """
    process = subprocess.Popen(cmd,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    sleep(5)  # give the JVM time to start before sampling its output
    for line in islice(process.stdout, top):
        print(line.decode().strip())
    # poll() refreshes and returns the exit status; reading .returncode
    # directly always yielded None because wait()/poll() was never called.
    errcode = process.poll()
    if errcode is not None and errcode != 0:
        # %-format the message — the original passed (msg, cmd) as two
        # exception args, so the command was never interpolated.
        raise Exception('cmd %s failed, see above for details' % cmd)
def init_analyzer():
    """Return a SyntaxAnalyzer, starting the MaltParser Java gateway first
    if the analyzer cannot connect to an already-running one.

    :raises Exception: if the gateway fails to start (via run_win_cmd).
    """
    try:
        return SyntaxAnalyzer()
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. No gateway is listening yet:
        # launch it in the background and retry once.
        # NOTE(review): the classpath below is machine-specific — TODO
        # make it configurable.
        cmd = """java -cp C:\\temp\\maltparser-1.9.0\\maltparser-1.9.0.jar;C:\\temp\\py4j-0.10.4\\py4j-java\\py4j0.10.4.jar;. MaltGateway syntagrus &"""
        run_win_cmd(cmd)
        return SyntaxAnalyzer()
service = init_analyzer()
@app.route('/parse', methods=['GET', 'POST'])
def parse():
    """Syntax-parse the posted sentences and return them enriched as JSON.

    NOTE(review): the body is decoded twice — Flask's get_json() result is
    fed to rapidjson.loads(), so clients must POST a JSON-encoded *string*
    containing an object with a ``sentences`` list; an empty body makes
    rapidjson.loads('') raise. Confirm this contract against the clients.
    """
    body = rapidjson.loads(request.get_json(silent=True, force=True) or '')
    sentences = body['sentences']
    # Merge each token dict with its (head, deprel) pair from the parser output.
    return rapidjson.dumps(
        [{**a, 'head': b[0], 'deprel': b[1]} for a, b in zip(sentences, service.build_syntax(sentences))],
        ensure_ascii=False), 200, {
            'Content-Type': 'application/json; charset=utf-8'}
if __name__ == '__main__':
    PORT = 7418
    print('ready to serve at', PORT)
    # HTTP/1.1 enables keep-alive connections on werkzeug's dev server.
    WSGIRequestHandler.protocol_version = "HTTP/1.1"
    app.run(threaded=True, debug=False, port=PORT)
| [
"cepxuopamoc@ya.ru"
] | cepxuopamoc@ya.ru |
23b9f1d7d1bf70506e95ca0232b7de7c109195e2 | 567eb1054bd0c166dac12e46db09d0e39a70500b | /todos/migrations/0001_initial.py | 547f19cf01983a9e3e9cda3f6e387a0405f719c0 | [] | no_license | xudifsd/hook2do | 2b8896c59579a7656676550f2ce7a56fe22dea61 | f6c2bea9207a661da22e3c42d0a18decd1ba0135 | refs/heads/master | 2021-01-18T20:26:30.976320 | 2014-11-15T05:48:22 | 2014-11-15T05:48:22 | 26,671,337 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration for the ``todos`` app: creates ToDoItem and
    ToDoItemList plus the foreign keys linking them to the user model."""
    dependencies = [
        # Depend on whichever user model the project configured (AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='ToDoItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content', models.CharField(max_length=256)),
                # NOTE(review): the 'archived' choice label is lower-case
                # unlike the other two — confirm that is intentional.
                ('status', models.CharField(default=b'default', max_length=16, choices=[(b'default', b'Default status'), (b'scheduled', b'Scheduled status'), (b'archived', b'archived status')])),
                ('due_at', models.DateTimeField(null=True, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True, auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ToDoItemList',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=64)),
                ('is_archived', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True, auto_now_add=True)),
                ('owner', models.ForeignKey(related_name='lists', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # ToDoItem's relations are added after both models exist.
        migrations.AddField(
            model_name='todoitem',
            name='itemlist',
            field=models.ForeignKey(related_name='todos', blank=True, to='todos.ToDoItemList', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='todoitem',
            name='owner',
            field=models.ForeignKey(related_name='todos', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
| [
"xudifsd@gmail.com"
] | xudifsd@gmail.com |
e5e3cbe5abeb2f0e0244f41aa88a0dc8c886e935 | a77b1fd3128787a5479371d4fae357ffca4d17e4 | /python/class.py | 9af3c8c89d8a36200a9f58cb9ae3ae946b5d7c3d | [] | no_license | kaidee/kaidee | 709fcd1218cc426d00735fea8bd39a5294ad71db | 0ff11afe9222235328937bf68760829ba1fb00d0 | refs/heads/master | 2020-05-20T01:19:30.490372 | 2017-11-01T11:38:06 | 2017-11-01T11:38:06 | 8,902,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | class FirstClass:
"""docstring for FirstClass"""
    def __init__(self, arg):
        # Store the constructor argument on the instance (Python 2 tutorial code).
        # super(FirstClass, self).__init__()
        self.arg = arg
    def setdata(self, value):
        # `data` only exists on the instance after this has been called once.
        self.data = value
    def display(self):
        # Echo the constructor argument (Python 2 print statement).
        print self.arg
# Demo: create an instance and dump the module namespace (Python 2 syntax).
z = 100
x = FirstClass("ling")
x.display()
y = 'hello'
# print locals()
for k, v in locals().items():
    print k, '=', v
"lkaidee@gmail.com"
] | lkaidee@gmail.com |
10a476e13c38323dbe8b0c4072c8570fa256f26c | 40fc1d38f2d4b643bc99df347c4ff3a763ba65e3 | /examples/menus/basic1/data/states/menu2.py | 6adb8054f7339f609ba0c3ea440473cc73fedab8 | [] | no_license | alecordev/pygaming | 0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c | 35e479b703acf038f47c2151b3759ad852781e4c | refs/heads/master | 2023-05-14T05:03:28.484678 | 2021-06-03T10:11:08 | 2021-06-03T10:11:08 | 372,768,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | import pygame as pg
from .. import tools
import random
class Menu(tools.States):
    """Second menu state: renders a title and delegates option handling to
    the shared tools.States machinery. Most state hooks are intentionally
    empty stubs."""
    def __init__(self, screen_rect):
        tools.States.__init__(self)
        self.screen_rect = screen_rect
        # Title centred horizontally, 75px from the top, 50pt grey text.
        self.title, self.title_rect = self.make_text(
            "Menu2 State", (75, 75, 75), (self.screen_rect.centerx, 75), 50
        )
        self.pre_render_options()
        # Layout of the option list: offset from the bottom and row spacing.
        self.from_bottom = 200
        self.spacer = 75
    def update(self, now, keys):
        # Per-frame update: only the selected-option highlight changes.
        self.change_selected_option()
    def const_event(self, keys):
        # No continuous (held-key) input handled in this state.
        pass
    def cleanup(self):
        # Nothing to tear down when leaving this state.
        pass
    def entry(self):
        # Nothing to initialise when (re-)entering this state.
        pass
| [
"alecor.dev@gmail.com"
] | alecor.dev@gmail.com |
92dd37214e9ff42428ae4975726aa638fdb81f0b | da0dc454f7c81856eedec17663d5893ce1d83bd5 | /4.d.9.py | 4719dd322f50b6ec0b12e6bd5941987c46bf0fb8 | [] | no_license | KritikRawal/Lab_Exercise-all | cf199f7f8d518a17464e67b6fdc16427aec11997 | f377eedc558195d4c2de07913e0ff0c0733c842a | refs/heads/master | 2021-01-26T14:05:13.288735 | 2020-02-27T14:27:59 | 2020-02-27T14:27:59 | 243,447,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | """Write a Python program to iterate over dictionaries using for loops."""
# Demo: walk a dict's key/value pairs with items().
d = {'x': 10, 'y': 20, 'z': 30}
for key, value in d.items():
    print(f"{key} -> {value}")
"noreply@github.com"
] | noreply@github.com |
06d3e36130944e9c7a4d9343a486d6b700770ca1 | c0a4879a91d0175af6ecd76a45a20a0e6643df6b | /app/models/ScannerThread.py | 6d1ceb19f54bcdf2e55f3a0711c6e9fde0f2a20f | [] | no_license | Hugo291/Flask-OCR | e4c0e6e65fbd84c920ed08788e044b71e518615f | 2d4872136715febd461a1008d318cb235bc8f090 | refs/heads/master | 2023-05-24T08:28:56.954393 | 2023-02-05T11:06:45 | 2023-02-05T11:06:45 | 129,457,196 | 7 | 3 | null | 2023-05-01T20:56:59 | 2018-04-13T21:53:21 | Python | UTF-8 | Python | false | false | 5,608 | py | import os
import time
from threading import Thread
from app.config import UPLOAD_DIR_PDF, UPLOAD_DIR_JPG
from app.models.OCR import OCR
from app.models.Pdf import convert_to_jpg, page_number
class ScannerThread(Thread):
    """Background worker that OCRs queued PDF files.

    PDF ids are queued with :meth:`append_file`; the thread converts each
    PDF to one JPEG per page, runs OCR on every page, persists the results
    through the app's database models and tracks a 0-100 progress value.
    """
    def __init__(self):
        super().__init__()
        print("init ")
        # Queue of pending pdf_file ids; index 0 is the file being processed.
        self.__list_file = []
        # Progress of the current file, 0-100.
        self.__percent = 0
        # Percentage helper: current/total scaled to an int in 0-100.
        self.cal = lambda current, total: int(current * 100 / total)
    def run(self):
        """Poll forever; whenever files are queued, process them one by one."""
        super().run()
        print("Thread Run")
        while True:
            time.sleep(10)  # idle poll interval between queue checks
            while self.has_pending_file():
                try:
                    self.set_percent(0)
                    self.convert_scan_file()
                    self.delete_last_file_scaned()
                    self.set_percent(0)
                except Exception as error:
                    # Keep the worker alive on any per-file failure.
                    print("Erreur (run): " + str(error))
                    print(error)
    def has_pending_file(self) -> bool:
        """Return True if at least one file is waiting in the queue."""
        if len(self.__list_file) != 0:
            return True
        return False
    def get_last_file_scaned(self):
        """
        :return: the id of the file currently being (or last) scanned
        """
        return self.__list_file[0]
    def delete_last_file_scaned(self):
        """Remove the just-processed file id from the head of the queue."""
        del self.__list_file[0]
    def append_file(self, pdf_file_id):
        """
        Add the file to the list of files that will be scanned.
        :param pdf_file_id: the pdf id
        """
        print("Add file : " + str(pdf_file_id))
        self.__list_file.append(pdf_file_id)
    def convert_pdf_to_jpg(self, pdf_page_number):
        """
        Convert every page of the current PDF to a JPEG file.
        :param pdf_page_number: number of pages in the pdf
        :raises Exception: if the expected PDF file does not exist
        """
        if os.path.isfile(os.path.join(UPLOAD_DIR_PDF, str(self.get_last_file_scaned()) + ".pdf")):
            file_path = os.path.join(UPLOAD_DIR_PDF, str(self.get_last_file_scaned()) + ".pdf")
            dir_dest = os.path.join(UPLOAD_DIR_JPG, str(self.get_last_file_scaned()))
            for index in range(pdf_page_number):
                print("Convertion of image (" + str(index) + ")")
                convert_to_jpg(file_path, dir_dest, num_page=index)
                # Conversion accounts for the first half (0-50%) of progress.
                self.set_percent(int(self.cal(current=index, total=pdf_page_number)/2))
        else:
            print("The file is not supported by the system")
            raise Exception('The file is not supported by the system')
    def ocr_jpg(self, number_file):
        """
        Run OCR over every converted page image and persist the text and
        word boxes via the database models.
        :param number_file: number of page images to process
        :raises Exception: if the folder of converted images is missing
        """
        # Imported lazily — presumably to avoid a circular import with the
        # models module; TODO confirm.
        from app.models.DataBase import OCRPage, db, OcrBoxWord
        # check whether the folder of converted images exists
        if os.path.isdir(os.path.join(UPLOAD_DIR_JPG, str(self.get_last_file_scaned()))):
            # folder containing all the jpg pages
            folder = os.path.join(UPLOAD_DIR_JPG, str(self.get_last_file_scaned()))
            # process each page image
            for index in range(number_file):
                image_ocr = OCRPage(
                    pdf_file_id=self.get_last_file_scaned(),
                    num_page=index
                )
                path_file_img = os.path.join(folder, '{}.jpg'.format(str(index)))
                print("Scan file : "+str(index))
                scanner_ocr = OCR(path_file_img)
                image_ocr.text = scanner_ocr.scan_text()
                db.session.add(image_ocr)
                # Commit now so image_ocr.id is assigned for the box rows below.
                db.session.commit()
                id_pdf_page = image_ocr.id
                box_word = scanner_ocr.scan_data()
                for box in box_word:
                    # NOTE(review): `box_word` is rebound from the scan result
                    # to each OcrBoxWord row inside this loop — works because
                    # the iterator was already created, but it is confusing.
                    box_word = OcrBoxWord(
                        pdf_page_id=id_pdf_page,
                        box=box
                    )
                    db.session.add(box_word)
                # commit all word boxes of this page
                db.session.commit()
                # OCR accounts for the second half (50-100%) of progress.
                self.set_percent(int(self.cal(current=index, total=number_file)/2+50))
        else:
            print('The folder is not found')
            raise Exception('The folder is not found')
    # def convert_scan_file(self, folder_number, pdf_file_id):
    def convert_scan_file(self):
        """Convert the current PDF to images, OCR them and update its status.

        Status codes written to PdfFile: 1 = in progress, 2 = finished,
        -1 = failed.
        """
        from app.models.DataBase import PdfFile, db
        # database row of the pdf being processed
        pdf_file_db = PdfFile.query.filter_by(
            id=self.get_last_file_scaned()
        ).first()
        try:
            # set status: in progress
            pdf_file_db.status = 1
            db.session.commit()
            # determine the pdf page count
            file_path = os.path.join(UPLOAD_DIR_PDF, str(self.get_last_file_scaned()) + ".pdf")
            pdf_page_number = page_number(file_path)
            print("pdf page number : " + str(pdf_page_number))
            # convert to jpg
            self.convert_pdf_to_jpg(pdf_page_number)
            # ocr the images
            self.ocr_jpg(pdf_page_number)
            # set status: finished
            pdf_file_db.status = 2
            db.session.commit()
        except Exception as exception:
            print(exception)
            print("Erreur (convert_scan_file): " + str(exception))
            # set status: failed
            pdf_file_db.status = -1
            db.session.commit()
    def __str__(self):
        """Return the current progress percentage as a string."""
        return str(self.get_percent())
    def get_percent(self):
        """Return the current progress (0-100) of the file being processed."""
        return self.__percent
    def set_percent(self, percent):
        """Update the progress value and echo it to stdout."""
        print(str(percent) + "%")
        self.__percent = percent
| [
"33847075+Hugo291@users.noreply.github.com"
] | 33847075+Hugo291@users.noreply.github.com |
c3412d496ec2458f35e2d607ca43e01d35c7d686 | 385a3c8cdd62a237bfa69e1a77359f81ce43542d | /variable_scope.py | 860f9dc273fda258dccdf413a977c8cbf1db0950 | [
"Apache-2.0"
] | permissive | tangentspire/Python_Practice | 8528a6314a4dd02fe9c43b9cce43a38e9b38d43d | e7f22303230a2ffa4e3f5ae57854bac9c4c3bc34 | refs/heads/master | 2021-06-18T17:01:08.023455 | 2020-03-27T05:25:22 | 2020-03-27T05:25:22 | 138,232,355 | 0 | 0 | Apache-2.0 | 2021-06-10T20:30:43 | 2018-06-21T23:34:13 | Python | UTF-8 | Python | false | false | 1,327 | py | # a exercise messing around with local, enclosed, and global variable scoping.
# https://www.smallsurething.com/how-variable-scope-works-in-python/
x = "I am a global variable, x."
# looks for x locally in the function, doesn't find it, so it looks globally and finds it at line 1
def global_print():
#global x
print("I am going to print the global variable....")
print(x)
# Defines a local variable, looks for x finds it locally and prints it.
def local_print():
x = "I am a local variable, x."
print("I am going to print the local variable....")
print(x)
# specifies that x in the function refers to GLOBAL variable x. Sets GLOBAL variable x to a new string and prints
def global_redefine_print():
global x
x = "I am a redefined global variable"
print(x)
def local_enclosing():
x = "I am enclosed variable the FIRST."
def inception():
x = "I am a local, enclosed variable SECOND."
print(x)
inception()
print(x)
global_print()
local_print()
global_redefine_print()
# you can see that the output of the function below changes the second time because the global variable was changed.
global_print()
# despite there being no local definition in inception there IS a local definition of x in the outer function. The enclosing variable IS local so we don't use a global variable for x.
local_enclosing()
| [
"seanvenard@gmail.com"
] | seanvenard@gmail.com |
b902452d883e17b367874e8a5573c31469d0319a | bbbcd2ac7088ab1e0111798107e9719382b965b9 | /German Traffic Signs Classification/german_traffic_signs.py | b5e74033a28853dfb1732636cbf3a4ab2c7e8d9c | [] | no_license | negatively/DS-ML-Project | 5dccda0015389ae70e111f1d1b9a805cb83396da | da89b9bad4e167cd4fff632c72b64e0349bf0771 | refs/heads/main | 2023-09-03T06:13:25.211544 | 2021-11-02T15:48:25 | 2021-11-02T15:48:25 | 409,063,054 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,243 | py | # -*- coding: utf-8 -*-
"""German Traffic Signs.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11oG_rJKdiuWQkcPL9tDivDONJYnFymeV
## Load Library and Dataset
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import tensorflow as tf
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Dense, Flatten, Dropout, MaxPool2D
from sklearn.model_selection import train_test_split
from zipfile import ZipFile
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# %config InlineBackend.figure_format = 'retina'
# Set up the Kaggle API. NOTE: the `!`-prefixed lines below are IPython shell
# magics — they only run inside Colab/Jupyter, not as a plain Python script.
! pip install -q kaggle
from google.colab import files
files.upload()  # interactive upload of kaggle.json (API credentials)
! mkdir ~/.kaggle
! cp kaggle.json ~/.kaggle/
! chmod 600 ~/.kaggle/kaggle.json
# Download the dataset from Kaggle
!kaggle datasets download -d saadhaxxan/germantrafficsigns
# Extract the archive
with ZipFile("/content/germantrafficsigns.zip", "r") as zip_ref:
    zip_ref.extractall('working')
# Load data
training_file = '/content/working/train.p'
testing_file = '/content/working/test.p'
# Open and load the training pickle
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
# Open and load the testing pickle
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
print('Data Loaded')
df_sign = pd.read_csv('/content/working/signnames.csv')
SIGN_NAMES = df_sign.SignName.values
df_sign.set_index('ClassId', inplace = True)
df_sign.head(10)  # display-only; a no-op outside a notebook
# Define features and labels for training data
X, y = train['features'], train['labels']
# Converting lists into numpy arrays
data = np.array(X)
labels = np.array(y)
print(data.shape, labels.shape)
# Define features and labels for testing data
X_test, y_test = test['features'], test['labels']
# Converting lists into numpy arrays
X_test = np.array(X_test)
y_test = np.array(y_test)
print(X_test.shape, y_test.shape)
# Split the training data into train and validation sets (90/10)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1, random_state = 1310)
print(X_train.shape, X_val.shape, y_train.shape, y_val.shape)
# Visualise the class distribution
n_labels = np.unique(y_train).size
def hist_data(y_data, title=None, ax=None, **kwargs):
    """Plot a per-class histogram of the labels in ``y_data``.

    Uses the module-level ``n_labels`` (number of distinct classes) to set
    one bin per class, centred on the integer class ids.  If ``ax`` is not
    supplied a new figure/axes pair is created.  Extra keyword arguments
    are forwarded to ``Axes.hist`` (e.g. ``color``, ``label``).
    """
    if not ax:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax.hist(y_data, np.arange(-0.5, n_labels+1.5 ), stacked=True, **kwargs)
    ax.set_xlim(-0.5, n_labels-1.5)
    if 'label' in kwargs : ax.legend()
    if title : ax.set_title(title)
fig, ax = plt.subplots(1, 3, figsize = (20,5))
hist_data(y_train, title = 'Distribusi kelas pada Data Training', ax = ax[0])
hist_data(y_val, title = 'Distribusi kelas pada Data Validation', ax = ax[1], color = 'black')
hist_data(y_test, title = 'Distribusi kelas pada Data Test', ax = ax[2], color = 'grey')
"""Dari plot yang tertampil, dapat dilihat bahwa distribusi kelas masing-masing bagian data terlihat mirip. Oleh karena itu, kita tidak perlu melakukan proses normalisasi. """
# Converting the labels into one hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, 43)
y_val = to_categorical(y_val, 43)
# Membuat callback
class myCallback(tf.keras.callbacks.Callback):
    """Keras callback that stops training once accuracy exceeds 96%."""

    def on_epoch_end(self, epoch, logs=None):
        """Stop training when the epoch's 'accuracy' metric passes 0.96.

        ``logs.get('accuracy')`` can return None (metric missing); the
        original compared ``None > 0.96`` which raises TypeError on
        Python 3, and used a mutable ``{}`` default argument.
        """
        logs = logs or {}
        acc = logs.get('accuracy')
        if acc is not None and acc > 0.96:
            print("\nAkurasi telah mencapai > 96%. Stop Training!")
            self.model.stop_training = True
callbacks = myCallback()
# Membuat model
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5), activation = 'relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters = 32, kernel_size = (5,5), activation = 'relu'))
model.add(MaxPool2D(pool_size = (2,2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters = 64, kernel_size=(3,3), activation='relu'))
model.add(Conv2D(filters = 64, kernel_size=(3,3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(43, activation='softmax'))
model.summary()
# Compile model
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Fit the model
hist = model.fit(X_train, y_train,
batch_size = 32,
epochs = 25,
validation_data=(X_val, y_val),
callbacks=[callbacks])
# Save model
model.save('trafficsign.h5')
# Plotting graphs for accuracy
plt.figure(0)
plt.plot(hist.history['accuracy'], label='training accuracy')
plt.plot(hist.history['val_accuracy'], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
# Plotting graphs for loss
plt.figure(1)
plt.plot(hist.history['loss'], label='training loss')
plt.plot(hist.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
# Testing accuracy with test data
from sklearn.metrics import accuracy_score
y_pred = np.argmax(model.predict(X_test,), axis=-1)
accuracy_score(y_test, y_pred)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
| [
"mileniandi39@gmail.com"
] | mileniandi39@gmail.com |
330cf22f710de37c4a2a14f3b365073cf76b16d8 | 336af6b9a3df1917bdf35ccb3ea0ed2a841ebc07 | /src/lib/inputparser.py | 95240236ba96361c17a1b8455987924863d388cf | [] | no_license | redsoxfantom/sprinkler_designer | 3c9791a5e818341b08a6cafc5ad1fbaa3884e6e2 | 89d2fdbee66b5371a5fb28244f9f0964a1bde44d | refs/heads/master | 2020-03-08T03:03:18.312543 | 2018-04-08T23:11:21 | 2018-04-08T23:11:21 | 127,879,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import json
from src.lib.sprinklers.factory import createsprinkler
from src.lib.field import Field
from src.lib.scorers.scorekeeper import ScoreKeeper
from src.lib.strategies.manager import StrategyManager
class InputParser:
def __init__(self, filename):
loadedfile = json.load(open(filename))
self.sprinklers = {}
for sprinkler in loadedfile['sprinklers']:
self.sprinklers[sprinkler] = createsprinkler(sprinkler)
self.originalfield = Field(loadedfile["field"])
self.scorekeeper = ScoreKeeper(loadedfile["weights"])
self.strategymanager = StrategyManager(loadedfile["strategies"], self.scorekeeper, self.sprinklers) | [
"redsoxfantom@gmail.com"
] | redsoxfantom@gmail.com |
380a73b7ffd584930d557f8d65de872cfdc9fbc7 | a0cc2d898690a33db84124aae128a014f05c1748 | /PE5/PE5_1.py | 78699fa1d857a31568c764111d00a9e4d689e91e | [] | no_license | Hilgon2/prog | 2bf9adb6315de3e6c95bb4cc944ec1e2842ae64f | b594dd523e2efa51250d3be43cf74cf2ca6229e9 | refs/heads/master | 2020-03-29T09:40:33.924573 | 2019-01-29T13:39:48 | 2019-01-29T13:39:48 | 149,769,217 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | def som(getal1, getal2, getal3):
return getal1 + getal2 + getal3
print(som(1, 5, 8)) | [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
d6e2ffc6e5a6866b95ee3fa8a2af8eb20885ea4c | b24e584c29f3ad68404f2345828ad3ed8fdd04de | /file_loader.py | 52c79312cf872763fc101186c9613ced17f853ba | [] | no_license | urube/DE321_Assessment2 | b17ca23159f78bf14d567d3bd6820e0807c5dac5 | 61bf4ea9dad3718ac8bdcf3156e3bb2c46ec13e7 | refs/heads/master | 2021-05-16T19:45:49.926933 | 2020-04-02T11:25:30 | 2020-04-02T11:25:30 | 250,444,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | import os
class Controller:
def load_file():
images = []
for filename in oslistdir()
| [
"andaz.rai002@gmail.com"
] | andaz.rai002@gmail.com |
a6ef29eb5bb901813eb35f2c5eb018aaaf451497 | ce0a9815b0b265b3d75336f5128d44cc1a7944a0 | /HW3/gosho_tests.py | ba7ce2e89d6e6886ec9ef8755f6d029c38bee8b9 | [] | no_license | borisaltanov/Python-FMI | a81bc9ffab8b181664924996723e056cbeeb967a | 43ca1864556b02f94ec962ce7fa294d984e3aa09 | refs/heads/master | 2021-01-12T12:44:34.096534 | 2016-09-30T13:05:58 | 2016-09-30T13:05:58 | 69,667,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,442 | py | import unittest
import datetime
import math
from time import sleep
import sol2 as s
class TestUser(unittest.TestCase):
def setUp(self):
self.user = s.User("Lord Bendtner")
def test_name(self):
self.assertIsNotNone(getattr(self.user, 'full_name'))
self.assertEqual(self.user.full_name, "Lord Bendtner")
def test_has_uuid(self):
self.assertIsNotNone(getattr(self.user, 'uuid'))
def test_add_post(self):
self.user.add_post("I scored a hattrick today!")
post = next(self.user.get_post())
self.assertEqual(post.content, "I scored a hattrick today!")
self.assertEqual(self.user.uuid, post.author)
self.assertTrue(isinstance(post.published_at, datetime.datetime))
for i in range(60):
self.user.add_post(chr(i + 64))
all_posts = list(self.user.get_post())
length = len(all_posts)
self.assertEqual(length, 50)
self.assertEqual(all_posts[length - 1].content, '{')
self.assertEqual(all_posts[0].content, 'J')
class TestSocialGraph(unittest.TestCase):
def setUp(self):
self.social_graph = s.SocialGraph()
self.terry = s.User("Terry Gilliam")
self.eric = s.User("Eric Idle")
self.graham = s.User("Graham Chapman")
self.john = s.User("John Cleese")
self.michael = s.User("Michael Palin")
self.social_graph.add_user(self.terry)
self.social_graph.add_user(self.eric)
self.social_graph.add_user(self.graham)
self.social_graph.add_user(self.john)
self.social_graph.add_user(self.michael)
def test_add_get_del_user(self):
self.assertTrue(
self.social_graph.get_user(self.terry.uuid), self.terry)
self.assertTrue(
self.social_graph.get_user(self.eric.uuid), self.eric)
self.assertTrue(
self.social_graph.get_user(self.graham.uuid), self.graham)
self.assertTrue(
self.social_graph.get_user(self.john.uuid), self.john)
self.assertTrue(
self.social_graph.get_user(self.michael.uuid), self.michael)
with self.assertRaises(s.UserAlreadyExistsError):
self.social_graph.add_user(self.eric)
with self.assertRaises(s.UserAlreadyExistsError):
self.social_graph.add_user(self.graham)
with self.assertRaises(s.UserAlreadyExistsError):
self.social_graph.add_user(self.john)
with self.assertRaises(s.UserAlreadyExistsError):
self.social_graph.add_user(self.michael)
with self.assertRaises(s.UserAlreadyExistsError):
self.social_graph.add_user(self.terry)
self.social_graph.delete_user(self.terry.uuid)
self.assertTrue(
self.social_graph.get_user(self.eric.uuid), self.eric)
with self.assertRaises(s.UserDoesNotExistError):
self.social_graph.get_user(self.terry.uuid)
def test_followers_following(self):
self.social_graph.follow(self.terry.uuid, self.eric.uuid)
self.social_graph.follow(self.terry.uuid, self.graham.uuid)
self.social_graph.follow(self.graham.uuid, self.john.uuid)
self.social_graph.follow(self.michael.uuid, self.terry.uuid)
self.social_graph.follow(self.eric.uuid, self.terry.uuid)
with self.assertRaises(ValueError):
self.social_graph.is_following(self.terry.uuid, self.terry.uuid)
self.assertTrue(
self.social_graph.is_following(self.terry.uuid, self.eric.uuid))
self.assertTrue(
self.social_graph.is_following(self.terry.uuid, self.graham.uuid))
self.assertTrue(
self.social_graph.is_following(self.graham.uuid, self.john.uuid))
self.assertTrue(
self.social_graph.is_following(self.michael.uuid, self.terry.uuid))
self.assertFalse(
self.social_graph.is_following(self.graham.uuid, self.terry.uuid))
self.assertEqual(
self.social_graph.followers(self.terry.uuid),
{self.eric.uuid, self.michael.uuid})
self.assertEqual(
self.social_graph.followers(self.graham.uuid),
{self.terry.uuid})
self.assertEqual(
self.social_graph.followers(self.john.uuid),
{self.graham.uuid})
self.assertEqual(
self.social_graph.following(self.graham.uuid),
{self.john.uuid})
self.assertEqual(
self.social_graph.following(self.michael.uuid),
{self.terry.uuid})
self.assertEqual(
self.social_graph.following(self.eric.uuid),
{self.terry.uuid})
def test_friends(self):
self.social_graph.follow(self.terry.uuid, self.eric.uuid)
self.social_graph.follow(self.terry.uuid, self.graham.uuid)
self.social_graph.follow(self.graham.uuid, self.john.uuid)
self.social_graph.follow(self.graham.uuid, self.terry.uuid)
self.social_graph.follow(self.john.uuid, self.graham.uuid)
self.social_graph.follow(self.michael.uuid, self.terry.uuid)
self.social_graph.follow(self.eric.uuid, self.terry.uuid)
self.assertEqual(
self.social_graph.friends(self.terry.uuid),
{self.eric.uuid, self.graham.uuid})
self.assertEqual(
self.social_graph.friends(self.graham.uuid),
{self.john.uuid, self.terry.uuid})
self.assertEqual(
self.social_graph.friends(self.eric.uuid),
{self.terry.uuid})
self.assertEqual(
self.social_graph.friends(self.michael.uuid),
set())
# def test_all_paths_1(self):
# self.social_graph.follow(self.terry.uuid, self.eric.uuid)
# self.social_graph.follow(self.eric.uuid, self.graham.uuid)
# self.social_graph.follow(self.graham.uuid, self.john.uuid)
# self.social_graph.follow(self.terry.uuid, self.michael.uuid)
# a = s.User("A")
# b = s.User("B")
# c = s.User("C")
# self.social_graph.add_user(a)
# self.social_graph.add_user(b)
# self.social_graph.add_user(c)
# self.social_graph.follow(self.michael.uuid, a.uuid)
# self.social_graph.follow(a.uuid, b.uuid)
# self.social_graph.follow(b.uuid, self.eric.uuid)
# self.social_graph.follow(self.eric.uuid, c.uuid)
# self.social_graph.follow(c.uuid, self.eric.uuid)
# self.assertEqual(
# self.social_graph.find_all_paths(self.terry.uuid),
# {(self.terry.uuid,
# self.eric.uuid,
# self.graham.uuid,
# self.john.uuid),
# (self.terry.uuid,
# self.eric.uuid,
# c.uuid),
# (self.terry.uuid,
# self.michael.uuid,
# a.uuid,
# b.uuid,
# self.eric.uuid,
# self.graham.uuid,
# self.john.uuid),
# (self.terry.uuid,
# self.michael.uuid,
# a.uuid,
# b.uuid,
# self.eric.uuid,
# c.uuid)
# })
# def test_all_paths_2(self):
# a = s.User("A")
# b = s.User("B")
# c = s.User("C")
# d = s.User("D")
# self.social_graph.add_user(a)
# self.social_graph.add_user(b)
# self.social_graph.add_user(c)
# self.social_graph.add_user(d)
# self.social_graph.follow(a.uuid, b.uuid)
# self.social_graph.follow(a.uuid, c.uuid)
# self.social_graph.follow(b.uuid, c.uuid)
# self.social_graph.follow(b.uuid, d.uuid)
# self.social_graph.follow(c.uuid, d.uuid)
# self.social_graph.follow(d.uuid, c.uuid)
# self.assertEqual(
# self.social_graph.find_all_paths(a.uuid),
# {(a.uuid, b.uuid, c.uuid, d.uuid),
# (a.uuid, b.uuid, d.uuid, c.uuid),
# (a.uuid, c.uuid, d.uuid)})
def test_distance(self):
self.social_graph.follow(self.terry.uuid, self.eric.uuid)
self.social_graph.follow(self.eric.uuid, self.graham.uuid)
self.social_graph.follow(self.graham.uuid, self.john.uuid)
self.social_graph.follow(self.terry.uuid, self.michael.uuid)
a = s.User("A")
b = s.User("B")
c = s.User("C")
self.social_graph.add_user(a)
self.social_graph.add_user(b)
self.social_graph.add_user(c)
self.social_graph.follow(self.michael.uuid, a.uuid)
self.social_graph.follow(a.uuid, b.uuid)
self.social_graph.follow(b.uuid, self.eric.uuid)
self.social_graph.follow(self.eric.uuid, c.uuid)
self.social_graph.follow(c.uuid, self.eric.uuid)
self.assertEqual(
self.social_graph.max_distance(self.john.uuid), math.inf)
self.assertEqual(self.social_graph.max_distance(self.terry.uuid), 3)
self.assertEqual(
self.social_graph.min_distance(self.terry.uuid, self.eric.uuid), 1)
self.social_graph.follow(self.michael.uuid, self.terry.uuid)
self.assertEqual(
self.social_graph.min_distance(
self.michael.uuid, self.eric.uuid), 2)
self.social_graph.unfollow(b.uuid, self.eric.uuid)
self.assertEqual(self.social_graph.max_distance(self.terry.uuid), 3)
with self.assertRaises(s.UsersNotConnectedError):
self.social_graph.min_distance(a.uuid, self.eric.uuid)
def test_nth_layer_follwings(self):
self.social_graph.follow(self.terry.uuid, self.graham.uuid)
self.social_graph.follow(self.john.uuid, self.michael.uuid)
self.social_graph.follow(self.terry.uuid, self.john.uuid)
self.assertEqual(
self.social_graph.nth_layer_followings(self.terry.uuid, 1),
{self.graham.uuid, self.john.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.terry.uuid, 2),
{self.michael.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.graham.uuid, 1),
set())
def test_nth_layer_follwings2(self):
self.social_graph = s.SocialGraph()
self.zero = s.User("Zero")
self.one = s.User("One")
self.two = s.User("Two")
self.three = s.User("Three")
self.four = s.User("Four")
self.five = s.User("Five")
self.six = s.User("Six")
self.social_graph.add_user(self.zero)
self.social_graph.add_user(self.one)
self.social_graph.add_user(self.two)
self.social_graph.add_user(self.three)
self.social_graph.add_user(self.four)
self.social_graph.add_user(self.five)
self.social_graph.add_user(self.six)
self.social_graph.follow(self.zero.uuid, self.one.uuid)
self.social_graph.follow(self.zero.uuid, self.one.uuid)
self.social_graph.unfollow(self.zero.uuid, self.one.uuid)
self.social_graph.follow(self.zero.uuid, self.one.uuid)
self.social_graph.follow(self.zero.uuid, self.four.uuid)
self.social_graph.follow(self.zero.uuid, self.five.uuid)
self.social_graph.follow(self.one.uuid, self.two.uuid)
self.social_graph.follow(self.one.uuid, self.three.uuid)
self.social_graph.follow(self.one.uuid, self.four.uuid)
self.social_graph.follow(self.two.uuid, self.three.uuid)
self.social_graph.follow(self.four.uuid, self.two.uuid)
self.social_graph.follow(self.five.uuid, self.six.uuid)
self.social_graph.follow(self.six.uuid, self.zero.uuid)
with self.assertRaises(ValueError):
self.social_graph.nth_layer_followings(self.zero.uuid, -1)
self.assertEqual(
self.social_graph.nth_layer_followings(self.zero.uuid, 0),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.zero.uuid, 1),
{self.one.uuid, self.four.uuid, self.five.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.zero.uuid, 2),
{self.two.uuid, self.three.uuid, self.six.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.zero.uuid, 3),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.one.uuid, 0),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.one.uuid, 1),
{self.two.uuid, self.three.uuid, self.four.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.one.uuid, 2),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.two.uuid, 0),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.two.uuid, 1),
{self.three.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.one.uuid, 2),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.three.uuid, 0),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.three.uuid, 3),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.four.uuid, 0),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.four.uuid, 1),
{self.two.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.four.uuid, 2),
{self.three.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.four.uuid, 3),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.five.uuid, 0),
set())
self.assertEqual(
self.social_graph.nth_layer_followings(self.five.uuid, 1),
{self.six.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.five.uuid, 2),
{self.zero.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.five.uuid, 3),
{self.one.uuid, self.four.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.five.uuid, 4),
{self.two.uuid, self.three.uuid})
self.assertEqual(
self.social_graph.nth_layer_followings(self.five.uuid, 5),
set())
def test_feed(self):
self.social_graph.follow(self.terry.uuid, self.eric.uuid)
self.social_graph.follow(self.terry.uuid, self.graham.uuid)
self.social_graph.follow(self.terry.uuid, self.john.uuid)
self.social_graph.follow(self.terry.uuid, self.michael.uuid)
for i in range(10):
self.eric.add_post(str(i))
sleep(0.000001)
self.graham.add_post(str(10 + i))
sleep(0.000001)
self.john.add_post(str(20 + i))
sleep(0.000001)
self.michael.add_post(str(30 + i))
sleep(0.000001)
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 0, 10)],
["39", "29", "19", "9", "38", "28", "18", "8", "37", "27"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 10, 10)],
["17", "7", "36", "26", "16", "6", "35", "25", "15", "5"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 20, 10)],
["34", "24", "14", "4", "33", "23", "13", "3", "32", "22"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 30, 10)],
["12", "2", "31", "21", "11", "1", "30", "20", "10", "0"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 0, 1)],
["39"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 1, 1)],
["29"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 1, 0)],
[])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 0, 40)],
["39", "29", "19", "9", "38", "28", "18", "8", "37", "27",
"17", "7", "36", "26", "16", "6", "35", "25", "15", "5",
"34", "24", "14", "4", "33", "23", "13", "3", "32", "22",
"12", "2", "31", "21", "11", "1", "30", "20", "10", "0"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 39, 40)],
["0"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 39, 39)],
["0"])
self.assertEqual(
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 10, 140)],
["17", "7", "36", "26", "16", "6", "35", "25", "15", "5",
"34", "24", "14", "4", "33", "23", "13", "3", "32", "22",
"12", "2", "31", "21", "11", "1", "30", "20", "10", "0"])
with self.assertRaises(ValueError):
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, 0, -1)]
with self.assertRaises(ValueError):
[post.content
for post in self.social_graph.generate_feed(self.terry.uuid, -1, 10)]
self.assertEqual([post.content for post in self.eric.get_post()],
[str(i) for i in range(10)])
self.assertEqual([post.content for post in self.graham.get_post()],
[str(i) for i in range(10, 20)])
self.assertEqual([post.content for post in self.john.get_post()],
[str(i) for i in range(20, 30)])
self.assertEqual([post.content for post in self.michael.get_post()],
[str(i) for i in range(30, 40)])
# Run the whole suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"borisaltanov@gmail.com"
] | borisaltanov@gmail.com |
02f6df5ae4820400c31f0a44ab0af1722aff4957 | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/datahub/databus/shippers/mysql/shipper.py | 47bb186ba428a43fa955ca786b37cc8b70ff1a25 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 3,040 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from datahub.databus.settings import MODULE_SHIPPER
from datahub.databus.shippers.base_shipper import BaseShipper
class MysqlShipper(BaseShipper):
    """Shipper that moves data from a source channel topic into a MySQL
    (tspider) storage, building the JDBC connector configuration."""
    storage_type = "mysql"
    module = MODULE_SHIPPER
    def _get_shipper_task_conf(self, cluster_name):
        """Build the connector task configuration for *cluster_name*."""
        # physical_table_name format: "dbname_123.table_name"; when no dot
        # is present the db name is derived from the business id instead.
        arr = self.physical_table_name.split(".")
        if len(arr) == 1:
            db_name = "mapleleaf_%s" % (self.rt_info["bk_biz_id"])
            table_name = self.physical_table_name
        else:
            db_name = arr[0]
            table_name = arr[1]
        conn_url = (
            "jdbc:mysql://{}:{}/{}?autoReconnect=true&useServerPrepStmts=false&rewriteBatchedStatements=true".format(
                self.sink_storage_conn["host"],
                self.sink_storage_conn["port"],
                db_name,
            )
        )
        return self.config_generator.build_tspider_config_param(
            cluster_name,
            self.connector_name,
            self.rt_id,
            self.source_channel_topic,
            self.task_nums,
            conn_url,
            self.sink_storage_conn["user"],
            self.sink_storage_conn["password"],
            table_name,
        )
    @classmethod
    def _field_handler(cls, field, storage_params):
        """Collect fields flagged as indexes into storage_params."""
        if field.get("is_index"):
            storage_params.indexed_fields.append(field["physical_field"])
    @classmethod
    def _get_storage_config(cls, params, storage_params):
        """Serialize the storage config (indexed fields) as JSON."""
        return json.dumps(
            {
                "indexed_fields": storage_params.indexed_fields,
            }
        )
| [
"terrencehan@tencent.com"
] | terrencehan@tencent.com |
9b9be96e30df9c686070c80c225e104e3d02d81f | b13d6dfc382b65b07587d0cc3d5fb2478081ab6a | /main/forms.py | 4ab89e187e10f6ecc082e2571e616bfe858a3988 | [] | no_license | ashabdan/firstdjangoproject | 2198ae10fda461a286c193f3d2cf363ee550793b | e00806f95c56ebf528f988291c4ea3eb098364c9 | refs/heads/master | 2023-05-22T09:08:37.741597 | 2021-06-11T12:16:18 | 2021-06-11T12:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | from ckeditor.widgets import CKEditorWidget
from django import forms
from .models import Post
class CreatePostForm(forms.ModelForm):
    """Form for creating a Post; the author is taken from the request user."""
    text = forms.CharField(widget=CKEditorWidget())
    class Meta:
        model = Post
        fields = ['title', 'text', 'image', 'category', 'tags']
    def __init__(self, *args, **kwargs):
        # The view must pass request=... so save() can set the author.
        self.request = kwargs.pop('request')
        super(CreatePostForm, self).__init__(*args, **kwargs)
    def save(self):
        """Create the Post from cleaned_data and attach its tags.

        NOTE(review): this overrides ModelForm.save() without the usual
        ``commit`` argument -- confirm no caller relies on
        ``save(commit=False)``.
        """
        data = self.cleaned_data
        data['author'] = self.request.user
        # Tags are many-to-many, so they cannot be passed to create();
        # pop them and add after the Post instance exists.
        tags = data.pop('tags')
        post = Post.objects.create(**data)
        post.tags.add(*tags)
        return post
class UpdatePostForm(forms.ModelForm):
    """Plain ModelForm for editing an existing Post."""
    class Meta:
        model = Post
        fields = ['title', 'text', 'image', 'category', 'tags']
| [
"akimbaeva.a23@gmail.com"
] | akimbaeva.a23@gmail.com |
96096b7eead71a44e4658a819f4b303bb7152054 | a88b029981e1ff89afc5e951d7346c91d3e0ecf5 | /test.py | 354e7da8a47d99484fe056d23271d59503820e81 | [] | no_license | jncraton/calculator | a75ae340673c67bd41c07cd3ecdb0ac6adae1955 | 91e8732df0b0fb82f47e82de62871146dd7415f5 | refs/heads/master | 2020-08-14T00:23:02.494264 | 2019-11-04T15:35:06 | 2019-11-04T15:35:06 | 215,063,057 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import os
options = Options()
options.headless = True
with webdriver.Firefox(options=options) as browser:
browser.get(f'file://{os.getcwd()}/dist/index.html')
assert('Calculator' in browser.page_source)
expression = browser.find_element_by_name('expression')
expression.clear()
expression.send_keys('273 + 571')
assert('844' in browser.page_source)
expression = browser.find_element_by_name('expression')
expression.clear()
expression.send_keys('2 * standard_state_pressure')
assert('200000.0' in browser.page_source) | [
"jncraton@gmail.com"
] | jncraton@gmail.com |
e90d3f3264f8703b34328d969f8d6039d11892bf | ed8b8b363abba3e32bc07c3586ba9004c57470c3 | /business_object/pokemon/supporter_pokemon.py | 666a38ff5a94ac4f03ae21d6b62345c49f984290 | [] | no_license | adrienensai/tp2_2021 | 1a1f0ce578c13ee43e81c823fd05ec2977652016 | d61ea39c19d24e494fbbbe9972de423e942f1391 | refs/heads/master | 2023-08-14T14:14:25.596972 | 2021-09-23T15:29:18 | 2021-09-23T15:29:18 | 409,634,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | from business_object.attack.special_attack import SpecialFormulaAttack
from business_object.pokemon.abstract_pokemon import AbstractPokemon
class SupporterPokemon(AbstractPokemon):
    """A support pokemon whose special attack is the move "Healing Song".

    Its attack coefficient scales with the current special-attack and
    defense stats.
    """

    def __init__(self, stat_max=None, stat_current=None, level=None,
                 name=None, gear=None, common_attacks=None) -> None:
        # A mutable default ([]) would be shared between every instance;
        # use None and create a fresh list per pokemon instead.
        if common_attacks is None:
            common_attacks = []
        special_attack = SpecialFormulaAttack(
            power=40,
            name="Healing Song",
            description="{pokemon.name} sings a beautiful song ")
        super().__init__(stat_max=stat_max,
                         stat_current=stat_current,
                         level=level,
                         name=name,
                         gear=gear,
                         special_attack=special_attack,
                         common_attacks=common_attacks)

    def get_pokemon_attack_coef(self) -> float:
        """Return the damage multiplier: 1 + (sp_atk + defense) / 200."""
        return 1 + (self.sp_atk_current + self.defense_current) / 200
| [
"amontbroussous@DOMENSAI.ECOLE"
] | amontbroussous@DOMENSAI.ECOLE |
fdcf94259376e6d3fa06d6a182d45cd8682133e8 | 9de495e040db39468a795a7a4072aa8f44e0eae1 | /pyrasl/util/imageio.py | 8db21f8de884763d371c5fe6cde8d85748d34776 | [] | no_license | ejd2163/Barnhart-Lab-2020 | 7fdcafaa24f2e02d75d54b3e660d3c851eaf342d | d410c015cc057bc8e663019158a5a920eb65a3d2 | refs/heads/master | 2020-12-14T04:51:27.700188 | 2020-09-09T18:01:12 | 2020-09-09T18:01:12 | 234,646,253 | 0 | 0 | null | 2020-02-19T16:00:45 | 2020-01-17T22:16:34 | null | UTF-8 | Python | false | false | 4,915 | py | # __BEGIN_LICENSE__
#
# Copyright (C) 2010-2012 Stanford University.
# All rights reserved.
#
# __END_LICENSE__
import numpy as np
import os
def load_image(filename, dtype=None, normalize=False):
    """
    Load an image from disk and return it as a numpy array.

    '.tif' files are read with libtiff (multi-page tiffs are stacked into
    a z axis); everything else goes through OpenCV.  The caller may request
    a cast to a specific dtype, or (floating point data only) normalization
    of the values to the range [0, 1.0].

    Raises IOError if the file does not exist or cannot be decoded.
    """
    if not os.path.exists(filename):
        raise IOError("File \"" + filename + "\" does not exist.")

    filetype = filename.split('.')[-1]
    if filetype.lower() == 'tif':
        from libtiff import TIFF
        tif = TIFF.open(filename, mode='r')

        def zslices(tif):
            # Parse the ImageJ-style "slices=<n>" entry out of the tiff
            # metadata, defaulting to 1 when absent.  The original used
            # filter(), which on Python 3 is a lazy, always-truthy,
            # non-subscriptable iterator; a list comprehension behaves
            # correctly on both 2 and 3.  (Leftover debug prints removed.)
            slice_entries = [entry for entry in tif.info().split('\n')
                             if 'slices=' in entry]
            if slice_entries:
                return int(slice_entries[0].split('=')[1])
            return 1

        slices = zslices(tif)

        # Each tiff directory contains one z slice; stack them along axis 2.
        z_count = 0
        for zslice in tif.iter_images():
            # Handle endian-ness conversion since pylibtiff doesn't do it
            # automatically for some reason.
            if tif.IsByteSwapped():
                zslice = zslice.byteswap()
            shape = zslice.shape
            if z_count == 0:
                im = np.zeros((shape[0], shape[1], 1), dtype=zslice.dtype)
                im[:, :, 0] = zslice
            else:
                im = np.concatenate(
                    (im, np.reshape(zslice, (shape[0], shape[1], 1))), axis=2)
            z_count += 1

        # If the tiff image has only one z slice, squeeze that axis away.
        if z_count == 1:
            im = np.squeeze(im)
        del tif  # Close the image.
        im = np.transpose(im.reshape(shape[0], shape[1], -1, slices),
                          [0, 1, 3, 2])
    elif filetype.lower() == 'jpg':
        # cv2 is never imported at module level in this file; import it
        # locally so the OpenCV paths work at all (NameError otherwise).
        import cv2
        # Convert RGB to monochromatic.
        im = np.asarray(cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE))
    else:
        import cv2
        try:
            im = np.asarray(cv2.imread(filename, -1))
        except Exception:
            im = np.asarray(cv2.LoadImage(filename, -1))
            im = np.asarray(im.ravel()[0][:])  # hack
            print("You are using an old version of openCV. Loading image using cv.LoadImage.")

    if not im.shape:
        raise IOError("An error occurred while reading \"" + filename + "\"")

    # The user may specify that the data be returned as a specific type.
    # Otherwise it is returned in whatever format it was stored on disk.
    if dtype:
        im = im.astype(dtype)

    # Normalize the image if requested; only supported for floating point.
    if normalize:
        if im.dtype == np.float32 or im.dtype == np.float64:
            return im / im.max()
        else:
            raise NotImplementedError
    else:
        return im
def save_image(filename, image, dtype=None):
    """
    Save the image to disk using OpenCV or libtiff.

    The file format is inferred from the filename suffix; OpenCV handles
    every format except tiff.  A 3-D image may only be saved as a '.tif'
    stack.  Raises IOError for a missing suffix, a missing destination
    directory, or an unsupported 3-D format.
    """
    filetype = filename.split('.')[-1]

    # No dot at all (filename == filetype) or a suspiciously long suffix
    # means the caller forgot the extension.
    if filename == filetype or len(filetype) > 3:
        raise IOError('Could not write file \'%s\'. You must specify a file suffix.' % (filename))

    if os.path.dirname(filename) and not os.path.exists(os.path.dirname(filename)):
        raise IOError("Directory \"" + os.path.dirname(filename) +
                      "\" does not exist. Could not save file \"" + filename + "\"")

    # If the user has specified a data type, convert here.  (The original
    # used '!= None'; identity comparison is the correct idiom.)
    if dtype is not None:
        image = image.astype(dtype)

    # For now we transpose the data since it is stored in y,x,z order.
    # We can remove this later when we switch to z,y,x.
    if len(image.shape) == 3:
        image = np.transpose(image, (2, 0, 1))

    if filetype.lower() == 'tif':
        from libtiff import TIFF
        tif = TIFF.open(filename, mode='w')
        tif.write_image(image)
        del tif  # flush data to disk
    elif len(image.shape) == 2:
        # cv2 is never imported at module level in this file; import it
        # locally so this path works at all (NameError otherwise).
        import cv2
        try:
            cv2.imwrite(filename, image)
        except Exception:
            print("You are using an old version of openCV. Saving image using cv.SaveImage.")
            try:
                cv2.SaveImage(filename, image)
            except Exception:
                print('There was an error saving the file', filename)
    else:
        raise IOError('Saving 3-D image cubes to a \'%s\' file is not supported. Please save your image as a \'tif\' stack.' % (filetype))
| [
"33631472+zhengyuanlu@users.noreply.github.com"
] | 33631472+zhengyuanlu@users.noreply.github.com |
fd761c88737e3622a0466b6b6356c2175bc9c74e | 18eba2edf1fe3dcb31d646269f1a41747500db9f | /karaoke.py | e0e4d40e2b47bc5409bb65286939778e57efb7f0 | [
"Apache-2.0"
] | permissive | ziyua/ptavi-p3 | 32bc8428b2d9e26d4106c3b2580fe717fb770c69 | d54e888bf74c773a9ec5668ec3616cc58e3c093d | refs/heads/master | 2020-04-07T09:21:02.511566 | 2014-10-13T17:18:07 | 2014-10-13T17:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
from xml.sax import make_parser
import smallsmilhandler as ssh
import sys
import os
class KaraokeLocal():
    """Parses a SMIL playlist and localizes its remote media resources."""

    def __init__(self, filename):
        """Parse `filename` with SmallSMILHandler and keep its tag list."""
        parser = make_parser()
        SSMILH = ssh.SmallSMILHandler()
        parser.setContentHandler(SSMILH)
        # Close the file handle after parsing (the original leaked the
        # open file object passed to parser.parse()).
        with open(filename) as smil_file:
            parser.parse(smil_file)
        self.list = SSMILH.get_tags()

    def do_local(self):
        """Download each http:// resource not already present locally and
        rewrite its 'src' entry to the local file name."""
        for dic in self.list:
            if 'src' in dic and dic['src'].startswith("http://"):
                nameLocal = dic['src'].rsplit('/', 1)[1]
                if not os.path.exists(nameLocal):
                    # SECURITY NOTE: the URL comes straight from the SMIL
                    # file and is interpolated into a shell command line; a
                    # crafted 'src' value could inject shell syntax.
                    # Consider urllib or subprocess with an argument list.
                    os.system("wget -q " + dic['src'])
                dic['src'] = nameLocal

    def __str__(self):
        """One line per tag: the tag name followed by tab-separated
        key="value" pairs; the 'name' key itself is skipped."""
        returnStr = ""
        for dic in self.list:
            returnStr += dic['name']
            for key in dic:
                if key != "name":
                    returnStr += '\t' + key + '="' + dic[key] + '"'
            returnStr += "\n"
        return returnStr
if __name__ == '__main__':
    # Usage: python karaoke.py file.smil  (exactly one argument required)
    if len(sys.argv) != 2:
        sys.exit("Usage: python karaoke.py file.smil")
    k = KaraokeLocal(sys.argv[1])
    # Print the playlist, localize remote resources, then print it again
    # so the rewritten 'src' entries are visible. (Python 2 print syntax.)
    print k
    k.do_local()
    print k
| [
"z.y.ma@qq.com"
] | z.y.ma@qq.com |
eababec9f6471e53a80fca79134347940be8d290 | fe91ffa11707887e4cdddde8f386a8c8e724aa58 | /components/schema_org/generate_schema_org_code_unittest.py | efe4f2b9872edd705ddf08553a7364cb1d9eefc1 | [
"BSD-3-Clause"
] | permissive | akshaymarch7/chromium | 78baac2b45526031846ccbaeca96c639d1d60ace | d273c844a313b1e527dec0d59ce70c95fd2bd458 | refs/heads/master | 2023-02-26T23:48:03.686055 | 2020-04-15T01:20:07 | 2020-04-15T01:20:07 | 255,778,651 | 2 | 1 | BSD-3-Clause | 2020-04-15T02:04:56 | 2020-04-15T02:04:55 | null | UTF-8 | Python | false | false | 5,946 | py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for generate_schema_org_code."""
import sys
import unittest
import generate_schema_org_code
from generate_schema_org_code import schema_org_id
import os
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import mock
_current_dir = os.path.dirname(os.path.realpath(__file__))
# jinja2 is in chromium's third_party directory
# Insert at front to override system libraries, and after path[0] == script dir
sys.path.insert(
1, os.path.join(_current_dir, *([os.pardir] * 2 + ['third_party'])))
import jinja2
class GenerateSchemaOrgCodeTest(unittest.TestCase):
    """Unit tests for the schema.org code-generation helpers.

    Each test builds a minimal in-memory JSON-LD graph plus a
    name -> hash lookup and checks one helper from
    generate_schema_org_code.
    """
    def test_get_template_vars(self):
        # A graph with one rdfs:Class and one rdf:Property yields one
        # entity and one property in the template variables.
        schema = {
            "@graph": [{
                "@id": "http://schema.org/MediaObject",
                "@type": "rdfs:Class"
            },
                       {
                           "@id": "http://schema.org/propertyName",
                           "@type": "rdf:Property"
                       }]
        }
        names = {
            "http://schema.org/MediaObject": 1234,
            "MediaObject": 1235,
            "http://schema.org/propertyName": 2345,
            "propertyName": 2346
        }
        self.assertEqual(
            generate_schema_org_code.get_template_vars(schema, names), {
                'entities': [{
                    'name': 'MediaObject',
                    'name_hash': 1235
                }],
                'properties': [{
                    'name': 'propertyName',
                    'name_hash': 2346,
                    'thing_types': [],
                    'enum_types': []
                }],
                'enums': [],
                'entity_parent_lookup':
                [{
                    'name': 'MediaObject',
                    'name_hash': 1235,
                    'parents': [{
                        'name': 'MediaObject',
                        'name_hash': 1235
                    }]
                }]
            })
    def test_lookup_parents(self):
        # The ancestor set of Brand includes itself and every superclass
        # up to Thing.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        structured_value = {
            '@id': schema_org_id('StructuredValue'),
            'rdfs:subClassOf': intangible
        }
        brand = {'@id': schema_org_id('Brand'), 'rdfs:subClassOf': intangible}
        schema = {'@graph': [thing, intangible, structured_value, brand]}
        self.assertSetEqual(
            generate_schema_org_code.lookup_parents(brand, schema, {}),
            set(['Thing', 'Intangible', 'Brand']))
    def test_get_root_type_thing(self):
        # Ordinary classes resolve to the top of their subclass chain.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        structured_value = {
            '@id': schema_org_id('StructuredValue'),
            'rdfs:subClassOf': intangible
        }
        schema = {'@graph': [thing, intangible, structured_value]}
        self.assertEqual(
            generate_schema_org_code.get_root_type(structured_value, schema),
            thing)
    def test_get_root_type_datatype(self):
        # Subclasses of a DataType resolve to that data type (Number).
        number = {
            '@id': schema_org_id('Number'),
            '@type': [schema_org_id('DataType'), 'rdfs:Class']
        }
        integer = {'@id': schema_org_id('Integer'), 'rdfs:subClassOf': number}
        schema = {'@graph': [integer, number]}
        self.assertEqual(
            generate_schema_org_code.get_root_type(integer, schema), number)
    def test_get_root_type_enum(self):
        # Enumeration members are their own root type.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        enumeration = {
            '@id': schema_org_id('Enumeration'),
            'rdfs:subClassOf': intangible
        }
        actionStatusType = {
            '@id': schema_org_id('ActionStatusType'),
            'rdfs:subClassOf': enumeration
        }
        schema = {'@graph': [thing, intangible, enumeration, actionStatusType]}
        self.assertEqual(
            generate_schema_org_code.get_root_type(actionStatusType, schema),
            actionStatusType)
    def test_parse_property_identifier(self):
        # A property whose range spans a Thing subclass plus number types
        # sets has_number and lists only the Thing-derived range entry.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        structured_value = {
            '@id': schema_org_id('StructuredValue'),
            'rdfs:subClassOf': intangible
        }
        property_value = {
            '@id': schema_org_id('PropertyValue'),
            'rdfs:subClassOf': structured_value
        }
        number = {
            '@id': schema_org_id('Number'),
            '@type': [schema_org_id('DataType'), 'rdfs:Class']
        }
        integer = {'@id': schema_org_id('Integer'), 'rdfs:subClassOf': number}
        identifier = {
            '@id': schema_org_id('Identifier'),
            schema_org_id('rangeIncludes'): [property_value, integer, number]
        }
        schema = {
            '@graph': [
                thing, intangible, structured_value, property_value, number,
                integer, identifier
            ]
        }
        names = {"http://schema.org/Identifier": 1234, "Identifier": 1235}
        self.assertEqual(
            generate_schema_org_code.parse_property(identifier, schema, names),
            {
                'name': 'Identifier',
                'name_hash': 1235,
                'has_number': True,
                'thing_types': [property_value['@id']],
                'enum_types': []
            })
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
95b7fcc4be922b3b722eaca5d68a73ca8391b640 | 5c4beaa51bf73fef1c82d280561f012068a6333c | /extractFeatureTest.py | 9665cfb72c4fa01f54a5a311fad07e6c6d3e98a7 | [] | no_license | archimekai/autoAnswer | 1a74f577db9ada7ca8db76b75c6c98e948113d92 | d141bcf09a274b52e4fd8cf513fe58e9dc71ee9c | refs/heads/master | 2021-01-12T15:41:36.379055 | 2016-12-07T12:32:09 | 2016-12-07T12:32:09 | 71,855,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # coding=utf-8
# author: WEN Kai, wenkai123111 AT 126.com
# Nov/16/2016 11:27 PM
from extractFeature import *
import unittest
class testExtarctFeature(unittest.TestCase):
    """Smoke test for extractFeature.getCausality.

    (The 'Extarct' typo in the class name is preserved intentionally.)
    """
    def test_getCausality(self):
        # Two Chinese cause/effect sentences; only verifies that the call
        # does not raise -- there is no assertion on the extracted result.
        para = "因为哈密瓜生长在北方,所以哈密瓜很甜。因为太阳很大,所以太阳很热。"
        getCausality(para)
| [
"wenkai123111@126.com"
] | wenkai123111@126.com |
6b69a290b1249d0942acb209ad39c8384eba9c62 | 1a2ee534d5f7f9c03ab6747e1a33db32bd19144a | /2019/Day6/day6.py | b90cb2237887ee6c575c1b713d6f2a99a1d36cca | [] | no_license | samsohn28/advent_of_code | 09b28ecef3c8845d3402e25bdccc6ddcb2fd3a39 | b7a21f77ce5997beb4a69ba9e60fe9d465166f90 | refs/heads/main | 2023-01-11T11:58:47.949240 | 2022-12-26T06:32:08 | 2022-12-26T06:32:08 | 180,074,012 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | # Day 6: Universal Orbit Map
def get_hash_set(_file):
    """Parse "PARENT)CHILD" orbit lines into an orbit map.

    Returns (orbits, objects): `orbits` maps each child to the object it
    directly orbits; `objects` is the set of every name seen.
    """
    orbits = {}
    objects = set()
    for raw_line in _file:
        parent, child = raw_line.rstrip().split(")")
        orbits[child] = parent
        objects.update((parent, child))
    return orbits, objects
def get_orbits(orbits, obj):
    """Return the number of direct and indirect orbits of `obj`.

    This is the length of the parent chain from `obj` up to the root
    (an object that does not orbit anything).
    """
    if obj not in orbits:
        return 0
    # One hop to the direct parent plus everything the parent orbits.
    return 1 + get_orbits(orbits, orbits[obj])

def get_orbits_list(orbits, obj):
    """Return the ancestors of `obj` as a list, nearest parent first."""
    chain = []
    node = obj
    while node in orbits:
        node = orbits[node]
        chain.append(node)
    return chain

def total_orbits(orbits, objects):
    """Print and return the puzzle checksum: sum of every object's orbits.

    IMPROVEMENT: the total is now also returned (previously it was only
    printed), so callers and tests can use the value; existing callers
    that ignore the return value are unaffected.
    """
    total = sum(get_orbits(orbits, obj) for obj in objects)
    print(total)
    return total

def orbital_transfers_required(from_obj, to_obj, orbits, objects):
    """Return the minimum orbital transfers between the parents of
    `from_obj` and `to_obj`.

    `objects` is unused but kept for call compatibility.
    """
    common_objs = set(get_orbits_list(orbits, from_obj)).intersection(
        get_orbits_list(orbits, to_obj))
    to_steps = 0
    from_steps = 0
    from_obj = orbits[from_obj]
    to_obj = orbits[to_obj]
    # Walk both cursors upward; each stops at the lowest common ancestor.
    while from_obj != to_obj:
        if from_obj not in common_objs:
            from_obj = orbits[from_obj]
            from_steps += 1
        if to_obj not in common_objs:
            to_obj = orbits[to_obj]
            to_steps += 1
    return from_steps + to_steps
# with open("input1.txt") as _file:
# orbits, objects = get_hash_set(_file)
# print(orbital_transfers_required("YOU","SAN",orbits,objects))
with open("input.txt") as _file:
orbits, objects = get_hash_set(_file)
# part1
total_orbits(orbits, objects)
# part2
print(orbital_transfers_required("YOU","SAN",orbits,objects)) | [
"sam.sohn28@gmail.com"
] | sam.sohn28@gmail.com |
2142b629fe750797192ebc632de853f0a3fff0db | d743b92e19cf85e4593d621a0b388ce6d7de3446 | /PA3/TestCases/S3/output/q5-array-test3.tac | 4437c083f84f8ffb6e21f8edbb242020076db638 | [] | no_license | weiyx15/Decaf_2018 | 73db29f0da21efa8ecce8b9122a8e23fd76c3989 | 832af027483530bddf13a7b6a01a25e4ed82d720 | refs/heads/master | 2020-04-16T13:13:17.238653 | 2019-01-14T08:18:03 | 2019-01-14T08:18:03 | 165,616,599 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | tac | VTABLE(_Father) {
<empty>
Father
_Father.foo;
}
VTABLE(_Child) {
_Father
Child
_Father.foo;
}
VTABLE(_Main) {
<empty>
Main
}
FUNCTION(_Father_New) {
memo ''
_Father_New:
_T1 = 8
parm _T1
_T2 = call _Alloc
_T3 = 0
*(_T2 + 4) = _T3
_T4 = VTBL <_Father>
*(_T2 + 0) = _T4
return _T2
}
FUNCTION(_Child_New) {
memo ''
_Child_New:
_T5 = 12
parm _T5
_T6 = call _Alloc
_T7 = 0
*(_T6 + 4) = _T7
*(_T6 + 8) = _T7
_T8 = VTBL <_Child>
*(_T6 + 0) = _T8
return _T6
}
FUNCTION(_Main_New) {
memo ''
_Main_New:
_T9 = 4
parm _T9
_T10 = call _Alloc
_T11 = VTBL <_Main>
*(_T10 + 0) = _T11
return _T10
}
FUNCTION(_Father.foo) {
memo '_T0:4'
_Father.foo:
_T12 = *(_T0 + 4)
return _T12
}
FUNCTION(main) {
memo ''
main:
_T15 = 3
_T14 = _T15
_T16 = 10
_T17 = 0
_T18 = (_T16 < _T17)
if (_T18 == 0) branch _L13
_T19 = "Decaf runtime error: The length of the created array should not be less than 0.\n"
parm _T19
call _PrintString
call _Halt
_L13:
_T20 = 4
_T21 = (_T20 * _T16)
_T22 = (_T20 + _T21)
parm _T22
_T23 = call _Alloc
*(_T23 + 0) = _T16
_T23 = (_T23 + _T22)
_L14:
_T22 = (_T22 - _T20)
if (_T22 == 0) branch _L15
_T23 = (_T23 - _T20)
*(_T23 + 0) = _T14
branch _L14
_L15:
_T13 = _T23
_T25 = 0
_T26 = 4
_T27 = *(_T13 - 4)
_L16:
_T28 = (_T25 < _T27)
if (_T28 == 0) branch _L17
_T29 = (_T25 * _T26)
_T30 = (_T13 + _T29)
_T31 = *(_T30 + 0)
_T24 = _T31
_T32 = 1
_T33 = (_T25 + _T32)
_T25 = _T33
_T34 = 2
_T35 = (_T14 > _T34)
if (_T35 == 0) branch _L17
_T36 = (_T14 + _T24)
_T14 = _T36
parm _T24
call _PrintInt
parm _T14
call _PrintInt
_T37 = "\n"
parm _T37
call _PrintString
branch _L16
_L17:
}
| [
"weiyx_1@126.com"
] | weiyx_1@126.com |
f7f68b716a23f4dd64e3bf0e0319026f1f8abd0e | 899a8953c1d00034c1674ac16fa27de28d49ef35 | /iTest/report/lint.py | 22c64830a763d0dc2cd2821af460871e2dcbd3af | [] | no_license | monadyn/itest_cts | b6d7a5dafd2a3e217d9c89dfc82893e1ba98347a | f74b9e5b5db351595b6a49265c729fe997345dc6 | refs/heads/master | 2021-01-01T17:32:36.415617 | 2013-10-28T08:00:18 | 2013-10-28T08:00:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,857 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Jeffrey Kyllo <jkyllo-eatlint@echospiral.com>
#
# Based on code from the Bitten project:
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://echospiral.com/trac/eatlint/wiki/License.
__docformat__ = 'restructuredtext en'
from trac.core import *
from bitten.api import IReportChartGenerator, IReportSummarizer
class PyLintChartGenerator(Component):
    """Charts per-revision pylint problem counts for a build configuration."""

    implements(IReportChartGenerator)

    # IReportChartGenerator methods
    def get_supported_categories(self):
        """Only 'lint' reports are charted by this component."""
        return ['lint']

    def generate_chart_data(self, req, config, category):
        """Aggregate pylint problem counts per revision for `config`.

        Returns (template_name, data) for the lint chart template.
        """
        assert category == 'lint'
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        # Count, per build, the report items in each pylint category.
        # BUG FIX: the config name is now passed as a bind parameter
        # instead of being %-interpolated into the SQL, which was open to
        # SQL injection / quoting breakage for unusual config names.
        query = """
select build.rev,
 (select count(*) from CTSTestSheet_item as item
  where item.report = report.id and item.name='category' and item.value='convention'),
 (select count(*) from CTSTestSheet_item as item
  where item.report = report.id and item.name='category' and item.value='error'),
 (select count(*) from CTSTestSheet_item as item
  where item.report = report.id and item.name='category' and item.value='refactor'),
 (select count(*) from CTSTestSheet_item as item
  where item.report = report.id and item.name='category' and item.value='warning')
from CTSTestSheet as report
 left outer join bitten_build as build ON (report.build=build.id)
where build.config=%s and report.category='lint'
group by build.rev_time, build.rev, build.platform
order by build.rev_time;"""
        cursor.execute(query, (config.name,))
        lint = []
        prev_rev = None
        for rev, conv, err, ref, warn in cursor:
            total = conv + err + ref + warn
            curr_counts = [rev, total, conv, err, ref, warn]
            if rev != prev_rev:
                lint.append(curr_counts)
            else:
                # Same revision built on several platforms: keep the
                # element-wise maximum of the counts (original comment:
                # "cunningly / dubiously set rev = max(rev, rev)").
                lint[-1] = [max(prev, curr) for prev, curr in zip(curr_counts, lint[-1])]
                # recalculate total
                lint[-1][1] = sum(lint[-1][2:])
            prev_rev = rev
        data = {'title': 'Lint Problems by Type',
                'data': [
                ['Revision'] + ['[%s]' % item[0] for item in lint],
                ['Total Problems'] + [item[1] for item in lint],
                ['Convention'] + [item[2] for item in lint],
                ['Error'] + [item[3] for item in lint],
                ['Refactor'] + [item[4] for item in lint],
                ['Warning'] + [item[5] for item in lint],
                ]}
        return 'bitten_chart_lint.html', data
class PyLintSummarizer(Component):
    """Summarizes pylint report items for one build step as a per-file table."""
    implements(IReportSummarizer)
    # IReportSummarizer methods
    def get_supported_categories(self):
        """Only 'lint' reports are summarized by this component."""
        return ['lint']
    def render_summary(self, req, config, build, step, category):
        """Build template data for the lint summary.

        Returns (template_name, template_data); template_data carries one
        row per file plus per-type / per-category grand totals.
        """
        assert category == 'lint'
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        # Fetch every lint item for this build/step. The self-joins attach
        # the file, line count and category stored alongside each item's
        # type; build/step are bound parameters (%s).
        cursor.execute("""
SELECT item_type.value AS type, item_file.value AS file,
    item_line.value as line, item_category.value as category,
    report.category as report_category
FROM CTSTestSheet AS report
 LEFT OUTER JOIN CTSTestSheet_item AS item_type
  ON (item_type.report=report.id AND item_type.name='type')
 LEFT OUTER JOIN CTSTestSheet_item AS item_file
  ON (item_file.report=report.id AND
      item_file.item=item_type.item AND
      item_file.name='file')
 LEFT OUTER JOIN CTSTestSheet_item AS item_line
  ON (item_line.report=report.id AND
      item_line.item=item_type.item AND
      item_line.name='lines')
 LEFT OUTER JOIN CTSTestSheet_item AS item_category
  ON (item_category.report=report.id AND
      item_category.item=item_type.item AND
      item_category.name='category')
WHERE report_category='lint' AND build=%s AND step=%s
ORDER BY item_type.value""", (build.id, step.name))
        file_data = {}
        type_total = {}
        category_total = {}
        line_total = 0
        file_total = 0
        seen_files = {}
        # Aggregate one record per file, counting items by type and by
        # category. (Python 2 idiom: dict.has_key.)
        for type, file, line, category, report_category in cursor:
            if not file_data.has_key(file):
                file_data[file] = {'file': file, 'type': {}, 'lines': 0, 'category': {}}
            d = file_data[file]
            #d = {'type': type, 'line': line, 'category': category}
            if not d['type'].has_key(type):
                d['type'][type] = 0
            d['type'][type] += 1
            d['lines'] += 1
            line_total += 1
            if not d['category'].has_key(category):
                d['category'][category] = 0
            d['category'][category] += 1
            # Link the row to the repository browser when a file is known.
            if file:
                d['href'] = req.href.browser(config.path, file)
            if not type_total.has_key(type):
                type_total[type] = 0
            type_total[type] += 1
            if not category_total.has_key(category):
                category_total[category] = 0
            category_total[category] += 1
            # Count each distinct file only once for the grand total.
            if not seen_files.has_key(file):
                seen_files[file] = 0
                file_total += 1
        data = []
        for d in file_data.values():
            d['catnames'] = d['category'].keys()
            data.append(d)
        template_data = {}
        template_data['data'] = data
        template_data['totals'] = {'type': type_total, 'category': category_total, 'files': file_total, 'lines': line_total}
        return 'bitten_summary_lint.html', template_data
| [
"monad@126.com"
] | monad@126.com |
205c6d3bde45a4ee336ccec55817c107a7296001 | 8c6339a0a3e62b1c79c6af6747d98dc86dd00e1f | /chat/chat_client.py | fc3bcb4247b198fd30b6fa685533bf21343c7de0 | [] | no_license | tchammer/uw_python | 1ed5b64630239194d65260888bbc014972a32264 | 7f0055031a41d5b5a624918a3b19f58267d72c5d | refs/heads/master | 2021-01-21T23:04:05.927956 | 2012-03-12T03:27:50 | 2012-03-12T03:27:50 | 3,099,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | """
echo client, usage:
python echo_client.py <host> <port>
Both host and port are optional, defaults: localhost 50003
host must be present if you want to provide port
"""
import socket
import sys
# Connection parameters; command-line arguments override the defaults.
host = 'localhost'
port = 50003
size = 1024  # maximum number of bytes to read back per reply
nargs = len(sys.argv)
if nargs > 1:
    host = sys.argv[1]
if nargs > 2:
    port = int(sys.argv[2])
# One TCP connection, reused for every message until the user quits.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,port))
done = False
# Read lines from the user until an empty line is entered; send each one
# and echo the server's reply. (Python 2: raw_input / print statement.)
while not done:
    userstring = raw_input('please enter text string: ')
    if userstring == "":
        done = True
    else:
        s.send(userstring)
        data = s.recv(size)
        print 'from (%s,%s) %s' % (host, port, data)
s.close()
| [
"timothyechen@yahoo.com"
] | timothyechen@yahoo.com |
0e5a807f6314052a69a65a273249c0fe19633a79 | 130307ff7873fd7e2df61229f99c36ce78313135 | /EATest/test/locustfile.py | 3634f369aee00a30b3cf5b981893ef57a1119422 | [] | no_license | rJunx/EATest | 1744cda411ef18187a0b06c0f3b9a621e9fc191d | 1323f5d34b898732565c992cb4d0eb96ee3f3560 | refs/heads/master | 2020-04-20T18:52:40.848117 | 2019-02-06T23:05:35 | 2019-02-06T23:05:35 | 169,033,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
    def on_start(self):
        """Called when a Locust starts, before any task is scheduled."""
        pass
    def on_stop(self):
        """Called when the TaskSet is stopping."""
        pass
    @task(1)
    def index(self):
        # The only load-test task: GET the site root.
        self.client.get("/")
class WebsiteUser(HttpLocust):
    # Behavior executed by each simulated user.
    task_set = UserBehavior
    # Minimum pause between tasks, in milliseconds (paired with max_wait below).
    min_wait = 5000
max_wait = 9000 | [
"rongjun.xie@hotmail.com"
] | rongjun.xie@hotmail.com |
36a7393b21a2682ca5683e48c739bc8a39c968ea | c6ed09339ff21fa70f154f34328e869f0dd8e394 | /python/mysql-replication/binlog_rows_dump.py | 255f603361ff6f90e670bf2990edb1f0b99845fd | [] | no_license | fits/try_samples | f9b15b309a67f7274b505669db4486b17bd1678b | 0986e22d78f35d57fe1dd94673b68a4723cb3177 | refs/heads/master | 2023-08-22T14:35:40.838419 | 2023-08-07T12:25:07 | 2023-08-07T12:25:07 | 642,078 | 30 | 19 | null | 2022-12-28T06:31:24 | 2010-05-02T02:23:55 | Java | UTF-8 | Python | false | false | 3,022 | py |
import configparser
from datetime import date, datetime
import json
import os
import sys
import signal
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent)
class BinlogConfig:
    """Persists the binlog resume position (file name + offset) in an INI file."""

    def __init__(self, conf_file):
        self.config = configparser.ConfigParser()
        self.conf_file = conf_file

    def load(self):
        """Return (log_file, log_pos) from the [binlog] section, or (None, None)."""
        self.config.read(self.conf_file)
        if 'binlog' not in self.config:
            return (None, None)
        section = self.config['binlog']
        return (section['log_file'], int(section['log_pos']))

    def save(self, log_file, log_pos):
        """Write the given position into the [binlog] section on disk."""
        self.config['binlog'] = {
            'log_file': log_file,
            'log_pos': log_pos
        }
        with open(self.conf_file, 'w') as handle:
            self.config.write(handle)
def to_bool(s):
    """Interpret common truthy strings case-insensitively; anything else is False.

    Uses a set literal instead of a list for O(1) membership lookup.
    """
    return s.lower() in {'true', 't', 'ok', 'yes', 'y', 'on', '1'}
def split_env(name):
    """Return env var `name` split on commas, or None when it is unset.

    An empty-but-set variable yields [''], matching str.split behavior.
    """
    raw = os.getenv(name)
    return None if raw is None else raw.split(',')
# Runtime configuration, all taken from the environment.
ini_file = os.getenv('INI_FILE', 'binlog.ini')
bconf = BinlogConfig(ini_file)
# Resume position saved by a previous run; (None, None) when none exists.
(log_file, log_pos) = bconf.load()
blocking = to_bool(os.getenv('BLOCKING', 'off'))
host = os.getenv('MYSQL_HOST', 'localhost')
port = int(os.getenv('MYSQL_PORT', '3306'))
user = os.getenv('MYSQL_USER')
password = os.getenv('MYSQL_PASSWORD')
# Optional comma-separated filters; None means "no filtering".
schemas = split_env('SCHEMAS')
tables = split_env('TABLES')
cfg = {'host': host, 'port': port, 'user': user, 'password': password}
def to_json(obj):
    """json.dumps fallback: ISO-format dates/datetimes, str() for anything else."""
    return obj.isoformat() if isinstance(obj, (datetime, date)) else str(obj)
def handle_signal(sig, frame):
    """Signal handler: convert the signal into a normal SystemExit."""
    # sys.exit raises SystemExit, which unwinds through the try/finally
    # below so the current binlog position still gets saved.
    sys.exit(1)
# Follow the server's binlog as a replication client, restricted to
# row-level change events and optionally filtered by schema/table.
stream = BinLogStreamReader(
    connection_settings = cfg,
    server_id = 1,
    only_events = [WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent],
    only_schemas = schemas,
    only_tables = tables,
    # Resume from the position loaded from the ini file above.
    resume_stream = True,
    log_file = log_file,
    log_pos = log_pos,
    blocking = blocking
)
try:
    # Install the handler once streaming starts so SIGTERM triggers a
    # clean shutdown through the finally: block.
    signal.signal(signal.SIGTERM, handle_signal)
    for ev in stream:
        for r in ev.rows:
            data = {'table': '', 'schema': '', 'event_type': ''}
            # presumably 'values' carries insert/delete rows and
            # 'after_values' the post-update row image -- confirm against
            # the pymysqlreplication row event docs.
            if 'values' in r:
                data.update(r['values'])
            if 'after_values' in r:
                data.update(r['after_values'])
            # Set these after merging the row payload so a column named
            # 'table', 'schema' or 'event_type' cannot clobber them.
            data['table'] = ev.table
            data['schema'] = ev.schema
            if isinstance(ev, WriteRowsEvent):
                data['event_type'] = 'insert'
            elif isinstance(ev, UpdateRowsEvent):
                data['event_type'] = 'update'
            elif isinstance(ev, DeleteRowsEvent):
                data['event_type'] = 'delete'
            # One JSON object per line; to_json handles dates and other
            # non-serializable values.
            print( json.dumps(data, default=to_json) )
finally:
    # Ignore signals while persisting state so shutdown cannot be
    # interrupted half-way.
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    stream.close()
    # Remember where we stopped so the next run resumes from here.
    bconf.save(stream.log_file, stream.log_pos)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
| [
"wadays_wozx@nifty.com"
] | wadays_wozx@nifty.com |
3711a3e5de45097d8c0b02e93a423423d6390137 | cd530c1dff863758a754f4fa6fd19293fac3bbda | /python/app/convert.py | eac29132c8ae85cacfe2e42e02c0297056076245 | [] | no_license | maxime-bc/why-notes | af2e19f54870fe083b9feecdd4f5d71ac9fd8ab9 | f876c5b3e342e053cef7f7f82319d41f7979646c | refs/heads/main | 2023-01-28T09:29:33.849200 | 2020-12-02T20:55:24 | 2020-12-02T20:55:24 | 312,588,481 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | from datetime import datetime
from typing import Dict, Any
from app import Note
class NoteConverter(object):
    """Converts Note model objects to/from flat string dicts (storage format)."""

    @staticmethod
    def _decode_dict(dict_to_decode: Dict[Any, Any]) -> Dict[Any, Any]:
        """Decode bytes keys/values to str; integer keys are dropped."""
        decoded_dict = dict()
        for k, v in dict_to_decode.items():
            # Keys coming back from the store are bytes; int keys carry no
            # textual payload and are skipped, as before.
            if not isinstance(k, int):
                decoded_dict[k.decode()] = v.decode()
        return decoded_dict

    @staticmethod
    def note_to_dict(note: Note) -> Dict[str, str]:
        # convert dict values to str
        return {k: str(v) for k, v in note.__dict__.items()}

    @staticmethod
    def dict_to_note(note_dict: Dict[str, str]) -> Note:
        """Rebuild a Note from a (possibly bytes-encoded) flat string dict."""
        note_dict = NoteConverter._decode_dict(note_dict)
        note_dict['id'] = int(note_dict['id'])
        note_dict['creation_date'] = datetime.strptime(note_dict['creation_date'], '%Y-%m-%d %H:%M:%S.%f')
        note_dict['edit_date'] = datetime.strptime(note_dict['edit_date'], '%Y-%m-%d %H:%M:%S.%f')
        note_dict['id_user'] = int(note_dict['id_user'])
        # Simplified from `True if ... == 'True' else False`.
        note_dict['is_public'] = note_dict['is_public'] == 'True'
        # SQLAlchemy bookkeeping attribute must not reach the constructor.
        note_dict.pop('_sa_instance_state', None)
        return Note(**note_dict)
| [
"maximeblanchon91490@gmail.com"
] | maximeblanchon91490@gmail.com |
f6734836fe27cc0ec4a259ae42827caba0ca0fd5 | e6b2655a69bfeb90d3a707fa59bcb6ece6bba790 | /capture.py | 8070c4a648c5b6efacd5d35e8f44309abe69abe4 | [] | no_license | IdrisPit/face_motion_time_detector | fd67ec995f1aa973ab4df3408e1481bd8bbf54e7 | e283bbe1a98b2636f1f5a7c587fba5c8e0f87414 | refs/heads/master | 2020-06-01T19:08:01.259743 | 2019-06-09T15:51:30 | 2019-06-09T15:51:30 | 190,894,999 | 0 | 1 | null | 2019-06-10T15:04:10 | 2019-06-08T14:07:08 | Python | UTF-8 | Python | false | false | 360 | py | import cv2
# Open the default camera (device 0) and preview frames until 'q' is pressed.
video=cv2.VideoCapture(0)
a=0  # number of frames captured so far
while True:
    a=a+1
    check, frame = video.read()  # check is printed; False when no frame was read
    print(check)
    print(frame)
    #gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    #time.sleep(3)
    cv2.imshow("Capturing", frame)
    key=cv2.waitKey(2000)
    if key == ord('q'):
        break
print(a)
video.release()
# BUG FIX: the original ended with `cv2.destroyAllWindows` (no parentheses),
# which only references the function and never closes the preview window.
cv2.destroyAllWindows()
| [
"idrispitt@gmail.com"
] | idrispitt@gmail.com |
c03ac23ef226a1bbeab88e5ed757a872072ad21a | a7caaf953a0849f6081e44382da74a600a86b3da | /opencv-2.4.9/samples/python2/hist.py | 09fc8f90d80e51999221a360503e5c40aaea3fa0 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | watinha/collector | 22d22116fc1dbdfeec3bddb05aa42d05efe5b5b4 | fc4758f87aad99084ce4235de3e929d80c56a072 | refs/heads/master | 2021-12-28T11:12:50.548082 | 2021-08-19T20:05:20 | 2021-08-19T20:05:20 | 136,666,875 | 2 | 1 | Apache-2.0 | 2021-04-26T16:55:02 | 2018-06-08T21:17:16 | C++ | UTF-8 | Python | false | false | 3,475 | py | #!/usr/bin/env python
''' This is a sample for histogram plotting for RGB images and grayscale images for better understanding of colour distribution
Benefit : Learn how to draw histogram of images
            Get familiar with cv2.calcHist, cv2.equalizeHist, cv2.normalize and some drawing functions
Level : Beginner or Intermediate
Functions : 1) hist_curve : returns histogram of an image drawn as curves
2) hist_lines : return histogram of an image drawn as bins ( only for grayscale images )
Usage : python hist.py <image_file>
Abid Rahman 3/14/12 debug Gary Bradski
'''
import cv2
import numpy as np
bins = np.arange(256).reshape(256,1) # column vector of the 256 intensity bin indices, shared by both plotters
def hist_curve(im):
    """Return a 300x256 BGR image with the histogram of `im` drawn as curves.

    Grayscale input gets one white curve; 3-channel input gets one curve
    per channel in blue/green/red.
    """
    h = np.zeros((300,256,3))
    if len(im.shape) == 2:
        color = [(255,255,255)]
    elif im.shape[2] == 3:
        color = [ (255,0,0),(0,255,0),(0,0,255) ]
    # NOTE(review): if `im` is neither grayscale nor 3-channel, `color` is
    # never assigned and the loop below raises NameError.
    for ch, col in enumerate(color):
        hist_item = cv2.calcHist([im],[ch],None,[256],[0,256])
        # Scale the counts into 0-255 so the tallest bin fits the canvas.
        cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
        hist=np.int32(np.around(hist_item))
        pts = np.int32(np.column_stack((bins,hist)))
        cv2.polylines(h,[pts],False,col)
    # Flip vertically so larger counts point upward in image coordinates.
    y=np.flipud(h)
    return y
def hist_lines(im):
    """Return a 300x256 image with the grayscale histogram drawn as vertical bins.

    Color input is converted to grayscale first (with a printed warning).
    """
    h = np.zeros((300,256,3))
    if len(im.shape)!=2:
        print "hist_lines applicable only for grayscale images"
        #print "so converting image to grayscale for representation"
        im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    hist_item = cv2.calcHist([im],[0],None,[256],[0,256])
    cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
    hist=np.int32(np.around(hist_item))
    # One vertical white line per intensity value, height = normalized count.
    for x,y in enumerate(hist):
        cv2.line(h,(x,0),(x,y),(255,255,255))
    y = np.flipud(h)
    return y
if __name__ == '__main__':
    import sys
    # Load the image given on the command line, falling back to the
    # bundled lena sample when no argument is supplied.
    if len(sys.argv)>1:
        im = cv2.imread(sys.argv[1])
    else :
        im = cv2.imread('../cpp/lena.jpg')
        print "usage : python hist.py <image_file>"
    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    print ''' Histogram plotting \n
    Keymap :\n
    a - show histogram for color image in curve mode \n
    b - show histogram in bin mode \n
    c - show equalized histogram (always in bin mode) \n
    d - show histogram for color image in curve mode \n
    e - show histogram for a normalized image in curve mode \n
    Esc - exit \n
    '''
    cv2.imshow('image',im)
    # Simple event loop: redraw the requested histogram on each keypress.
    while True:
        k = cv2.waitKey(0)&0xFF
        if k == ord('a'):
            curve = hist_curve(im)
            cv2.imshow('histogram',curve)
            cv2.imshow('image',im)
            print 'a'
        elif k == ord('b'):
            print 'b'
            lines = hist_lines(im)
            cv2.imshow('histogram',lines)
            cv2.imshow('image',gray)
        elif k == ord('c'):
            print 'c'
            equ = cv2.equalizeHist(gray)
            lines = hist_lines(equ)
            cv2.imshow('histogram',lines)
            cv2.imshow('image',equ)
        elif k == ord('d'):
            print 'd'
            curve = hist_curve(gray)
            cv2.imshow('histogram',curve)
            cv2.imshow('image',gray)
        elif k == ord('e'):
            print 'e'
            norm = cv2.normalize(gray,alpha = 0,beta = 255,norm_type = cv2.NORM_MINMAX)
            lines = hist_lines(norm)
            cv2.imshow('histogram',lines)
            cv2.imshow('image',norm)
        elif k == 27:
            print 'ESC'
            cv2.destroyAllWindows()
            break
        cv2.destroyAllWindows()
| [
"watinha@gmail.com"
] | watinha@gmail.com |
368a24d77c6e9d9d3abb58bcdf8294f375cdac94 | e7e77d8eecb1786369b8aefd71199e6dd03e3c67 | /capai/settings.py | 3ad4c6454d53c35c7c52e5b57d24014e7c05a436 | [
"MIT"
] | permissive | kalimuthu123/CapAI | e11d654c19556f3c743f6527d693d566289cfc36 | 552b32661644543608a808b876d5d141f3b04ea2 | refs/heads/master | 2022-11-17T11:27:23.640270 | 2020-07-15T07:01:44 | 2020-07-15T07:01:44 | 274,123,394 | 0 | 1 | MIT | 2020-06-22T13:15:12 | 2020-06-22T11:52:21 | Python | UTF-8 | Python | false | false | 13 | py | DEBUG=False
| [
"1995kalimuthu@gmail.com"
] | 1995kalimuthu@gmail.com |
4d5b3298f7487306a98f6d98f50f817daeb8ceec | e10a008d351e93ab3be400d8a40860e8b6a64fa6 | /day8_dataframe練習.py | f379c5089234e54c7977907d25e2d5df3fc95606 | [] | no_license | atolinaga/Machine-Learn-Review | e4c365efca48b744729abbbd8dd9ccbdf467c111 | 7858fb41f01c1945376145e64de6bf7dcb611139 | refs/heads/master | 2021-02-09T18:06:52.830230 | 2020-03-02T08:00:30 | 2020-03-02T08:00:30 | 244,311,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | import os #建立文件/目錄路徑
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# data path
dir_data = 'C:\python data\Part01' # must build paths with os.path.join, otherwise they get garbled (translated comment)
f_app = os.path.join(dir_data ,'application_train.csv')
print("Path of read in data:"+f_app)
app_train = pd.read_csv(f_app)
# Bin CNT_CHILDREN into ranges (translated comment)
cut_rule = [-1,0,2.1,5.1,np.inf]
app_train['CNT_CHILDREN_GROUP'] = pd.cut(app_train['CNT_CHILDREN'].values, cut_rule, include_lowest=True)
# Group by the binned column (translated comment)
# NOTE(review): the next line discards its result and has no effect.
app_train.groupby(['CNT_CHILDREN_GROUP'])
grp = app_train['CNT_CHILDREN_GROUP']
grouped_df = app_train.groupby(grp)['AMT_INCOME_TOTAL'] # group AMT_INCOME_TOTAL by CNT_CHILDREN_GROUP (translated comment)
plt_column = ['AMT_INCOME_TOTAL']
plt_by = ['CNT_CHILDREN_GROUP']
# Compare income across child-count bins and draw a box plot (translated comment)
app_train.boxplot(column=plt_column, by = plt_by, showfliers = False,figsize = (5,5))
plt.suptitle('AMT_INCOME_TOTAL by children')
plt.show()
# Z-score transform of AMT_INCOME_TOTAL (translated comment)
mean = app_train['AMT_INCOME_TOTAL'].mean()
std = app_train['AMT_INCOME_TOTAL'].std()
app_train['AMT_INCOME_TOTAL_Z'] = app_train['AMT_INCOME_TOTAL'].apply(lambda x:((x - mean)/std))
print(app_train['AMT_INCOME_TOTAL_Z'])
| [
"48988394+atolinaga@users.noreply.github.com"
] | 48988394+atolinaga@users.noreply.github.com |
234e9a0558e87611eac1c7f734771939133adbb9 | afe289f69e0624b048f6fc7ef5934a51bb7f0106 | /My Practice/week 4/add N number.py | d87ccdc4dccfef1f6883f328f7711951e02eb406 | [] | no_license | ansarifirdosh/Semister-2 | ef5e0c616f198d34abc8bc7184a5a8527bee5e8a | 9d1223d9c324e15eafb104d5d7870474b7138f6e | refs/heads/main | 2023-09-05T09:35:13.942839 | 2021-11-21T17:11:01 | 2021-11-21T17:11:01 | 384,615,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | '''1.Write a program that takes a number N as input and prints the sum of all the
numbers'''
# For-loop version: read N and sum 1..N. The accumulator is renamed from
# `sum` to `total` because `sum` shadowed the builtin of the same name.
num=int(input("Enter the range of number: "))
total=0 #initializing the value
for i in range(1,num+1):
    total=total+i
print("the total sum is: ",total)
#while loop
num=int(input("Enter the range of number: "))
total=0
i=1
while i<=num:
    total=total+i
    i=i+1
print("the sum is: ",total)
| [
"noreply@github.com"
] | noreply@github.com |
d4b88ec999d9cbdb79ca587da70db301e111e9f5 | c0bc3fdf72e97dadd8fcdaef171aab02c6a72498 | /sepsis/train_model.py | 62cb6f27487cd3e5127ebe39e16143431ba646fe | [
"MIT"
] | permissive | pj0616/sepsis_transfer_learning_public | 7fb2576abb3dc31c5996e5626a86e4a5f5deefee | e41b3d1f43f0e59726e04215ea0da9c9919c0f68 | refs/heads/main | 2023-03-02T14:23:52.364331 | 2021-02-06T16:19:13 | 2021-02-06T16:19:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,860 | py | import pandas as pd
import os
import joblib
import configargparse as argparse
import copy
import torch
import numpy as np
from prediction_utils.pytorch_utils.models import FixedWidthModel
from prediction_utils.pytorch_utils.datasets import (
ArrayLoaderGenerator_Alt,
ParquetLoaderGenerator,
)
from prediction_utils.util import yaml_write
from prediction_utils.pytorch_utils.metrics import StandardEvaluator
# Command-line / YAML-config definition for the sepsis training script.
# Flags are grouped below as: paths, model hyperparameters, run control.
parser = argparse.ArgumentParser(config_file_parser_class=argparse.YAMLConfigFileParser)
parser.add_argument("--config_path", required=False, is_config_file=True)
# Path configuration
parser.add_argument(
    "--data_path",
    type=str,
    default="/share/pi/nigam/projects/sepsis/extraction_200815/",
    help="The root path where data is stored",
)
parser.add_argument(
    "--features_path",
    type=str,
    default="/share/pi/nigam/projects/sepsis/extraction_200815/merged_features_binary/features_sparse/features.gz",
    help="The root path where data is stored",
)
parser.add_argument(
    "--cohort_path",
    type=str,
    default="/share/pi/nigam/projects/sepsis/extraction_200815/cohort/cohort.parquet",
    help="File name for the file containing label information",
)
parser.add_argument(
    "--vocab_path",
    type=str,
    default="/share/pi/nigam/projects/sepsis/extraction_200815/merged_features_binary/vocab/vocab.parquet",
    help="File name for the file containing label information",
)
parser.add_argument(
    "--features_row_id_map_path",
    type=str,
    default="/share/pi/nigam/projects/sepsis/extraction_200815/merged_features_binary/features_sparse/features_row_id_map.parquet",
)
parser.add_argument("--load_checkpoint_path", type=str, default=None)
parser.add_argument("--save_checkpoint_path", type=str, default=None)
# Model Hyperparameters
parser.add_argument(
    "--num_epochs", type=int, default=10, help="The number of epochs of training"
)
parser.add_argument(
    "--iters_per_epoch",
    type=int,
    default=100,
    help="The number of batches to run per epoch",
)
parser.add_argument("--batch_size", type=int, default=256, help="The batch size")
parser.add_argument("--lr", type=float, default=1e-4, help="The learning rate")
parser.add_argument("--gamma", type=float, default=0.95, help="Learning rate decay")
parser.add_argument(
    "--num_hidden", type=int, default=3, help="The number of hidden layers"
)
parser.add_argument(
    "--hidden_dim", type=int, default=128, help="The dimension of the hidden layers"
)
parser.add_argument(
    "--normalize", dest="normalize", action="store_true", help="Use layer normalization"
)
parser.add_argument(
    "--drop_prob", type=float, default=0.75, help="The dropout probability"
)
parser.add_argument(
    "--weight_decay", type=float, default=0.0, help="The value of the weight decay"
)
parser.add_argument(
    "--early_stopping",
    dest="early_stopping",
    action="store_true",
    help="Whether to use early stopping",
)
parser.add_argument("--early_stopping_patience", type=int, default=5)
parser.add_argument(
    "--selection_metric",
    type=str,
    default="loss",
    help="The metric to use for model selection",
)
parser.add_argument("--fold_id", type=str, default="1", help="The fold id")
parser.add_argument(
    "--experiment_name", type=str, default="scratch", help="The name of the experiment"
)
parser.add_argument(
    "--label_col", type=str, default="early_sepsis", help="The label to use"
)
parser.add_argument(
    "--data_mode", type=str, default="array", help="Which mode of source data to use"
)
parser.add_argument("--sparse_mode", type=str, default="csr", help="the sparse mode")
parser.add_argument(
    "--num_workers",
    type=int,
    default=5,
    help="The number of workers to use for data loading during training in parquet mode",
)
parser.add_argument(
    "--save_outputs",
    dest="save_outputs",
    action="store_true",
    help="Whether to save the outputs of evaluation",
)
parser.add_argument(
    "--run_evaluation",
    dest="run_evaluation",
    action="store_true",
    help="Whether to evaluate the model",
)
parser.add_argument(
    "--no_run_evaluation",
    dest="run_evaluation",
    action="store_false",
    help="Whether to evaluate the model",
)
parser.add_argument(
    "--run_evaluation_group",
    # BUGFIX: this previously stored into dest="run_evaluation", so passing
    # --run_evaluation_group silently toggled the wrong flag (its paired
    # --no_run_evaluation_group already used "run_evaluation_group").
    dest="run_evaluation_group",
    action="store_true",
    help="Whether to evaluate the model for each group",
)
parser.add_argument(
    "--no_run_evaluation_group",
    dest="run_evaluation_group",
    action="store_false",
    help="Whether to evaluate the model for each group",
)
parser.add_argument(
    "--run_evaluation_group_standard",
    dest="run_evaluation_group_standard",
    action="store_true",
    help="Whether to evaluate the model",
)
parser.add_argument(
    "--no_run_evaluation_group_standard",
    dest="run_evaluation_group_standard",
    action="store_false",
    help="Whether to evaluate the model",
)
parser.add_argument(
    "--eval_attributes", type=str, nargs="+", required=False, default=None
)
parser.add_argument("--sample_keys", type=str, nargs="*", required=False, default=None)
parser.add_argument("--subset_train_attribute", type=str, required=False, default=None)
parser.add_argument("--subset_train_group", type=str, required=False, default=None)
parser.add_argument(
    "--apply_subset_train_only",
    dest="apply_subset_train_only",
    action="store_true",
    help="Whether to apply the subsetting to only the training set, not the validation set",
)
parser.add_argument(
    "--deterministic",
    dest="deterministic",
    action="store_true",
    help="Whether to use deterministic training",
)
parser.add_argument(
    "--seed", type=int, default=2020, help="The seed",
)
# Defaults for all boolean run-control flags.
parser.set_defaults(
    normalize=False,
    early_stopping=False,
    run_evaluation=True,
    save_outputs=True,
    run_evaluation_group=True,
    run_evaluation_group_standard=True,
    apply_subset_train_only=False,
    deterministic=True,
)
def get_loader_generator_class(data_mode="parquet"):
    """Map a data-mode string to the corresponding loader-generator class.

    Args:
        data_mode: "parquet" or "array".

    Returns:
        ParquetLoaderGenerator or ArrayLoaderGenerator_Alt.

    Raises:
        ValueError: for any other mode (previously the function fell
            through and implicitly returned None, which surfaced later as
            an opaque "NoneType is not callable" error).
    """
    if data_mode == "parquet":
        return ParquetLoaderGenerator
    if data_mode == "array":
        return ArrayLoaderGenerator_Alt
    raise ValueError("Unknown data_mode: {}".format(data_mode))
def read_file(filename, columns=None, **kwargs):
    """Load a tabular file into a pandas DataFrame, dispatching on extension.

    ``.parquet`` files go through pd.read_parquet (``columns`` selects
    columns); ``.csv`` files go through pd.read_csv (``columns`` maps to
    ``usecols``).  Extra keyword arguments are forwarded to the reader.
    The filename is printed as lightweight progress logging.

    Raises:
        ValueError: if the extension is neither .parquet nor .csv
            (previously the function silently returned None).
    """
    print(filename)
    load_extension = os.path.splitext(filename)[-1]
    if load_extension == ".parquet":
        return pd.read_parquet(filename, columns=columns, **kwargs)
    if load_extension == ".csv":
        return pd.read_csv(filename, usecols=columns, **kwargs)
    raise ValueError("Unsupported file extension: {}".format(load_extension))
# Entry point: train a fixed-width feed-forward model on the sepsis cohort,
# optionally warm-starting from a checkpoint, then evaluate overall and per
# group, writing all artifacts under a per-experiment result directory.
if __name__ == "__main__":
    args = parser.parse_args()
    # Reproducibility: seed torch/numpy and force deterministic cuDNN kernels.
    if args.deterministic:
        torch.manual_seed(args.seed)
        np.random.seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    config_dict = copy.deepcopy(args.__dict__)
    # Phase lists: with no fold id there is no validation split.
    if args.fold_id == "":
        train_keys = ["train"]
        eval_keys = ["eval", "test"]
        fold_id_test_list = ['test', 'eval']
    else:
        train_keys = ["train", "val"]
        eval_keys = ["val", "eval", "test"]
        fold_id_test_list = ['test', 'eval']
    # Input dimension = highest feature column id in the vocabulary + 1.
    vocab = read_file(args.vocab_path, engine="pyarrow")
    config_dict["input_dim"] = vocab.col_id.max() + 1
    cohort = read_file(args.cohort_path)
    if args.data_mode == "array":
        features = joblib.load(args.features_path)
        # Map cohort rows onto rows of the sparse feature matrix.
        # NOTE(review): row_id_map is only bound on this path; the group
        # evaluation further down references it unconditionally and will
        # raise NameError when data_mode != "array" or the map path is "".
        if args.features_row_id_map_path != "":
            row_id_map = read_file(args.features_row_id_map_path, engine="pyarrow")
            cohort = cohort.merge(row_id_map)
            config_dict["row_id_col"] = "features_row_id"
    else:
        features = None
    # Keep the full cohort for evaluation before any training-time subsetting.
    cohort_eval = cohort.copy()
    if (args.subset_train_attribute is not None) and (
        args.subset_train_group is not None
    ):
        cohort = cohort.query(
            "{} == {}".format(args.subset_train_attribute, args.subset_train_group)
        )
    print("Cohort shape: {}".format(cohort.shape))
    # Results land under <data_path>/experiments/<experiment>/performance/<label>/...
    if config_dict.get("config_path") is None:
        result_path_suffix = ""
    else:
        result_path_suffix = os.path.basename(config_dict["config_path"])
    result_path = os.path.join(
        args.data_path,
        "experiments",
        args.experiment_name,
        "performance",
        args.label_col,
        result_path_suffix,
        str(config_dict["fold_id"]),
    )
    print("Result path: {}".format(result_path))
    os.makedirs(result_path, exist_ok=True)
    loader_generator = get_loader_generator_class(data_mode=args.data_mode)(
        features=features,
        cohort=cohort,
        fold_id_test_list=fold_id_test_list,
        **config_dict
    )
    model = FixedWidthModel(**config_dict)
    print(model.config_dict)
    # Optionally warm-start from a saved checkpoint (transfer learning).
    if args.load_checkpoint_path is not None:
        print("Loading checkpoint")
        model.model.load_state_dict(torch.load(args.load_checkpoint_path))
    # Write the resulting config
    yaml_write(config_dict, os.path.join(result_path, "config.yaml"))
    loaders = loader_generator.init_loaders(sample_keys=args.sample_keys)
    result_df = model.train(loaders, phases=train_keys)["performance"]
    del loaders
    if args.save_checkpoint_path is not None:
        os.makedirs(os.path.dirname(args.save_checkpoint_path), exist_ok=True)
        torch.save(model.model.state_dict(), args.save_checkpoint_path)
    # Dump training results to disk
    result_df.to_parquet(
        os.path.join(result_path, "result_df_training.parquet"),
        index=False,
        engine="pyarrow",
    )
    if args.run_evaluation:
        print("Evaluating model")
        # Rebuild loaders over the *unsubsetted* cohort for evaluation.
        loader_generator = get_loader_generator_class(data_mode=args.data_mode)(
            features=features,
            cohort=cohort_eval,
            fold_id_test_list=fold_id_test_list,
            **config_dict
        )
        loaders_predict = loader_generator.init_loaders_predict()
        predict_dict = model.predict(loaders_predict, phases=eval_keys)
        del loaders_predict
        output_df_eval, result_df_eval = (
            predict_dict["outputs"],
            predict_dict["performance"],
        )
        print(result_df_eval)
        # Dump evaluation result to disk
        result_df_eval.to_parquet(
            os.path.join(result_path, "result_df_training_eval.parquet"),
            index=False,
            engine="pyarrow",
        )
        if args.save_outputs:
            output_df_eval.to_parquet(
                os.path.join(result_path, "output_df.parquet"),
                index=False,
                engine="pyarrow",
            )
        # Per-attribute (group) evaluation of the saved predictions.
        if args.run_evaluation_group:
            if args.eval_attributes is None:
                raise ValueError(
                    "If using run_evaluation_group, must specify eval_attributes"
                )
            strata_vars = ["phase", "task", "sensitive_attribute", "attribute"]
            output_df_eval = output_df_eval.assign(task=args.label_col)
            output_df_eval = output_df_eval.merge(
                row_id_map, left_on="row_id", right_on="features_row_id"
            ).merge(cohort_eval)
            # Long format: one row per (prediction, evaluated attribute).
            output_df_long = output_df_eval.melt(
                id_vars=set(output_df_eval.columns) - set(args.eval_attributes),
                value_vars=args.eval_attributes,
                var_name="attribute",
                value_name="group",
            )
            if args.run_evaluation_group_standard:
                evaluator = StandardEvaluator()
                result_df_group_standard_eval = evaluator.get_result_df(
                    output_df_long, strata_vars=strata_vars,
                )
                print(result_df_group_standard_eval)
                result_df_group_standard_eval.to_parquet(
                    os.path.join(result_path, "result_df_group_standard_eval.parquet"),
                    engine="pyarrow",
                    index=False,
                )
| [
"spfohl@stanford.edu"
] | spfohl@stanford.edu |
97c1b07702ca9f20a4725e088c59060e2482cf62 | f410a724c0b1226fe9e90eb2814a498686b7e609 | /compiler.py | a40cd6e5a688a19227b8ca69992fbd0bdde2185a | [
"Unlicense"
] | permissive | Alissonfelipe1234/turing_machine | bb26d51dcffe1354df1e2dfb7e0bc840ebec05ef | 8f72ce7ac74b96ac20ec33319b2a69ef5214418d | refs/heads/master | 2020-05-15T19:34:16.826096 | 2019-04-22T22:02:17 | 2019-04-22T22:02:17 | 182,459,225 | 0 | 0 | Unlicense | 2019-04-22T02:47:20 | 2019-04-20T22:13:33 | Python | UTF-8 | Python | false | false | 568 | py | from State import State
# Read the Turing-machine tape: every character of input.in becomes one cell
# (newline characters included, since list(read) splits the raw line).
archiveIn = open("input.in","r")
input = archiveIn.readlines()  # NOTE(review): shadows the built-in input()
archiveIn.close()
cells = []
reader = 0  # NOTE(review): assigned but never used
states = []
end_states = []
for read in input:
    cells = cells + list(read)
# Parse code.turing: skip "//" comment lines and blank lines, strip spaces,
# and split each remaining line into comma-separated fields.
archive = open("code.turing","r")
code = [i.replace('\n', ',\n').replace(' ', '').split(',') for i in archive.readlines() if not i.startswith("//") and not str(i).startswith("\n")]
archive.close()
# NOTE(review): `states` is still empty here (nothing populates it from the
# parsed `code`), so this line raises IndexError at runtime.
current_state = states[0]
# NOTE(review): `current_state` is never updated inside the loop, so even if
# the IndexError above were fixed this would spin forever printing 'teste'.
while current_state not in end_states:
    print('teste')
out = open("output.out","w")
out.writelines(''.join(cells))
out.close() | [
"alissonfelipe1234@outlook.com"
] | alissonfelipe1234@outlook.com |
17946f8b7f9e4100fd4af6b2fe791fea7d6260a9 | 6e98cd7d30d935887833f4d349cd6d82156a1ec9 | /myEnvironments/djangoenv/bin/python-config | 4beac09b8e2bec19d26c611ba04f0b1db430973f | [] | no_license | JonStults/python | 05da0625010a2e3e3882086aa69413846d7ff62d | 66c246cf4dbfcab5588ba4baaef46c8c7d879fa3 | refs/heads/master | 2020-07-06T00:45:22.240070 | 2016-11-17T00:39:00 | 2016-11-17T00:39:00 | 73,974,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | #!/Users/jon/Desktop/Coding_Dojo/Python/myEnvironments/djangoenv/bin/python
import sys
import getopt
import sysconfig
# Options understood by this python-config shim; later entries are appended
# only when the running interpreter is new enough to support them.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print the usage line to stderr and terminate with the given code.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit the requested build-configuration values, one option at a time,
# in the order they appeared on the command line.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # Older interpreters exposed the suffix under the legacy 'SO' key.
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"jdstults1@gmail.com"
] | jdstults1@gmail.com | |
fb2461241cba302a7f77584d7019333b92354f49 | bab097b60cc93bba63451cfb4dde1e608582da31 | /python/139.py | c3ca080701645d5950a41381dc48583f5a1612a0 | [] | no_license | isabella0428/Leetcode | c4807301d4676914d7239d1e38e9a85bcb0b557f | f8b74681b5c6e642a8cbac71ca1b5490509ac5e7 | refs/heads/master | 2020-08-25T00:58:33.779280 | 2020-02-07T09:21:13 | 2020-02-07T09:21:13 | 165,828,038 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | class Solution1:
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
# include every possible combinations
# time exceeded
def generateWord(size, tmp, wordDict):
nonlocal possible
length = sum([len(i) for i in tmp])
if length > size:
return
if length == size:
possible.append("".join(tmp))
return
for item in wordDict:
tmp.append(item)
generateWord(size, tmp[:], wordDict)
tmp = tmp[:-1]
size = len(s)
tmp = []
possible = []
generateWord(size, tmp, wordDict)
if s in possible:
return True
else:
return False
class Solution2:
    def wordBreak(self, s, wordDict):
        """
        :type s: str
        :type wordDict: List[str]
        :rtype: bool

        Bottom-up dynamic programming: reachable[end] is True when the
        prefix s[:end] can be split into dictionary words (reachable[0]
        represents the empty prefix and is trivially True).
        """
        n = len(s)
        words = set(wordDict)  # O(1) membership; same results as the list
        reachable = [True] + [False] * n
        for end in range(1, n + 1):
            reachable[end] = any(
                reachable[start] and s[start:end] in words
                for start in range(end)
            )
        return reachable[n]
class Solution3:
    def wordBreak(self, s, wordDict):
        """
        :type s: str
        :type wordDict: List[str]
        :rtype: bool

        Top-down recursion with memoisation, keyed by the start index of
        the suffix under consideration.  Note: the empty suffix is NOT
        treated as segmentable (so wordBreak("", ...) is False), matching
        the original substring-keyed implementation.
        """
        words = set(wordDict)
        memo = {}

        def segmentable(start):
            # Can s[start:] be written as a concatenation of dictionary words?
            if start in memo:
                return memo[start]
            result = s[start:] in words
            if not result:
                for split in range(start + 1, len(s)):
                    if s[start:split] in words and segmentable(split):
                        result = True
                        break
            memo[start] = result
            return result

        return segmentable(0)
class Solution4:
    def wordBreak(self, s, wordDict):
        """
        : type s: str
        : type wordDict: dict
        : rtype: bool

        Unbounded-knapsack formulation: feasible[pos] is True when the
        prefix s[:pos] decomposes into dictionary words.  For each prefix
        length we look for any word that ends exactly at `pos` with a
        feasible remainder before it.
        """
        n = len(s)
        feasible = [False] * (n + 1)
        feasible[0] = True  # empty prefix
        for pos in range(1, n + 1):
            for word in wordDict:
                w = len(word)
                # s.endswith(word, 0, pos) == (s[pos - w:pos] == word)
                if w <= pos and feasible[pos - w] and s.endswith(word, 0, pos):
                    feasible[pos] = True
                    break
        return feasible[n]
# Quick smoke test of the knapsack variant; expected output: True.
if __name__ == "__main__":
    a = Solution4()
    print(a.wordBreak("applepenapple",
["apple","pen"])) | [
"isabella_aus_china@163.com"
] | isabella_aus_china@163.com |
a4e2cd025347721566a4b4b6d33b1669cba139cf | 93a720d9242c73c919ec30f6018d126a391f473f | /ShowUserNonOwnerDriveACLs.py | 4611a76f24cca4f71c23283af816a2f0ad50292c | [] | no_license | scottreleehw/GAM-Scripts3 | c8fa4abddb64e47d8a3d30dd7e19e29634c9e965 | 7eab4f86214bfeb00ee4dd6131828a55f1f42c56 | refs/heads/master | 2023-01-09T06:08:08.093789 | 2020-11-05T19:36:14 | 2020-11-05T19:36:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,286 | py | #!/usr/bin/env python3
"""
# Purpose: For a Google Drive User, get all drive file ACLs for files except those indicating the user as owner
# Note: This script can use Basic or Advanced GAM:
# https://github.com/jay0lee/GAM
# https://github.com/taers232c/GAMADV-XTD3
# 1: Use print filelist to get selected ACLs
# Basic: gam user testuser@domain.com print filelist id title permissions owners > filelistperms.csv
# Advanced: gam user testuser@domain.com print filelist fields id,title,permissions,owners.emailaddress > filelistperms.csv
# 2: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,emailAddress"
# that lists the driveFileIds/Titles for all ACLs except those indicating the user as owner
# $ python3 ShowUserNonOwnerDriveACLs.py filelistperms.csv localperms.csv
"""
import csv
import re
import sys
# Column-name aliases: newer GAM exports call the title column 'name',
# older ones used 'title' (see the row.get fallback below).
FILE_NAME = 'name'
ALT_FILE_NAME = 'title'
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
# Matches flattened ACL columns such as "permissions.3.type", capturing the
# permission index.  NOTE(review): the dots are unescaped regex metacharacters
# and match any character; likely intended r"permissions\.(\d+)\.type".
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
# Output target: second CLI argument, or stdout when absent / given as '-'.
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
  outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
  outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle', 'emailAddress'], lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
# Input source: first CLI argument, or stdin when absent / given as '-'.
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
  inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
  inputFile = sys.stdin
# Emit one row per ACL that does not denote the file's own owner: any
# non-user permission, any non-owner role, or an owner entry whose email
# differs from owners.0.emailAddress.
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
  for k, v in iter(row.items()):
    mg = PERMISSIONS_N_TYPE.match(k)
    if mg and v:
      permissions_N = mg.group(1)
      emailAddress = row.get(f'permissions.{permissions_N}.emailAddress', '')
      if v != 'user' or row[f'permissions.{permissions_N}.role'] != 'owner' or emailAddress != row['owners.0.emailAddress']:
        outputCSV.writerow({'Owner': row['owners.0.emailAddress'],
                            'driveFileId': row['id'],
                            'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
                            'emailAddress': emailAddress})
# Close only the handles we opened ourselves (not stdin/stdout).
if inputFile != sys.stdin:
  inputFile.close()
if outputFile != sys.stdout:
  outputFile.close()
| [
"ross.scroggs@gmail.com"
] | ross.scroggs@gmail.com |
708561cc76ec21acd213e116819f0f0e2cf6b1ee | ff23003693ca137305ed540da8d723f595a97295 | /Portal/helpers/loop_requester.py | 9d8c3e654a9c439dac0f7738a57df0d0b66322f9 | [] | no_license | cash2one/Zinc | 28f422823e83ca2b145f052b6124dd96aab0984d | 6571e304ac141e259b89dd42d7ef853440953a48 | refs/heads/master | 2020-09-07T14:26:48.093529 | 2015-03-03T13:46:17 | 2015-03-03T13:46:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | import urllib
import re
from urllib import request
from urllib.error import HTTPError
from collections import deque
# Create your models here.
class LoopSpider:
    """Small breadth-first crawler: starting from one page, fetches up to 20
    same-host pages and mirrors each one under the local download/ folder."""

    def __init__(self, start_page):
        # Initialise all state unconditionally so start() is always safe to
        # call.  (Previously these attributes were only set for valid URLs,
        # and start() raised AttributeError when the start page had no host.)
        self.host = ''
        self.record = set()     # URLs already fetched and saved
        self.queue = deque([])  # frontier of URLs still to visit
        if not start_page.startswith('http'):
            start_page = 'http://' + start_page
        host = self.get_host(start_page)
        if host == '':
            return
        self.host = host
        self.queue.append(start_page)

    def start(self):
        """Crawl until the frontier is empty or 20 pages have been saved.

        Returns the set of URLs fetched successfully (empty for a bad seed).
        """
        if self.host == '':
            return set()
        while len(self.queue) != 0 and len(self.record) < 20:
            current = self.queue.popleft()
            data, url = self.get_data(current)
            # Skip failed fetches and pages already seen (post-redirect URL).
            if not data or url in self.record:
                continue
            self.record.add(url)
            self.save_data(data, url)
            url_list = self.get_list(data)
            self.append_queue(url_list, current)
        return self.record

    def append_queue(self, url_list, current):
        """Enqueue every same-host link that is neither visited nor pending."""
        for link in url_list:
            url = self.get_full_url(link, current)
            if not self.get_host(url) == self.host:
                continue
            if not url in self.record and not url in self.queue:
                self.queue.append(url)

    def mkdir(self, path):
        """Create `path` (and parents); True if created, False if it existed."""
        import os
        path = path.strip()
        path = path.rstrip('\\')
        if os.path.exists(path):
            return False
        os.makedirs(path)
        return True

    def get_host(self, url):
        """Return scheme+host ('http://example.com'), or '' for a non-URL."""
        match = re.match('http[s]?://([^/"\']+)', url)
        if match:
            return match.group(0)
        return ''

    def get_domain(self, url):
        """Return the bare host name, without the scheme prefix."""
        host = self.get_host(url)
        idx = host.find('://')
        return host[idx + 3:]

    def get_parent(self, url):
        """Strip trailing slashes and drop the last path segment."""
        url = url.rstrip('/')
        idx = url.rfind('/')
        return url[:idx]

    def get_full_url(self, url, current=''):
        """Resolve a (possibly relative) link against the current page URL."""
        if url.startswith('http'):
            return url
        if url == '/':
            return self.get_host(current)
        if url.startswith('#'):
            # Fragment-only link: treat as the current page's parent.
            return self.get_parent(current)
        if url.startswith('//'):
            # Protocol-relative link.
            return 'http:' + url
        if url.startswith('/'):
            return self.get_host(current) + url
        return self.get_parent(current) + '/' + url

    def get_data(self, url):
        """Fetch `url`; return (bytes, final_url) or (False, '') on failure.

        Catches URLError (the parent of HTTPError), so DNS/connection
        failures no longer abort the whole crawl."""
        try:
            res = request.urlopen(url)
            data = res.read()
            full_url = res.geturl()
        except urllib.error.URLError:
            data = False
            full_url = ''
        return data, full_url

    def get_list(self, data):
        """Extract the unique href/src link targets from an HTML byte string."""
        try:
            data = data.decode('UTF-8')
            url_list = re.findall('(?:href|src)=\"([^\'\"]+)\"', data)
            return list(set(url_list))
        except UnicodeDecodeError:
            # Binary payload (image, archive, ...): nothing to follow.
            return []

    def save_data(self, data, url):
        """Mirror `data` to download/<host>/<path>, normalising directory and
        extension-less URLs to *.html file names."""
        if not url.startswith('http'):
            return
        if url.startswith('#'):
            return
        if url.endswith('/'):
            url += 'index.html'
        elif url == self.get_host(url):
            url += '/index.html'
        elif url.rfind('.') < url.rfind('/'):
            url += '.html'
        idxl = url.find('://')
        idxr = url.rfind('/')
        path = url[idxl + 3: idxr]
        self.mkdir('download/' + path)
        # NOTE(review): prefer a with-block; the matching close() follows.
        file = open('download/' + url[idxl + 3:], 'wb')
        file.write(data)
file.close() | [
"yzj1995@vip.qq.com"
] | yzj1995@vip.qq.com |
d0e84eec6f027cb78cab561b724e5b11284793e9 | 089dcf959898d6cc80dcd26afd50c9603bafed25 | /PosLikelihood.py | bf202d5e528762c07806557a8d699ad3958f2737 | [] | no_license | seokhohong/missing-word | 5817a952ca3f6378ca28cadbf0eedfefe9b665ec | 7a2e9613a91f5b33a950cf54d41ad89b1e3f4012 | refs/heads/master | 2021-01-16T21:05:25.999504 | 2016-06-09T03:37:05 | 2016-06-09T03:37:05 | 60,747,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | __author__ = 'SEOKHO'
import pickle
from collections import Counter
import generateTagWindows
from textblob import TextBlob
from textblob_aptagger import PerceptronTagger
import math
from WindowProb import WindowProb
import statistics
import numpy
import lexicalizedTagWindows
# Length of the POS-tag n-gram window used throughout this module.
WIN_SIZE = 5
WIN_OFFSET = int((WIN_SIZE - 1) / 2)  # half-window radius (2 for a 5-window)
# Module-level perceptron POS tagger, shared by SynReplacer.fix().
aptagger = PerceptronTagger()
def removeWord(tokens):
    """Return one copy of *tokens* per interior position, each copy omitting
    that position's token (the first and last tokens are never removed)."""
    return [tokens[:idx] + tokens[idx + 1:] for idx in range(1, len(tokens) - 1)]
def modLikelihood(allLexTags, modProb4, windowProb5, tags, tagIndex):
    """Score every candidate tag `alt` as an insertion at `tagIndex`.

    For each alternative, the 4-token window around tagIndex is scored via
    modProb4.of(), the 5-token window with `alt` inserted in the middle via
    windowProb5.of(), and the ratio is normalised by (len(tags) - 1).
    Prints the ranked (score, tag) pairs and returns their summed score mass.
    """
    probs = []
    for alt in allLexTags:
        # NOTE(review): win4/winProb4 do not depend on `alt` and could be
        # hoisted out of this loop.
        win4 = generateTagWindows.makeWindow(tags, begin = tagIndex - 2, end = tagIndex + 2)
        winProb4 = modProb4.of(win4)
        win5 = list(win4)
        win5.insert(2, alt)
        win5 = tuple(win5)
        winProb5 = windowProb5.of(win5)
        probs.append((winProb5 / winProb4 / (len(tags) - 1), alt))
    probs.sort(reverse=True)
    print (probs)
    return sum([prob[0] for prob in probs])
class SynReplacer:
    """Ranks candidate POS tags for a word inserted at a given position,
    using n-gram tag-window counts loaded from pre-built pickle files."""

    def __init__(self, winSize = 5, lex = False, compFile = False):
        # winSize: tag-window length; lex: use the lexicalized tag model;
        # compFile: whether the window-count pickle is compressed.
        self.lex = lex
        self.lexFilename = "lex" if lex else ""
        comp = "Comp" if lex else "comp"
        if not compFile:
            comp = ""
        self.winSize = winSize
        # Window-count model, e.g. C:/MissingWord/5.pickle or lexComp5.pickle.
        self.winOrig = WindowProb("C:/MissingWord/"+self.lexFilename+comp+str(self.winSize)+".pickle", compressed = compFile)
        with open("lexTags.pickle", "rb") as f:
            self.allLexTags = pickle.load(f) #inefficient to use a lexicalized set, but it will still work for unlexicalized models
        with open("toLexicalize.pickle", "rb") as f:
            self.toLexicalize = pickle.load(f)
        # NOTE(review): this tagger is never used in this class; fix() uses
        # the module-level `aptagger` instead.
        self.tagger = PerceptronTagger()

    def fix(self, tokens, index, withTags = False):
        # Tag the token sequence, optionally lexicalize, then rank the
        # candidate tags for position `index`.
        blob = TextBlob(' '.join(tokens), pos_tagger = aptagger)
        tags = generateTagWindows.getCompleteTags(blob)
        if self.lex:
            tags = lexicalizedTagWindows.lexicalizeTags(tags, tokens, self.toLexicalize)
        return self.modLikelihood(tags, index, withTags = withTags)

    def modLikelihood(self, tags, tagIndex, withTags):
        """Count-based scoring of each candidate tag inserted at tagIndex.

        Returns descending (score, tag) pairs when withTags is truthy,
        otherwise only the sorted scores."""
        probs = []
        counts = Counter()
        for alt in self.allLexTags:
            # NOTE(review): range(2, 3) yields only offset 2, so this inner
            # loop runs exactly once; it looks like a leftover from
            # experimenting with several window offsets.
            for offset in range(2, 3):
                win4 = generateTagWindows.makeWindow(tags, begin = tagIndex - WIN_SIZE + offset + 1, end = tagIndex + offset)
                win5 = list(win4)
                win5.insert(self.winSize - offset - 1, alt)
                counts[alt] += self.winOrig.count(win5)
        for alt in self.allLexTags:
            # Add-one-style normalisation over the total window counts.
            probs.append((counts[alt] / (sum(counts.values()) + 1), alt))
        probs.sort(reverse=True)
        if withTags:
            return probs
        return [prob[0] for prob in probs]
def main():
    """Ad-hoc demo: rank candidate tags at every interior position of a
    hard-coded sentence."""
    synRepl = SynReplacer(lex = False)
    with open("C:/MissingWord/train/corpusPart0.txt", "r", encoding='utf8') as f:
        # Reads only the first corpus line into `line` ...
        for i, line in enumerate(f):
            line = line.strip()
            if i > 0 :
                break
        # ... which is then immediately overwritten by this fixed sentence.
        line = "Japan has suspended of buffalo mozzarella from Italy , after reports that high levels of dioxin have been found in the cheese ."
        #line = "The cat screamed and ran into the house ."
        tokens = line.split()
        # NOTE(review): the result of this call is discarded (warm-up?).
        synRepl.fix(tokens, 3, withTags = True)
        for i in range(1, len(tokens) - 1):
            print(i, synRepl.fix(tokens, i, withTags = True))
if __name__ == "__main__":
    main()
| [
"seokho_hong@yahoo.com"
] | seokho_hong@yahoo.com |
863a76211c553751f9f92f4eb80856715308cc1f | 3e61e59502a924b3321ea812db3cbe31c508a436 | /apply_model.py | 2c564ab9c47ffeb01ce7cb7272c411146197bb64 | [
"MIT"
] | permissive | kayzhou/Guba_emotion | 2c9d9e22c53c328001a250bae1b9f07b13bf0016 | 286f1824500c77d8b90c3dc1bb0e120d732a546d | refs/heads/master | 2020-03-24T00:34:17.707725 | 2019-03-06T20:31:24 | 2019-03-06T20:31:24 | 142,297,205 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | from sklearn.externals import joblib
from thulac import thulac
import os
import json
import numpy as np
from tqdm import tqdm
# Module-level resources shared by the functions below.
thu = thulac(seg_only=True)  # Chinese word segmenter (segmentation only)
clf = joblib.load('emo-LR-v1.model')  # pre-trained emotion classifier
in_dir = 'data/tweet'
out_dir = 'data/tweet_emo_v1-20180711'  # NOTE(review): unused in the code shown
def load_word_vec():
    """
    Load the ACL2018 financial-domain word vectors into a dict mapping
    word -> numpy array.
    """
    word_vec = {}
    print('加载词向量中 ...')
    # NOTE(review): the file handle opened here is never closed.
    for i, line in enumerate(open('data/sgns.financial.word')):
        if i <= 10:
            continue  # skip the first 11 lines (presumably header rows -- confirm)
        if i > 150000:
            break  # cap the vocabulary at 150k lines to bound memory
        words = line.strip().split(' ')
        word = words[0]
        word_vec[word] = np.array([float(num) for num in words[1:]])
        # except UnicodeDecodeError:
        #     print("编码问题,行 {}".format(i))
    print('加载词完成!一共 {}个词'.format(len(word_vec)))
    return word_vec
word_vec = load_word_vec()  # word -> embedding lookup table
def emo_predict(sentence):
    """Predict the emotion label for one sentence using the module-level
    segmenter (thu), embedding table (word_vec) and classifier (clf).

    Returns the raw clf.predict() output (callers index [0] for the label).
    """
    def sentence2vector(sentence):
        # Mean of the embeddings of all in-vocabulary tokens; stays all-zero
        # when nothing is in vocabulary.  Reshaped to (1, 300) for sklearn.
        global word_vec
        vector = np.zeros(300)
        count = 0
        for w in thu.cut(sentence):
            w = w[0]  # each thulac item is a sequence whose first element is the token
            if w in word_vec:
                vector += word_vec[w]
                count += 1
        if count > 0:
            vector = vector / count
        return vector.reshape(1, -1)
    X = sentence2vector(sentence)
    y_hat = clf.predict(X)
    return y_hat
# Pre-label every tweet: run the classifier over each tweet whose content
# length is in (10, 200) and append it to a per-label text file.
# (The bare string below is the original Chinese note: "pre-labelling".)
'''
打预标签
'''
for i, in_name in tqdm(enumerate(os.listdir(in_dir))):
    print(i, in_name)
    in_name = os.path.join(in_dir, in_name)
    # Each input file is JSON-lines: one tweet object per line.
    for j, line in enumerate(open(in_name)):
        d = json.loads(line)
        content = d['content']
        if 10 < len(content) < 200:
            y_hat = emo_predict(content)[0]
            # Append "label<TAB>content" to data/content/<label>.txt
            with open('data/content/{}.txt'.format(y_hat), 'a') as f:
                f.write(str(y_hat) + '\t' + content + '\n')
| [
"zkzhou_91@163.com"
] | zkzhou_91@163.com |
f49c1c1254199ca9968375441f0872d83cc0047d | 87c7e05ba9eebec9ab07090e61ac743b61a88750 | /product/migrations/0002_product_featured.py | c004dc4f2aa83732ec97e92347c072e676dd918b | [] | no_license | ristiriantoadi/belajar-django | faad7ed25505b9a72a0c560e560f0edc66f3f062 | 7141d96a395b370fd72fa0eaa6760b440c375ab7 | refs/heads/master | 2023-03-12T18:05:29.280333 | 2021-02-28T10:40:43 | 2021-02-28T10:40:43 | 343,033,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.1.7 on 2021-02-28 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the boolean `featured` flag
    (default True) to the `product` app's Product model."""

    dependencies = [
        ('product', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='featured',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"ristiriantoadi@gmail.com"
] | ristiriantoadi@gmail.com |
cd966a58e69dc06f2d0a257a5dfbfcd40725bc3e | dec9ede4b28b8a5ac79ab5c89754f6ff5d65d8e1 | /source/main/settings.py | a97452f77ebe21ed189fdfb51743c0d75bacf140 | [] | no_license | Beknasar/python_group_6_homework_57_Ulanbek_uulu_Beknasar | 036f1eb2f84626344581bb7d864e63e40c3d2e4f | 3bf5e4eaa7133955b1bbb0131ebf9f4732965b1f | refs/heads/master | 2022-12-09T02:10:11.232216 | 2020-09-02T15:44:49 | 2020-09-02T15:44:49 | 292,327,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '83le51*6hai4mci%b-xtei(cms3smwhl9k4wy2m+l$8(^s=0qf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'widget_tweaks',  # third-party form/template-tag helpers
    'accounts',       # local apps
    'webapp'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development database: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Auth flow: where login/logout redirect, and the named login route.
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
LOGIN_URL = 'login'
| [
"680633@gmail.com"
] | 680633@gmail.com |
37c1d087494730331ebe6127f9be25970cf36954 | 7e0ea0944effb5b1773f7a46823f4eedf1b325c4 | /Python/html/python2/main.py | 2e377249c116211d4fc38a24ecd00acbc41838f6 | [] | no_license | aldamatrack/testing | 85256e60063a97e61a6cd55e495085796a8e843e | b697fb206007780e6feafad685bc3d81b85c1c24 | refs/heads/main | 2023-04-23T13:45:39.552513 | 2021-05-09T04:42:23 | 2021-05-09T04:42:23 | 363,562,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | import requests as r
from bs4 import BeautifulSoup

# Scrape the article body (every <p class="paywall"> element) and append
# its text to prove.txt.  `r` is the `requests` module imported above.
# Bug fix: the URL literal previously had a leading space (" http://..."),
# which is at best fragile in requests and invalid as a URL.
URL = "http://www.vanityfair.com/society/2014/06/monica-lewinsky-humiliation-culture"

with open("prove.txt", "a+") as outfile:
    page_html = r.get(URL).text
    soup = BeautifulSoup(page_html, "lxml")
    paragraphs = soup.find_all("p", class_="paywall")
    for paragraph in paragraphs:
        try:
            outfile.write(str(paragraph.text))
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  A paragraph that cannot be
            # written is skipped with a marker, not fatal.
            print("**")
            continue
| [
"amaltesc@cisco.com"
] | amaltesc@cisco.com |
36a952613ea7e0c2c3c7e896fce875fb1f52b1c8 | 8310787600c77126c2a03b784d4c3e3972237dee | /blogpost/__init__.py | 8e0858339994980fe1d8c6aad3a061ec99f332be | [] | no_license | HarsimranSingh6321/flask-blogpost | ccad1d38106164a683b9fb651b17aa1fffecc6b7 | 6bcf69431611b0450f77bd3600ce95086fad16d6 | refs/heads/master | 2022-11-17T08:09:59.886507 | 2020-07-09T14:29:17 | 2020-07-09T14:29:17 | 278,145,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail , Message
from flask import Flask , Blueprint
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from blogpost.config import Config
# app=Flask(__name__)
# Module-level extension singletons: created unbound here and attached to a
# concrete application inside create_app() (application-factory pattern).
db=SQLAlchemy()
# NOTE(review): this Bootstrap() instance is created and discarded; another
# one is created inside create_app() — confirm whether this line is needed.
Bootstrap()
login_manager=LoginManager()
mail=Mail()
# Endpoint name (same notation as url_for) of the view unauthenticated
# users are sent to when @login_required fails; lives in the 'users' blueprint.
login_manager.login_view = 'users.login' #similar as url_for function it will also lead to login function inside blueprint users
# Bootstrap alert category used for the flashed "please log in" message.
login_manager.login_message_category = 'info'
def create_app(config_class=Config):
    """Application factory: create and configure a Flask application.

    :param config_class: configuration object/class loaded into
        ``app.config`` (defaults to :class:`blogpost.config.Config`).
    :return: the fully configured :class:`~flask.Flask` instance.
    """
    app = Flask(__name__)
    # Bug fix: honour the ``config_class`` argument instead of always
    # loading the hard-coded ``Config`` class (the parameter was ignored).
    # Backward compatible: the default remains ``Config``.
    app.config.from_object(config_class)
    # Bind the module-level extension singletons to this app instance.
    db.init_app(app)
    Bootstrap().init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)
    # Blueprints are imported here (not at module level) to avoid circular
    # imports between the package and its sub-modules, then registered.
    from blogpost.users.routes import users
    from blogpost.main.routes import main
    from blogpost.posts.routes import posts
    from blogpost.errors.handler import errors
    app.register_blueprint(users)
    app.register_blueprint(main)
    app.register_blueprint(posts)
    app.register_blueprint(errors)
    return app
| [
"noreply@github.com"
] | noreply@github.com |
becd6b60081e776ae5a505d8fda91b85fce26a25 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/servicefabric/v20190301preview/service.py | a1dd32a3ef5290bb5498425ed70ed2ee1b75dad7 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 29,220 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ServiceArgs', 'Service']
@pulumi.input_type
class ServiceArgs:
def __init__(__self__, *,
application_name: pulumi.Input[str],
cluster_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_kind: pulumi.Input[Union[str, 'ServiceKind']],
correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]] = None,
default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_description: Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]] = None,
placement_constraints: Optional[pulumi.Input[str]] = None,
service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,
service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]] = None,
service_type_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Service resource.
:param pulumi.Input[str] application_name: The name of the application resource.
:param pulumi.Input[str] cluster_name: The name of the cluster resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'ServiceKind']] service_kind: The kind of service (Stateless or Stateful).
:param pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]] correlation_scheme: A list that describes the correlation of the service with other services.
:param pulumi.Input[Union[str, 'MoveCost']] default_move_cost: Specifies the move cost for the service.
:param pulumi.Input[str] location: Azure resource location.
:param pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']] partition_description: Describes how the service is partitioned.
:param pulumi.Input[str] placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)".
:param pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]] service_load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects.
:param pulumi.Input[str] service_name: The name of the service resource in the format of {applicationName}~{serviceName}.
:param pulumi.Input[Union[str, 'ArmServicePackageActivationMode']] service_package_activation_mode: The activation Mode of the service package
:param pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]] service_placement_policies: A list that describes the correlation of the service with other services.
:param pulumi.Input[str] service_type_name: The name of the service type
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Azure resource tags.
"""
pulumi.set(__self__, "application_name", application_name)
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_kind", service_kind)
if correlation_scheme is not None:
pulumi.set(__self__, "correlation_scheme", correlation_scheme)
if default_move_cost is not None:
pulumi.set(__self__, "default_move_cost", default_move_cost)
if location is not None:
pulumi.set(__self__, "location", location)
if partition_description is not None:
pulumi.set(__self__, "partition_description", partition_description)
if placement_constraints is not None:
pulumi.set(__self__, "placement_constraints", placement_constraints)
if service_load_metrics is not None:
pulumi.set(__self__, "service_load_metrics", service_load_metrics)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if service_package_activation_mode is not None:
pulumi.set(__self__, "service_package_activation_mode", service_package_activation_mode)
if service_placement_policies is not None:
pulumi.set(__self__, "service_placement_policies", service_placement_policies)
if service_type_name is not None:
pulumi.set(__self__, "service_type_name", service_type_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="applicationName")
def application_name(self) -> pulumi.Input[str]:
"""
The name of the application resource.
"""
return pulumi.get(self, "application_name")
@application_name.setter
def application_name(self, value: pulumi.Input[str]):
pulumi.set(self, "application_name", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the cluster resource.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceKind")
def service_kind(self) -> pulumi.Input[Union[str, 'ServiceKind']]:
"""
The kind of service (Stateless or Stateful).
"""
return pulumi.get(self, "service_kind")
@service_kind.setter
def service_kind(self, value: pulumi.Input[Union[str, 'ServiceKind']]):
pulumi.set(self, "service_kind", value)
@property
@pulumi.getter(name="correlationScheme")
def correlation_scheme(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "correlation_scheme")
@correlation_scheme.setter
def correlation_scheme(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCorrelationDescriptionArgs']]]]):
pulumi.set(self, "correlation_scheme", value)
@property
@pulumi.getter(name="defaultMoveCost")
def default_move_cost(self) -> Optional[pulumi.Input[Union[str, 'MoveCost']]]:
"""
Specifies the move cost for the service.
"""
return pulumi.get(self, "default_move_cost")
@default_move_cost.setter
def default_move_cost(self, value: Optional[pulumi.Input[Union[str, 'MoveCost']]]):
pulumi.set(self, "default_move_cost", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Azure resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="partitionDescription")
def partition_description(self) -> Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]]:
"""
Describes how the service is partitioned.
"""
return pulumi.get(self, "partition_description")
@partition_description.setter
def partition_description(self, value: Optional[pulumi.Input[Union['NamedPartitionSchemeDescriptionArgs', 'SingletonPartitionSchemeDescriptionArgs', 'UniformInt64RangePartitionSchemeDescriptionArgs']]]):
pulumi.set(self, "partition_description", value)
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> Optional[pulumi.Input[str]]:
"""
The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)".
"""
return pulumi.get(self, "placement_constraints")
@placement_constraints.setter
def placement_constraints(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "placement_constraints", value)
@property
@pulumi.getter(name="serviceLoadMetrics")
def service_load_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]]:
"""
The service load metrics is given as an array of ServiceLoadMetricDescription objects.
"""
return pulumi.get(self, "service_load_metrics")
@service_load_metrics.setter
def service_load_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadMetricDescriptionArgs']]]]):
pulumi.set(self, "service_load_metrics", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service resource in the format of {applicationName}~{serviceName}.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="servicePackageActivationMode")
def service_package_activation_mode(self) -> Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]]:
"""
The activation Mode of the service package
"""
return pulumi.get(self, "service_package_activation_mode")
@service_package_activation_mode.setter
def service_package_activation_mode(self, value: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]]):
pulumi.set(self, "service_package_activation_mode", value)
@property
@pulumi.getter(name="servicePlacementPolicies")
def service_placement_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "service_placement_policies")
@service_placement_policies.setter
def service_placement_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServicePlacementPolicyDescriptionArgs']]]]):
pulumi.set(self, "service_placement_policies", value)
@property
@pulumi.getter(name="serviceTypeName")
def service_type_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service type
"""
return pulumi.get(self, "service_type_name")
@service_type_name.setter
def service_type_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_type_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Service(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCorrelationDescriptionArgs']]]]] = None,
default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_description: Optional[pulumi.Input[Union[pulumi.InputType['NamedPartitionSchemeDescriptionArgs'], pulumi.InputType['SingletonPartitionSchemeDescriptionArgs'], pulumi.InputType['UniformInt64RangePartitionSchemeDescriptionArgs']]]] = None,
placement_constraints: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_kind: Optional[pulumi.Input[Union[str, 'ServiceKind']]] = None,
service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadMetricDescriptionArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,
service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementPolicyDescriptionArgs']]]]] = None,
service_type_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
The service resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_name: The name of the application resource.
:param pulumi.Input[str] cluster_name: The name of the cluster resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCorrelationDescriptionArgs']]]] correlation_scheme: A list that describes the correlation of the service with other services.
:param pulumi.Input[Union[str, 'MoveCost']] default_move_cost: Specifies the move cost for the service.
:param pulumi.Input[str] location: Azure resource location.
:param pulumi.Input[Union[pulumi.InputType['NamedPartitionSchemeDescriptionArgs'], pulumi.InputType['SingletonPartitionSchemeDescriptionArgs'], pulumi.InputType['UniformInt64RangePartitionSchemeDescriptionArgs']]] partition_description: Describes how the service is partitioned.
:param pulumi.Input[str] placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)".
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'ServiceKind']] service_kind: The kind of service (Stateless or Stateful).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadMetricDescriptionArgs']]]] service_load_metrics: The service load metrics is given as an array of ServiceLoadMetricDescription objects.
:param pulumi.Input[str] service_name: The name of the service resource in the format of {applicationName}~{serviceName}.
:param pulumi.Input[Union[str, 'ArmServicePackageActivationMode']] service_package_activation_mode: The activation Mode of the service package
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementPolicyDescriptionArgs']]]] service_placement_policies: A list that describes the correlation of the service with other services.
:param pulumi.Input[str] service_type_name: The name of the service type
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Azure resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The service resource.
:param str resource_name: The name of the resource.
:param ServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
correlation_scheme: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCorrelationDescriptionArgs']]]]] = None,
default_move_cost: Optional[pulumi.Input[Union[str, 'MoveCost']]] = None,
location: Optional[pulumi.Input[str]] = None,
partition_description: Optional[pulumi.Input[Union[pulumi.InputType['NamedPartitionSchemeDescriptionArgs'], pulumi.InputType['SingletonPartitionSchemeDescriptionArgs'], pulumi.InputType['UniformInt64RangePartitionSchemeDescriptionArgs']]]] = None,
placement_constraints: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_kind: Optional[pulumi.Input[Union[str, 'ServiceKind']]] = None,
service_load_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceLoadMetricDescriptionArgs']]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_package_activation_mode: Optional[pulumi.Input[Union[str, 'ArmServicePackageActivationMode']]] = None,
service_placement_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServicePlacementPolicyDescriptionArgs']]]]] = None,
service_type_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceArgs.__new__(ServiceArgs)
if application_name is None and not opts.urn:
raise TypeError("Missing required property 'application_name'")
__props__.__dict__["application_name"] = application_name
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["correlation_scheme"] = correlation_scheme
__props__.__dict__["default_move_cost"] = default_move_cost
__props__.__dict__["location"] = location
__props__.__dict__["partition_description"] = partition_description
__props__.__dict__["placement_constraints"] = placement_constraints
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_kind is None and not opts.urn:
raise TypeError("Missing required property 'service_kind'")
__props__.__dict__["service_kind"] = service_kind
__props__.__dict__["service_load_metrics"] = service_load_metrics
__props__.__dict__["service_name"] = service_name
__props__.__dict__["service_package_activation_mode"] = service_package_activation_mode
__props__.__dict__["service_placement_policies"] = service_placement_policies
__props__.__dict__["service_type_name"] = service_type_name
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicefabric/v20190301preview:Service"), pulumi.Alias(type_="azure-native:servicefabric:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20170701preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20170701preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20190301:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20190301:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20190601preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20190601preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20191101preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20191101preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20200301:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20200301:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20201201preview:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20201201preview:Service"), pulumi.Alias(type_="azure-native:servicefabric/v20210601:Service"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20210601:Service")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Service, __self__).__init__(
'azure-native:servicefabric/v20190301preview:Service',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServiceArgs.__new__(ServiceArgs)
__props__.__dict__["correlation_scheme"] = None
__props__.__dict__["default_move_cost"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partition_description"] = None
__props__.__dict__["placement_constraints"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["service_kind"] = None
__props__.__dict__["service_load_metrics"] = None
__props__.__dict__["service_package_activation_mode"] = None
__props__.__dict__["service_placement_policies"] = None
__props__.__dict__["service_type_name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Service(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="correlationScheme")
def correlation_scheme(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceCorrelationDescriptionResponse']]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "correlation_scheme")
@property
@pulumi.getter(name="defaultMoveCost")
def default_move_cost(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the move cost for the service.
"""
return pulumi.get(self, "default_move_cost")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Azure resource etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Azure resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionDescription")
def partition_description(self) -> pulumi.Output[Optional[Any]]:
"""
Describes how the service is partitioned.
"""
return pulumi.get(self, "partition_description")
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> pulumi.Output[Optional[str]]:
"""
The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue specify the following: "NodeColor == blue)".
"""
return pulumi.get(self, "placement_constraints")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="serviceKind")
def service_kind(self) -> pulumi.Output[str]:
"""
The kind of service (Stateless or Stateful).
"""
return pulumi.get(self, "service_kind")
@property
@pulumi.getter(name="serviceLoadMetrics")
def service_load_metrics(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceLoadMetricDescriptionResponse']]]:
"""
The service load metrics is given as an array of ServiceLoadMetricDescription objects.
"""
return pulumi.get(self, "service_load_metrics")
@property
@pulumi.getter(name="servicePackageActivationMode")
def service_package_activation_mode(self) -> pulumi.Output[Optional[str]]:
"""
The activation Mode of the service package
"""
return pulumi.get(self, "service_package_activation_mode")
@property
@pulumi.getter(name="servicePlacementPolicies")
def service_placement_policies(self) -> pulumi.Output[Optional[Sequence['outputs.ServicePlacementPolicyDescriptionResponse']]]:
"""
A list that describes the correlation of the service with other services.
"""
return pulumi.get(self, "service_placement_policies")
@property
@pulumi.getter(name="serviceTypeName")
def service_type_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the service type
"""
return pulumi.get(self, "service_type_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
e247ee8cdc59cb6e464b9aa9317d277beb69043b | 3c8f2d7596655b38ab1c776738c4901358003d86 | /channel_apk.py | 325fa74acb7bdff0aba8dfe472d730ec98487549 | [] | no_license | zhy0313/yeyuPythonChannels | 78b37e25a1a82995ea78fd9436991aad3b4e5ae0 | 5ac22b3a4cd1ff14a9c87b08476c15244e0aafaa | refs/heads/master | 2021-05-01T08:22:05.597260 | 2017-01-22T07:29:08 | 2017-01-22T07:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,116 | py | import re
import os
def getprofileinfo(profile_path):
    """Read signing configuration from an Android ``local.properties`` file.

    :param profile_path: path to the properties file.
    :return: tuple ``(keystore_path, alias, storepassword)``; any entry
        missing from the file is returned as ``None``.
    """
    keystore_path = None
    storepassword = None
    alias = None
    with open(profile_path, 'r') as f:
        for line in f:
            stripped = line.strip()
            # Skip blank lines and '#' comments.
            if not stripped or stripped.startswith('#'):
                continue
            # Bug fix: split on the *first* '=' only, so values that
            # themselves contain '=' (e.g. passwords) are kept intact;
            # the original ``split("=")`` truncated them.
            key, sep, value = stripped.partition('=')
            if not sep:
                continue
            key = key.strip()
            value = value.strip()
            if key == 'key.store':
                keystore_path = value
            elif key == 'key.alias':
                alias = value
            elif key == 'key.alias.password':
                # NOTE(review): the *store* password is read from the
                # 'key.alias.password' property, as in the original code —
                # confirm whether 'key.store.password' was intended.
                storepassword = value
    return keystore_path, alias, storepassword
def getapk(curdir):
    """Return the full path of the first ``*.apk`` entry found in *curdir*.

    Directory order is whatever ``os.listdir`` yields; returns ``None``
    when no APK file is present.
    """
    apk_paths = (
        os.path.join(curdir, entry)
        for entry in os.listdir(curdir)
        if os.path.splitext(entry)[1] == '.apk'
    )
    return next(apk_paths, None)
def apkdecode(apkfile, inputfile):
    """Disassemble *apkfile* into directory *inputfile* via apktool.

    Requires ``apktool.sh`` on the PATH; ``-f`` overwrites any existing
    output directory.  NOTE(review): paths are unquoted on the shell
    command line — spaces in either path will break the command.
    """
    command = "apktool.sh d -f " + apkfile + " -o " + inputfile
    os.system(command)
def getchannels(channelfile):
    """Read channel names from *channelfile*, one name per line.

    :param channelfile: path to the channel list file.
    :return: list of lines with trailing newlines removed.  Blank lines
        are kept as empty strings (the caller only checks for an empty list).
    """
    with open(channelfile, 'r') as f:
        # The original built this list with a manual append loop and ended
        # with an unreachable ``return None`` after the ``with`` block
        # (the block always returned or raised); both are cleaned up here.
        return [line.strip('\n') for line in f]
def replace_mainfestchannel(mainfest, channel):
    """Rewrite *mainfest* in place, setting both channel meta-data values.

    Updates the UMENG_CHANNEL and REFERRER_NAME ``<meta-data>`` entries to
    *channel*.  (The 'mainfest' spelling of the parameter is kept from the
    original so callers are unaffected.)
    """
    with open(mainfest, 'r') as f:
        manifest_txt = f.read()
    # Apply both substitutions on the in-memory text and write once; the
    # original read and rewrote the file separately for each substitution.
    # The final file content is identical.
    manifest_txt = replace_umengchannel(channel, manifest_txt)
    manifest_txt = replace_referrername(channel, manifest_txt)
    with open(mainfest, 'w') as f:
        f.write(manifest_txt)
def replace_umengchannel(channel, manifest):
    """Return *manifest* with the UMENG_CHANNEL meta-data value set to *channel*."""
    umeng_re = re.compile(
        r'(<meta-data\s+android:name="UMENG_CHANNEL"\s+android:value=")'
        r'(\S+)'
        r'("\s?/>)'
    )
    return umeng_re.sub(r'\g<1>' + channel + r'\g<3>', manifest)
def replace_referrername(channel, manifest):
    """Return *manifest* with the REFERRER_NAME meta-data value set to *channel*."""
    referrer_re = re.compile(
        r'(<meta-data\s+android:name="REFERRER_NAME"\s+android:value=")'
        r'(\S+)'
        r'("\s?/>)'
    )
    return referrer_re.sub(r'\g<1>' + channel + r'\g<3>', manifest)
def build_unsigned_apk(builddir, apkfile):
    """Rebuild the apktool working directory *builddir* into *apkfile*.

    :param builddir: apktool-decoded source directory.
    :param apkfile: output path for the unsigned APK.
    :return: *apkfile*, the path apktool was asked to write.
    """
    command = 'apktool.sh b ' + builddir + ' -o ' + apkfile
    os.system(command)
    # Bug fix: the original returned ``builddir.join(apkfile)``, which is
    # *str.join* — it interleaves ``builddir`` between the characters of
    # ``apkfile`` and produces garbage.  The output path was clearly meant.
    # (Callers in this file ignore the return value, so this is safe.)
    return apkfile
def jarsigner(keystore_path, storepassword, signedapk, unsignedapk, alias):
    """Sign *unsignedapk* with the JDK ``jarsigner`` tool.

    Writes the signed APK to *signedapk* using the key *alias* from
    *keystore_path*.  NOTE(review): the password is passed on the command
    line (visible in the process list) and all arguments are unquoted —
    confirm inputs are trusted and contain no spaces/shell metacharacters.
    """
    command = "jarsigner -sigalg MD5withRSA -digestalg SHA1 -keystore " + keystore_path \
        + " -storepass " + storepassword + " -signedjar " + signedapk + " " + unsignedapk \
        + " " + alias
    os.system(command)
def manychannel_apk():
    """Produce one signed APK per channel listed in ``channel.txt``.

    Pipeline: read signing config from ``local.properties``, decode the
    source APK with apktool, then for each channel rewrite the manifest's
    channel meta-data, rebuild an unsigned APK, and sign it.
    Outputs land in ``unsigned/`` and ``signed/`` under the working dir.
    """
    # curdir = os.getcwd()
    # NOTE(review): working directory is hard-coded to one developer's
    # machine; presumably the commented os.getcwd() line was the portable
    # intent — confirm before enabling.
    curdir = '/Users/tuyc/Desktop/apktool/'
    (keystore_path, alias, storepassword) = getprofileinfo(os.path.join(curdir, 'local.properties'))
    # Ensure the apktool build directory exists.
    builddir = os.path.join(curdir, 'build')
    if not os.path.exists(builddir):
        os.mkdir(builddir)
    # The channel list is mandatory; abort with a message if missing.
    channelfile = os.path.join(curdir, 'channel.txt')
    if not os.path.exists(channelfile):
        print('channel.txt not exist!')
        return None
    # Output directories for intermediate (unsigned) and final (signed) APKs.
    unsigneddir = os.path.join(curdir, 'unsigned')
    if not os.path.exists(unsigneddir):
        os.mkdir(unsigneddir)
    signeddir = os.path.join(curdir, 'signed')
    if not os.path.exists(signeddir):
        os.mkdir(signeddir)
    # Locate the source APK to repackage.
    apkfile = getapk(curdir)
    if apkfile is None:
        print('no apk file!')
        return None
    # Decode once; each channel iteration patches the decoded manifest.
    apkdecode(apkfile, builddir)
    channels = getchannels(channelfile)
    if channels is None or len(channels) == 0:
        print('channel.txt have not channelinfo!')
        return None
    for channel in channels:
        manifest = os.path.join(builddir, 'AndroidManifest.xml')
        replace_mainfestchannel(manifest, channel)
        unsignedapk = os.path.join(unsigneddir, channel + '.apk')
        build_unsigned_apk(builddir, unsignedapk)
        signedapk = os.path.join(signeddir, channel + '.apk')
        jarsigner(keystore_path, storepassword, signedapk, unsignedapk, alias)

# Runs immediately on execution (and on import — no __main__ guard).
manychannel_apk()
| [
"136911168@qq.com"
] | 136911168@qq.com |
a27b0f66484ea5d83df62c7c951493441daa1baf | 97e223521f6d280aef43d3e7ecf032fbfc5a8288 | /Lesson_8/Task_4.py | d0b35a77b42395aeb7ed6430a94a2fb21ee8d7cc | [] | no_license | kiribon4ik/Basics_python | dc2bba98457adaf4ae29fbe8a46e75951ecc4a99 | 64a31864e3d95b9e32fcb57264580deba18d577f | refs/heads/master | 2021-05-17T09:22:10.212427 | 2020-06-26T14:59:07 | 2020-06-26T14:59:07 | 250,724,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | class Warehouse:
def __init__(self):
self.equip_list = []
self.num_places = 50
class OfficeEquipment:
def __init__(self, name, quantity, brand_new=False):
self.name = name
self.quantity = quantity
self.brand_new = brand_new
class Printer(OfficeEquipment):
def __init__(self, name, quantity, print_speed, brand_new=False):
super().__init__(name, quantity, brand_new)
self.print_speed = print_speed
class Scanner(OfficeEquipment):
def __init__(self, name, quantity, scan_speed, brand_new=False):
super().__init__(name, quantity, brand_new)
self.scan_speed = scan_speed
class Xerox(OfficeEquipment):
def __init__(self, name, quantity, num_copies, brand_new=False):
super().__init__(name, quantity, brand_new)
self.num_copies = num_copies
| [
"kiribon4ik@gmail.com"
] | kiribon4ik@gmail.com |
351061615153182fe976eb1400dffb87162bff40 | 31601597bb866bda0de4a81f2baee8ac22e43c86 | /fruitday/settings.py | 28cb9ae210bb1c8d76afc46ab405ad0706efb599 | [] | no_license | dragon7577/Fruitday | f9260af001836d3036557bc75742a8650a05cf93 | 0c4e2c948bd6cc0c29ff06dd33cfaf2f69d69a33 | refs/heads/master | 2020-04-23T22:30:23.784248 | 2019-02-19T15:57:43 | 2019-02-19T15:57:43 | 171,504,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,357 | py | """
Django settings for fruitday project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-#qi4iph#!@m8b$cj+r@6-403k7-=m=-34$wf#k@3a10_8f7gx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Cart',
'Category',
'Product',
'User',
'index',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fruitday.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fruitday.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'fruitday',
'HOST':'localhost',
'USER':'root',
'PASSWORD':'123456',
'PORT':3306,
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
| [
"sz_huangdong@163.com"
] | sz_huangdong@163.com |
a79a1d7d246e5258d095ac26e0818219406db420 | 08e49773756bd299864d1b278fcf1e859efb6430 | /first2/src/using_list.py | 858ae9ff2163caea049c7ca1deb7642728b393bc | [] | no_license | Chrlol/Chrlol | f481ea489a4447edc372d1c8a33912b48c1f605d | db2727eb60b5d3ab6ff12410305729d0a5c22868 | refs/heads/master | 2016-08-06T22:54:38.098242 | 2013-09-14T16:14:31 | 2013-09-14T16:14:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # This is my shopping list #
shoplist = ['apple', 'mango', 'carrot', 'banana']
print('I have', len(shoplist), 'items to purchase.')
print('These items are:', end=' ')
for item in shoplist:
print(item, end=' ')
print('\nI also have to buy rice.')
shoplist.append('rice')
print('My shopping list is now', shoplist)
print('I will sort my list now')
shoplist.sort()
print('Sorted shopping list is', shoplist)
print('The first item I will buy is', shoplist[0])
olditem = shoplist[0]
del shoplist[0]
print('I bought the', olditem)
print('My shopping list is now', shoplist) | [
"Chrfeilberg@gmail.com"
] | Chrfeilberg@gmail.com |
c0680485e5008a6554b28a45fbd927848f84b0a4 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/bwscrape/basic_twitter_scrapersefton_12.py | 60ef63d50469e74904a0718aad24bf8abe06d460 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py | ###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'STEVEN Patton MANN'
RESULTS_PER_PAGE = '50'
LANGUAGE = 'en'
NUM_PAGES = 1500
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
#print result
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['created_at'] = result['created_at']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, faileddd to scrape %s' % base_url
break
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'STEVEN Patton MANN'
RESULTS_PER_PAGE = '50'
LANGUAGE = 'en'
NUM_PAGES = 1500
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
#print result
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['created_at'] = result['created_at']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, faileddd to scrape %s' % base_url
break
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
f559fe69be6abd14b6d770d3782855f366b409b1 | 24573c3fea0f81bea125e51835c5e8e14fd4bf6f | /4/both.py | 5485928088556c2eef52c6bde0d1ba3290e9b914 | [] | no_license | vaderkvarn/aoc19 | 09252b494addf4e073051070b8f647f21e5eb47f | b9c5dca388115a007335f812fa4ed519f201975d | refs/heads/master | 2020-09-23T04:34:15.339611 | 2019-12-29T15:48:22 | 2019-12-29T15:48:22 | 225,403,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py |
min_n = 245318
max_n = 765747
def c1(s):
for i in range(1, len(s)):
if s[i] == s[i - 1]:
return True
return False
def c2(s):
for i in range(1, len(s)):
if s[i] < s[i - 1]:
return False
return True
def c3(s):
for i in range(2, len(s) + 1):
c = s[i-1]
if s[i-2] == c and (i == 2 or s[i-3] != c) and (i == len(s) or s[i] != c):
return True
return False
num_p1 = 0
num_p2 = 0
for i in range(min_n, max_n):
s = str(i)
if c1(s) and c2(s):
num_p1 += 1
if c3(s) and c2(s):
num_p2 += 1
print(num_p1)
print(num_p2) | [
"hjalmar@wopii.com"
] | hjalmar@wopii.com |
713242c77de82f4a140ad0d44ae53b149b48bb82 | e6e9b0544c84b3cb1f638f5bd5df1e4bd54145c0 | /app.py | 293613ac0260a4c63df46914be23fbc1e7adc632 | [] | no_license | huechuwhc/huehij2eicmocj | 2f98a5c419f6bf3ee57c3b8eda1491618973d1f1 | b335b7b0a64b32e5d1d85d65d44386677bde93c5 | refs/heads/main | 2023-01-01T18:31:13.089963 | 2020-10-20T04:10:28 | 2020-10-20T04:10:28 | 305,581,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | print("Title of program: Encouragement bot")
print()
while True:
description = input("how u feel?")
list_of_words = description.split()
feelings_list = []
encouragement_list = []
counter = 0
for each_word in list_of_words:
if each_word == "sad":
feelings_list.append("sad")
encouragement_list.append("tomorrow will be even worse :)")
counter += 1
if each_word == "happy":
feelings_list.append("happy")
encouragement_list.append("weird flex but okay")
counter += 1
if each_word == "tired":
feelings_list.append("tired")
encouragement_list.append("go sleep lor")
counter += 1
if counter == 0:
output = "woi say properly leh"
elif counter == 1:
output = "It seems that you are feeling quite " + feelings_list[0] + ". However, do remember that "+ encouragement_list[0] + "! Hope you feel better :)"
else:
feelings = ""
for i in range(len(feelings_list)-1):
feelings += feelings_list[i] + ", "
feelings += "and " + feelings_list[-1]
encouragement = ""
for j in range(len(encouragement_list)-1):
encouragement += encouragement_list[i] + ", "
encouragement += "and " + encouragement_list[-1]
output = "It seems that you are feeling quite " + feelings + ". Please always remember "+ encouragement + "! Hope you feel better :)"
print()
print(output)
print()
| [
"noreply@github.com"
] | noreply@github.com |
26fdec633ba574b89f3c526073c6dc9fb663e998 | 53721753e309adca83234da805ab735bfe32fa36 | /web3/hw/app.py | 84cdf2bf93b1ba0237da597e5b0e1e47140a29f4 | [] | no_license | NguyenNamDan/nguyennamdan-webmodule-c4e23 | eaea3fe71ca9401985fd80a803e9c58b36a15fb9 | 32cf2a454c341a1757b976888a102dac02b5d4ca | refs/heads/master | 2020-04-07T04:22:29.238100 | 2018-12-17T14:59:18 | 2018-12-17T14:59:18 | 158,052,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | from flask import Flask, render_template, request
import mlab
from addbike import AddBike
mlab.connect()
app = Flask(__name__)
@app.route("/new_bike", methods= ["GET", "POST"])
def new_bike():
if request.method == "GET":
return render_template("ex1_addBike.html")
elif request.method == "POST":
# Save database
form = request.form
m = form["Model"]
daily = form["DailyFee"]
img = form["Image"]
y = form["Year"]
exist = AddBike.objects(Model= m, Image= img, Year= y).first()
if exist != None:
return "exist :3 "
else:
i = AddBike(Model= m, DailyFee= daily, Image= img, Year= y)
i.save()
return "ok!! :v "
if __name__ == "__main__":
app.run(debug= True) | [
"admin@Silver.local"
] | admin@Silver.local |
a6f1df8c8c3dd73bd2c937dd3e0186367e7ecc93 | 19980ea46bb169873f01aaa1e89fc0d8ba488030 | /samples/sampleopenflow/demos/demo11.py | b7138557fd71f15abbc7ceeaa7af146675c781a3 | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gaberger/pybvc | b0e4c7fd280d87330fe15e18eecca94f089bf1a4 | bf546c4595a1a6282fca084865c5a0e69194030f | refs/heads/master | 2023-01-13T21:19:01.625744 | 2015-12-01T16:01:00 | 2015-12-01T16:01:00 | 42,198,126 | 0 | 0 | BSD-3-Clause | 2022-12-26T20:18:11 | 2015-09-09T18:53:12 | Python | UTF-8 | Python | false | false | 8,091 | py | #!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_ICMP,
IP_DSCP_CS2,
IP_ECN_CE)
def of_demo_11():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 11 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# ICMPv4 Type
# ICMPv4 Code
# IP DSCP
# IP ECN
# Input Port
# NOTE: Ethernet type MUST be 2048 (0x800) -> IPv4 protocol
# IP Protocol Type MUST be 1 -> ICMP
eth_type = ETH_TYPE_IPv4
eth_src = "00:00:00:11:23:ae"
eth_dst = "00:ff:20:01:1a:3d"
ipv4_src = "17.1.2.3/8"
ipv4_dst = "172.168.5.6/18"
ip_proto = IP_PROTO_ICMP
ip_dscp = IP_DSCP_CS2 # 'Class Selector' = 'Immediate'
ip_ecn = IP_ECN_CE # Congestion Encountered
icmpv4_type = 6 # Alternate Host Address
icmpv4_code = 3 # Alternate Address for Host
input_port = 10
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'"
% (ctrlIpAddr, nodeName))
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" Ethernet Source Address (%s)\n"
" Ethernet Destination Address (%s)\n"
" IPv4 Source Address (%s)\n"
" IPv4 Destination Address (%s)\n"
" IP Protocol Number (%s)\n"
" IP DSCP (%s)\n"
" IP ECN (%s)\n"
" ICMPv4 Type (%s)\n"
" ICMPv4 Code (%s)\n"
" Input Port (%s)"
% (hex(eth_type), eth_src,
eth_dst, ipv4_src, ipv4_dst,
ip_proto, ip_dscp, ip_ecn,
icmpv4_type, icmpv4_code,
input_port))
print (" Action: Output (NORMAL)")
time.sleep(rundelay)
flow_entry = FlowEntry()
table_id = 0
flow_entry.set_flow_table_id(table_id)
flow_id = 18
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_hard_timeout(0)
flow_entry.set_flow_idle_timeout(0)
flow_entry.set_flow_priority(1009)
# --- Instruction: 'Apply-actions'
# Action: 'Output' NORMAL
instruction = Instruction(instruction_order=0)
action = OutputAction(order=0, port="NORMAL")
instruction.add_apply_action(action)
flow_entry.add_instruction(instruction)
# --- Match Fields: Ethernet Type
# Ethernet Source Address
# Ethernet Destination Address
# IPv4 Source Address
# IPv4 Destination Address
# IP Protocol Number
# IP DSCP
# IP ECN
# ICMPv4 Type
# ICMPv4 Code
# Input Port
match = Match()
match.set_eth_type(eth_type)
match.set_eth_src(eth_src)
match.set_eth_dst(eth_dst)
match.set_ipv4_src(ipv4_src)
match.set_ipv4_dst(ipv4_dst)
match.set_ip_proto(ip_proto)
match.set_ip_dscp(ip_dscp)
match.set_ip_ecn(ip_ecn)
match.set_icmpv4_type(icmpv4_type)
match.set_icmpv4_code(icmpv4_code)
match.set_in_port(input_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
print json.dumps(flow, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node"
% (flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_11()
| [
"jeb@elbrys.com"
] | jeb@elbrys.com |
a779eb627d5d8dc22e9abc378292ab3974ecda66 | 92e0cc321f673675942efdbf14cb7ff91ede56f0 | /app/__init__.py | 266280b449e708353a4a8963b431ef4e477c9593 | [] | no_license | msilva1610/curso-flask01 | 150d5162c8d2e449c8607b2252a8f8d2eb0715f3 | 909edae028170e4e433829234f7b11997a651798 | refs/heads/master | 2021-06-29T22:25:30.412676 | 2019-11-30T06:00:54 | 2019-11-30T06:00:54 | 224,936,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # coding: utf-8
from flask import Flask, request, Response
from flask_sqlalchemy import SQLAlchemy
from config import config
db = SQLAlchemy()
# import routes
def create_app(config_name):
# from .routes import load
app = Flask(__name__)
app.config.from_object(config[config_name])
db.init_app(app)
from app import routes
routes.load(app)
return app
# app = bootstrap()
# app.run(debug=True, port=3000, host='0.0.0.0') | [
"Mau@MacBook-Air-de-Maurilio.local"
] | Mau@MacBook-Air-de-Maurilio.local |
410778eda359ba00d8f98afb5deb6ac84ae624c1 | 86319aad3690906f614ac1af28b8843529e9e0da | /thwackbin/__init__.py | ab95f9cc0ab6c8b46a7c0f643cb504f8c070fdcc | [] | no_license | sohgoh/thwackbin | b5828783a6179e96784bed0bdb894b179e3bea07 | ba9fedc4bcec598f367aa6d4f2567d1840c65c51 | refs/heads/master | 2021-01-21T03:14:08.261732 | 2014-04-16T03:53:51 | 2014-04-16T04:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """
thwackbin
~~~~~~~~~
Thwackbin is an HTTP request/response test service which exposes the AppThwack REST API.
This service should be used to test/validate clients which wish to consume the actual API endpoint.
"""
__name__ = 'thwackbin'
__version__ = '0.0.1'
__author__ = 'Andrew Hawker <andrew@appthwack.com>'
import flask
def create_app():
"""
Create the thwackbin WSGI application.
"""
app = flask.Flask(__name__)
#Initialize mock data.
from thwackbin import data
data.init()
#Register blueprints.
from thwackbin import appthwack
app.register_blueprint(appthwack.api)
#Patch exc handlers to always return JSON.
from thwackbin import patch
app = patch.patch_exception_handlers(app)
app.config['DOWNLOAD_FOLDER'] = data.ROOT
return app
| [
"andrew.r.hawker@gmail.com"
] | andrew.r.hawker@gmail.com |
99dd2ad93382d05efd81223bfd055ed492ed1616 | 103e45cb0d6b25d0c90e533439cd7e525f25b1a8 | /tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py | f73ac2d1a91f5a501e93c6d1a2da04f2a930a4fe | [
"Apache-2.0"
] | permissive | rushabh-v/estimator | ae9ff7c485e4b5beafe2ba6be452eeb099cd63d9 | 6915557cef8bfc86f29f87e4467d601e4553b957 | refs/heads/master | 2021-01-06T18:14:19.948301 | 2020-02-13T02:28:23 | 2020-02-13T02:28:59 | 241,434,870 | 2 | 0 | Apache-2.0 | 2020-02-18T18:20:37 | 2020-02-18T18:20:36 | null | UTF-8 | Python | false | false | 91,108 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import run_config
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring

# Names of variables created by the linear model. These must match the
# variable names the canned linear estimator writes to its checkpoint, since
# tests below hand-craft checkpoints using these names.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
OCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'

# Alias the private feature_column (v1) constructors under the public names
# used by feature_column_v2, so tests can switch between feature_column and
# feature_column_v2 by swapping the `fc_lib` module they are handed.
feature_column.numeric_column = feature_column._numeric_column
feature_column.categorical_column_with_hash_bucket = feature_column._categorical_column_with_hash_bucket  # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_list = feature_column._categorical_column_with_vocabulary_list  # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_file = feature_column._categorical_column_with_vocabulary_file  # pylint: disable=line-too-long
feature_column.embedding_column = feature_column._embedding_column
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting `expected` and `actual` are element-wise close.

  Closeness is measured relatively: |expected - actual| / |expected| < rtol.

  Args:
    expected: Tensor (or convertible) of expected values.
    actual: Tensor (or convertible) of actual values.
    rtol: Relative tolerance.
    name: Name for the op / name scope.

  Returns:
    An assert op that fails if any element's relative difference >= rtol.
  """
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    relative_diff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(
        expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    # Emitted alongside the failure message to make debugging easier.
    error_data = (
        'Condition expected =~ actual did not hold element-wise:'
        'expected = ', expected, 'actual = ', actual, 'rdiff = ',
        relative_diff, 'rtol = ', rtol,)
    return tf.compat.v1.debugging.assert_less(
        relative_diff, rtol, data=error_data, name=scope)
def save_variables_to_ckpt(model_dir):
  """Initializes all global variables and checkpoints them to `model_dir`."""
  with tf.compat.v1.Session() as sess:
    # Run global-variable initialization before saving so the checkpoint
    # contains the values the test assigned at graph-construction time.
    sess.run([tf.compat.v1.initializers.global_variables()])
    tf.compat.v1.train.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
  """Pushes a dict of feature tensors through a FIFOQueue.

  All tensors are enqueued together as one element and dequeued once, which
  decouples input parsing from the consumer of the features.

  Args:
    feature_map: Dict mapping feature names to tensors.

  Returns:
    A dict with the same keys whose values are the dequeued tensors.
  """
  keys = list(feature_map)
  tensors_to_enqueue = [feature_map[key] for key in keys]
  input_queue = tf.queue.FIFOQueue(
      capacity=100, dtypes=[t.dtype for t in tensors_to_enqueue])
  # Register a runner so the queue is fed while the session runs.
  tf.compat.v1.train.queue_runner.add_queue_runner(
      tf.compat.v1.train.queue_runner.QueueRunner(
          input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
  dequeued_tensors = input_queue.dequeue()
  # dequeue() preserves enqueue order, so keys and tensors line up.
  return dict(zip(keys, dequeued_tensors))
def sorted_key_dict(unsorted_dict):
  """Returns a copy of `unsorted_dict` whose insertion order is sorted keys."""
  return dict((key, unsorted_dict[key]) for key in sorted(unsorted_dict))
def sigmoid(x):
  """Returns the logistic function 1 / (1 + e^{-x}), element-wise for arrays."""
  neg_exp = np.exp(-1.0 * x)
  return 1 / (1 + neg_exp)
class CheckPartitionerVarHook(tf.compat.v1.train.SessionRunHook):
  """A `SessionRunHook` that verifies a variable was evenly partitioned.

  At `begin()` it re-fetches the named variable from the current variable
  scope and asserts it is a `PartitionedVariable` whose shards each hold
  `var_dim // partitions` rows.
  """

  def __init__(self, test_case, var_name, var_dim, partitions):
    self._case = test_case
    self._name = var_name
    self._dim = var_dim
    self._num_parts = partitions

  def begin(self):
    with tf.compat.v1.variable_scope(
        tf.compat.v1.get_variable_scope()) as scope:
      # Reuse so get_variable fetches the existing model variable rather
      # than creating a new one.
      scope.reuse_variables()
      weight = tf.compat.v1.get_variable(self._name, shape=(self._dim, 1))
      self._case.assertTrue(
          isinstance(weight, variables_lib.PartitionedVariable))
      rows_per_shard = self._dim // self._num_parts
      for shard in weight:
        self._case.assertEqual(rows_per_shard, shard.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
  """Tests that linear regressors honor variable partitioners.

  Designed to be mixed into a concrete `tf.test.TestCase` subclass, which
  supplies the assertion methods used by `CheckPartitionerVarHook`.
  """

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # The regressor constructor under test and the feature-column module
    # (v1 or v2 API) used to build feature columns.
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test so checkpoints do not leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Clear cached summary writers before deleting the directory they
      # may still hold open.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def testPartitioner(self):
    """An explicit partitioner splits the language weights into 4 shards."""
    x_dim = 64
    partitions = 4

    def _partitioner(shape, dtype):
      del dtype  # unused; required by Fn signature.
      # Only partition the embedding tensor.
      return [partitions, 1] if shape[0] == x_dim else [1]

    regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
            'language', hash_bucket_size=x_dim),),
        partitioner=_partitioner,
        model_dir=self._model_dir)

    def _input_fn():
      return {
          'language':
              tf.sparse.SparseTensor(
                  values=['english', 'spanish'],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }, [[10.]]

    # The hook checks during training that the weight variable is a
    # PartitionedVariable with the expected shard sizes.
    hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
                                   partitions)
    regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])

  def testDefaultPartitionerWithMultiplePsReplicas(self):
    """The default partitioner kicks in when weights exceed 64M per PS."""
    partitions = 2
    # This results in weights larger than the default partition size of 64M,
    # so partitioned weights are created (each weight uses 4 bytes).
    x_dim = 32 << 20

    class FakeRunConfig(run_config.RunConfig):
      # Pretend there are multiple parameter servers so the default
      # partitioner is engaged.

      @property
      def num_ps_replicas(self):
        return partitions

    # Mock the device setter as ps is not available on test machines.
    with tf.compat.v1.test.mock.patch.object(
        estimator,
        '_get_replica_device_setter',
        return_value=lambda _: '/cpu:0'):
      linear_regressor = self._linear_regressor_fn(
          feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
              'language', hash_bucket_size=x_dim),),
          config=FakeRunConfig(),
          model_dir=self._model_dir)

      def _input_fn():
        return {
            'language':
                tf.sparse.SparseTensor(
                    values=['english', 'spanish'],
                    indices=[[0, 0], [0, 1]],
                    dense_shape=[1, 2])
        }, [[10.]]

      hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
                                     partitions)
      linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    """Stores the regressor constructor under test and feature-column lib."""
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib
  def setUp(self):
    # Fresh model directory per test so checkpoints do not leak across tests.
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    if self._model_dir:
      # Clear cached summary writers first so none keep handles into the
      # directory being removed.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({
'age': ((1,),)
}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({
'age': ((1,), (1,))
}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with tf.Graph().as_default():
tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
tf.Variable([7.0, 8.0], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with tf.Graph().as_default():
tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
tf.Variable([5.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
self._fc_lib.numeric_column('age'),
self._fc_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([20, 40]),
'height': np.array([4, 8])
},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
  def test_evaluation_for_multiple_feature_columns_mix(self):
    """Tests evaluation when mixing both feature-column library variants."""
    with tf.Graph().as_default():
      tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
      tf.Variable([5.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    batch_size = 2
    # Deliberately mixes the module-level `feature_column` library with
    # `tf.feature_column` to check both are accepted together.
    feature_columns = [
        feature_column.numeric_column('age'),
        tf.feature_column.numeric_column('height')
    ]
    def _input_fn():
      # Builds an endlessly repeating dataset of 2-example batches.
      features_ds = tf.compat.v1.data.Dataset.from_tensor_slices({
          'age': np.array([20, 40]),
          'height': np.array([4, 8])
      })
      labels_ds = tf.compat.v1.data.Dataset.from_tensor_slices(
          np.array([[213.], [421.]]))
      return (tf.compat.v1.data.Dataset.zip(
          (features_ds, labels_ds)).batch(batch_size).repeat(None))
    est = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    eval_metrics = est.evaluate(input_fn=_input_fn, steps=1)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())
    # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
    # [213.0, 421.0], while label is [213., 421.]. Loss = 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
  """Tests `predict` of linear regressors against hand-built checkpoints."""

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # The estimator constructor and feature-column library are injected so the
    # same suite can exercise several LinearRegressor implementations.
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test so checkpoints never leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Release summary writers before removing the checkpoint directory.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('x'),),
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    # A bare list comprehension materializes the generator; wrapping it in
    # list() was redundant.
    predicted_scores = [x['predictions'] for x in predictions]
    # x * weight + bias = 2. * 10. + .2 = 20.2
    self.assertAllClose([[20.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    x_dim = 4
    feature_columns = (self._fc_lib.numeric_column('x', shape=(x_dim,)),)
    with tf.Graph().as_default():
      tf.Variable(  # shape=[x_dim, label_dimension]
          [[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
          name='linear/linear_model/x/weights')
      tf.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = [x['predictions'] for x in predictions]
    # score = x * weight + bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
                        predicted_scores)

  def testTwoFeatureColumns(self):
    """Tests predict with two feature columns."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x0/weights')
      tf.Variable([[20.]], name='linear/linear_model/x1/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('x0'),
                         self._fc_lib.numeric_column('x1')),
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={
            'x0': np.array([[2.]]),
            'x1': np.array([[3.]])
        },
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = [x['predictions'] for x in predictions]
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)

  def testTwoFeatureColumnsMix(self):
    """Tests predict with feature columns from both column libraries."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x0/weights')
      tf.Variable([[20.]], name='linear/linear_model/x1/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column.numeric_column('x0'),
                         tf.feature_column.numeric_column('x1')),
        model_dir=self._model_dir)

    def _predict_input_fn():
      return tf.compat.v1.data.Dataset.from_tensor_slices({
          'x0': np.array([[2.]]),
          'x1': np.array([[3.]])
      }).batch(1)

    predictions = linear_regressor.predict(input_fn=_predict_input_fn)
    predicted_scores = [x['predictions'] for x in predictions]
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)

  def testSparseCombiner(self):
    """Tests predict under each supported `sparse_combiner` mode."""
    w_a = 2.0
    w_b = 3.0
    w_c = 5.0
    bias = 5.0
    with tf.Graph().as_default():
      tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      # Batch of two examples: {a, c} and {b, c}.
      return tf.compat.v1.data.Dataset.from_tensors({
          'language':
              tf.sparse.SparseTensor(
                  values=['a', 'c', 'b', 'c'],
                  indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                  dense_shape=[2, 2]),
      })

    feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
        'language', vocabulary_list=['a', 'b', 'c']),)

    # Check prediction for each sparse_combiner.
    # With sparse_combiner = 'sum', we have
    # logits_1 = w_a + w_c + bias
    #          = 2.0 + 5.0 + 5.0 = 12.0
    # logits_2 = w_b + w_c + bias
    #          = 3.0 + 5.0 + 5.0 = 13.0
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = [x['predictions'] for x in predictions]
    self.assertAllClose([[12.0], [13.0]], predicted_scores)

    # With sparse_combiner = 'mean', we have
    # logits_1 = 1/2 * (w_a + w_c) + bias
    #          = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
    # logits_2 = 1/2 * (w_b + w_c) + bias
    #          = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='mean')
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = [x['predictions'] for x in predictions]
    self.assertAllClose([[8.5], [9.0]], predicted_scores)

    # With sparse_combiner = 'sqrtn', we have
    # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
    #          = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
    # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
    #          = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='sqrtn')
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = [x['predictions'] for x in predictions]
    self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
  """End-to-end train/evaluate/predict/export tests for linear regressors."""

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # Injected so the same suite can run against several implementations.
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Release summary writers before removing the checkpoint directory.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    """Runs the full train -> evaluate -> predict -> export pipeline."""
    feature_columns = [
        self._fc_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    # Membership works directly on the metrics dict; six.iterkeys was
    # unnecessary indirection.
    self.assertIn(metric_keys.MetricKeys.LOSS, scores)

    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    # Skip silently when pandas is unavailable in the environment.
    if not HAS_PANDAS:
      return

    # Pandas DataFrame naturally supports 1 dim data only.
    label_dimension = 1
    input_dimension = label_dimension
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(data)
    prediction_length = 4

    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    # Serialize each row as a tf.Example with float features 'x' and 'y'.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=datum)),
                  'y':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(
                              value=datum[:label_dimension])),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
    }

    def _train_input_fn():
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
  """Tests `train` of linear regressors via a mock optimizer and checkpoints."""

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # Injected so the same suite can run against several implementations.
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Release summary writers before removing the checkpoint directory.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _mock_optimizer(self, expected_loss=None):
    """Returns a mock optimizer whose `minimize` verifies vars and loss.

    Args:
      expected_loss: If set, `minimize` adds an assert op checking that the
        training loss is close to this value before bumping the global step.
    """
    expected_var_names = [
        '%s/part_0:0' % AGE_WEIGHT_NAME,
        '%s/part_0:0' % BIAS_NAME
    ]

    def _minimize(loss, global_step=None, var_list=None):
      trainable_vars = var_list or tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(expected_var_names,
                            [var.name for var in trainable_vars])
      # Verify loss. We can't check the value directly, so we add an assert op.
      # (assertEquals is a deprecated unittest alias; use assertEqual.)
      self.assertEqual(0, loss.shape.ndims)
      if expected_loss is None:
        if global_step is not None:
          return tf.compat.v1.assign_add(global_step, 1).op
        return tf.no_op()
      assert_loss = assert_close(
          tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
          loss,
          name='assert_loss')
      with tf.control_dependencies((assert_loss,)):
        if global_step is not None:
          return tf.compat.v1.assign_add(global_step, 1).op
        return tf.no_op()

    mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
        spec=tf.compat.v1.train.Optimizer,
        wraps=tf.compat.v1.train.Optimizer(
            use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)

    # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
    # So, return mock_optimizer itself for deepcopy.
    mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
    return mock_optimizer

  def _assert_checkpoint(self,
                         expected_global_step,
                         expected_age_weight=None,
                         expected_bias=None):
    """Checks variable shapes (and optionally values) in the checkpoint."""
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }
    # global_step is a scalar.
    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))
    self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
    if expected_age_weight is not None:
      self.assertEqual(expected_age_weight,
                       tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
    self.assertEqual([1], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       tf.train.load_variable(self._model_dir, BIAS_NAME))

  def testFromScratchWithDefaultOptimizer(self):
    """Trains a fresh regressor with the default optimizer."""
    # Create LinearRegressor.
    label = 5.
    age = 17
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir)

    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self._assert_checkpoint(num_steps)

  def testTrainWithOneDimLabel(self):
    """Trains with rank-1 labels."""
    label_dimension = 1
    batch_size = 20
    feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(200)

  def testTrainWithOneDimWeight(self):
    """Trains with rank-1 example weights via `weight_column`."""
    label_dimension = 1
    batch_size = 20
    feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        weight_column='w',
        model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_1
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(200)

  def testFromScratch(self):
    """Verifies the initial loss reported to the optimizer from zero init."""
    # Create LinearRegressor.
    label = 5.
    age = 17
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = self._mock_optimizer(expected_loss=25.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    # Weights stay zero: the mock optimizer only increments global_step.
    self._assert_checkpoint(
        expected_global_step=num_steps,
        expected_age_weight=0.,
        expected_bias=0.)

  def testFromCheckpoint(self):
    """Verifies the loss when resuming from a hand-built checkpoint."""
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = age * age_weight + bias = 17 * 10. + 5. = 175
    # loss = (logits - label)^2 = (175 - 5)^2 = 28900
    mock_optimizer = self._mock_optimizer(expected_loss=28900.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((17,),)
        }, ((5.,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)

  def testFromCheckpointMultiBatch(self):
    """Verifies the batch loss when resuming from a hand-built checkpoint."""
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = age * age_weight + bias
    # logits[0] = 17 * 10. + 5. = 175
    # logits[1] = 15 * 10. + 5. = 155
    # loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
    mock_optimizer = self._mock_optimizer(expected_loss=52004.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((17,), (15,))
        }, ((5.,), (3.,))),
        steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # The classifier constructor and feature-column library are injected so the
    # same suite can exercise several LinearClassifier implementations.
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib
  def setUp(self):
    # Fresh model directory per test so checkpoints never leak across tests.
    self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return tf.compat.v1.assign_add(global_step, 1).op
assert_loss = assert_close(
tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
loss,
name='assert_loss')
with tf.control_dependencies((assert_loss,)):
return tf.compat.v1.assign_add(global_step, 1).op
mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
spec=tf.compat.v1.train.Optimizer,
wraps=tf.compat.v1.train.Optimizer(
use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
  def _assert_checkpoint(self,
                         n_classes,
                         expected_global_step,
                         expected_age_weight=None,
                         expected_bias=None):
    """Checks shapes (and optionally values) of variables in the checkpoint.

    Args:
      n_classes: Number of classes; binary models use a single logit.
      expected_global_step: Required global_step value in the checkpoint.
      expected_age_weight: If set, exact expected age-weight values.
      expected_bias: If set, exact expected bias values.
    """
    # Binary classification uses one logit; multi-class uses one per class.
    logits_dimension = n_classes if n_classes > 2 else 1
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }
    # global_step is stored as a scalar.
    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))
    self.assertEqual([1, logits_dimension], shapes[AGE_WEIGHT_NAME])
    if expected_age_weight is not None:
      self.assertAllEqual(
          expected_age_weight,
          tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
    self.assertEqual([logits_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertAllEqual(expected_bias,
                          tf.train.load_variable(self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
  def _testTrainWithTwoDimsLabel(self, n_classes):
    """Trains with rank-2 (batch, 1) labels and rank-1 features."""
    batch_size = 20
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_2,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)
  def testBinaryClassesTrainWithTwoDimsLabel(self):
    self._testTrainWithTwoDimsLabel(n_classes=2)
  def testMultiClassesTrainWithTwoDimsLabel(self):
    self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
  def _testTrainWithTwoDimsWeight(self, n_classes):
    """Trains with rank-2 (batch, 1) example weights via `weight_column`."""
    batch_size = 20
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        weight_column='w',
        n_classes=n_classes,
        model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_2
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)
  def testBinaryClassesTrainWithTwoDimsWeight(self):
    self._testTrainWithTwoDimsWeight(n_classes=2)
  def testMultiClassesTrainWithTwoDimsWeight(self):
    self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={
'age': data_rank_1,
'w': data_rank_1
},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
  def _testFromScratch(self, n_classes):
    """Trains from zero initialization and verifies the initial loss."""
    label = 1
    age = 17
    # For binary classifier:
    # loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
    # all zero initially) and label = 1 so,
    # loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For multi class classifier:
    # loss = cross_entropy(logits, label) where logits are all 0s (weights are
    # all zero initially) and label = 1 so,
    # loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as logits are same, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
    mock_optimizer = self._mock_optimizer(
        expected_loss=(-1 * math.log(1.0 / n_classes)))
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    # Weights remain zero because the mock optimizer only bumps global_step.
    self._assert_checkpoint(
        n_classes,
        expected_global_step=num_steps,
        expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
        expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
  def testBinaryClassesFromScratch(self):
    self._testFromScratch(n_classes=2)
  def testMultiClassesFromScratch(self):
    self._testFromScratch(n_classes=4)
  def _testFromCheckpoint(self, n_classes):
    """Resumes from a hand-built checkpoint and verifies the expected loss."""
    # Create initial checkpoint.
    label = 1
    age = 17
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # For binary classifier:
    # logits = age * age_weight + bias = 17 * 2. - 35. = -1.
    # loss = sigmoid_cross_entropy(logits, label)
    # so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
    # For multi class classifier:
    # loss = cross_entropy(logits, label)
    # where logits = 17 * age_weight + bias and label = 1
    # so, loss = 1 * -log ( soft_max(logits)[1] )
    if n_classes == 2:
      expected_loss = 1.3133
    else:
      # Compute the softmax cross-entropy for the multi-class case in numpy.
      logits = age_weight * age + bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[0, label])
    mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    # Weights are unchanged: the mock optimizer only increments global_step.
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
  def testBinaryClassesFromCheckpoint(self):
    self._testFromCheckpoint(n_classes=2)
  def testMultiClassesFromCheckpoint(self):
    self._testFromCheckpoint(n_classes=4)
  def _testFromCheckpointFloatLabels(self, n_classes):
    """Tests float labels for binary classification.

    Float (soft) labels only make sense for the sigmoid head, so the test
    is a no-op for n_classes > 2. Verifies that training resumed from a
    known checkpoint produces the hand-computed weighted cross-entropy
    loss and calls the optimizer exactly once.
    """
    # Create initial checkpoint.
    if n_classes > 2:
      # Soft labels are only supported by the binary head.
      return
    label = 0.8
    age = 17
    age_weight = [[2.0]]
    bias = [-35.0]
    initial_global_step = 100
    # Save known variable values so the estimator warm-starts from them.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = age * age_weight + bias = 17 * 2. - 35. = -1.
    # loss = sigmoid_cross_entropy(logits, label)
    # => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
    mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
  def _testFromCheckpointMultiBatch(self, n_classes):
    """Same as _testFromCheckpoint but with a batch of two examples.

    Verifies that the per-example losses are summed across the batch and
    that training from the saved checkpoint invokes the mock optimizer
    once and advances the global step by num_steps.
    """
    # Create initial checkpoint.
    label = [1, 0]
    age = [17.0, 18.5]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    # Save known variable values so the estimator warm-starts from them.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # For binary classifier:
    #   logits = age * age_weight + bias
    #   logits[0] = 17 * 2. - 35. = -1.
    #   logits[1] = 18.5 * 2. - 35. = 2.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
    #       loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
    #   expected_loss = loss[0] + loss[1]
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
    #   so, loss = 1 * -log ( soft_max(logits)[label] )
    #   expected_loss = loss[0] + loss[1]
    if n_classes == 2:
      expected_loss = 1.3133 + 2.1269
    else:
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1

    # The mock optimizer asserts the loss it is asked to minimize.
    mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
  """Shared evaluation tests for LinearClassifier.

  Subclasses supply the classifier constructor and the feature-column
  library (v1 or v2) so the same hand-computed metric expectations are
  exercised against both implementations. Each test writes a checkpoint
  with known weights, evaluates, and compares the returned metrics to
  values derived by hand (binary head) or via numpy softmax (multi-class).
  """

  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_evaluation_for_simple_data(self, n_classes):
    """Evaluates a single example and checks all head metrics."""
    label = 1
    age = 1.
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
        -11.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes

    # Save known variable values so the estimator restores them.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=1)

    if n_classes == 2:
      # Binary classes: loss = sum(cross_entropy(41)) = 41.
      # (logits = 1 * -11 - 30 = -41; label is 1, so loss ~= 41.)
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: 41.,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: 41.,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
          metric_keys.MetricKeys.LABEL_MEAN: 1.,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 1.,
      }
    else:
      # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * age + bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[0, label])

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=2)

  def test_multi_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=4)

  def _test_evaluation_batch(self, n_classes):
    """Tests evaluation for batch_size==2."""
    label = [1, 0]
    age = [17., 18.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100

    # Save known variable values so the estimator restores them.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age)
        }, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., 1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
      expected_loss = 1.3133 * 2

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
          metric_keys.MetricKeys.LABEL_MEAN: 0.5,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 0.25,
      }
    else:
      # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=2)

  def test_multi_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=4)

  def _test_evaluation_weights(self, n_classes):
    """Tests evaluation with weights."""
    label = [1, 0]
    age = [17., 18.]
    weights = [1., 2.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100

    # Save known variable values so the estimator restores them.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # 'w' in the features dict supplies the per-example weight.
    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        weight_column='w',
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age),
            'w': (weights)
        }, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., 1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
      # weights = [1., 2.]
      expected_loss = 1.3133 * (1. + 2.)
      loss_mean = expected_loss / (1.0 + 2.0)
      label_mean = np.average(label, weights=weights)
      logits = [-1, 1]
      logistics = sigmoid(np.array(logits))
      predictions_mean = np.average(logistics, weights=weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
          metric_keys.MetricKeys.LABEL_MEAN: label_mean,
          metric_keys.MetricKeys.ACCURACY_BASELINE:
              (max(label_mean, 1 - label_mean)),
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 0.1668,
      }
    else:
      # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      loss_mean = np.average([expected_loss_0, expected_loss_1],
                             weights=weights)
      expected_loss = loss_mean * np.sum(weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=2)

  def test_multi_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
  """Shared prediction tests for LinearClassifier.

  Subclasses supply the classifier constructor and the feature-column
  library (v1 or v2). Each test restores known weights from a
  hand-written checkpoint and compares `predict()` output against
  expectations computed with numpy.
  """

  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    """Tests predict when all variables are one-dimensional.

    Args:
      n_classes: Number of label classes; 2 exercises the sigmoid head,
        >2 the softmax head.
      label_vocabulary: Optional list of string class labels passed to the
        classifier, or None for integer class ids.
      label_output_fn: Maps a class id to the byte string expected in the
        'classes' / 'all_classes' prediction entries.
    """
    age = 1.
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
        -11.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [10.0] if n_classes == 2 else [10.0] * n_classes

    # Save known variable values so the estimator restores them.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        label_vocabulary=label_vocabulary,
        n_classes=n_classes,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array([[age]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = list(est.predict(input_fn=predict_input_fn))

    if n_classes == 2:
      # `np.asscalar` was removed in NumPy 1.23; `ndarray.item()` is the
      # supported replacement and returns the same Python scalar.
      scalar_logits = np.reshape(np.array(age_weight) * age + bias,
                                 (1,)).item()
      # Binary head reports two-class probabilities for logits [0, z].
      two_classes_logits = [0, scalar_logits]
      two_classes_logits_exp = np.exp(two_classes_logits)
      softmax = two_classes_logits_exp / two_classes_logits_exp.sum()

      expected_predictions = {
          'class_ids': [0],
          'all_class_ids': [0, 1],
          'classes': [label_output_fn(0)],
          'all_classes': [label_output_fn(0),
                          label_output_fn(1)],
          'logistic': [sigmoid(np.array(scalar_logits))],
          'logits': [scalar_logits],
          'probabilities': softmax,
      }
    else:
      onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
      class_ids = onedim_logits.argmax()
      all_class_ids = list(range(len(onedim_logits)))
      logits_exp = np.exp(onedim_logits)
      softmax = logits_exp / logits_exp.sum()
      expected_predictions = {
          'class_ids': [class_ids],
          'all_class_ids': all_class_ids,
          'classes': [label_output_fn(class_ids)],
          'all_classes': [label_output_fn(i) for i in all_class_ids],
          'logits': onedim_logits,
          'probabilities': softmax,
      }

    self.assertEqual(1, len(predictions))
    # assertAllClose cannot handle byte type.
    self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
    expected_predictions.pop('classes')
    predictions[0].pop('classes')
    self.assertAllEqual(expected_predictions['all_classes'],
                        predictions[0]['all_classes'])
    expected_predictions.pop('all_classes')
    predictions[0].pop('all_classes')
    self.assertAllClose(
        sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0]))

  def testBinaryClassesWithoutLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testBinaryClassesWithLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())

  def testMultiClassesWithoutLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testMultiClassesWithLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())

  def testSparseCombiner(self):
    """Checks logits for each supported sparse_combiner mode."""
    w_a = 2.0
    w_b = 3.0
    w_c = 5.0
    bias = 5.0
    # Save known language weights and bias to the model directory.
    with tf.Graph().as_default():
      tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      # Two examples, each with two sparse 'language' values.
      return tf.compat.v1.data.Dataset.from_tensors({
          'language':
              tf.sparse.SparseTensor(
                  values=['a', 'c', 'b', 'c'],
                  indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                  dense_shape=[2, 2]),
      })

    feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
        'language', vocabulary_list=['a', 'b', 'c']),)

    # Check prediction for each sparse_combiner.
    # With sparse_combiner = 'sum', we have
    # logits_1 = w_a + w_c + bias
    #          = 2.0 + 5.0 + 5.0 = 12.0
    # logits_2 = w_b + w_c + bias
    #          = 3.0 + 5.0 + 5.0 = 13.0
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[12.0], [13.0]], predicted_scores)

    # With sparse_combiner = 'mean', we have
    # logits_1 = 1/2 * (w_a + w_c) + bias
    #          = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
    # logits_2 = 1/2 * (w_b + w_c) + bias
    #          = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='mean')
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[8.5], [9.0]], predicted_scores)

    # With sparse_combiner = 'sqrtn', we have
    # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
    #          = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
    # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
    #          = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='sqrtn')
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearClassifierIntegrationTest(object):
  """End-to-end train/evaluate/predict/export tests for LinearClassifier.

  Each test drives a complete flow with a different input pipeline
  (numpy_input_fn, pandas_input_fn, parse_example-based input_fn) to
  check that the estimator integrates with all of them.
  """

  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
                          predict_input_fn, input_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export with the given inputs."""
    feature_columns = [
        self._fc_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_classifier_fn(
        feature_columns=feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUTE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['classes'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, 1), predictions.shape)

    # EXPORT
    feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def _test_numpy_input_fn(self, n_classes):
    """Tests complete flow with numpy_input_fn."""
    input_dimension = 4
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    # All examples share label 1.
    target = np.array([1] * batch_size)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=2)

  def test_multi_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=4)

  def _test_pandas_input_fn(self, n_classes):
    """Tests complete flow with pandas_input_fn."""
    # Skip silently when pandas is not installed in the test environment.
    if not HAS_PANDAS:
      return

    # Pandas DataFrame natually supports 1 dim data only.
    input_dimension = 1
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    target = np.array([1, 0, 1, 0], dtype=np.int32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(target)
    prediction_length = 4

    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=2)

  def test_multi_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=4)

  def _test_input_fn_from_parse_example(self, n_classes):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size, dtype=np.int64)

    # Serialize each (x, y) pair as a tf.Example proto.
    serialized_examples = []
    for x, y in zip(data, target):
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=x)),
                  'y':
                      feature_pb2.Feature(
                          int64_list=feature_pb2.Int64List(value=[y])),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
    }

    def _train_input_fn():
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      # limit_epochs caps evaluation at a single pass over the examples.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      # Predictions take no labels; drop 'y'.
      features.pop('y')
      return features, None

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=2)

  def test_multi_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
  """Tests for the linear logit-building helpers in `linear`.

  Covers `linear_logit_fn_builder` correctness and the
  `_compute_fraction_of_zero` sparsity statistic for both the v1 and v2
  feature-column stacks.
  """

  def __init__(self, fc_lib=feature_column):
    self._fc_lib = fc_lib

  def test_basic_logit_correctness(self):
    """linear_logit_fn simply wraps feature_column_lib.linear_model."""
    age = self._fc_lib.numeric_column('age')
    with tf.Graph().as_default():
      logit_fn = linear.linear_logit_fn_builder(units=2, feature_columns=[age])
      logits = logit_fn(features={'age': [[23.], [31.]]})
      # Fetch the variables the logit fn created so we can assign to them.
      bias_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
          'linear_model/bias_weights')[0]
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Zero-initialized weights and bias produce zero logits.
        self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
        sess.run(bias_var.assign([10., 5.]))
        self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
        sess.run(age_var.assign([[2.0, 3.0]]))
        # [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
        # [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
        self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())

  def test_compute_fraction_of_zero(self):
    """Tests the calculation of sparsity."""
    # v1-only path; the v2 variant is tested separately below.
    if self._fc_lib != feature_column:
      return
    age = tf.feature_column.numeric_column('age')
    occupation = feature_column.categorical_column_with_hash_bucket(
        'occupation', hash_bucket_size=5)
    with tf.Graph().as_default():
      cols_to_vars = {}
      tf.compat.v1.feature_column.linear_model(
          features={
              'age': [[23.], [31.]],
              'occupation': [['doctor'], ['engineer']]
          },
          feature_columns=[age, occupation],
          units=3,
          cols_to_vars=cols_to_vars)
      # Exclude the bias from the sparsity computation.
      cols_to_vars.pop('bias')
      fraction_zero = linear._compute_fraction_of_zero(
          list(cols_to_vars.values()))
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Upon initialization, all variables will be zero.
        self.assertAllClose(1, fraction_zero.eval())

        sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets
        # x 3-dim output) are zero.
        self.assertAllClose(16. / 18., fraction_zero.eval())

  def test_compute_fraction_of_zero_v2(self):
    """Tests the calculation of sparsity."""
    # v2-only path; mirrors test_compute_fraction_of_zero above.
    if self._fc_lib != feature_column_v2:
      return

    age = tf.feature_column.numeric_column('age')
    occupation = tf.feature_column.categorical_column_with_hash_bucket(
        'occupation', hash_bucket_size=5)
    with tf.Graph().as_default():
      model = feature_column_v2.LinearModel(
          feature_columns=[age, occupation], units=3, name='linear_model')
      features = {
          'age': [[23.], [31.]],
          'occupation': [['doctor'], ['engineer']]
      }
      # Calling the model builds its variables.
      model(features)
      variables = model.variables
      # Exclude the bias from the sparsity computation.
      variables.remove(model.bias)
      fraction_zero = linear._compute_fraction_of_zero(variables)
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Upon initialization, all variables will be zero.
        self.assertAllClose(1, fraction_zero.eval())

        sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets
        # x 3-dim output) are zero.
        self.assertAllClose(16. / 18., fraction_zero.eval())
class BaseLinearWarmStartingTest(object):
def __init__(self,
_linear_classifier_fn,
_linear_regressor_fn,
fc_lib=feature_column):
self._linear_classifier_fn = _linear_classifier_fn
self._linear_regressor_fn = _linear_regressor_fn
self._fc_lib = fc_lib
  def setUp(self):
    """Creates the checkpoint/vocab directory and a fixed dummy input_fn."""
    # Create a directory to save our old checkpoint and vocabularies to.
    self._ckpt_and_vocab_dir = tempfile.mkdtemp()

    # Make a dummy input_fn.
    def _input_fn():
      # Two examples with numeric and categorical features; labels [0, 1].
      features = {
          'age': [[23.], [31.]],
          'age_in_years': [[23.], [31.]],
          'occupation': [['doctor'], ['consultant']]
      }
      return features, [0, 1]

    self._input_fn = _input_fn
def tearDown(self):
# Clean up checkpoint / vocab dir.
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._ckpt_and_vocab_dir)
  def test_classifier_basic_warm_starting(self):
    """Tests correctness of LinearClassifier default warm-start."""
    age = self._fc_lib.numeric_column('age')

    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearClassifier, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=linear_classifier.model_dir)
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # With lr=0.0 training cannot move the variables, so every variable in
    # the warm-started model must equal the original checkpoint's value.
    for variable_name in warm_started_linear_classifier.get_variable_names():
      self.assertAllClose(
          linear_classifier.get_variable_value(variable_name),
          warm_started_linear_classifier.get_variable_value(variable_name))
  def test_regressor_basic_warm_starting(self):
    """Tests correctness of LinearRegressor default warm-start."""
    age = self._fc_lib.numeric_column('age')

    # Create a LinearRegressor and train to save a checkpoint.
    linear_regressor = self._linear_regressor_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        optimizer='SGD')
    linear_regressor.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearRegressor, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_regressor = self._linear_regressor_fn(
        feature_columns=[age],
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=linear_regressor.model_dir)
    warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)
    # With lr=0.0 training cannot move the variables, so every variable in
    # the warm-started model must equal the original checkpoint's value.
    for variable_name in warm_started_linear_regressor.get_variable_names():
      self.assertAllClose(
          linear_regressor.get_variable_value(variable_name),
          warm_started_linear_regressor.get_variable_value(variable_name))
  def test_warm_starting_selective_variables(self):
    """Tests selecting variables to warm-start."""
    age = self._fc_lib.numeric_column('age')

    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearClassifier, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        # The provided regular expression will only warm-start the age variable
        # and not the bias.
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            vars_to_warm_start='.*(age).*'))
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # The age weight was warm-started and must match the checkpoint.
    self.assertAllClose(
        linear_classifier.get_variable_value(AGE_WEIGHT_NAME),
        warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
    # Bias should still be zero from initialization.
    self.assertAllClose(
        [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
  def test_warm_starting_with_vocab_remapping_and_partitioning(self):
    """Tests warm-starting with vocab remapping and partitioning.

    A checkpoint trained against an old occupation vocabulary is loaded into
    a model built with a new vocabulary: rows for words present in both are
    remapped to their new ids, and brand-new words fall back to the
    ``backup_initializer``.  The occupation weights are partitioned across
    two shards throughout.
    """
    vocab_list = ['doctor', 'lawyer', 'consultant']
    vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
    with open(vocab_file, 'w') as f:
      f.write('\n'.join(vocab_list))
    occupation = self._fc_lib.categorical_column_with_vocabulary_file(
        'occupation',
        vocabulary_file=vocab_file,
        vocabulary_size=len(vocab_list))
    # Create a LinearClassifier and train to save a checkpoint.
    partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=2)
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[occupation],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD',
        partitioner=partitioner)
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearClassifier, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change). Use a new FeatureColumn with a
    # different vocabulary for occupation.
    new_vocab_list = ['doctor', 'consultant', 'engineer']
    new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,
                                  'new_occupation_vocab')
    with open(new_vocab_file, 'w') as f:
      f.write('\n'.join(new_vocab_list))
    new_occupation = self._fc_lib.categorical_column_with_vocabulary_file(
        'occupation',
        vocabulary_file=new_vocab_file,
        vocabulary_size=len(new_vocab_list))
    # We can create our VocabInfo object from the new and old occupation
    # FeatureColumn's.
    occupation_vocab_info = estimator.VocabInfo(
        new_vocab=new_occupation.vocabulary_file,
        new_vocab_size=new_occupation.vocabulary_size,
        num_oov_buckets=new_occupation.num_oov_buckets,
        old_vocab=occupation.vocabulary_file,
        old_vocab_size=occupation.vocabulary_size,
        # Can't use constant_initializer with load_and_remap. In practice,
        # use a truncated normal initializer.
        backup_initializer=tf.compat.v1.initializers.random_uniform(
            minval=0.39, maxval=0.39))
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[occupation],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            var_name_to_vocab_info={
                OCCUPATION_WEIGHT_NAME: occupation_vocab_info
            },
            # Explicitly providing None here will only warm-start variables
            # referenced in var_name_to_vocab_info (the bias will not be
            # warm-started).
            vars_to_warm_start=None),
        partitioner=partitioner)
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # 'doctor' was ID-0 and still ID-0.
    self.assertAllClose(
        linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],
        warm_started_linear_classifier.get_variable_value(
            OCCUPATION_WEIGHT_NAME)[0, :])
    # 'consultant' was ID-2 and now ID-1.
    self.assertAllClose(
        linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],
        warm_started_linear_classifier.get_variable_value(
            OCCUPATION_WEIGHT_NAME)[1, :])
    # 'engineer' is a new entry and should be initialized with the
    # backup_initializer in VocabInfo.
    # (The degenerate [0.39, 0.39] uniform range makes that value exact.)
    self.assertAllClose([0.39] * 4,
                        warm_started_linear_classifier.get_variable_value(
                            OCCUPATION_WEIGHT_NAME)[2, :])
    # Bias should still be zero (from initialization logic).
    self.assertAllClose(
        [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
  def test_warm_starting_with_naming_change(self):
    """Tests warm-starting with a Tensor name remapping.

    The new model's 'age' variable is warm-started from the checkpoint's
    'age_in_years' variable via ``var_name_to_prev_var_name``.
    """
    age_in_years = self._fc_lib.numeric_column('age_in_years')
    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age_in_years],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearClassifier, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[self._fc_lib.numeric_column('age')],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        # The 'age' variable correspond to the 'age_in_years' variable in the
        # previous model.
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            var_name_to_prev_var_name={
                AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')
            }))
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # The remapped weights must equal the checkpointed 'age_in_years' values.
    self.assertAllClose(
        linear_classifier.get_variable_value(
            AGE_WEIGHT_NAME.replace('age', 'age_in_years')),
        warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
    # The bias is also warm-started (with no name remapping).
    self.assertAllClose(
        linear_classifier.get_variable_value(BIAS_NAME),
        warm_started_linear_classifier.get_variable_value(BIAS_NAME))
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
eedb775b51f7e71d1fd05e539cbedb60b1d6917c | 061fd0a6956b409d63bdca7655f6a8e5fef51b18 | /problem_75.py | a67aa2a0aed649b0bee38c37add22fb6b2604a20 | [] | no_license | bhathaway/projecteuler | bcfdaeb10dfbd13e95cbd3239d7d61851b9c42f9 | 49e704a9affb95197f640fbfcf2fd85788f258f3 | refs/heads/master | 2021-08-18T12:46:10.924743 | 2018-10-24T15:52:07 | 2018-10-24T15:52:07 | 24,650,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,714 | py | # Finding all pythagorean triples such that the sum of the sides equals a
# specific integer can be done by using Euclid's general formula.
# Given a^2 + b^2 = c^2, a = k * (m^2 - n^2), b = k * (2 * m * n), c = k * (m^2 + n^2)
# Some basic algebra shows that letting s = a + b + c leads to
# s/2k = m * (m + n).
# So it becomes clear that finding integer solutions for m and n is really a
# factoring problem. As such, I choose p = m + n as a convenience. Since "a" must be
# positive, m > n. Since n = p - m, it is true that m > p - m by substitution;
# furthermore 2m > p. Also "b" must be positive, and therefore m + n > m, or
# p > m, by substitution. The whole point of this is now clarified by saving that
# we seek s/2k = m * p, where m and p are integers and 2m > p > m.
# So, supposing we're given an integer s for which we want all pythagorean
# triples whose values sum to s. Find an algorithm for exhaustively finding
# all possible triples. There may be some value in solving the slightly easier
# case of k = 1 frist. First, it's clear that s must at least be even, so reject it
# otherwise. If we focus on "m", we could use the integer square root function to
# choose an upper and lower bound on m. We can use the constraints to not be
# overly concerned with whether the bounds exactly capture the true range of candidates.
# As such, a lower bound for m is sqrt(s/4), and an upper bound is sqrt(s/2).
# With that range in mind, it's simply a matter of incrementing the values and
# checking for divisibility.
from prime import *
import sys
# Tested
def simpleTriplesWithSum(s):
    """Yield the Pythagorean triples (a, b, c), a < b, produced directly by
    Euclid's formula with k = 1 whose perimeter a + b + c equals s.

    With a = m^2 - n^2, b = 2mn, c = m^2 + n^2, the perimeter is 2m(m + n),
    so s/2 must factor as m * p with p = m + n and m < p < 2m.
    """
    if s % 2 != 0:
        return  # the perimeter 2m(m + n) is always even
    half = s // 2
    # m lies between sqrt(s/4) and sqrt(s/2); the m < p < 2m test below
    # rejects any boundary stragglers, so inclusive endpoints are safe.
    for m in xrange(max(isqrt(s // 4), 1), isqrt(s // 2) + 1):
        p, remainder = divmod(half, m)
        if remainder != 0 or not (m < p < 2 * m):
            continue
        n = p - m
        leg_a, leg_b = m * m - n * n, 2 * m * n
        if leg_a > leg_b:
            leg_a, leg_b = leg_b, leg_a
        yield (leg_a, leg_b, m * m + n * n)
def allTriplesWithSum(s):
    """Return the set of all Pythagorean triples (primitive and scaled)
    whose side lengths sum to s."""
    found = set()
    # Every triple is d times a k=1 (Euclid-generated) triple, so scan all
    # divisors d of s and scale the base triples of perimeter s/d back up.
    for d in xrange(1, (s // 2) + 1):
        if s % d != 0:
            continue
        for a, b, c in simpleTriplesWithSum(s // d):
            found.add((a * d, b * d, c * d))
    return found
def solve(L):
    """Count the perimeters i <= L that admit exactly one Pythagorean triple.

    Only even perimeters from 12 (the smallest triple, 3-4-5) upwards are
    scanned, since every triple's perimeter 2km(m + n) is even.
    """
    count = 0
    for i in xrange(12, L+1, 2):
        # \r keeps the progress display on a single console line.
        sys.stdout.write("Reviewing sum: {}\r".format(i))
        triples = allTriplesWithSum(i)
        if len(triples) == 1:
            count += 1
            #print "{} has exactly one triple: {}".format(i, triples.pop())
    print "\nTotal count: {}".format(count)
| [
"bryce@earthmine.com"
] | bryce@earthmine.com |
0066ce0cf6dec499c4e799bd080971fd91fc4211 | 27885302b6f63a7d253399683b5b8e800558b893 | /setup.py | 166eff9f55ab83687f0839cdb650e25653183146 | [
"MIT"
] | permissive | Manuel83/cbpi-eventbus | ce4abaf2f96546c4a3654d1245beec6c04eada40 | 6b4a302bf744b2a8022e0048d45871a353953691 | refs/heads/master | 2020-04-14T20:40:39.099942 | 2019-01-04T13:48:11 | 2019-01-04T13:48:11 | 164,102,868 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | from setuptools import setup, find_packages
# Minimal setuptools packaging metadata for the CraftBeerPi event bus.
setup(name='cbpi-eventbus',
      version='0.0.3',
      description='CraftBeerPi EventBus',
      author='Manuel Fritsch',
      author_email='manuel@craftbeerpi.com',
      url='https://github.com/Manuel83/cbpi-eventbus',
      packages=['cbpi_eventbus'],
      )
"cUrrent2015"
] | cUrrent2015 |
9d005b3da3b868f15c26ab7366dadbbf3cccbac1 | db6a9dcb2340ce7b54d064ff6ecba2dc9de0de1d | /2/10.py | 1e5261ddbb84c33d7be5675edafc82270438a79e | [] | no_license | tatuu/nlp100 | 330dfa577074ef54afa7044770d2ed914207a782 | 477fdc3a48be4f234efdab1e8b4b5b028d3ecfab | refs/heads/master | 2021-04-15T04:26:39.437169 | 2018-05-13T03:47:07 | 2018-05-13T03:47:07 | 126,504,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | import os
# NOTE(review): ``__name__`` is just the string "__main__" when run as a
# script, so abspath(__name__) resolves against the current working
# directory; ``__file__`` was probably intended -- confirm before changing.
name = os.path.dirname(os.path.abspath(__name__))
# Build the path to the data file; normpath collapses the leading "./".
joined_path = os.path.join(name, './nlp100/2/hightemp.txt')
path = os.path.normpath(joined_path)
# utf-8_sig transparently strips a UTF-8 BOM if the file starts with one.
txt = open(path, encoding="utf-8_sig")
lines = txt.readlines()
txt.close()
# Report the number of lines in the file.
print("num:" + str(len(lines)))
| [
"tatuutatuu0906@gmail.com"
] | tatuutatuu0906@gmail.com |
9960074f78a8ff9d0d36b41eb50fb4ad3968e291 | f0adf5afb93b7f0a67802e876a02e898cd92a172 | /Tencent/GNN/metapath2vec/Generate_metapaths.py | 18ff11473dcf9de3cb3004299cde11057d87f76b | [
"Apache-2.0"
] | permissive | orange-eng/internship | 9a2f746b3d50673038481392100d375f6eec82d3 | c8c566df453d3a4bdf692338f74916ae15792fa1 | refs/heads/main | 2023-07-18T11:46:36.659858 | 2021-08-31T09:39:10 | 2021-08-31T09:39:10 | 358,230,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | import networkx as nx
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch import GraphConv #内置的GCNlayer
import dgl
import matplotlib.pyplot as plt
import random
import time
import tqdm
import sys
import os
def construct_graph():
    """Build a user-item bipartite heterograph from the JData CSV files.

    Returns:
        hg: dgl heterograph with canonical edge types
            ('user', 'ui', 'item') and ('item', 'iu', 'user').
        user_index_id_map: dict mapping user node index -> original user_id.
        item_index_id_map: dict mapping item node index -> original sku_id.
    """
    file_user = './data/user_features.csv'
    file_item = './data/item_features.csv'
    file_edge = './data/JData_Action_201602.csv'
    f_user = pd.read_csv(file_user)
    f_item = pd.read_csv(file_item)
    f_edge = pd.read_csv(file_edge)
    # Down-sample the interaction log to keep the toy graph small.
    f_edge = f_edge.sample(10000)
    users = set()
    items = set()
    for index, row in f_edge.iterrows():
        users.add(row['user_id'])
        items.add(row['sku_id'])
    user_ids_index_map = {x: i for i, x in enumerate(users)}  # user id -> node index
    item_ids_index_map = {x: i for i, x in enumerate(items)}  # item id -> node index
    user_index_id_map = {i: x for i, x in enumerate(users)}  # node index -> user id
    item_index_id_map = {i: x for i, x in enumerate(items)}  # node index -> item id
    user_item_src = []
    user_item_dst = []
    for index, row in f_edge.iterrows():
        user_item_src.append(user_ids_index_map.get(row['user_id']))  # the user's node index
        user_item_dst.append(item_ids_index_map.get(row['sku_id']))  # the item's node index
    # Build the heterograph.  Canonical etypes are (src_type, edge_type,
    # dst_type); the 'ui'/'iu' edge types must match the meta_path walked in
    # main().  This mirrors the legacy dgl.bipartite construction kept below
    # and fixes the previous ('user', 'item', 'user') key, which declared a
    # user->user relation named 'item' that no 'ui'/'iu' metapath could use.
    '''
    ui = dgl.bipartite((user_item_src, user_item_dst), 'user', 'ui', 'item') # 构建异构图; bipartite
    iu = dgl.bipartite((user_item_dst, user_item_src), 'item', 'iu', 'user')
    hg = dgl.hetero_from_relations([ui, iu])
    '''
    data_dict = {
        ('user', 'ui', 'item'): (torch.tensor(user_item_src), torch.tensor(user_item_dst)),
        ('item', 'iu', 'user'): (torch.tensor(user_item_dst), torch.tensor(user_item_src)),
    }
    hg = dgl.heterograph(data_dict)
    return hg, user_index_id_map, item_index_id_map
def parse_trace(trace, user_index_id_map, item_index_id_map):
    """Convert a sampled walk of node indices into a comma-separated string
    of original ids, alternating user/item lookups (even positions are
    users, odd positions are items)."""
    return ','.join(
        (user_index_id_map if pos % 2 == 0 else item_index_id_map)[trace[pos]]
        for pos in range(trace.size))
def main():
    """Sample one metapath random walk per user node and write each walk
    (comma-separated original ids) as one line of ./output/output_path.txt."""
    hg, user_index_id_map, item_index_id_map = construct_graph()
    # Walks alternate user -> item -> user -> ..., starting at a user.
    meta_path = ['ui', 'iu', 'ui', 'iu', 'ui', 'iu']
    num_walks_per_node = 1
    # Context manager guarantees the output file is closed even on error.
    with open("./output/output_path.txt", "w") as f:
        for user_idx in tqdm.trange(hg.number_of_nodes('user')):  # metapaths starting at each user
            traces = dgl.contrib.sampling.metapath_random_walk(
                hg=hg, etypes=meta_path, seeds=[user_idx,], num_traces=num_walks_per_node)
            # (A stray no-op `dgl.sampling.random_walk` attribute access was
            # removed here: it did nothing and could raise AttributeError on
            # dgl builds without that module.)
            tr = traces[0][0].numpy()
            # The sampled trace excludes the seed; prepend it so the walk
            # starts at the user itself.
            tr = np.insert(tr, 0, user_idx)
            res = parse_trace(tr, user_index_id_map, item_index_id_map)
            f.write(res + '\n')
# Standard script entry point: only run the sampler when executed directly.
if __name__=='__main__':
    main()
"972353371@qq.com"
] | 972353371@qq.com |
0a50720d2bd2363f7df7bbead08912e451d0fc2f | 2eb63fb61cb36f5999f9279c12289e40efd80e86 | /Perceptron/Perceptron_Functional.py | 9f2b03066c58a405f4d3af14b1192e197e8e9cec | [] | no_license | RiverTamDance/Machine-Learning | 8a59fef0b231cbb79062e5165ac9c0b3bf102d6b | 7bbc67cf28d8d4b7da82c4ad2b9ae9843d6b1c72 | refs/heads/master | 2023-07-26T07:57:51.536427 | 2021-09-02T18:23:56 | 2021-09-02T18:23:56 | 402,519,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | from operator import mul
from functools import partial
from itertools import accumulate, cycle, islice
# Training samples: ([x1, x2], label) pairs for a small linearly separable set.
data = [([1,1],0), ([4,4],1), ([5,5],1), ([2,2],0), ([2,6],0), ([3,5],0)]
#prepends 1 to each variable-list, which is a dummy value to allow the threshold calculations to work.
def data_prepend_one(data):
    """Return a copy of *data* where each feature list gains a leading 1,
    the dummy bias input that lets the threshold live inside the weights."""
    return [([1] + features, label) for features, label in data]
def data_pipeline(data):
    """Apply all preprocessing steps (currently just the bias-column prepend)."""
    return data_prepend_one(data)
#this calculates our predicted value for y, while the true value is d=x[1]
def f(w, x):
    """Threshold activation: predicted label is 1 when w . x > 0, else 0.
    (The true label d lives at x[1] in the data tuples; this only sees the
    feature vector.)"""
    dot = sum(wi * xi for wi, xi in zip(w, x))
    return 1 if dot > 0 else 0
#this gives data the form (x,d,y)
def yvals(w, data):
    """Attach the model's prediction to every sample, yielding (x, d, y) rows."""
    return [(features, label, f(w, features)) for features, label in data]
def step2a(w, data):
    """Perceptron step 2a: score the whole data set under the current weights."""
    return yvals(w, data)
def weight_math(wi_1, xi, d, y, r):
    """Perceptron update for one weight component: shift wi_1 by the learning
    rate times the prediction error (d - y) times the matching input xi."""
    correction = r * (d - y) * xi
    return wi_1 + correction
def step2b(w, a, r):
    """Perceptron step 2b: update every weight for one scored sample
    a = (x, d, y), pairing weights with inputs positionally."""
    x, d, y = a
    return [weight_math(wi, xi, d=d, y=y, r=r) for wi, xi in zip(w, x)]
###################3
""" fundamentally, I need to deal with 2 different timings: the ys get updated after n data points have been consumed, whereas
the ws get updated after every datapoint. hmm i wonder if I should try a recursive approach.
"""
def perceptron(r, data, w=None):
    """Run the perceptron learning rule recursively until the data is fit.

    Args:
        r: learning rate.
        data: list of (x, d) pairs, where x already carries the leading
            bias input of 1 and d is the 0/1 target label.
        w: current weight vector; defaults to all zeros on the first call.

    Returns:
        The list of intermediate weight vectors visited before convergence
        (empty when the current w already classifies every sample correctly).
    """
    if w is None:
        w = [0] * (len(data[0][0]))
    data_y = step2a(w, data)
    #This transposes my data with the predicted ys, so that we have a row of y values and a row of d values, which are then
    # very simple to check for equality. really it is zip(*lst) that does all the work, the rest is just converting back into
    # list form.
    t_data_y = list(map(list, zip(*data_y)))
    #if we find a w i.e. a hyperplane that fits our data we stop.
    if t_data_y[-1] == t_data_y[-2]:
        return([])
    else:
        # One full pass: fold step2b over every sample (accumulate keeps every
        # intermediate weight vector; only the last one, [-1], is used),
        # rounding to one decimal so the printed trace stays readable.
        w = [round(i,1) for i in list(accumulate(data_y, partial(step2b, r=r), initial = w))[-1]]
        return([w] + perceptron(r, data, w))
# Preprocess the raw samples (prepend the bias input) and train with a
# learning rate of 0.6, printing the sequence of weight vectors visited.
data = data_pipeline(data)
print(perceptron(0.6, data))
""" mismatch = True
while mismatch == True:
w = list(accumulate(data_y, partial(step2b, r=r), initial = w))[-1]
data_y = data_prep(w, data)
mismatch = ismatch(data_y)
print(w) """
#w = accumulate(datan, partial(step2b, r=r), initial = w)
#print(list(w))
| [
"Taylordrichards@outlook.com"
] | Taylordrichards@outlook.com |
57b6a2b3115560ab7402abc10bc2adc57b9103be | 61bf5b4f24c55f2a7114cebd347426ec7de2ec16 | /models/LDF.py | fbebc7f6096e8885d6015c983e7ce67b089258a0 | [] | no_license | rainofmine/Fine_Grained_ZSL | 6f3b411e36f97f6e42ba9bac3ee5dee0016ee3ef | 8312006d4c5572c9058d4f293ddfbc9738bee191 | refs/heads/master | 2020-04-05T21:41:19.030134 | 2018-11-13T03:26:24 | 2018-11-13T03:26:24 | 157,229,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # -*- coding: utf-8 -*-
# Tianchi competition:zero-shot learning competition
# Team: AILAB-ZJU
# Code function:LDF baseline model
# Author: Yinda XU
import torch
from torch import Tensor
from torch import nn
from .resnet_mod import resnet_mod56
class LDF_baseline(nn.Module):
    """LDF baseline: a modified ResNet-56 predicts a per-image attribute
    vector, which is scored against every class's attribute vector by a
    matrix product."""
    def __init__(self, arr_ClassAttr):
        """arr_ClassAttr: (num_classes, dim_ClassAttr) class-attribute matrix."""
        super(LDF_baseline, self).__init__()
        # num_classes is unpacked for clarity; only dim_ClassAttr is used
        # (it sizes the backbone's output head).
        num_classes, dim_ClassAttr = arr_ClassAttr.shape
        self.resnet_mod = resnet_mod56(num_classes=dim_ClassAttr)
        # Transposed (dim_ClassAttr, num_classes) attribute matrix on the GPU.
        # NOTE(review): stored as a plain attribute, not a registered buffer,
        # so it will not follow .to()/.cpu() moves -- confirm this is intended.
        self.ts_ClassAttr_t = Tensor(arr_ClassAttr).transpose(0, 1).cuda()
    def forward(self, X):
        # Predicted attribute vector per image...
        X = self.resnet_mod(X)
        # ...dotted against each class's attributes -> per-class scores.
        ret = torch.matmul(X, self.ts_ClassAttr_t)
        return ret
| [
"1139196922@qq.com"
] | 1139196922@qq.com |
218203466c2363759af164801c8ec1b799146725 | 20c7261a2f47afdc6d3684bba6785805f327829b | /paciente.py | 594d0ed711c627aef23b6105832edcf45442e1c1 | [] | no_license | danielalopezr/proyecto-final | 60f75e202e93acfa2b26a060d623599c9944b8d2 | f01a5b7d58c02c36d9d51a88132788a73d3947f8 | refs/heads/master | 2022-11-25T08:12:51.497495 | 2020-07-31T03:39:00 | 2020-07-31T03:39:00 | 283,931,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | class Paciente(object):
nombre = ""
apellido = ""
fiebre = 0
resp = 0
tos = 0
dolor = 0
estado = 0
    def __init__(self, nombre, apellido, fiebre, resp, tos, dolor, estado):
        """Store the patient's name/surname, symptom readings (fever,
        breathing, cough, pain) and the state code."""
        self.nombre = nombre
        self.apellido = apellido
        self.fiebre = fiebre
        self.resp = resp
        self.tos = tos
        self.dolor = dolor
        self.estado = estado
    def write(self):
        """Append this patient as one '-'-separated record (on a new line)
        to pacientes.txt in the current working directory."""
        with open('pacientes.txt', 'a') as archivo:
            linea = "\n{0}-{1}-{2}-{3}-{4}-{5}-{6}".format(self.nombre, self.apellido, self.fiebre, self.resp, self.tos, self.dolor, self.estado)
            archivo.write(linea)
| [
"noreply@github.com"
] | noreply@github.com |
b822c11f9e238482be6770487f8eca406f7a35c5 | c409d21254bc31ce11f1e3df94b4e8c58aae587a | /compilr/views.py | efb751c7e24ee7dff741c301119eb7f1f07cebb9 | [] | no_license | shivamx/codebaker | 4715b63b0fc59e672eabcb55b68bfd63b4f973c6 | 4f74d56424f1932d6efa6ee5666aee0ffb31f39a | refs/heads/master | 2021-01-10T03:45:38.966050 | 2015-11-22T08:56:20 | 2015-11-22T08:56:20 | 45,942,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | from django.shortcuts import render
from django.http import HttpResponse
from .forms import NameForm
import json
import requests
def index(request):
    """Online-compiler view: on POST, run the submitted source through the
    HackerEarth v3 code-run API and re-render the page with the results;
    on GET just render the empty editor page."""
    if request.method == 'POST':
        lang = request.POST['lang']
        source = request.POST['source']
        iinput = request.POST['iinput']
        RUN_URL = u'https://api.hackerearth.com/v3/code/run/'
        # SECURITY NOTE(review): API client secret hard-coded in source;
        # move it to settings/environment and rotate the key.
        CLIENT_SECRET = '496c2cb7d30d44718e6ecf2b096d02f62112667f'
        #source = "print int(raw_input())"
        data = {
            'client_secret': CLIENT_SECRET,
            'async': 0,  # run synchronously so the response carries results
            'source': source,
            'input': iinput,
            'lang': lang,
            'time_limit': 5,
            'memory_limit': 262144,
        }
        r = requests.post(RUN_URL, data=data).json()
        status = r['run_status']['status']
        ooutput = r['run_status']['output']
        time_used = r['run_status']['time_used']
        status_detail = r['run_status']['status_detail']
        mem_used = r['run_status']['memory_used']
        compile_status = r['compile_status']  # NOTE(review): fetched but never passed to the template
        output_box = 1  # flag telling the template to show the output panel
        #print r.json()
        #return HttpResponse( r.json() )
        return render(request, 'compilr/index.html',
        {'status':status, 'ooutput':ooutput, 'time_used':time_used, 'status_detail':status_detail, 'mem_used':mem_used, 'iinput':iinput, 'output_box':output_box})
    return render(request, 'compilr/index.html')
# Create your views here. | [
"shivam@skillwiz.com"
] | shivam@skillwiz.com |
e31a7060d75486ec7fd9ef972bacfc4b74111180 | b4f66ebb5084efa6839771b62a1034a82094df6e | /setup.py | d1770422892a97a4591b7399521fb5a79403887d | [] | no_license | mhfowler/howdoispeak | 06f49dab64f62dea727a429557887742d1509265 | 110287dba64ae308943f431f628e528d7c941748 | refs/heads/master | 2016-09-05T14:04:48.605245 | 2015-01-17T23:27:15 | 2015-01-17T23:27:15 | 18,955,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
# py2app bundle configuration (generated by py2applet, lightly annotated).
APP = ['munging/iphone_backup_upload.py']  # the app's entry-point script
DATA_FILES = ["munging/secrets.json"]  # bundled alongside the app
OPTIONS = {
    'argv_emulation': True,
    'iconfile':'green_circles.icns',
}
setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
    py_modules=["munging.common"]
)
| [
"max_fowler@brown.edu"
] | max_fowler@brown.edu |
345780a54096078a2aaac59516c0760d2d764db5 | 80f3b1be54b106ea7158a7bc2506d4b9fad891f0 | /core/run_ctest/run_test_utils.py | 12dfa13cfc0a641cbae1dc5be33080c617d9932c | [
"MIT"
] | permissive | maleehamasood/openctest | e33121eb2ece613257ae344d4376564df361a008 | bbb88902d3f901fc88dd1a40adcbc046184a4a44 | refs/heads/main | 2023-06-05T12:36:00.561462 | 2021-06-30T12:52:18 | 2021-06-30T12:52:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,798 | py | import re, sys
sys.path.append("..")
from ctest_const import *
from program_input import p_input
maven_args = p_input["maven_args"]
use_surefire = p_input["use_surefire"]
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
class TestResult:
    """Outcome of one test-running round.

    Attributes:
        ran_tests_and_time: set of the tests that ran (with timing info).
        failed_tests: set of the tests that failed.
    """
    def __init__(self, ran_tests_and_time=None, failed_tests=None):
        # Use None sentinels instead of mutable default arguments: the old
        # `=set()` defaults were shared by every TestResult constructed
        # without arguments, so one result's mutations leaked into others.
        self.failed_tests = set() if failed_tests is None else failed_tests
        self.ran_tests_and_time = (
            set() if ran_tests_and_time is None else ran_tests_and_time)
def maven_cmd(test, add_time=False):
    """Build the mvn command line that runs *test*, optionally prefixed with
    `time` to measure wall-clock duration; logs and returns the argv list."""
    # surefire:test reuses the test build from the last compilation; a
    # modified test must instead be rerun via plain `mvn test`.
    goal = "surefire:test" if use_surefire else "test"
    cmd = ["mvn", goal, "-Dtest={}".format(test)] + maven_args
    if add_time:
        cmd.insert(0, "time")
    print(">>>>[ctest_core] command: " + " ".join(cmd))
    return cmd
def strip_ansi(s):
    """Return *s* with ANSI escape sequences (colors, cursor moves) removed,
    using the module-level compiled ``ansi_escape`` pattern."""
    return ansi_escape.sub('', s)
def join_test_string(tests):
    """Serialize 'Cls#method' test ids into surefire's -Dtest syntax:
    "Cls#m1+m2,OtherCls#m3," (each class chunk keeps a trailing comma)."""
    chunks = []
    for clsname, methods in group_test_by_cls(tests).items():
        chunks.append("{}#{},".format(clsname, "+".join(methods)))
    return "".join(chunks)
def group_test_by_cls(tests):
    """Group 'Cls#method' test ids into a dict {Cls: set(methods)}."""
    grouped = {}
    for test_id in tests:
        clsname, method = test_id.split("#")
        grouped.setdefault(clsname, set()).add(method)
    return grouped
def reverse_map(map):
    """Invert a {param: tests} mapping into {test: set(params)}."""
    inverted = {}
    for param, tests in map.items():
        for test in tests:
            inverted.setdefault(test, set()).add(param)
    return inverted
def encode_signature(params, tested_params):
    """Encode coverage of *params* as a '0'/'1' bitmask string: character i
    is '1' iff params[i] is in tested_params."""
    signature = "".join(
        "1" if param in tested_params else "0" for param in params)
    assert len(signature) == len(params)
    return signature
def decode_signature(params, signature):
    """Inverse of encode_signature: recover the set of covered params from
    the '0'/'1' bitmask string."""
    assert len(signature) == len(params)
    return {param for param, bit in zip(params, signature) if bit == "1"}
def split_tests(associated_test_map):
    """split test to rule out value assumption interference

    Partitions tests by *which* subset of parameters they exercise: tests
    covering the exact same parameter subset share one coverage signature
    (a '0'/'1' bitmask over the sorted parameter list).

    Args:
        associated_test_map: dict mapping param -> iterable of test names.

    Returns:
        A list of (tested_params, tests) pairs, one per distinct signature.
    """
    reversed_map = reverse_map(associated_test_map)
    params = sorted(list(associated_test_map.keys()))
    group_map = {}
    # Bucket the tests by their coverage signature.
    for test in reversed_map.keys():
        signature = encode_signature(params, reversed_map[test])
        if signature not in group_map.keys():
            group_map[signature] = set()
        group_map[signature].add(test)
    # Replace each bucket with (decoded param set, tests in that bucket).
    for sig in group_map.keys():
        tested_params = decode_signature(params, sig)
        group_map[sig] = (tested_params, group_map[sig])
    return list(group_map.values())
| [
"samcheng@ucdavis.edu"
] | samcheng@ucdavis.edu |
c2786c1ec09a518cd998b9512ec4a0142ff1e4ce | f735a0265dbad9eaf3c5ce791273c567ad2907a2 | /example/ui/dw_widgets_pyside_ui.py | cc46c19375c7ad94236f94848a9e737cbc94e205 | [
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-3.0"
] | permissive | RicBent/QDarkStyleSheet | cd10d57bf7bbbc22bf3f1dde5f736df26993143a | a085ecbc79d4502afc0c68ffb3bfc702dcf4e65b | refs/heads/master | 2020-04-04T16:19:17.442084 | 2018-11-01T19:34:29 | 2018-11-01T19:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,616 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dw_widgets.ui'
#
# Created: Thu Nov 1 16:06:05 2018
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_DockWidget(object):
    def setupUi(self, DockWidget):
        """Build the dock widget's contents: a grid of example list, tree
        and table widgets, each in an enabled and a disabled variant.

        Generated by pyside-uic from dw_widgets.ui -- regenerate instead of
        hand-editing (see the WARNING in this file's header).
        """
        DockWidget.setObjectName("DockWidget")
        DockWidget.resize(269, 306)
        self.dockWidgetContents = QtGui.QWidget()
        self.dockWidgetContents.setObjectName("dockWidgetContents")
        self.gridLayout = QtGui.QGridLayout(self.dockWidgetContents)
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: bold column headers (texts set in retranslateUi).
        self.label_81 = QtGui.QLabel(self.dockWidgetContents)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_81.setFont(font)
        self.label_81.setObjectName("label_81")
        self.gridLayout.addWidget(self.label_81, 0, 1, 1, 1)
        self.label_82 = QtGui.QLabel(self.dockWidgetContents)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_82.setFont(font)
        self.label_82.setObjectName("label_82")
        self.gridLayout.addWidget(self.label_82, 0, 2, 1, 1)
        # Row 1: QListWidget, enabled and disabled variants.
        self.label_56 = QtGui.QLabel(self.dockWidgetContents)
        self.label_56.setMinimumSize(QtCore.QSize(0, 0))
        self.label_56.setMaximumSize(QtCore.QSize(16777215, 16777215))
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_56.setFont(font)
        self.label_56.setObjectName("label_56")
        self.gridLayout.addWidget(self.label_56, 1, 0, 1, 1)
        self.listWidget = QtGui.QListWidget(self.dockWidgetContents)
        self.listWidget.setMinimumSize(QtCore.QSize(0, 0))
        self.listWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.listWidget.setObjectName("listWidget")
        QtGui.QListWidgetItem(self.listWidget)
        QtGui.QListWidgetItem(self.listWidget)
        QtGui.QListWidgetItem(self.listWidget)
        QtGui.QListWidgetItem(self.listWidget)
        self.gridLayout.addWidget(self.listWidget, 1, 1, 1, 1)
        self.listWidgetDis = QtGui.QListWidget(self.dockWidgetContents)
        self.listWidgetDis.setEnabled(False)
        self.listWidgetDis.setObjectName("listWidgetDis")
        QtGui.QListWidgetItem(self.listWidgetDis)
        QtGui.QListWidgetItem(self.listWidgetDis)
        QtGui.QListWidgetItem(self.listWidgetDis)
        QtGui.QListWidgetItem(self.listWidgetDis)
        self.gridLayout.addWidget(self.listWidgetDis, 1, 2, 1, 1)
        # Row 2: QTreeWidget, enabled and disabled variants.
        self.label_57 = QtGui.QLabel(self.dockWidgetContents)
        self.label_57.setMinimumSize(QtCore.QSize(0, 0))
        self.label_57.setMaximumSize(QtCore.QSize(16777215, 16777215))
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_57.setFont(font)
        self.label_57.setObjectName("label_57")
        self.gridLayout.addWidget(self.label_57, 2, 0, 1, 1)
        self.treeWidget = QtGui.QTreeWidget(self.dockWidgetContents)
        self.treeWidget.setMinimumSize(QtCore.QSize(0, 0))
        self.treeWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.treeWidget.setObjectName("treeWidget")
        item_0 = QtGui.QTreeWidgetItem(self.treeWidget)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_2 = QtGui.QTreeWidgetItem(item_1)
        item_0 = QtGui.QTreeWidgetItem(self.treeWidget)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        self.gridLayout.addWidget(self.treeWidget, 2, 1, 1, 1)
        self.treeWidgetDis = QtGui.QTreeWidget(self.dockWidgetContents)
        self.treeWidgetDis.setEnabled(False)
        self.treeWidgetDis.setObjectName("treeWidgetDis")
        item_0 = QtGui.QTreeWidgetItem(self.treeWidgetDis)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        item_2 = QtGui.QTreeWidgetItem(item_1)
        item_0 = QtGui.QTreeWidgetItem(self.treeWidgetDis)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        self.gridLayout.addWidget(self.treeWidgetDis, 2, 2, 1, 1)
        # Row 3: 3x2 QTableWidget, enabled and disabled variants.
        self.label_58 = QtGui.QLabel(self.dockWidgetContents)
        self.label_58.setMinimumSize(QtCore.QSize(0, 0))
        self.label_58.setMaximumSize(QtCore.QSize(16777215, 16777215))
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_58.setFont(font)
        self.label_58.setObjectName("label_58")
        self.gridLayout.addWidget(self.label_58, 3, 0, 1, 1)
        self.tableWidget = QtGui.QTableWidget(self.dockWidgetContents)
        self.tableWidget.setMinimumSize(QtCore.QSize(0, 0))
        self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(3)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(0, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(0, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(1, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(1, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(2, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidget.setItem(2, 1, item)
        self.gridLayout.addWidget(self.tableWidget, 3, 1, 1, 1)
        self.tableWidgetDis = QtGui.QTableWidget(self.dockWidgetContents)
        self.tableWidgetDis.setEnabled(False)
        self.tableWidgetDis.setObjectName("tableWidgetDis")
        self.tableWidgetDis.setColumnCount(2)
        self.tableWidgetDis.setRowCount(3)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setVerticalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setVerticalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setVerticalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setItem(0, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setItem(0, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setItem(1, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setItem(1, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setItem(2, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tableWidgetDis.setItem(2, 1, item)
        self.gridLayout.addWidget(self.tableWidgetDis, 3, 2, 1, 1)
        DockWidget.setWidget(self.dockWidgetContents)
        self.retranslateUi(DockWidget)
        QtCore.QMetaObject.connectSlotsByName(DockWidget)
def retranslateUi(self, DockWidget):
    """Install every translatable string on the dock widget's children."""
    def _t(text):
        # All strings in this form share the "DockWidget" context and the
        # UTF-8 flag required by the old-style PyQt4 translate API.
        return QtGui.QApplication.translate(
            "DockWidget", text, None, QtGui.QApplication.UnicodeUTF8)

    def _install_hints(widget):
        # The demo widgets all carry the same three helper texts.
        widget.setToolTip(_t("This is a tool tip"))
        widget.setStatusTip(_t("This is a status tip"))
        widget.setWhatsThis(_t("This is \"what is this\""))

    def _fill_list(widget):
        # Suspend sorting while renaming items, then restore the old state.
        previously_sorting = widget.isSortingEnabled()
        widget.setSortingEnabled(False)
        for row in range(4):
            widget.item(row).setText(_t("New Item"))
        widget.setSortingEnabled(previously_sorting)

    def _fill_tree(widget):
        widget.setSortingEnabled(True)
        for column in (0, 1):
            widget.headerItem().setText(column, _t("New Column"))
        previously_sorting = widget.isSortingEnabled()
        widget.setSortingEnabled(False)
        widget.topLevelItem(0).setText(0, _t("New Item"))
        widget.topLevelItem(0).child(0).setText(0, _t("New Subitem"))
        widget.topLevelItem(0).child(0).setText(1, _t("Test"))
        widget.topLevelItem(0).child(0).child(0).setText(0, _t("New Subitem"))
        widget.topLevelItem(1).setText(0, _t("New Item"))
        widget.topLevelItem(1).child(0).setText(0, _t("New Subitem"))
        widget.setSortingEnabled(previously_sorting)

    def _fill_table(widget):
        for row in range(3):
            widget.verticalHeaderItem(row).setText(_t("New Row"))
        for column in (0, 1):
            widget.horizontalHeaderItem(column).setText(_t("New Column"))
        previously_sorting = widget.isSortingEnabled()
        widget.setSortingEnabled(False)
        sample_rows = (("1.23", "Hello"), ("1,45", "Olá"), ("12/12/2012", "Oui"))
        for row, (first, second) in enumerate(sample_rows):
            widget.item(row, 0).setText(_t(first))
            widget.item(row, 1).setText(_t(second))
        widget.setSortingEnabled(previously_sorting)

    DockWidget.setWindowTitle(_t("Widgets"))
    self.label_81.setText(_t("Enabled"))
    self.label_82.setText(_t("Disabled"))

    _install_hints(self.label_56)
    self.label_56.setText(_t("ListWidget"))
    _install_hints(self.listWidget)
    _fill_list(self.listWidget)
    _fill_list(self.listWidgetDis)

    _install_hints(self.label_57)
    self.label_57.setText(_t("TreeWidget"))
    _install_hints(self.treeWidget)
    _fill_tree(self.treeWidget)
    _fill_tree(self.treeWidgetDis)

    _install_hints(self.label_58)
    self.label_58.setText(_t("TableWidget"))
    _install_hints(self.tableWidget)
    _fill_table(self.tableWidget)
    _fill_table(self.tableWidgetDis)
| [
"daniel.pizetta@usp.br"
] | daniel.pizetta@usp.br |
c57ed8fabfe988b0c62f053976b78740d8376825 | 889dab04d6f9e5af0aded790a632c93e06ca4233 | /python_dla_kazdego/komputer_nastrojow.py | 6760fdbeedd37e49d481eb99c830fe7df2cbd7e3 | [] | no_license | Kinia201201/programowanie_obiektowe | a00c8d291ec59a03b922a1611645f4c1efda87eb | 865616404a41fd15d34a2bc82a6804aaf72df967 | refs/heads/master | 2020-08-26T20:05:10.425863 | 2020-01-14T23:31:30 | 2020-01-14T23:31:30 | 217,131,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 23:39:49 2019
@author: Kinga
"""
# Komputer nastrojóW
# Demonstruje klauzulę elif
import random
print("Wyczuwam Twoją energię. Twoje prawdziwe emocje znajduja odbicie na moim ekranie.")
print("Jesteś...")
mood = random.randint(1,3)
if mood == 1:
# szczęśliwy
print( \
"""
-----------
| |
| 0 0 |
| < |
| |
| . . |
| '...' |
-----------
""")
elif mood == 2:
# zły
print( \
"""
-----------
| |
| 0 0 |
| < |
| |
| ------ |
| |
-----------
""")
elif mood ==3:
# smutny
print( \
"""
-----------
| |
| 0 0 |
| < |
| |
| .'. |
| ' ' |
-----------
""")
else:
print("Nieprawidłowa wartość nastroju! (Musisz być na prawdę w złym humorze).")
print("...dzisiaj.")
input("\n\nAby zakończyć grę naciśnij klawisz Enter.") | [
"kingawielgus.kw@gmail.com"
] | kingawielgus.kw@gmail.com |
7e0c9c66789b5d70e91d999e13647ddd4b2098ae | e6132244015942c5ec75c8eff4f90cd0e9302470 | /src/wshop/apps/customer/notifications/services.py | 46bab44bef79471157b1207adfc9a79e677340e1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | vituocgia/wshop-core | d3173f603861685b523f6b66af502b9e94b7b0c2 | 5f6d1ec9e9158f13aab136c5bd901c41e69a1dba | refs/heads/master | 2020-03-18T08:25:14.669538 | 2018-05-23T05:55:56 | 2018-05-23T05:55:56 | 134,508,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | from wshop.core.loading import get_model
Notification = get_model('customer', 'Notification')
def notify_user(user, subject, **kwargs):
    """Create a Notification record addressed to *user* with *subject*.

    Any extra keyword arguments are forwarded verbatim to the model's
    ``create`` call (for example a body or a category).
    """
    Notification.objects.create(recipient=user, subject=subject, **kwargs)
def notify_users(users, subject, **kwargs):
    """Fan the same notification out to every user in *users*."""
    for recipient in users:
        notify_user(recipient, subject, **kwargs)
| [
"dotiendiep@gmail.com"
] | dotiendiep@gmail.com |
6a9c53e38081c3e99e7b55be75b34f21b54c83db | 0d252cd89a1c88dcd62c66527ad55b6083171fdc | /repositories/race_result_repository.py | cd12a418dcb446d56728a32e5f4306eecb35bec9 | [] | no_license | OhmniD/CodeClan_Week05_Full_Stack_Solo_Project | 99f1e0af5b7ff16d57670ae42fb40726b858e53c | fdb32d2de0ee186696c20ad50e705ba5df0eda63 | refs/heads/main | 2023-06-17T15:52:51.235742 | 2021-07-14T16:42:20 | 2021-07-14T16:42:20 | 373,792,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | from db.run_sql import run_sql
from models.race_result import RaceResult
import repositories.driver_repository as driver_repository
import repositories.round_repository as round_repository
import repositories.team_repository as team_repository
def save(race_result):
    """Insert *race_result* into race_results.

    The generated primary key is written back onto the object, which is
    then returned for convenience.
    """
    insert_sql = (
        "INSERT INTO race_results "
        "(position, driver_id, team_id, round_id, fastest_lap) "
        "VALUES (%s, %s, %s, %s, %s) RETURNING id"
    )
    params = [
        race_result.position,
        race_result.driver.id,
        race_result.team.id,
        race_result.round.id,
        race_result.fastest_lap,
    ]
    rows = run_sql(insert_sql, params)
    race_result.id = rows[0]['id']
    return race_result
def select_all():
    """Return every RaceResult stored in the race_results table."""
    race_results = []
    for record in run_sql("SELECT * FROM race_results"):
        race_results.append(
            RaceResult(
                record['position'],
                driver_repository.select(record['driver_id']),
                team_repository.select(record['team_id']),
                round_repository.select(record['round_id']),
                record['fastest_lap'],
                record['id'],
            )
        )
    return race_results
def select(id):
    """Fetch a single RaceResult by primary key.

    Returns None when no row matches *id*.

    Fixes over the original:
    * ``race_result = NoneType`` assigned an undefined name (instant
      NameError) — now initialises to ``None``.
    * the team lookup read ``row['team_id']`` where ``row`` was undefined —
      now reads from ``result``.
    * ``run_sql(...)[0]`` raised IndexError for a missing id — now guarded.
    """
    sql = "SELECT * FROM race_results WHERE id = %s"
    values = [id]
    rows = run_sql(sql, values)
    if not rows:
        return None
    result = rows[0]
    driver = driver_repository.select(result['driver_id'])
    team = team_repository.select(result['team_id'])
    round = round_repository.select(result['round_id'])
    return RaceResult(result['position'], driver, team, round, result['fastest_lap'], result['id'])
def update(race_result):
    """Persist the fields of an existing race result (matched by its id)."""
    # Fix: the original SQL was missing the comma between team_id and
    # round_id ("... team_id round_id ..."), a guaranteed syntax error.
    sql = "UPDATE race_results SET (position, driver_id, team_id, round_id, fastest_lap) = (%s, %s, %s, %s, %s) WHERE id = %s"
    values = [race_result.position, race_result.driver.id, race_result.team.id, race_result.round.id, race_result.fastest_lap, race_result.id]
    run_sql(sql, values)
def delete_all():
    """Remove every row from the race_results table."""
    run_sql("DELETE FROM race_results")
def delete(id):
    """Remove the race result whose primary key equals *id*."""
    run_sql("DELETE FROM race_results WHERE id = %s", [id])
def all_results_by_round(round_id):
    """Return the RaceResults of one round, ordered by finishing position."""
    sql = "SELECT * FROM race_results WHERE round_id = %s ORDER BY position ASC"
    results_by_round = []
    for record in run_sql(sql, [round_id]):
        race_result = RaceResult(
            record['position'],
            driver_repository.select(record['driver_id']),
            team_repository.select(record['team_id']),
            round_repository.select(record['round_id']),
            record['fastest_lap'],
            record['id'],
        )
        results_by_round.append(race_result)
    return results_by_round
def all_results_by_driver(driver_id):
    """Return one driver's RaceResults, ordered by finishing position."""
    sql = "SELECT * FROM race_results WHERE driver_id = %s ORDER BY position ASC"
    results_by_driver = []
    for record in run_sql(sql, [driver_id]):
        race_result = RaceResult(
            record['position'],
            driver_repository.select(record['driver_id']),
            team_repository.select(record['team_id']),
            round_repository.select(record['round_id']),
            record['fastest_lap'],
            record['id'],
        )
        results_by_driver.append(race_result)
    return results_by_driver
"dave@ohmnid.com"
] | dave@ohmnid.com |
1abecdb8b37e726aa8037f242e3f895ddc87949c | 4ebef3166e73d45623b3dbe5dca893a75f4645ce | /lesson07/les07task01.py | 77fd7e20325981eff84584bcd8d3d38b4777cb4d | [] | no_license | vakhnin/geekbrains-algorithms | 7e24d9d055ed1e72f975a6ba143e31f02ec11abf | 1916ed1f0ddf89a37c15c2dca551dd73e10f16e9 | refs/heads/main | 2023-06-29T01:04:42.752759 | 2021-08-01T09:54:34 | 2021-08-01T09:54:34 | 375,615,589 | 0 | 0 | null | 2021-08-01T09:54:34 | 2021-06-10T07:51:16 | Python | UTF-8 | Python | false | false | 2,203 | py | # lesson 07 task 01
#
# Отсортируйте по убыванию методом пузырька одномерный целочисленный массив, заданный случайными числами на промежутке
# [-100; 100). Выведите на экран исходный и отсортированный массивы.
# Примечания:
# алгоритм сортировки должен быть в виде функции, которая принимает на вход массив данных,
# постарайтесь сделать алгоритм умнее, но помните, что у вас должна остаться сортировка пузырьком.
import random
# Array parameters: SIZE random integers drawn from [MIN_ITEM, MAX_ITEM].
SIZE = 10
MIN_ITEM = -100
# random.randint is inclusive on both ends, so subtract 1 to stay below 100.
MAX_ITEM = 100 - 1
# The unsorted input the script sorts and verifies below.
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
def test_sort(arr, compare):
for i in range(len(arr) - 1):
if compare == "desc":
assert arr[i] >= arr[i + 1]
elif compare == "asc":
assert arr[i] <= arr[i + 1]
else:
assert False
if compare == "asc":
print('Массив отсортирован по возрастанию')
elif compare == "desc":
print('Массив отсортирован по убыванию')
else:
print('Неизвестно, как отсортирован массив')
def bubble_sort(arr):
    """Sort *arr* in place in descending order using bubble sort.

    Optimised in two ways while remaining a bubble sort: a pass with no
    swaps terminates the loop, and each pass only scans up to the index
    of the previous pass's last swap (everything beyond it is sorted).
    """
    last_swap = len(arr) - 1
    swapped = True
    while swapped:
        swapped = False
        boundary = last_swap
        for pos in range(boundary):
            if arr[pos] < arr[pos + 1]:
                arr[pos], arr[pos + 1] = arr[pos + 1], arr[pos]
                last_swap = pos
                swapped = True
print("Исходный массив")
print(array)
bubble_sort(array)
test_sort(array, "desc")
print('Массив после сортировки')
print(array)
| [
"sergey.vakhnin@gmail.com"
] | sergey.vakhnin@gmail.com |
9b1a69c6cbbfe6bc1782772b13b0804ea72105ec | 385e547fa9d6473f2462d0f13ca2a716d3faab96 | /python/ninjaDelverBear_funcions.py | ce2c34261ababe58458f8751e2f901c59e4d13ba | [] | no_license | holalluis/apunts | 8b78228b4d4db1eb8579946c0a125f13d3e11150 | b66382253041f0cfb2c263b995554c7b9cb1d57e | refs/heads/master | 2023-07-26T00:19:14.827669 | 2023-07-13T13:13:45 | 2023-07-13T13:13:45 | 169,467,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import math
import random
def shuf(baralla):
    """Fisher-Yates shuffle: permute *baralla* in place and return it."""
    for top in range(len(baralla) - 1, 0, -1):
        # Pick a random slot in [0, top] to swap with the current top.
        pick = int(math.floor(random.random() * (top + 1)))
        baralla[top], baralla[pick] = baralla[pick], baralla[top]
    return baralla
def queden(baralla):
    # Print how many cards remain in the deck in total (Python 2 print).
    print "\nQueden",len(baralla),"cartes a la baralla:"
def quantesCartesQuedenDe(carta,baralla):
    # Print how many copies of one specific card remain, plus the draw
    # probability (float() keeps Python 2 division from truncating).
    total=0
    for i in baralla:
        if(i==carta):
            total+=1
    if(total!=0):
        print " Queden",total,carta," - Probabilitat:",round(100*float(total)/len(baralla),2),"%"
    else:
        print " No queden",carta
def queQueda(baralla,arrayDeCartes):
    # Report the whole remaining deck: the total first, then one line per
    # distinct card listed in arrayDeCartes.
    queden(baralla)
    for i in arrayDeCartes:
        quantesCartesQuedenDe(i,baralla)
    print "\n"
| [
"holalluis@gmail.com"
] | holalluis@gmail.com |
d99bb0f05279917d04b3ce2724ab923fe6c0c6db | 4f29a9a90744d446aac8f7a81dba7c5c191c7253 | /how/how/utils/__init__.py | 1a1d9981c4cb6f3910cd19b6b5d9a443b07930a6 | [] | no_license | yodakaz/ASMK_yoda | 00f1025c2cd2e8670b03e5ba2a724b3504f6a5e2 | 165b75672321546a8090490879d7b20306793381 | refs/heads/main | 2023-06-22T07:01:30.088906 | 2021-07-20T05:07:39 | 2021-07-20T05:07:39 | 385,087,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | """
Standalone utilities, mainly helper functions
"""
| [
"noreply@github.com"
] | noreply@github.com |
cff2395789e5892845c1437ce574bc8ae6727100 | 32648177f3e2f9ce2d9345d80252c3c63522c852 | /python/track2.py | 66465a84d1519da026ade5a2ec340d67d7b43078 | [] | no_license | nickzhums/track2ux | 555e7e02db7d967100cdffcc03273bda56a934ae | 6f0bd69e6f2e80aaeb6d4d43b515d139f86746e5 | refs/heads/master | 2022-11-07T17:34:29.501270 | 2022-11-03T04:44:20 | 2022-11-03T04:44:20 | 258,109,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,434 | py | import os
import azure.mgmt.compute # use track2
import azure.mgmt.network # use track1, network track2 does not ready.
import azure.mgmt.resource # use track2
import uuid
from azure.identity import ClientSecretCredential
from azure.identity import DefaultAzureCredential
# from azure.common.credentials import ServicePrincipalCredentials
class createVMSample(object):
    """End-to-end Azure sample: create a resource group, a network and a VM.

    The class name keeps its original (non-PEP8) spelling because callers
    reference it.

    Fixes over the original sample:
    * ``__init__`` now honours its ``group_name`` and ``location`` arguments
      instead of hard-coding "westus2" and a random uuid group name.
    * ``__init__`` no longer deletes the resource group it just created —
      the old code deleted the very group that ``create_vm`` then tried to
      deploy into, so VM creation could never succeed.
    * ``create_vm`` places the VM in ``self.location`` instead of a
      hard-coded "eastus"; a VM cannot use a NIC from another region.
    """

    def __init__(self, group_name, location):
        """Build the Azure management clients and create the resource group.

        :param group_name: name of the resource group to create or update.
        :param location: Azure region used for every resource in the sample.
        """
        self.location = location
        # Allow overriding via the environment; fall back to the original
        # hard-coded subscription so existing runs keep working.
        self.subscription_id = os.environ.get(
            "AZURE_SUBSCRIPTION_ID", "6f341b56-77fd-41d5-b8df-7229d50461b6")

        credentials = DefaultAzureCredential()
        self.compute_client = azure.mgmt.compute.ComputeManagementClient(
            credential=credentials, subscription_id=self.subscription_id)
        self.network_client = azure.mgmt.network.NetworkManagementClient(
            credential=credentials, subscription_id=self.subscription_id)
        self.resource_client = azure.mgmt.resource.ResourceManagementClient(
            credential=credentials, subscription_id=self.subscription_id)

        # Create (or update) the group that every later call deploys into.
        self.group = self.resource_client.resource_groups.create_or_update(
            group_name,
            {'location': self.location}
        )
        self.group.tags = {
            "environment": "test",
            "department": "tech"
        }
        self.updated_group = self.resource_client.resource_groups.create_or_update(
            group_name, self.group)

    # NOTE: still on the track1 network client; switch once network track2 lands.
    def create_virtual_network(self, group_name, location, network_name, subnet_name):
        """Create a VNet (10.0.0.0/16) with one subnet (10.0.0.0/24).

        Returns the subnet resource; its id is needed to build the NIC.
        """
        vnet_poller = self.network_client.virtual_networks.begin_create_or_update(
            group_name,
            network_name,
            {
                'location': location,
                'address_space': {
                    'address_prefixes': ['10.0.0.0/16']
                }
            },
        )
        vnet_poller.result()  # the VNet must exist before its subnet
        subnet_poller = self.network_client.subnets.begin_create_or_update(
            group_name,
            network_name,
            subnet_name,
            {'address_prefix': '10.0.0.0/24'}
        )
        return subnet_poller.result()

    # NOTE: still on the track1 network client; switch once network track2 lands.
    def create_network_interface(self, group_name, location, nic_name, subnet):
        """Create a NIC with one IP configuration on *subnet*; return its id."""
        nic_poller = self.network_client.network_interfaces.begin_create_or_update(
            group_name,
            nic_name,
            {
                'location': location,
                'ip_configurations': [{
                    'name': 'MyIpConfig',
                    'subnet': {
                        'id': subnet.id
                    }
                }]
            }
        )
        return nic_poller.result().id

    def create_vm(self, vm_name, network_name, subnet_name, interface_name):
        """Create a Windows VM (with two empty data disks) behind a new NIC.

        The VNet, subnet and NIC are provisioned first; the VM is placed in
        the same region as its NIC.
        """
        group_name = self.group.name
        location = self.location

        # Networking first: the VM body references the NIC by id.
        subnet = self.create_virtual_network(group_name, location, network_name, subnet_name)
        nic_id = self.create_network_interface(group_name, location, interface_name, subnet)

        body = {
            "location": location,
            "hardware_profile": {
                "vm_size": "Standard_D2_v2"
            },
            "storage_profile": {
                "image_reference": {
                    "sku": "2016-Datacenter",
                    "publisher": "MicrosoftWindowsServer",
                    "version": "latest",
                    "offer": "WindowsServer"
                },
                "os_disk": {
                    "caching": "ReadWrite",
                    "managed_disk": {
                        "storage_account_type": "Standard_LRS"
                    },
                    "name": "myVMosdisk",
                    "create_option": "FromImage"
                },
                "data_disks": [
                    {
                        "disk_size_gb": "1023",
                        "create_option": "Empty",
                        "lun": "0"
                    },
                    {
                        "disk_size_gb": "1023",
                        "create_option": "Empty",
                        "lun": "1"
                    }
                ]
            },
            "os_profile": {
                "admin_username": "testuser",
                "computer_name": "myVM",
                # Sample-only credential; do not reuse outside throwaway demos.
                "admin_password": "Aa1!zyx_",
                "windows_configuration": {
                    "enable_automatic_updates": True  # need automatic update for reimage
                }
            },
            "network_profile": {
                "network_interfaces": [
                    {
                        "id": nic_id,
                        "properties": {
                            "primary": True
                        }
                    }
                ]
            }
        }
        poller = self.compute_client.virtual_machines.begin_create_or_update(group_name, vm_name, body)
        poller.result()
def main():
    """Drive the sample end to end: resource group first, then the VM."""
    print("init sample.")
    vm_sample = createVMSample('testgroupvm', 'eastus')
    print("create vm ...")
    vm_sample.create_vm('testvm', 'testnetwork', 'testsubnet', 'testinterface')
    print("finish.")
if __name__ == '__main__':
main()
| [
"Littlejedi"
] | Littlejedi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.