blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0b8ca9d38489a79fac11ef930d7f55e4b5f8e7c4 | 929c5d410575498104bcc3a32e2d827ff11a8294 | /3 trimestre/pyton/aula.py | 75efd626989288c74f2ac34ac93a3bf0f5e5b37a | [] | no_license | adriel1010/aulaTecnologiasWeb | 52ab30f61a342bbebf86b602ee9800ec276a2ae1 | 699e36d23319bafa5c603dd81f291eb261cabdc4 | refs/heads/master | 2021-01-24T16:45:54.853451 | 2018-09-26T01:33:10 | 2018-09-26T01:33:10 | 123,209,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | print("Olá mundo :D")
# Beginner Python lesson script (class notes).  Demonstrates, in order:
# dynamic typing, str.format(), conditionals, while/for loops and lists.
# All user-facing strings are in Portuguese and are deliberately left as-is.
print("olá 2")
# Dynamic typing demo: the same name is rebound to an int, a str and a float.
num = 10
num = "Adriel"
num = 10.98
a = num * 10  # num is the float 10.98 here, so a == 109.8
print( "O resultado é {}, certo ?".format(num) )
nome = "Adriel"
print( "{}, o resultado é {}, certo?".format(nome, num) )
num = 5 ** 2 #5 squared (exponentiation operator)
# Parity check: 25 is odd, so this branch does not print.
if(num % 2 == 0):
    print("0 número {} é par !".format(num))
print("Fim do IF")
# Nested-block demo: the while body runs exactly once and exits via break.
if(True):
    print("Pertence ao IF interno")
    while(True):
        print("Código do while")
        break
# Multiplication table: read a number and print i * num for i in 0..10.
i = 0
num = input("Digite um número: ")
num = int(num)
while(i <= 10):
    res = i * num
    print("{} X {} = {}".format(num,i,res))
    i += 1
# List basics: append vs. a pre-sized list filled by index.
vetor = []
vetor.append(10)
vetorDefinido = [""] * 10
vetorDefinido[0] = 19
vetorDefinido[1] = 5
# Read 4 numbers into a pre-sized list, then echo them back.
vetor = [""] * 4
i = 0
while(i < len(vetor)):
    vetor[i] = int( input("Número: ") )
    i = i+1
for x in vetor:
    print("O vetor tem o valor {}".format(x) )
def somar(a, b, c):
    """Print the sum of the three numeric arguments.

    Bug fix: the original signature was ``somar(a, b, b)`` — a duplicate
    parameter name is a SyntaxError in Python — while the body referenced an
    undefined name ``c``.  The evident intent (sum three values) is restored.
    """
    print(a + b + c)
class Aluno(Pessoa):
| [
"adrielcarlos1010@gmail.com"
] | adrielcarlos1010@gmail.com |
8faa8b56120958de0b6f1135e29aabb4e6389a29 | ea4e3ac0966fe7b69f42eaa5a32980caa2248957 | /download/unzip/pyobjc/pyobjc-14/pyobjc/stable/PyOpenGL-2.0.2.01/src/shadow/GL.KTX.buffer_region.0100.py | ac8f5465112ac5e17f3261bbe25ef82d3803a274 | [] | no_license | hyl946/opensource_apple | 36b49deda8b2f241437ed45113d624ad45aa6d5f | e0f41fa0d9d535d57bfe56a264b4b27b8f93d86a | refs/heads/master | 2023-02-26T16:27:25.343636 | 2020-03-29T08:50:45 | 2020-03-29T08:50:45 | 249,169,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | # This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _buffer_region
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """SWIG-generated attribute setter.

    Routes attribute writes through the class's ``__swig_setmethods__``
    table; with static=1 it refuses to create attributes that the wrapped
    C type does not declare.  Generated code — do not hand-edit.
    """
    # Assigning another wrapper to 'this' adopts its C pointer and its
    # ownership flag ('thisown'), then strips ownership from the source.
    if (name == "this"):
        if isinstance(value, class_type):
            self.__dict__[name] = value.this
            if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
            del value.thisown
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    # With static=0, pre-existing attributes and 'thisown' may always be set;
    # otherwise adding a brand-new attribute is rejected.
    if (not static) or hasattr(self,name) or (name == "thisown"):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    """Dynamic variant of _swig_setattr_nondynamic: arbitrary attributes allowed."""
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    """SWIG-generated attribute getter: dispatch via ``__swig_getmethods__``."""
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    # NOTE: Python 2 raise syntax — this generated module is Python 2 only.
    raise AttributeError,name
# Old/new-style-class compatibility shim emitted by SWIG: when
# types.ObjectType exists (Python 2.2+), new-style classes are available
# (_newclass = 1); otherwise a dummy old-style _object base is defined.
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types

# Re-export metadata from the compiled SWIG extension module (_buffer_region).
__version__ = _buffer_region.__version__
__date__ = _buffer_region.__date__
__api_version__ = _buffer_region.__api_version__
__author__ = _buffer_region.__author__
__doc__ = _buffer_region.__doc__

# Wrapped entry points of the GL_KTX_buffer_region OpenGL extension.
glBufferRegionEnabled = _buffer_region.glBufferRegionEnabled
glNewBufferRegion = _buffer_region.glNewBufferRegion
glDeleteBufferRegion = _buffer_region.glDeleteBufferRegion
glReadBufferRegion = _buffer_region.glReadBufferRegion
glDrawBufferRegion = _buffer_region.glDrawBufferRegion
glInitBufferRegionKTX = _buffer_region.glInitBufferRegionKTX
__info = _buffer_region.__info

# Buffer-region selector constants re-exported from the extension module.
GL_KTX_FRONT_REGION = _buffer_region.GL_KTX_FRONT_REGION
GL_KTX_BACK_REGION = _buffer_region.GL_KTX_BACK_REGION
GL_KTX_Z_REGION = _buffer_region.GL_KTX_Z_REGION
GL_KTX_STENCIL_REGION = _buffer_region.GL_KTX_STENCIL_REGION
| [
"hyl946@163.com"
] | hyl946@163.com |
e19ca4a254a85de070b01227989ac7817ac06393 | 991c0299c9eae4034db672a2c405bafc8f44e1c8 | /pyspedas/pyspedas/psp/tests/tests.py | 42387d6e05e349408f2bce7c37bc167306ae1bed | [
"MIT"
] | permissive | nsioulas/MHDTurbPy | 650b915cb23f4dd2458d09450e7dc1382c643da2 | 7ac6615caa737fb89b0314d1d55fcd60537c423c | refs/heads/main | 2023-06-01T06:31:58.944174 | 2023-05-13T05:36:06 | 2023-05-13T05:36:06 | 590,799,212 | 8 | 1 | null | 2023-02-01T20:38:49 | 2023-01-19T08:33:08 | Jupyter Notebook | UTF-8 | Python | false | false | 7,362 | py | import os
import unittest
from pyspedas.utilities.data_exists import data_exists
import pyspedas
class LoadTestCases(unittest.TestCase):
    """Smoke tests for pyspedas' Parker Solar Probe (PSP) load routines.

    Each test requests a short time range and asserts (via ``data_exists``)
    that the expected tplot variables were created.  The tests download data
    from the mission archive, so network access is required.
    """
    def test_unpublished_data(self):
        """
        this test doesn't load any data, since the username/pw is invalid
        """
        # no password
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_RTN', username='hello')
        # invalid password
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_RTN', username='hello', password='world')
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_SC', username='hello', password='world')
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_SC_1min', username='hello', password='world')
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_RTN_1min', username='hello', password='world')
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_RTN_4_Sa_per_Cyc', username='hello', password='world')
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_SC_4_Sa_per_Cyc', username='hello', password='world')
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='sqtn_rfs_V1V2', username='hello', password='world')
        spc = pyspedas.psp.spc(trange=['2018-11-5', '2018-11-5/06:00'], username='hello', password='world')
        spi = pyspedas.psp.spi(trange=['2018-11-5', '2018-11-5/06:00'], username='hello', password='world')
    def test_load_dfb_dbm_dvac(self):
        """Load the L2 'dfb_dbm_dvac' FIELDS datatype and check its variable."""
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='dfb_dbm_dvac', level='l2')
        self.assertTrue(data_exists('psp_fld_l2_dfb_dbm_dvac12'))
    def test_load_fld_data(self):
        """Load L2 'mag_rtn' with time clipping; exercise filter_fields with a range, 0, and keep=True."""
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_rtn', level='l2', time_clip=True)
        self.assertTrue(data_exists('psp_fld_l2_mag_RTN'))
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_mag_RTN', [4, 16])
        self.assertTrue(data_exists('psp_fld_l2_mag_RTN_004016'))
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_mag_RTN', 0)
        self.assertTrue(data_exists('psp_fld_l2_mag_RTN_000'))
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_mag_RTN', [4, 16], keep=True)
    def test_load_fld_1min(self):
        """Load L2 'mag_rtn_1min', filter it, and check the notplot dict output."""
        fields_vars = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_rtn_1min', level='l2')
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_mag_RTN_1min', [4, 16])
        self.assertTrue(data_exists('psp_fld_l2_mag_RTN_1min'))
        self.assertTrue(data_exists('psp_fld_l2_quality_flags'))
        notplot = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_rtn_1min', level='l2', notplot=True)
        self.assertTrue('psp_fld_l2_mag_RTN_1min' in notplot.keys())
    def test_load_fld_rtn_4_per_cyc(self):
        """Load L2 'mag_rtn_4_per_cycle' and check the data + quality-flag variables."""
        fields = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_rtn_4_per_cycle', level='l2')
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_mag_RTN_4_Sa_per_Cyc', [4, 16])
        self.assertTrue(data_exists('psp_fld_l2_mag_RTN_4_Sa_per_Cyc'))
        self.assertTrue(data_exists('psp_fld_l2_quality_flags'))
    def test_load_fld_sc_4_per_cyc(self):
        """Load L2 'mag_sc_4_per_cycle' and check the data + quality-flag variables."""
        fields = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='mag_sc_4_per_cycle',
                                     level='l2')
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_mag_SC_4_Sa_per_Cyc', [4, 16])
        self.assertTrue(data_exists('psp_fld_l2_mag_SC_4_Sa_per_Cyc'))
        self.assertTrue(data_exists('psp_fld_l2_quality_flags'))
    def test_load_sqtn_rfs_v1v2(self):
        """Load 'sqtn_rfs_v1v2' and check the electron density/temperature variables."""
        fields = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='sqtn_rfs_v1v2')
        filtered = pyspedas.psp.filter_fields('electron_density', [4, 16])
        self.assertTrue(data_exists('electron_density'))
        self.assertTrue(data_exists('electron_core_temperature'))
    def test_load_dfb_dc_spec(self):
        """Load 'dfb_dc_spec' and check two of its spectral variables."""
        fields = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='dfb_dc_spec')
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_dfb_dc_spec_dV12hg', [4, 16])
        self.assertTrue(data_exists('psp_fld_l2_dfb_dc_spec_dV12hg'))
        self.assertTrue(data_exists('psp_fld_l2_dfb_dc_spec_SCMdlfhg'))
    def test_load_dfb_ac_xspec(self):
        """Load 'dfb_ac_xspec' and check its cross-spectrum variable."""
        fields = pyspedas.psp.fields(trange=['2018-11-5', '2018-11-5/06:00'], datatype='dfb_ac_xspec')
        filtered = pyspedas.psp.filter_fields('psp_fld_l2_dfb_ac_xspec_power_ch1_SCMdlfhg', [4, 16])
        self.assertTrue(data_exists('psp_fld_l2_dfb_ac_xspec_power_ch1_SCMdlfhg'))
        # NOTE(review): this assertion duplicates the previous one verbatim;
        # a second, different variable (e.g. a ch2 product) was likely intended.
        self.assertTrue(data_exists('psp_fld_l2_dfb_ac_xspec_power_ch1_SCMdlfhg'))
    def test_load_spc_data(self):
        """Load SPC L3 'l3i' data and check the fitted moment variables."""
        spc_vars = pyspedas.psp.spc(trange=['2018-11-5', '2018-11-6'], datatype='l3i', level='l3')
        self.assertTrue(data_exists('psp_spc_np_fit'))
        self.assertTrue(data_exists('psp_spc_np_fit_uncertainty'))
        self.assertTrue(data_exists('psp_spc_wp_fit'))
        self.assertTrue(data_exists('psp_spc_vp_fit_SC'))
        self.assertTrue(data_exists('psp_spc_vp_fit_RTN'))
        self.assertTrue(data_exists('psp_spc_np1_fit'))
    def test_load_spe_data(self):
        """Load SPE L2 'spa_sf1_32e' data and check flux + quality-flag variables."""
        spe_vars = pyspedas.psp.spe(trange=['2018-11-5', '2018-11-6'], datatype='spa_sf1_32e', level='l2')
        self.assertTrue(data_exists('psp_spe_EFLUX'))
        self.assertTrue(data_exists('psp_spe_QUALITY_FLAG'))
    def test_load_spi_data(self):
        """Load SPI L3 'spi_sf0a_mom_inst' data and check the moment variables."""
        spi_vars = pyspedas.psp.spi(trange=['2018-11-5', '2018-11-6'], datatype='spi_sf0a_mom_inst', level='l3')
        self.assertTrue(data_exists('psp_spi_DENS'))
        self.assertTrue(data_exists('psp_spi_VEL'))
        self.assertTrue(data_exists('psp_spi_T_TENSOR'))
        self.assertTrue(data_exists('psp_spi_TEMP'))
        self.assertTrue(data_exists('psp_spi_EFLUX_VS_ENERGY'))
        self.assertTrue(data_exists('psp_spi_EFLUX_VS_THETA'))
        self.assertTrue(data_exists('psp_spi_EFLUX_VS_PHI'))
    def test_load_epihi_data(self):
        """Load EPI-Hi L2 'let1_rates1h' data and check rate/bin variables."""
        epihi_vars = pyspedas.psp.epihi(trange=['2018-11-5', '2018-11-5/06:00'], datatype='let1_rates1h', level='l2')
        self.assertTrue(data_exists('psp_epihi_B_He_Rate'))
        self.assertTrue(data_exists('psp_epihi_R1A_He_BIN'))
        self.assertTrue(data_exists('psp_epihi_R3B_He_BIN'))
        self.assertTrue(data_exists('psp_epihi_R6A_He_BIN'))
    def test_load_epi_data(self):
        """Load ISOIS EPI data with default options and check the rate variables."""
        epilo_vars = pyspedas.psp.epi()
        self.assertTrue(data_exists('psp_isois_HET_A_Electrons_Rate_TS'))
        self.assertTrue(data_exists('psp_isois_HET_A_H_Rate_TS'))
        self.assertTrue(data_exists('psp_isois_A_H_Rate_TS'))
        self.assertTrue(data_exists('psp_isois_A_Heavy_Rate_TS'))
        self.assertTrue(data_exists('psp_isois_H_CountRate_ChanP_SP'))
        self.assertTrue(data_exists('psp_isois_Electron_CountRate_ChanE'))
    def test_downloadonly(self):
        """Verify downloadonly=True returns paths to files that exist on disk."""
        files = pyspedas.psp.epilo(downloadonly=True)
        self.assertTrue(os.path.exists(files[0]))
# Allow running this test module directly:  python tests.py
if __name__ == '__main__':
    unittest.main()
| [
"nsioulas@g.ucla.edu"
] | nsioulas@g.ucla.edu |
3fa8270849bd728041105b6d7119c3a99035ee56 | 3fb104c3aa28e045fd2a6bab518116215e03a260 | /.venv/Lib/site-packages/aws_cdk/aws_ecr_assets/_jsii/__init__.py | 6077d0f16767538da05d8916c1afc51eee7bca78 | [] | no_license | danyal2050/demo-python-cdk | ae90293e7ee2358b45fef6c404436c3c8e884603 | 242557ca00da7cde10071e431242d6ca6467e066 | refs/heads/master | 2023-03-22T14:22:44.710332 | 2020-12-29T11:59:02 | 2020-12-29T11:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import aws_cdk.assets._jsii
import aws_cdk.aws_ecr._jsii
import aws_cdk.aws_iam._jsii
import aws_cdk.aws_s3._jsii
import aws_cdk.core._jsii
import aws_cdk.cx_api._jsii
import constructs._jsii
# Load the bundled jsii assembly for @aws-cdk/aws-ecr-assets 1.79.0.
# __name__[0:-6] strips the trailing 6 characters ("._jsii") so the assembly
# is registered under the parent package name.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@aws-cdk/aws-ecr-assets",
    "1.79.0",
    __name__[0:-6],
    "aws-ecr-assets@1.79.0.jsii.tgz",
)

# The loaded assembly handle is this module's only public name.
__all__ = [
    "__jsii_assembly__",
]

publication.publish()
| [
"ramon.marrero@lanube.io"
] | ramon.marrero@lanube.io |
906285afe8404eb1a87b4c5d698121cf5c26fdd6 | 406f1eb64aa96b3c7eb89a4c873ac7a089cc70bc | /flaskapp.py | dbc9d67048377c7953f7393aeea093ddacfebe5a | [] | no_license | Prathamveer/Flask-App-1 | 7f7c01faef17183e0a1daef4c3e7dc2d224facff | c3239a019eb696c731733134ecce5f22d14646fe | refs/heads/main | 2023-03-27T03:09:43.238503 | 2021-03-31T10:04:03 | 2021-03-31T10:04:03 | 353,309,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | from flask import Flask,jsonify, request
app = Flask(__name__)
tasks=[
{
"id":1,
"title": u'Buy Groceries',
"description":u'Milk, Cheese, Pizza, Fruit, Tylenol',
"done":False
},
{
"id":2,
"title": u'Learn Java',
"description":u'I am learning Java from Somanna',
"done":False
}
]
@app.route("/add-data", methods=['POST'])
def add_task():
    """Create a new task from the JSON request body.

    Expects a JSON object with a required ``title`` and an optional
    ``description``.  Returns a JSON status payload; malformed requests now
    get an HTTP 400 response.
    """
    # Bug fix: the original wrote ``jsonify({...}, 400)`` which passes 400 as
    # a *second value to serialize* (the response stayed HTTP 200 and carried
    # a two-element list).  The status code must be a separate tuple element.
    if not request.json:
        return jsonify({
            "status": "error",
            "message": "Please provide the data!"
        }), 400
    # Robustness fix: a missing 'title' used to raise KeyError (HTTP 500).
    if 'title' not in request.json:
        return jsonify({
            "status": "error",
            "message": "Please provide the data!"
        }), 400
    task = {
        # Robustness fix: ``tasks[-1]['id'] + 1`` raised IndexError when the
        # list was empty; numbering now starts at 1 in that case.
        'id': tasks[-1]['id'] + 1 if tasks else 1,
        'title': request.json['title'],
        'description': request.json.get('description', ""),
        'done': False
    }
    tasks.append(task)
    return jsonify({
        "status": "success",
        "message": "Task Added Successfully!!"
    })
@app.route("/get-data")
def get_task():
    """Return every stored task, wrapped as ``{"data": [...]}``."""
    payload = {"data": tasks}
    return jsonify(payload)
# Start Flask's development server only when the script is executed directly.
if __name__=="__main__":
    app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
a045b73bfaca5784847e614a130eff9d3c1caedb | 1f82e09ae56a0dfb811663a5dc835e1628def9eb | /Asteroids_1/main.py | dc88a8d9a44ee93ab2a5d1c244ea14c360494169 | [] | no_license | armyrunner/CS1410-OOP2 | 6f1eb70260496fe0733cbcd571d0838f81cd7ea1 | 115c314831c7839aa4afb55cff780d87e4ffdd52 | refs/heads/master | 2023-03-06T22:46:08.097008 | 2021-02-19T01:03:37 | 2021-02-19T01:03:37 | 340,193,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,395 | py | import pygame
import game
# YOU SHOULD CHANGE THIS TO IMPORT YOUR GAME MODULE
import asteroids
# YOU SHOULD CONFIGURE THESE TO MATCH YOUR GAME
# window title bar text
TITLE = "Asteroids"
# pixels width
WINDOW_WIDTH = 700
# pixels high
WINDOW_HEIGHT = 600
# frames per second
DESIRED_RATE = 10
class PygameApp( game.Game ):
    """Pygame front-end that forwards keyboard input to the Asteroids game."""

    def __init__( self, title, width, height, frame_rate ):
        game.Game.__init__( self, title, width, height, frame_rate )
        # create a game instance sized to the window
        self.mGame = asteroids.Asteroids( width, height )
        return

    def game_logic( self, keys, newkeys, buttons, newbuttons, mouse_position, dt ):
        """Advance the game one frame based on the current input state.

        keys/newkeys: pygame.K_* codes currently held / pressed this frame.
        buttons/newbuttons: mouse buttons currently held / pressed this frame.
        mouse_position: (x, y) of the mouse in window coordinates.
        dt: seconds elapsed since the last frame.
        """
        # Bug fix: the original mapped 'd' (pressed this frame) to
        # turnShipLeft(); the right-hand key must turn right.  The duplicated
        # ``if K in keys: ... elif K in keys: ...`` branches (the elif was
        # unreachable) are also collapsed: a key pressed this frame is still
        # held down, so one membership test per direction suffices, and
        # holding both bindings (e.g. 'a' and LEFT) no longer applies the
        # same turn twice in a single frame.
        if pygame.K_a in keys or pygame.K_LEFT in keys:
            self.mGame.turnShipLeft( 10 )
        if pygame.K_d in keys or pygame.K_RIGHT in keys:
            self.mGame.turnShipRight( 10 )
        if pygame.K_w in keys or pygame.K_UP in keys:
            self.mGame.accelerateShip( 5 )
        if pygame.K_s in keys or pygame.K_DOWN in keys:
            self.mGame.accelerateShip( -5 )
        # Mouse input is currently unused; example hook kept for reference:
        # if 1 in newbuttons:
        #     self.mGame.actOnLeftClick( mouse_position[0], mouse_position[1] )
        self.mGame.evolve( dt )
        return

    def paint( self, surface ):
        """Draw the current state of the game instance onto *surface*."""
        self.mGame.draw( surface )
        return
def main( ):
    """Initialize pygame's font module, build the app and run its main loop."""
    pygame.font.init( )
    # NOTE(review): this local name shadows the imported ``game`` module;
    # harmless here because the module is not used afterwards, but renaming
    # the local would be clearer.
    game = PygameApp( TITLE, WINDOW_WIDTH, WINDOW_HEIGHT, DESIRED_RATE )
    game.main_loop( )

if __name__ == "__main__":
    main( )
| [
"live2runmarthon@gmail.com"
] | live2runmarthon@gmail.com |
7b3d8010e4239376f52e5c0c69d990f30f7a3c82 | a8faef3782449f73bfb26ef9a7e0ac717be5e5d4 | /LabelsReader.py | b6408c02276574ef2805f2bd3a651d480c185266 | [] | no_license | Microv/NonlinearAnomalyDetection | 971ab0e69b65d58cbaad5ee2a62831ff553c87be | c3cc1daf66c4233ecb33f6fd1219a8e30797195a | refs/heads/master | 2021-01-09T06:06:09.604425 | 2017-02-04T11:27:16 | 2017-02-04T11:27:16 | 80,912,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,751 | py | from ISCXLabelsHandler import ISCXLabelsHandler
from DARPA2000LabelsHandler import DARPA2000LabelsHandler
import xml.sax
from os import listdir, path
import pickle
import pytz
import datetime, calendar
# Directory (relative to the working directory) holding the ground-truth label files.
GROUND_TRUTH_DIR = 'labels/'
class ISCXLabelsReader():
    """Builds {src_ip: {dst_ip: [[start_ts, stop_ts], ...]}} from the ISCX
    XML ground-truth files in GROUND_TRUTH_DIR, caching the result in
    'iscx_anomalies.pickle'.

    Fix: the Python 2 ``print`` statements were replaced with the
    parenthesized call form so the module also runs under Python 3
    (output is unchanged).
    """
    def __init__(self, verbose=False):
        # One reusable SAX parser; the content handler is swapped per file.
        self.parser = xml.sax.make_parser()
        self.anomalies = dict()
        self.verbose = verbose
    def read(self):
        """Parse labels/*.xml (or load the pickle cache) and return the dict."""
        anomaly_file = 'iscx_anomalies.pickle'
        if anomaly_file not in listdir('.'):
            if self.verbose:
                print('Building dictionary of anomalies...')
            for filename in listdir(GROUND_TRUTH_DIR):
                if '.xml' in filename:
                    if self.verbose:
                        print('Reading ' + filename)
                    # The handler mutates self.anomalies in place.
                    self.parser.setContentHandler(ISCXLabelsHandler(path.splitext(filename)[0], self.anomalies, self.verbose))
                    self.parser.parse(GROUND_TRUTH_DIR + filename)
            with open(anomaly_file, 'wb') as f:
                pickle.dump(self.anomalies, f)
        else:
            if self.verbose:
                print('Reading dictionary of anomalies...')
            with open(anomaly_file, 'rb') as f:
                self.anomalies = pickle.load(f)
        if self.verbose:
            print('Dictionary read')
        return self.anomalies
class CTU13LabelsReader():
    """Builds {src_ip: {dst_ip: [[start_ts, stop_ts], ...]}} for
    Botnet-labelled flows in the CTU-13 ``.binetflow`` files, caching the
    result in 'ctu13_anomalies.pickle'.

    Fix: the Python 2 ``print`` statements were replaced with the
    parenthesized call form so the module also runs under Python 3; the
    CSV files are opened in text mode for the same reason.
    """
    def __init__(self, verbose=False):
        self.anomalies = dict()
        self.verbose = verbose
    def read(self):
        """Parse labels/*.binetflow (or load the pickle cache) and return the dict."""
        anomaly_file = 'ctu13_anomalies.pickle'
        if anomaly_file not in listdir('.'):
            if self.verbose:
                print('Building dictionary of anomalies...')
            for filename in listdir(GROUND_TRUTH_DIR):
                if '.binetflow' in filename:
                    if self.verbose:
                        print('Reading ' + filename)
                    self.parseFile(filename)
            with open(anomaly_file, 'wb') as f:
                pickle.dump(self.anomalies, f)
        else:
            if self.verbose:
                print('Reading dictionary of anomalies...')
            with open(anomaly_file, 'rb') as f:
                self.anomalies = pickle.load(f)
        if self.verbose:
            print('Dictionary read')
        return self.anomalies
    def parseFile(self, file):
        """Parse one binetflow CSV, recording Botnet flows in self.anomalies.

        Relevant columns: 0=start time, 1=duration (s), 3=src addr,
        6=dst addr, 14=label.
        """
        # Text mode (was 'rb'): required for str processing under Python 3.
        with open(GROUND_TRUTH_DIR + file, 'r') as f:
            header = f.readline()
            while True:
                line = f.readline()
                if line == "":
                    break
                line = line[:-1].split(',')
                start_time = line[0]
                duration = line[1]
                source = line[3]
                destination = line[6]
                full_label = line[14]
                start_ts = self.timestamp(start_time)
                stop_ts = start_ts + float(duration)
                # Any label mentioning 'Botnet' is treated as anomalous.
                if 'Botnet' in full_label:
                    label = 'anomaly'
                else:
                    label = 'normal'
                if self.verbose:
                    print(start_time + '(' + str(start_ts) + ')', duration + '(' + str(stop_ts) + ')', source, destination, full_label + '(' + label + ')')
                if label == 'anomaly':
                    targets = self.anomalies.setdefault(source, dict())
                    targets.setdefault(destination, list()).append([start_ts, stop_ts])
    def timestamp(self, dateTime):
        """Convert 'YYYY/MM/DD HH:MM:SS.ffffff' local (Europe/Prague) time
        to a UTC epoch timestamp (seconds)."""
        TIMEZONE = 'Europe/Prague'
        local_tz = pytz.timezone(TIMEZONE)
        datetime_without_tz = datetime.datetime.strptime(dateTime, "%Y/%m/%d %H:%M:%S.%f")
        datetime_with_tz = local_tz.localize(datetime_without_tz, is_dst=None)
        datetime_in_utc = datetime_with_tz.astimezone(pytz.utc)
        timestamp = calendar.timegm(datetime_in_utc.timetuple())
        return timestamp
class DARPALabelsReader():
    """Builds {attacker_ip: {victim_ip: [[start_ts, stop_ts], ...]}} from the
    DARPA attack-list files (``*.list``) in GROUND_TRUTH_DIR, caching the
    result in 'darpa_anomalies.pickle'.

    Bug fixes relative to the original:
      * the last attack of every file was silently dropped (records were only
        flushed when the *next* 'ID:' line appeared);
      * octet ranges 'a-b' excluded host b, and '*' expanded to 0..254 only;
      * after expanding a range, a bogus truncated host (e.g. '172.16.112')
        was also appended;
      * the attack duration was converted through the America/New_York
        timestamp path, which added the UTC offset (~5 h) to every stop time;
      * Python 2 ``print`` statements were replaced with the parenthesized
        form for Python 3 compatibility.
    """
    def __init__(self, verbose=False):
        self.anomalies = dict()
        self.verbose = verbose
    def read(self):
        """Parse labels/*.list (or load the pickle cache) and return the dict."""
        anomaly_file = 'darpa_anomalies.pickle'
        if anomaly_file not in listdir('.'):
            if self.verbose:
                print('Building dictionary of anomalies...')
            for filename in listdir(GROUND_TRUTH_DIR):
                if '.list' in filename:
                    if self.verbose:
                        print('Reading ' + filename)
                    self.parseFile(filename)
            with open(anomaly_file, 'wb') as f:
                pickle.dump(self.anomalies, f)
        else:
            if self.verbose:
                print('Reading dictionary of anomalies...')
            with open(anomaly_file, 'rb') as f:
                self.anomalies = pickle.load(f)
        if self.verbose:
            print('Dictionary read')
        return self.anomalies
    def parseFile(self, filename):
        """Parse one attack-list file, accumulating intervals in self.anomalies.

        Each record is a block of '<Field:> <value>' lines starting at 'ID:'.
        """
        attack = None
        # Text mode (was 'rb'): required for str processing under Python 3.
        with open(GROUND_TRUTH_DIR + filename, 'r') as f:
            while True:
                line = f.readline()
                if line == "":
                    break
                line = line[:-1].split(' ')
                if len(line) < 2:
                    continue
                name = line[0]
                value = line[1]
                if name == 'ID:':
                    # A new record starts: flush the previous one first.
                    self._record_attack(attack)
                    attack = dict()
                    attack['id'] = value
                elif name == 'Date:':
                    attack['date'] = value
                elif name == 'Name:':
                    attack['name'] = value
                elif name == 'Category':
                    attack['category'] = value
                elif name == 'Start_Time:':
                    attack['start_time'] = value
                elif name == 'Duration:':
                    attack['duration'] = value
                elif name == 'Attacker:':
                    attack['attacker'] = self._expand_hosts(value, allow_wildcard=False)
                elif name == 'Victim:':
                    attack['victim'] = self._expand_hosts(value, allow_wildcard=True)
        # Bug fix: flush the trailing record, which the original dropped.
        self._record_attack(attack)
    def _record_attack(self, attack):
        """Add one parsed attack's [start, stop] interval for every
        (attacker, victim) pair to self.anomalies.  No-op for None."""
        if not attack:
            return
        start_ts = self.timestamp(attack['date'] + '-' + attack['start_time'])
        # Bug fix: the duration ('HH:MM:SS') is a plain time span; converting
        # it through the timezone-aware timestamp() added the America/New_York
        # UTC offset to every stop time.  Compute the seconds directly.
        h, m, s = attack['duration'].split(':')
        duration = int(h) * 3600 + int(m) * 60 + int(s)
        stop_ts = start_ts + duration
        if self.verbose:
            for key in attack:
                print(key + ': ' + str(attack[key]))
            print(start_ts)
            print(stop_ts)
        for source in attack['attacker']:
            targets = self.anomalies.setdefault(source, dict())
            for destination in attack['victim']:
                targets.setdefault(destination, list()).append([start_ts, stop_ts])
    def _expand_hosts(self, value, allow_wildcard):
        """Expand a comma-separated host field into a list of host strings.

        Dotted-quad entries may use an inclusive 'a-b' range (and '*' when
        *allow_wildcard* is true) in an octet; such an entry expands across
        that octet, as in the original last-octet usage.  Non-IP entries keep
        their first dot-component, and attacker 'login' pseudo-entries stay
        literal, matching the original behaviour.
        """
        hosts = list()
        for entry in value.split(','):
            parts = entry.split('.')
            if len(parts) != 4 or (not allow_wildcard and parts[0] == 'login'):
                hosts.append(parts[0])
                continue
            prefix = ''
            expanded = False
            for part in parts:
                if part == '*' and allow_wildcard:
                    # Bug fix: range(0, 255) missed host .255.
                    for j in range(256):
                        hosts.append(prefix + str(j))
                    expanded = True
                    break
                if '-' in part:
                    first, last = part.split('-')
                    # Bug fix: range(first, last) excluded the upper bound.
                    for j in range(int(first), int(last) + 1):
                        hosts.append(prefix + str(j))
                    expanded = True
                    break
                prefix += str(int(part)) + '.'
            if not expanded:
                # Bug fix: only append the full address when no range was
                # expanded; the original always appended, producing bogus
                # truncated hosts after a range.
                hosts.append(prefix[:-1])
        return hosts
    def timestamp(self, dateTime):
        """Convert 'MM/DD/YYYY-HH:MM:SS' local (America/New_York) time to a
        UTC epoch timestamp (seconds)."""
        TIMEZONE = 'America/New_York'
        local_tz = pytz.timezone(TIMEZONE)
        datetime_without_tz = datetime.datetime.strptime(dateTime, "%m/%d/%Y-%H:%M:%S")
        datetime_with_tz = local_tz.localize(datetime_without_tz, is_dst=None)
        datetime_in_utc = datetime_with_tz.astimezone(pytz.utc)
        timestamp = calendar.timegm(datetime_in_utc.timetuple())
        return timestamp
class DARPA2000LabelsReader():
    """Builds the anomaly dictionary from the DARPA 2000 XML ground-truth
    files via a SAX handler (DARPA2000LabelsHandler), caching the result in
    'darpa2000_anomalies.pickle'.

    Fix: the Python 2 ``print`` statements were replaced with the
    parenthesized call form so the module also runs under Python 3.
    """
    def __init__(self, verbose=False):
        # One reusable SAX parser; the content handler is swapped per file.
        self.parser = xml.sax.make_parser()
        self.anomalies = dict()
        self.verbose = verbose
    def read(self):
        """Parse labels/*.xml (or load the pickle cache) and return the dict."""
        anomaly_file = 'darpa2000_anomalies.pickle'
        if anomaly_file not in listdir('.'):
            if self.verbose:
                print('Building dictionary of anomalies...')
            # NOTE(review): the '.xml' filter would also match ISCX xml files
            # if both label sets share GROUND_TRUTH_DIR — confirm intent.
            for filename in listdir(GROUND_TRUTH_DIR):
                if '.xml' in filename:
                    if self.verbose:
                        print('Reading ' + filename)
                    self.parser.setContentHandler(DARPA2000LabelsHandler(self.anomalies, self.verbose))
                    self.parser.parse(GROUND_TRUTH_DIR + filename)
            with open(anomaly_file, 'wb') as f:
                pickle.dump(self.anomalies, f)
        else:
            if self.verbose:
                print('Reading dictionary of anomalies...')
            with open(anomaly_file, 'rb') as f:
                self.anomalies = pickle.load(f)
        if self.verbose:
            print('Dictionary read')
        return self.anomalies
class MergedLabelsReader():
    """Builds the anomaly dictionary for the merged dataset from three
    sources: hard-coded CTU botnet intervals, the CAIDA 2007 DDoS IP list
    and the simulated-attack CSV log.

    Fix: the Python 2 ``print`` statements were replaced with the
    parenthesized call form so the module also runs under Python 3; the
    text inputs are opened in text mode for the same reason.
    """
    def __init__(self, verbose=False):
        self.anomalies = dict()
        self.verbose = verbose
    def read(self):
        """Assemble and return {src_ip: {dst_ip: [[start_ts, stop_ts], ...]}}."""
        # CTU botnet: fixed attack windows for the known bot/C&C pair.
        self.anomalies['147.32.84.165'] = dict()
        self.anomalies['147.32.84.165']['147.32.96.69'] = list()
        self.anomalies['147.32.84.165']['147.32.96.69'].append([1483706040, 1483706520])
        self.anomalies['147.32.84.165']['147.32.96.69'].append([1483711200, 1483711680])
        self.anomalies['147.32.84.165']['147.32.96.69'].append([1483722000, 1483722480])
        self.anomalies['147.32.84.165']['147.32.96.69'].append([1483729200, 1483729680])
        self.anomalies['147.32.84.165']['147.32.96.69'].append([1483735980, 1483736460])
        self.anomalies['147.32.84.165']['147.32.96.69'].append([1483745400, 1483745880])
        # CAIDA 2007 DDoS: each line is 'src,dst'; every pair gets the same
        # three fixed attack windows.
        IP_FILE = 'ip_addr_caida.txt'
        with open(GROUND_TRUTH_DIR + IP_FILE, 'r') as f:
            while True:
                line = f.readline()
                if line == "":
                    break
                line = line[:-1].split(',')
                src = line[0]
                dst = line[1]
                if self.verbose:
                    print('Attack')
                    print('From: ' + src)
                    print('To: ' + dst)
                if src not in self.anomalies:
                    self.anomalies[src] = dict()
                    self.anomalies[src][dst] = list()
                elif dst not in self.anomalies[src]:
                    self.anomalies[src][dst] = list()
                self.anomalies[src][dst].append([1484557200, 1484559796])
                self.anomalies[src][dst].append([1484571600, 1484574173])
                self.anomalies[src][dst].append([1484586000, 1484588580])
        # Simulated attacks: semicolon-separated CSV with a header row;
        # columns 2/3 are source/target, 4/5 the start/stop epoch timestamps.
        FILENAME = 'merged_dataset_log.csv'
        with open(GROUND_TRUTH_DIR + FILENAME, 'r') as f:
            header = f.readline()
            while True:
                line = f.readline()
                if line == "":
                    break
                line = line[:-1].split(';')
                source = line[2]
                target = line[3]
                start_timestamp = int(line[4])
                stop_timestamp = int(line[5])
                if self.verbose:
                    print('Attack')
                    print('From: ' + source)
                    print('To: ' + target)
                    print('Begins: ' + str(start_timestamp))
                    print('Ends: ' + str(stop_timestamp))
                if source not in self.anomalies:
                    self.anomalies[source] = dict()
                    self.anomalies[source][target] = list()
                elif target not in self.anomalies[source]:
                    self.anomalies[source][target] = list()
                self.anomalies[source][target].append([start_timestamp, stop_timestamp])
        return self.anomalies
| [
"michele@localhost.localdomain"
] | michele@localhost.localdomain |
0361cf95323138a93fe8fdbfbe7b79443489acf3 | 426a61f9a3413ccd09cc7a59ffa8988fad2c5c2f | /network/refinement_network.py | e4b0ebda2fdee7d750722892d8a59fbfaa5fb217 | [
"Unlicense"
] | permissive | sveatlo/unmasked | 46c53b54d377ecae72aafbc8ed7b237bef238415 | 6870ee56beea7401aa97194f76487c391af9dd5d | refs/heads/main | 2023-07-06T22:58:22.042569 | 2021-08-08T19:36:52 | 2021-08-08T19:36:52 | 355,092,011 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,629 | py | import torch
import torch.nn as nn
from network.contextual_attention import ContextualAttention
from network.gated_conv import GatedConv2d, GatedDeConv2d
class RefinementNetwork(nn.Module):
def __init__(self, in_channels: int = 4, out_channels: int = 3, latent_channels: int = 48, padding_type: str = 'zero', activation: str = 'lrelu', norm: str = 'none'):
super().__init__()
# b1 has attention
self.b1_1 = nn.Sequential(
GatedConv2d(in_channels, latent_channels, 5, 1, 2, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels, latent_channels, 3, 2, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels, latent_channels*2, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*2, latent_channels*4, 3, 2, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation='relu', norm=norm)
)
self.b1_2 = nn.Sequential(
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm)
)
self.context_attention=ContextualAttention(ksize=3, stride=1, rate=2, fuse_k=3, softmax_scale=10, fuse=True)
# b2 is conv only
self.b2 = nn.Sequential(
GatedConv2d(in_channels, latent_channels, 5, 1, 2, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels, latent_channels, 3, 2, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels, latent_channels*2, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*2, latent_channels*2, 3, 2, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*2, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 2, dilation=2, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 4, dilation=4, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 8, dilation=8, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 16, dilation=16, padding_type=padding_type, activation=activation, norm=norm)
)
self.combine = nn.Sequential(
GatedConv2d(latent_channels*8, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*4, latent_channels*4, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedDeConv2d(latent_channels*4, latent_channels*2, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels*2, latent_channels*2, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedDeConv2d(latent_channels*2, latent_channels, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels, latent_channels//2, 3, 1, 1, padding_type=padding_type, activation=activation, norm=norm),
GatedConv2d(latent_channels//2, out_channels, 3, 1, 1, padding_type=padding_type, activation='none', norm=norm),
nn.Tanh()
)
def forward(self, img, coarse_img, mask):
img_masked = img * (1 - mask) + coarse_img * mask
x = torch.cat([img_masked, mask], dim=1)
x_1 = self.b2(x)
x_2 = self.b1_1(x)
mask_s = nn.functional.interpolate(mask, (x_2.shape[2], x_2.shape[3]))
x_2 = self.context_attention(x_2, x_2, mask_s)
x_2 = self.b1_2(x_2)
y = torch.cat([x_1, x_2], dim=1)
y = self.combine(y)
y = nn.functional.interpolate(y, (img.shape[2], img.shape[3]))
return y
| [
"svatoplukhanzel@pm.me"
] | svatoplukhanzel@pm.me |
4274a097f97d6858437ee3a6400bf7187f8a8d8d | 45a9f6533a9bff337e420e1e5ee34a3fb2a60ba4 | /AnalyticsVidhya/analyticsvidhya/Sol.py | cf3ad1e2996872c1b16a3af95e1b91091ea9ee5c | [] | no_license | vikramjain/Competetions | 4dfc9f429692aab97afebdb265217722e2ba9cf4 | 07c9b6ca8cb571474761109a0f751c9eb3b6da54 | refs/heads/master | 2021-09-05T12:03:23.136412 | 2018-01-27T08:31:42 | 2018-01-27T08:31:42 | 112,911,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,976 | py | import numpy as np
import pandas as pd
from sklearn import preprocessing
from scipy.stats import mode
import matplotlib.pyplot as plt
from numpy import nan
from sklearn.cross_validation import train_test_split
from pandas import Series,DataFrame
import sklearn
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
def get_title(name):
if 'A' in name:
return 1
elif 'B' in name:
return 2
elif 'C' in name:
return 2
train = pd.read_csv('train.csv', sep=',')
test = pd.read_csv('test.csv', sep=',')
train.replace('nan', nan, inplace=True)
test.replace('nan', nan, inplace=True)
train.var2 = pd.Categorical(train.var2)
test.var2 = pd.Categorical(test.var2)
train['var2'] = train['var2'].apply(get_title)
test['var2'] = test['var2'].apply(get_title)
train.var2 = pd.Categorical(train.var2)
test.var2 = pd.Categorical(test.var2)
print(train.head())
print(train.isnull().sum())
print(train.dtypes)
# Cabin
train_Y = train["electricity_consumption"]
train.drop("ID",axis=1,inplace=True)
train.drop("datetime",axis=1,inplace=True)
train.drop("electricity_consumption",axis=1,inplace=True)
test_id = test["ID"]
test.drop("ID",axis=1,inplace=True)
test.drop("datetime",axis=1,inplace=True)
X_train = train
Y_train = train_Y
X_test = test
parameter_grid ={"n_estimators" : [50, 75, 100, 125, 150], "max_features": ["auto", "sqrt", "log2"], "min_samples_split" : [2,4,8], "bootstrap": [True, False]}
# Random Forests
random_forest = RandomForestRegressor(n_estimators=100)
random_forest = RandomForestRegressor(random_state = 1, n_estimators = 100, min_samples_split = 8, min_samples_leaf = 4)
cross_validation = StratifiedKFold(train_Y, n_folds=3)
random_forest = GridSearchCV(random_forest, param_grid=parameter_grid, cv=cross_validation)
'''
parameter_grid = {
'max_depth' : [4,5,6,7,8],
'n_estimators': [50,100,120,150],
'criterion': ['gini','entropy'],
'max_features': ['auto', 'log2', 'sqrt', None],
'min_samples_split': [3,4,5,6,7]
}
cross_validation = StratifiedKFold(Y_train, n_folds=5)
random_forest = GridSearchCV(random_forest,
param_grid=parameter_grid,
cv=cross_validation)
'''
random_forest.fit(X_train, Y_train)
Y_pred_1 = random_forest.predict(X_test)
print(random_forest.score(X_train, Y_train))
Y_pred = Y_pred_1
print(Y_pred)
submission = pd.DataFrame({
"ID": test_id,
"electricity_consumption": Y_pred
})
submission.to_csv('final.csv', index=False)
| [
"noreply@github.com"
] | noreply@github.com |
0d76cf43909a3dd1ccc2036f17670eac71e8eb31 | 7832e7dc8f1583471af9c08806ce7f1117cd228a | /aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeServiceContainersRequest.py | c4cf438bb23b376cec873a05b86b7fbcdb6e08e8 | [
"Apache-2.0"
] | permissive | dianplus/aliyun-openapi-python-sdk | d6494850ddf0e66aaf04607322f353df32959725 | 6edf1ed02994245dae1d1b89edc6cce7caa51622 | refs/heads/master | 2023-04-08T11:35:36.216404 | 2017-11-02T12:01:15 | 2017-11-02T12:01:15 | 109,257,597 | 0 | 0 | NOASSERTION | 2023-03-23T17:59:30 | 2017-11-02T11:44:27 | Python | UTF-8 | Python | false | false | 1,422 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DescribeServiceContainersRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'DescribeServiceContainers')
self.set_uri_pattern('/clusters/[ClusterId]/services/[ServiceId]/containers')
self.set_method('GET')
def get_ClusterId(self):
return self.get_path_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_path_param('ClusterId',ClusterId)
def get_ServiceId(self):
return self.get_path_params().get('ServiceId')
def set_ServiceId(self,ServiceId):
self.add_path_param('ServiceId',ServiceId) | [
"ling.wu@alibaba-inc.com"
] | ling.wu@alibaba-inc.com |
47dbd06d73da73797a6148d5211413ca087eca89 | 12b63effcd7ca6f307c4b55a45e04e6656d5ea98 | /Studio/Holiday.py | 1e0d122754ffbec422dc1c90a81712adcc62ed7e | [] | no_license | rarose67/Python-practice- | cc70b26d1ba6cd4ee845e5a1d0b2d1d8ec7bc733 | 699eaf8a7ceb69c86bec35cd41df7191a70ef91f | refs/heads/master | 2020-03-27T08:14:06.470762 | 2018-08-27T02:02:41 | 2018-08-27T02:02:41 | 146,234,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | """
It is possible to name the days 0 through 6, where day 0 is Sunday and day 6 is Saturday. If you go on a wonderful holiday leaving on day 3 (a Wednesday) and you return home after 10 nights, you arrive home on day 6 (a Saturday).
Write a general version of the program which asks for the day number that your vacation starts on and the length of your holiday, and then tells you the number of the day of the week you will return on.
"""
#difine a list of days
days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
#ask user for start day
start_day = int(input("What day of the week does your vacation start?" +
"(use 0 for Sunday, 6 for Saturday, etc.)"))
#ask user for the lenth of the vacation
vac_length = int(input("How long is your vacation?"))
#determine what day you'll return
last_day = (start_day + vac_length) % 7
#output your return day
print("You'll return on", days[last_day]) | [
"robertrosestl67@gmail.com"
] | robertrosestl67@gmail.com |
87285319b453d6b779837ac5d96b87d989629dbd | 1277c0d30434133a7ce6f4d1db6c04d65b0a49c9 | /backend/findme_20524/wsgi.py | 8403d67f78f9ff859caa0a7e2ffa509f5e7f5195 | [] | no_license | crowdbotics-apps/findme-20524 | aef86f49038e1e06967c3d22fee0968ec769c3b4 | da959e3a82c81a93ce2e6d3388ad610ebc7be7f5 | refs/heads/master | 2022-12-23T10:47:01.480756 | 2020-09-21T19:34:35 | 2020-09-21T19:34:35 | 297,441,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for findme_20524 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'findme_20524.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
901840825dba82b2be79ca144758e71078df3529 | 4fc373c4cfe248f37ccf6719cd3d16b5d895d2f1 | /myvenv/Lib/site-packages/twilio/rest/preview/trusted_comms/current_call.py | 244d12a505e1dea52e994ffe3834adc55a47bbfa | [] | no_license | sharatsachin/chatbot-asmt | efde91dd9dc539b4b65db55b717af8106ca88e2c | 896e5b2b4598b3453e5d5a5adab2836164e8e53a | refs/heads/master | 2020-06-01T17:01:59.089506 | 2019-06-08T08:42:22 | 2019-06-08T08:42:22 | 190,858,127 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,332 | py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CurrentCallList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the CurrentCallList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallList
"""
super(CurrentCallList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self):
"""
Constructs a CurrentCallContext
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
"""
return CurrentCallContext(self._version, )
def __call__(self):
"""
Constructs a CurrentCallContext
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
"""
return CurrentCallContext(self._version, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms.CurrentCallList>'
class CurrentCallPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the CurrentCallPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallPage
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallPage
"""
super(CurrentCallPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CurrentCallInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
"""
return CurrentCallInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms.CurrentCallPage>'
class CurrentCallContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the CurrentCallContext
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
"""
super(CurrentCallContext, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/CurrentCall'.format(**self._solution)
def fetch(self, from_=values.unset, to=values.unset):
"""
Fetch a CurrentCallInstance
:param unicode from_: The originating Phone Number
:param unicode to: The terminating Phone Number
:returns: Fetched CurrentCallInstance
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
"""
params = values.of({'From': from_, 'To': to, })
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CurrentCallInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.TrustedComms.CurrentCallContext {}>'.format(context)
class CurrentCallInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload):
"""
Initialize the CurrentCallInstance
:returns: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
"""
super(CurrentCallInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'from_': payload['from'],
'to': payload['to'],
'reason': payload['reason'],
'created_at': deserialize.iso8601_datetime(payload['created_at']),
'url': payload['url'],
}
# Context
self._context = None
self._solution = {}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CurrentCallContext for this CurrentCallInstance
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallContext
"""
if self._context is None:
self._context = CurrentCallContext(self._version, )
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this Current Call.
:rtype: unicode
"""
return self._properties['sid']
@property
def from_(self):
"""
:returns: The originating Phone Number
:rtype: unicode
"""
return self._properties['from_']
@property
def to(self):
"""
:returns: The terminating Phone Number
:rtype: unicode
"""
return self._properties['to']
@property
def reason(self):
"""
:returns: The business reason for this phone call
:rtype: unicode
"""
return self._properties['reason']
@property
def created_at(self):
"""
:returns: The date this Current Call was created
:rtype: datetime
"""
return self._properties['created_at']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
def fetch(self, from_=values.unset, to=values.unset):
"""
Fetch a CurrentCallInstance
:param unicode from_: The originating Phone Number
:param unicode to: The terminating Phone Number
:returns: Fetched CurrentCallInstance
:rtype: twilio.rest.preview.trusted_comms.current_call.CurrentCallInstance
"""
return self._proxy.fetch(from_=from_, to=to, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.TrustedComms.CurrentCallInstance {}>'.format(context)
| [
"sharat.ss.sachin@gmail.com"
] | sharat.ss.sachin@gmail.com |
f0cb0d5b3d99d57ab4045f1759b682f46e459f98 | a16ef81e469d9cc6f2b802e961255ca845fb17e6 | /python-selenuim/css_choose_element/css-1.py | 8c51e485b4ff49cbe347914a98bfe69f3b23f344 | [] | no_license | asherboy1/python-selenium-new | 32ccdabde18c9d8fe8c74718dd00991d25d75140 | eab7ba7e9946891f6aea6c89aab1e90b54ac63a6 | refs/heads/main | 2023-04-16T00:58:45.232576 | 2021-04-26T02:41:29 | 2021-04-26T02:41:29 | 361,594,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from selenium import webdriver
wd = webdriver.Chrome()
wd.get("http://cdn1.python3.vip/files/selenium/sample1.html")
#css .class 属性 例子 .animal{} {}中为修饰语句
#color background-color
#find_element_by_css_selector(CSS Selector参数) 主流使用
element = wd.find_element_by_css_selector('.animal')
#注意 . 表示使用的哪一个 如果没有. 等效于 wd.find_element_by_tag_name("animal")
element1 = wd.find_element_by_css_selector('#searchtext')
#注意 此句使用# 表示通过id查找 等效于 wd.find_element_by_id('serachtext')
print(element1.get_attribute('outerHTML'))
print(element.get_attribute('outerHTML')) | [
"noreply@github.com"
] | noreply@github.com |
b8c2baad104a85d6d9d0a99b0ad72431dd316e94 | 11bf0003909dfc62f3dad7184107f25391af3855 | /Modelling_Complete.py | f38465f2a7e3a2cac321241217810a70c719ed0d | [] | no_license | amit1013/Flight-Ticket-Prediction | 0a17cb4a2ae62a8bc702bf2dae0b43ef4348178c | b5d8330a667f364d2a47179f81730f8e2dcd9e96 | refs/heads/master | 2020-06-16T14:19:21.410304 | 2019-07-07T03:47:01 | 2019-07-07T03:47:01 | 195,606,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,831 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 12:04:32 2019
@author: amit
"""
import pandas as pd
import numpy as np
import seaborn as sns
import re
import datetime
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import r2_score, mean_squared_log_error
from sklearn.kernel_ridge import KernelRidge
from sklearn.neighbors import KNeighborsRegressor
import lightgbm as lgbm
from scipy import stats
from datetime import timedelta
import seaborn as sns
import xgboost as xgb
## Pre Processing
original_data = pd.read_csv(
"E:/Kaggle_Problem/Flight Ticket Prediction/Data_Train.csv")
test_data = pd.read_csv(
"E:/Kaggle_Problem/Flight Ticket Prediction/Test_Set.csv")
holidays = pd.read_csv(
"E:/Kaggle_Problem/Flight Ticket Prediction/Holiday_Dates.csv")
#distance = pd.read_csv(
# "E:/Kaggle_Problem/Flight Ticket Prediction/distance_data.csv")
holidays['Date'] = pd.to_datetime(holidays['Date'], format='%d-%m-%y')
test_data['Date_of_Journey'] = pd.to_datetime(test_data['Date_of_Journey'], format='%d-%m-%y')
original_data['Date_of_Journey'] = pd.to_datetime(
original_data['Date_of_Journey'], format='%d-%m-%y')
def clean_data(dataframe):
# dataframe['Arrival_Time'] = dataframe['Arrival_Time'].apply(
# lambda x: re.search(r'\d{1,2}:\d{1,2}', x).group())
# dataframe['Duration'] = dataframe['Duration'].apply(
# lambda x: int(re.search(r'\d{1,2}', x).group()))
dataframe['Dep_Time_2'] = dataframe['Dep_Time'].apply(
lambda x: float(x.split(":")[0])+float(x.split(":")[1])/60)
dataframe['Dep_Time_Hour'] = dataframe['Dep_Time'].apply(
lambda x: int(x.split(":")[0]))
dataframe['Arrival_Time'] = dataframe['Arrival_Time'].apply(
lambda x: float(x.split(":")[0])+float(x.split(":")[1])/60)
return dataframe
def basic_feature_engineering(dataframe):
##Date related
dataframe['Month'] = dataframe['Date_of_Journey'].apply(
lambda x: str(x.month))
dataframe['Weekday'] = dataframe['Date_of_Journey'].apply(
lambda x: str(datetime.date.weekday(x)))
dataframe['Week'] = dataframe['Date_of_Journey'].apply(
lambda x: int(x.week))
dataframe['Day'] = dataframe['Date_of_Journey'].apply(
lambda x: int(x.day))
## Meal Bagage Related
dataframe['meal_baggage_flag'] = dataframe['Additional_Info'].apply(
lambda x: 1 if(('meal' in x.lower()) | ('baggage' in x.lower())) else 0)
## Binning the days
# dataframe['Week_2'] = pd.cut(dataframe['Day'], bins=[1,7,14,21,28,33], labels=['1', '2', '3', '4', '5'])
# dataframe.drop('Day', inplace=True, axis=1)
#
##Time related
# dataframe['Dep_Time_Bin'] = pd.cut(dataframe['Dep_Time'], 6,
# labels=['Midnight', 'Early_Morning', 'Morning', 'Afternoon','Evening', 'Night'])
# dataframe['Arrival_Time_Bin'] = pd.cut(dataframe['Arrival_Time'], 6,
# labels=['Midnight', 'Early_Morning', 'Morning', 'Afternoon','Evening', 'Night'])
return dataframe
def to_longformat(dataframe, column_to_unpivot, column_as_id):
"""
Function to convert the columns to long format from wide format assuming
the column to unpivot is in the form a string of a list
delimited by space Eg: [value1 value2]
Parameters
----------
column_to_unpivot: String
The column of the variables to convert to long format
column_as_id: List
The list of columns to keep as Index while converting to long format
Returns
-------
The dataframe converted into long format
"""
dataframe[column_to_unpivot] = dataframe[column_to_unpivot].apply(
lambda x: str(x).strip().split("[")[1][:-1])
temp = dataframe[column_to_unpivot].str.split(" ", expand=True)
dataframe = pd.concat([dataframe, temp], axis=1)
dataframe = pd.melt(dataframe, id_vars=column_as_id,
value_vars=range(0, temp.shape[1]))
dataframe.dropna(inplace=True)
dataframe.drop('variable', axis=1, inplace=True)
return dataframe
def advance_feature_engineering(train_data, test_data, holiday_data):
## Historical Rolling Average
# train_data['Dep_Time'] = pd.to_datetime(train_data['Dep_Time'], format='%H:%M')
# test_data['Dep_Time'] = pd.to_datetime(test_data['Dep_Time'], format='%H:%M')
#
# airline_route_date = train_data.groupby(
# ['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour'], as_index=False).agg({'Price': 'mean'})
# airline_route_date.rename(columns={'Price': 'Average_Price'}, inplace=True)
# airline_route_date.sort_values(by=['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour'], inplace=True)
# airline_route_date['lagged_average_price'] = airline_route_date.groupby(
# ['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour'], as_index=False)['Average_Price'].shift(1)
# rolling_average = airline_route_date.groupby(
# ['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour'], as_index=False)['lagged_average_price'].expanding(1).mean()
# rolling_average.reset_index(inplace=True, drop=True)
# airline_route_date['Rolling_Average'] = rolling_average
# airline_route_date.fillna(0, inplace=True)
# train_data = pd.merge(train_data, airline_route_date[
# ['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour', 'Rolling_Average']],
# how='left', on=['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour'])
# test_data = pd.merge(test_data, airline_route_date[
# ['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour', 'Rolling_Average']],
# how='left', on=['Airline', 'Source','Destination', 'Date_of_Journey', 'Dep_Time_Hour'])
## Number of holidays in a 8 day period
train_data['Min_Date'] = train_data['Date_of_Journey'].apply(
lambda x: x-timedelta(4))
# Creating the maximum date for the upper limit of the window
train_data['Max_Date'] = train_data['Date_of_Journey'].apply(
lambda x: x+timedelta(4))
# Creating a list of range of dates from minimum to maximum date
train_data['Min_Max_Date'] = train_data.apply(
lambda x: str(np.arange(x['Min_Date'].date(), x['Max_Date'].date())), axis=1)
long_formatted = to_longformat(
train_data, 'Min_Max_Date',
['Date_of_Journey'])
long_formatted.drop_duplicates(inplace=True)
long_formatted.rename(columns={'value': 'date_range'}, inplace=True)
long_formatted['date_range'] = long_formatted['date_range'].str.replace("'","").str.strip()
long_formatted['date_range'] = pd.to_datetime(long_formatted['date_range'],
format='%Y-%m-%d')
long_formatted = pd.merge(long_formatted, holiday_data, how='left',
left_on='date_range',
right_on='Date')
holidays_1 = long_formatted.groupby('Date_of_Journey', as_index=False).agg(
{'Flag': 'count'})
train_data = pd.merge(train_data, holidays_1, on='Date_of_Journey', how='left')
test_data = pd.merge(test_data, holidays_1, on='Date_of_Journey', how='left')
train_data.drop(['Min_Date', 'Max_Date', 'Min_Max_Date'], inplace=True, axis=1)
## Historical Weekday month average
airline_month_weekday = train_data.groupby(['Airline' ,'Month', 'Day']).agg(
{'Price': 'mean'}).reset_index()
airline_month_weekday.rename(columns={'Price': 'Month_Weekday_Average'}, inplace=True)
train_data = pd.merge(train_data, airline_month_weekday, how='left',
on=['Airline', 'Month', 'Day'])
test_data = pd.merge(test_data, airline_month_weekday, how='left',
on=['Airline', 'Month', 'Day'])
## Historical Standard deviation of Duration to capture the frequency of flights
std_data = train_data.groupby(['Airline', 'Week', 'Source', 'Destination']).agg({
'Duration': lambda x: np.std(x)}).reset_index()
std_data.rename(columns={'Duration': 'duration_std'}, inplace=True)
train_data = pd.merge(train_data, std_data, how='left',
on=['Airline', 'Week', 'Source', 'Destination'])
test_data = pd.merge(test_data, std_data, how='left',
on=['Airline', 'Week', 'Source', 'Destination'])
## Number of flights of airline at airline DOJ and DeP Time Level
airline_month_day_hour = train_data.groupby(['Airline', 'Date_of_Journey', 'Dep_Time_Hour']).size().reset_index()
airline_month_day_hour.rename(columns={0: 'Airline_Date_Number'}, inplace=True)
train_data = pd.merge(train_data, airline_month_day_hour, how='left',
on=['Airline', 'Date_of_Journey', 'Dep_Time_Hour'])
test_data = pd.merge(test_data, airline_month_day_hour, how='left',
on=['Airline', 'Date_of_Journey', 'Dep_Time_Hour'])
## Peak Hour
peak_hour = train_data.groupby(['Date_of_Journey', 'Source', 'Destination']).agg(
{'Dep_Time_Hour': lambda x: stats.mode(x)[0]}).reset_index()
peak_hour.rename(columns={'Dep_Time_Hour': 'Peak_Dep_Hour'}, inplace=True)
train_data = pd.merge(train_data, peak_hour, how='left',
on=['Date_of_Journey', 'Source', 'Destination'])
test_data = pd.merge(test_data, peak_hour, how='left',
on=['Date_of_Journey', 'Source', 'Destination'])
# Most Demanded Day
most_demand = train_data.groupby(['Month', 'Source', 'Destination']).agg(
{'Day': lambda x: stats.mode(x)[0]}).reset_index()
most_demand.rename(columns={'Day': 'Peak_Demand_Day'}, inplace=True)
train_data = pd.merge(train_data, most_demand, how='left',
on=['Month', 'Source', 'Destination'])
test_data = pd.merge(test_data, most_demand, how='left',
on=['Month', 'Source', 'Destination'])
# Most Demanded Source
most_demand = train_data.groupby(['Date_of_Journey']).agg(
{'Source': lambda x: stats.mode(x)[0]}).reset_index()
most_demand.rename(columns={'Source': 'Peak_Demand_Source'}, inplace=True)
train_data = pd.merge(train_data, most_demand, how='left',
on=['Date_of_Journey'])
test_data = pd.merge(test_data, most_demand, how='left',
on=['Date_of_Journey'])
## Min _Max_Price at DOJ Source Destination Level
most_demand = train_data.groupby([ 'Date_of_Journey', 'Source', 'Destination']).agg(
{'Price': ['min', 'max']}).reset_index()
most_demand.columns = ['Date_of_Journey', 'Source', 'Destination', 'Price_Min', 'Price_Max']
train_data = pd.merge(train_data, most_demand, how='left',
on=['Date_of_Journey', 'Source', 'Destination'])
test_data = pd.merge(test_data, most_demand, how='left',
on=['Date_of_Journey', 'Source', 'Destination'])
## Number of unique routes for a airline-source-destination
route_data = train_data.groupby(['Airline', 'Source', 'Destination', 'Month']).agg({
'Route': lambda x: len(x.unique())}).reset_index()
route_data.rename(columns={'Route': 'Number_of_Routes'}, inplace=True)
train_data = pd.merge(train_data, route_data, how='left',
on=['Airline', 'Source', 'Destination', 'Month'])
test_data = pd.merge(test_data, route_data, how='left',
on=['Airline', 'Source', 'Destination', 'Month'])
## Number of unique departure times for airline
departure_data = train_data.groupby(['Airline', 'Source', 'Destination', 'Week']).agg({
'Dep_Time': lambda x: len(x.unique())}).reset_index()
departure_data.rename(columns={'Dep_Time': 'Number_of_Dep_Times'}, inplace=True)
train_data = pd.merge(train_data, departure_data, how='left',
on=['Airline', 'Source', 'Destination', 'Week'])
test_data = pd.merge(test_data, departure_data, how='left',
on=['Airline', 'Source', 'Destination', 'Week'])
## Number of unique date of journey
departure_data = train_data.groupby(['Airline', 'Source', 'Destination']).agg({
'Date_of_Journey': lambda x: len(x.unique())}).reset_index()
departure_data.rename(columns={'Date_of_Journey': 'Number_of_Dates'}, inplace=True)
train_data = pd.merge(train_data, departure_data, how='left',
on=['Airline', 'Source', 'Destination'])
test_data = pd.merge(test_data, departure_data, how='left',
on=['Airline', 'Source', 'Destination'])
## Number of flights
number_of_flights = train_data.groupby(['Date_of_Journey', 'Source', 'Destination']).size().reset_index()
number_of_flights.rename(columns={0: 'Number_of_Flights'}, inplace=True)
train_data = pd.merge(train_data, number_of_flights, on=['Date_of_Journey', 'Source', 'Destination'],
how='left')
test_data = pd.merge(test_data, number_of_flights, on=['Date_of_Journey', 'Source', 'Destination'],
how='left')
## Average Timedelta in minutes between flights
timedelta_data = train_data[['Airline', 'Date_of_Journey', 'Source', 'Destination','Dep_Time']]
timedelta_data['Dep_Time'] = pd.to_datetime(timedelta_data['Dep_Time'], format='%H:%M')
timedelta_data.sort_values(by=['Airline', 'Source', 'Destination', 'Date_of_Journey','Dep_Time'], inplace=True)
timedelta_data['lagged_time'] = timedelta_data.groupby(
['Airline', 'Source', 'Destination', 'Date_of_Journey'])['Dep_Time'].shift(1)
timedelta_data['time_diff'] = timedelta_data.apply(
lambda x: (x['Dep_Time']-x['lagged_time']).seconds/60, axis=1)
timedelta_data = timedelta_data.groupby(['Airline','Source', 'Destination' ,'Date_of_Journey']).agg(
{'time_diff': 'mean'}).reset_index()
train_data = pd.merge(train_data, timedelta_data, how='left',
on=['Airline','Source', 'Destination' ,'Date_of_Journey'])
test_data = pd.merge(test_data, timedelta_data, how='left',
on=['Airline','Source', 'Destination' ,'Date_of_Journey'])
## Number of Competitors Flight
flight_count = train_data.groupby(
['Airline', 'Route', 'Month']).size().reset_index()
flight_count.rename(columns={0: 'flight_count'}, inplace=True)
competitor_df = pd.DataFrame()
for airline in train_data['Airline'].unique():
temp = flight_count.loc[flight_count['Airline']!=airline,:]
competitor_count = temp.groupby(['Route', 'Month']).agg(
{'flight_count': 'sum'}).reset_index()
competitor_count.rename(columns={'flight_count': 'competitor_flight_count'}, inplace=True)
airline_ = pd.DataFrame({'Airline': [airline]*(len(competitor_count))})
temp_df = pd.concat([airline_, competitor_count], axis=1)
competitor_df = pd.concat([competitor_df, temp_df], axis=0)
train_data = pd.merge(train_data, competitor_df, on=
['Airline', 'Route', 'Month'], how='left')
test_data = pd.merge(test_data, competitor_df, on=
['Airline', 'Route', 'Month'], how='left')
train_data.fillna({'competitor_flight_count': 0}, inplace=True)
test_data.fillna({'competitor_flight_count': 0}, inplace=True)
## Competitors Flight average price
competitor_df = pd.DataFrame()
for airline in train_data['Airline'].unique():
temp = train_data.copy().loc[train_data['Airline']!=airline,:]
competitor_avg = temp.groupby(['Route']).agg(
{'Price': 'mean'}).reset_index()
competitor_avg.rename(columns={'Price': 'competitor_avg_price'}, inplace=True)
airline_ = pd.DataFrame({'Airline': [airline]*(len(competitor_avg))})
temp_df = pd.concat([airline_, competitor_avg], axis=1)
competitor_df = pd.concat([competitor_df, temp_df], axis=0)
train_data = pd.merge(train_data, competitor_df, on=
['Airline', 'Route'], how='left')
test_data = pd.merge(test_data, competitor_df, on=
['Airline', 'Route'], how='left')
train_data.fillna({'competitor_avg_price': 0}, inplace=True)
test_data.fillna({'competitor_avg_price': 0}, inplace=True)
return train_data, test_data
# ---- End-to-end pipeline: clean -> engineer features -> encode -> model ----
original_data = clean_data(original_data)
test_data = clean_data(test_data)
original_data = basic_feature_engineering(original_data)
test_data = basic_feature_engineering(test_data)
train_data, test_data = advance_feature_engineering(original_data, test_data, holidays)
# Drop raw columns that were fully replaced by engineered features above.
train_data.drop(['Date_of_Journey', 'Route', 'Dep_Time', 'Dep_Time_Hour'], axis=1, inplace=True)
test_data.drop(['Date_of_Journey', 'Route', 'Dep_Time', 'Dep_Time_Hour'], axis=1, inplace=True)
# One-hot encode categoricals; drop rare/unwanted dummy levels from training.
x_values = pd.get_dummies(train_data.drop('Price', axis=1))
x_values = x_values.drop(['Additional_Info_1 Short layover',
                          'Additional_Info_2 Long layover',
                          'Additional_Info_No Info',
                          'Additional_Info_Red-eye flight',
                          'Airline_Trujet',
                          'via_1_HBX', 'via_1_IXA', 'via_1_IXZ', 'via_1_JLR', 'via_1_NDC', 'via_1_VTZ'], axis=1)
y_values = train_data['Price']
lgbm_model = lgbm.LGBMRegressor(num_leaves=105, learning_rate=0.01, lambda_l1=0.0001,
                                n_estimators=1560, min_child_samples=3,
                                colsample_bytree=0.46, max_bin=900)
# BUG FIX: the model was never trained before predict() was called, which
# raises sklearn's NotFittedError. Fit on the encoded training matrix first.
lgbm_model.fit(x_values, y_values)
train_predictions = pd.DataFrame({'predictions': lgbm_model.predict(x_values)})
train_predictions = pd.concat([train_data, train_predictions, y_values], axis=1)
train_predictions.to_csv("E:/Kaggle_Problem/Flight Ticket Prediction/train_predict.csv", index=False)
# BUG FIX: align the test design matrix with the training columns. A bare
# get_dummies() on test_data can yield a different column set/order than
# x_values; reindex() guarantees an identical feature layout (dummies missing
# from the test set become 0, which also covers the old fillna(0) behaviour).
test_data = pd.get_dummies(test_data).reindex(columns=x_values.columns, fill_value=0)
predictions = pd.DataFrame(lgbm_model.predict(test_data))
predictions.to_csv("E:/Kaggle_Problem/Flight Ticket Prediction/01042019_v3.csv", index=False)
| [
"noreply@github.com"
] | noreply@github.com |
b6a6f3e54f625b004ba7557625516a3b771e62db | 5663631dd78c1aae7640cb7d0b03cbb6afb482ea | /neon/optimizers/optimizer.py | ec4edb7c62df4be3fc323cd30d5d31ad12048a35 | [
"Apache-2.0"
] | permissive | kod3r/neon | 6d0c7d9150bd77210c81e9df9ae7908e6e10bfff | 73e77fa101c63c0a2c440d3eb2ac64ae9de0d6c6 | refs/heads/master | 2021-01-18T10:32:00.755477 | 2015-09-12T02:34:16 | 2015-09-12T02:34:16 | 42,411,178 | 2 | 0 | null | 2015-09-13T20:12:41 | 2015-09-13T20:12:41 | null | UTF-8 | Python | false | false | 15,114 | py | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon import NervanaObject
import numpy as np
def get_param_list(layer_list):
    """Return a flat list of parameter tuples gathered from *layer_list*.

    Each layer's ``get_params()`` may return either a single tuple or a
    list of tuples; both cases are folded into one flat result list.
    """
    flattened = []
    for layer in layer_list:
        params = layer.get_params()
        if isinstance(params, list):
            flattened.extend(params)
        else:
            flattened.append(params)
    return flattened
class Optimizer(NervanaObject):
    """Abstract base class for optimizers.

    Concrete subclasses take parameters, updates, and per-parameter state,
    and are responsible for tracking any learning-rate schedule they use.
    """
    def optimize(self, layer_list, epoch):
        """Apply one update step to the given layers; must be overridden."""
        raise NotImplementedError()
class Schedule(NervanaObject):
    """
    Learning rate schedule for constant or step learning rates.

    By default (``step_config=None``) implements a constant learning rate.
    Three modes are distinguished by the argument types:

    * ``step_config=None`` - constant rate (``change ** 0 == 1``).
    * ``step_config=int`` - drop by ``change`` every ``step_config`` epochs.
    * ``step_config=list, change=float`` - drop by ``change`` at each listed epoch.
    * ``step_config=list, change=list`` - at epoch ``step_config[i]`` the rate
      is *set* to ``change[i]`` (absolute assignment, not multiplicative).
    """
    def __init__(self, step_config=None, change=1.):
        """
        Arguments:
            step_config (int or list, optional): Configure the epoch step rate (int)
                or step times (list of epoch indices). Defaults to None (constant).
            change (float or list, optional): In step mode, learning rate is
                multiplied by ``change ** steps``, where ``steps`` is the number of
                steps in the step schedule that have passed. If ``change`` is a list,
                ``step_config`` must also be a list. Then at ``step[i]``, the
                learning rate is set to ``change[i]``.
        """
        if isinstance(step_config, list) and isinstance(change, list):
            assert len(step_config) == len(change), "change and step_config must have the same" \
                "length after step_config is deduplicated to do epoch-level LR assignment."
        self.step_config = step_config
        self.change = change
        # NOTE: the meaning of ``steps`` is mode-dependent -- it is a step
        # counter in the int / list-of-epochs modes, but stores the literal
        # current learning rate in the list/list mode (see below).
        self.steps = 0
    def get_learning_rate(self, learning_rate, epoch):
        """
        Get the current learning rate given the epoch and initial rate.

        Arguments:
            learning_rate (float): the initial learning rate
            epoch (int): the current epoch, used to calculate the new effective
                learning rate.
        """
        if isinstance(self.step_config, list) and isinstance(self.change, list):
            if epoch in self.step_config:
                # steps will store the current lr
                self.steps = self.change[self.step_config.index(epoch)]
            if self.steps == 0:
                # no assignment epoch has been hit yet: keep the initial rate
                return learning_rate
            else:
                return self.steps
        elif isinstance(self.step_config, int):
            # integer division/floor: number of completed step intervals
            self.steps = np.floor((epoch + 1) / self.step_config)
        elif isinstance(self.step_config, list):
            if epoch in self.step_config:
                self.steps += 1
                # gets called every minibatch, only want to drop once
                # per epoch.  NOTE(review): this mutates step_config in
                # place, so a Schedule instance is effectively single-use.
                del self.step_config[self.step_config.index(epoch)]
        return float(learning_rate * self.change ** self.steps)
class ExpSchedule(Schedule):
    """Exponential-decay learning rate schedule.

    Arguments:
        decay (float): decay constant applied to the learning rate.
    """
    def __init__(self, decay):
        self.decay = decay
    def get_learning_rate(self, learning_rate, epoch):
        """Return the decayed rate ``learning_rate / (1 + decay * epoch)``."""
        denominator = 1. + self.decay * epoch
        return learning_rate / denominator
class GradientDescentMomentum(Optimizer):
    """
    Stochastic gradient descent with momentum.

    Maintains one velocity buffer per parameter, updated as
    ``v = momentum_coef * v - lrate * (grad + wdecay * param)`` and applied
    as ``param = param + v``.
    """
    def __init__(self, learning_rate, momentum_coef, stochastic_round=False,
                 wdecay=0.0, name="gdm", schedule=None):
        """
        Arguments:
            learning_rate (float): the multiplicative coefficient of updates
            momentum_coef (float): the coefficient of momentum
            stochastic_round (bool): Set this to True for stochastic rounding.
                                     If False rounding will be to nearest.
                                     If True will perform stochastic rounding using default width.
                                     Only affects the gpu backend
            wdecay (float): the weight decay
            name (str): the optimizer's layer's pretty-print name.
            schedule (Schedule, optional): learning rate schedule; defaults to
                                           a fresh constant ``Schedule()``.
        """
        super(GradientDescentMomentum, self).__init__(name=name)
        self.learning_rate, self.momentum_coef = (learning_rate, momentum_coef)
        self.wdecay = wdecay
        # BUG FIX: the original signature used ``schedule=Schedule()``, a
        # mutable default evaluated once at class-definition time and shared
        # by every optimizer instance.  Schedule objects carry state
        # (``steps``; list configs are even mutated in place), so sharing one
        # silently couples independent optimizers.  Create a fresh default
        # per instance instead; explicitly passing a schedule behaves
        # exactly as before.
        self.schedule = Schedule() if schedule is None else schedule
        self.stochastic_round = stochastic_round
    def optimize(self, layer_list, epoch):
        """
        Apply the learning rule to all the layers and update the states.

        Arguments:
            layer_list (list): a list of Layer objects to optimize.
            epoch (int): the current epoch, needed for the Schedule object.
        """
        lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
        param_list = get_param_list(layer_list)
        for (param, grad), states in param_list:
            param.rounding = self.stochastic_round
            if len(states) == 0:
                # lazily allocate the velocity buffer on first use
                states.append(self.be.zeros_like(grad))
            velocity = states[0]
            velocity[:] = velocity * self.momentum_coef - lrate * (grad + self.wdecay * param)
            param[:] = param + velocity
class RMSProp(Optimizer):
    """
    Root Mean Square propagation (leaving out schedule for now).

    Maintains a running mean of squared gradients per parameter and scales
    each update by the inverse square root of that mean.
    """
    def __init__(self, stochastic_round=False, decay_rate=0.95, learning_rate=2e-3, epsilon=1e-6,
                 clip_gradients=False, gradient_limit=5, name="rmsprop"):
        """
        Arguments:
            stochastic_round (bool): Set this to True for stochastic rounding.
                                     If False rounding will be to nearest.
                                     If True will perform stochastic rounding using default width.
                                     Only affects the gpu backend.
            decay_rate (float): decay rate of states
            learning_rate (float): the multiplication coefficent of updates
            epsilon (float): smoothing epsilon to avoid divide by zeros
            clip_gradients (bool): whether to truncate the gradients.
            gradient_limit (float): positive value to clip gradients between.
            name (str): the optimizer's pretty-print name.
        """
        # BUG FIX / consistency: every sibling optimizer (GradientDescentMomentum,
        # Adadelta, Adam, MultiOptimizer) forwards ``name`` to the base class;
        # RMSProp accepted the argument but silently dropped it and never
        # initialized the base object at all.
        super(RMSProp, self).__init__(name=name)
        self.state_list = None
        self.epsilon = epsilon
        self.decay_rate = decay_rate
        self.learning_rate = learning_rate
        self.clip_gradients = clip_gradients
        self.gradient_limit = gradient_limit
        self.stochastic_round = stochastic_round
    def optimize(self, layer_list, epoch):
        """
        Apply the learning rule to all the layers and update the states.

        Arguments:
            layer_list (list): a list of Layer objects to optimize.
            epoch (int): the current epoch (unused here; kept for interface
                compatibility with the other optimizers).
        """
        lrate, epsilon, decay = (self.learning_rate, self.epsilon, self.decay_rate)
        param_list = get_param_list(layer_list)
        for (param, grad), states in param_list:
            param.rounding = self.stochastic_round
            if len(states) == 0:
                # lazily allocate the running mean-square buffer
                states.append(self.be.zeros_like(grad))
            if self.clip_gradients:
                grad = self.be.clip(grad, -self.gradient_limit, self.gradient_limit)
            # update state
            state = states[0]
            state[:] = decay * state + self.be.square(grad) * (1.0 - decay)
            param[:] = param - grad * lrate / (self.be.sqrt(state + epsilon) + epsilon)
class Adadelta(Optimizer):
    """
    Adadelta based learning rule updates.
    See Zeiler2012 for instance.
    """
    def __init__(self, stochastic_round=False, decay=0.95, epsilon=1e-6, name="ada"):
        """
        Args:
            stochastic_round (bool): Set this to True for stochastic rounding.
                                     If False rounding will be to nearest.
                                     If True will perform stochastic rounding using default width.
                                     Only affects the gpu backend.
            decay: decay parameter in Adadelta
            epsilon: epsilon parameter in Adadelta
            name (str): the optimizer's pretty-print name.
        """
        super(Adadelta, self).__init__(name=name)
        self.decay = decay
        self.epsilon = epsilon
        self.stochastic_round = stochastic_round
    def optimize(self, layer_list, epoch):
        """
        Apply the learning rule to all the layers and update the states.

        Arguments:
            layer_list (list): a list of Layer objects to optimize.
            epoch (int): the current epoch (unused by Adadelta itself).
        """
        epsilon, decay = (self.epsilon, self.decay)
        param_list = get_param_list(layer_list)
        for (param, grad), states in param_list:
            param.rounding = self.stochastic_round
            if len(states) == 0:
                # E[Grad^2], E[Delt^2], updates
                states.extend([self.be.zeros_like(grad) for i in range(3)])
            # The three statements below must run in this order: the step
            # (states[2]) is computed from the *old* E[Delt^2] (states[1])
            # and the *new* E[Grad^2] (states[0]).
            states[0][:] = states[0] * decay + (1. - decay) * grad * grad
            states[2][:] = self.be.sqrt((states[1] + epsilon) / (states[0] + epsilon)) * grad
            states[1][:] = states[1] * decay + (1. - decay) * states[2] * states[2]
            param[:] = param - states[2]
class Adam(Optimizer):
    """
    Adam based learning rule updates. http://arxiv.org/pdf/1412.6980v8.pdf
    """
    def __init__(self, stochastic_round=False, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, name="adam"):
        """
        Args:
            stochastic_round (bool): Set this to True for stochastic rounding.
                                     If False rounding will be to nearest.
                                     If True will perform stochastic rounding using default width.
                                     Only affects the gpu backend.
            learning_rate (float): the multiplicative coefficient of updates
            beta_1 (float): Adam parameter beta1 (1st-moment decay)
            beta_2 (float): Adam parameter beta2 (2nd-moment decay)
            epsilon (float): numerical stability parameter
            name (str): the optimizer's pretty-print name.
        """
        super(Adam, self).__init__(name=name)
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.learning_rate = learning_rate
        self.stochastic_round = stochastic_round
    def optimize(self, layer_list, epoch):
        """
        Apply the learning rule to all the layers and update the states.

        Arguments:
            layer_list (list): a list of Layer objects to optimize.
            epoch (int): the current epoch, used as the Adam timestep.
                NOTE(review): the paper's ``t`` counts update steps; here it
                is the epoch index -- confirm this is intended.
        """
        t = epoch + 1
        # Bias-corrected step size: Algorithm 1 of the Adam paper folds the
        # 1/(1-beta^t) corrections into the learning rate like this.
        l = self.learning_rate * self.be.sqrt(1 - self.beta_2 ** t) / (1 - self.beta_1 ** t)
        param_list = get_param_list(layer_list)
        for (param, grad), states in param_list:
            param.rounding = self.stochastic_round
            if len(states) == 0:
                # running_1st_mom, running_2nd_mom
                states.extend([self.be.zeros_like(grad) for i in range(2)])
            m, v = states
            m[:] = m * self.beta_1 + (1. - self.beta_1) * grad
            v[:] = v * self.beta_2 + (1. - self.beta_2) * grad * grad
            param[:] = param - l * m / (self.be.sqrt(v) + self.epsilon)
class MultiOptimizer(Optimizer):
    """Container optimizer that routes different layers to different optimizers.

    Layers are matched by their ``name`` attribute first, then by their class
    name, and finally fall through to the mandatory ``'default'`` entry.
    """
    def __init__(self, optimizer_mapping, name="multiopt"):
        """
        Args:
            optimizer_mapping (dict): maps a layer class name or a layer's
                ``name`` attribute to the optimizer that should handle it.
                An entry keyed ``'default'`` is required and catches every
                layer not matched otherwise.  A layer-name key takes
                precedence over a class-name key, so e.g.
                ``{'default': opt1, 'Bias': opt2, 'special_bias': opt3}``
                sends the layer named ``special_bias`` to ``opt3``, every
                other ``Bias`` layer to ``opt2``, and all remaining layers
                to ``opt1``.  (Avoid naming a layer ``'default'``.)
        """
        super(MultiOptimizer, self).__init__(name=name)
        self.optimizer_mapping = optimizer_mapping
        assert 'default' in self.optimizer_mapping, "Must specify a default" \
            "optimizer in layer type to optimizer mapping"
        self.map_list = None
    def map_optimizers(self, layer_list):
        """Group *layer_list* into a dict keyed by the optimizer for each layer."""
        grouped = dict()
        for layer in layer_list:
            layer_name = layer.name
            class_name = layer.__class__.__name__
            # precedence: explicit layer name, then class name, then default
            if layer_name in self.optimizer_mapping:
                chosen = self.optimizer_mapping[layer_name]
            elif class_name in self.optimizer_mapping:
                chosen = self.optimizer_mapping[class_name]
            else:
                chosen = self.optimizer_mapping['default']
            grouped.setdefault(chosen, []).append(layer)
        return grouped
    def reset_mapping(self, new_mapping):
        """Install *new_mapping*; the layer grouping is rebuilt lazily on the
        next ``optimize`` call (``map_list`` is invalidated here)."""
        self.optimizer_mapping = new_mapping
        self.map_list = None
    def optimize(self, layer_list, epoch):
        """Dispatch each contained optimizer onto the layers assigned to it.

        Notes:
            ``map_list`` is recomputed on demand, so a changed
            ``optimizer_mapping`` takes effect between calls.
        """
        if self.map_list is None:
            self.map_list = self.map_optimizers(layer_list)
        for opt, layers in self.map_list.items():
            opt.optimize(layers, epoch)
    def get_description(self):
        """Return a dict describing this container and each mapped optimizer."""
        desc = {'type': self.__class__.__name__}
        for key, opt in self.optimizer_mapping.items():
            desc[key] = opt.get_description()
        return desc
| [
"urs.koster@gmail.com"
] | urs.koster@gmail.com |
eb8bd2bd90dfe1850bd04800fbf208772c98a519 | 8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6 | /venv/Lib/site-packages/mypy/typeshed/third_party/2and3/paramiko/server.pyi | f43bc83b05520072133af6f1a6c7ad7944981cc9 | [] | no_license | RodrigoNeto/cursopythonyt | fc064a2e6106324e22a23c54bdb9c31040ac9eb6 | 279dad531e21a9c7121b73d84fcbdd714f435e7e | refs/heads/master | 2023-07-03T00:54:09.795054 | 2021-08-13T12:42:24 | 2021-08-13T12:42:24 | 395,646,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | pyi | import threading
from typing import Any, List, Optional, Tuple, Union
from paramiko.channel import Channel
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.transport import Transport
class ServerInterface:
def check_channel_request(self, kind: str, chanid: int) -> int: ...
def get_allowed_auths(self, username: str) -> str: ...
def check_auth_none(self, username: str) -> int: ...
def check_auth_password(self, username: str, password: str) -> int: ...
def check_auth_publickey(self, username: str, key: PKey) -> int: ...
def check_auth_interactive(self, username: str, submethods: str) -> Union[int, InteractiveQuery]: ...
def check_auth_interactive_response(self, responses: List[str]) -> Union[int, InteractiveQuery]: ...
def check_auth_gssapi_with_mic(self, username: str, gss_authenticated: int = ..., cc_file: Optional[str] = ...) -> int: ...
def check_auth_gssapi_keyex(self, username: str, gss_authenticated: int = ..., cc_file: Optional[str] = ...) -> int: ...
def enable_auth_gssapi(self) -> bool: ...
def check_port_forward_request(self, address: str, port: int) -> int: ...
def cancel_port_forward_request(self, address: str, port: int) -> None: ...
def check_global_request(self, kind: str, msg: Message) -> Union[bool, Tuple[Any, ...]]: ...
def check_channel_pty_request(
self, channel: Channel, term: str, width: int, height: int, pixelwidth: int, pixelheight: int, modes: str
) -> bool: ...
def check_channel_shell_request(self, channel: Channel) -> bool: ...
def check_channel_exec_request(self, channel: Channel, command: bytes) -> bool: ...
def check_channel_subsystem_request(self, channel: Channel, name: str) -> bool: ...
def check_channel_window_change_request(
self, channel: Channel, width: int, height: int, pixelwidth: int, pixelheight: int
) -> bool: ...
def check_channel_x11_request(
self, channel: Channel, single_connection: bool, auth_protocol: str, auth_cookie: bytes, screen_number: int
) -> bool: ...
def check_channel_forward_agent_request(self, channel: Channel) -> bool: ...
def check_channel_direct_tcpip_request(self, chanid: int, origin: Tuple[str, int], destination: Tuple[str, int]) -> int: ...
def check_channel_env_request(self, channel: Channel, name: str, value: str) -> bool: ...
def get_banner(self) -> Tuple[Optional[str], Optional[str]]: ...
class InteractiveQuery:
    # Container describing one keyboard-interactive authentication query.
    name: str
    instructions: str
    # each prompt is a (prompt text, echo-input-to-user) pair
    prompts: List[Tuple[str, bool]]
    def __init__(self, name: str = ..., instructions: str = ..., *prompts: Union[str, Tuple[str, bool]]) -> None: ...
    def add_prompt(self, prompt: str, echo: bool = ...) -> None: ...
class SubsystemHandler(threading.Thread):
    # Thread that services a single SSH subsystem request on a channel;
    # paramiko calls start_subsystem/finish_subsystem around its lifetime.
    def __init__(self, channel: Channel, name: str, server: ServerInterface) -> None: ...
    def get_server(self) -> ServerInterface: ...
    def start_subsystem(self, name: str, transport: Transport, channel: Channel) -> None: ...
    def finish_subsystem(self) -> None: ...
| [
"rodrigoneto.forseti@gmail.com"
] | rodrigoneto.forseti@gmail.com |
8e83913f251d402a25e5c30c18a1ccbc9ca7bc1b | bfbe642d689b5595fc7a8e8ae97462c863ba267a | /bin/Python27/Lib/site-packages/openmdao.lib-0.8.1-py2.7.egg/openmdao/lib/drivers/newsumtdriver.py | 1fe4b338c4195f801abf288f9fbdec1e2569630d | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | mcanthony/meta-core | 0c0a8cde1669f749a4880aca6f816d28742a9c68 | 3844cce391c1e6be053572810bad2b8405a9839b | refs/heads/master | 2020-12-26T03:11:11.338182 | 2015-11-04T22:58:13 | 2015-11-04T22:58:13 | 45,806,011 | 1 | 0 | null | 2015-11-09T00:34:22 | 2015-11-09T00:34:22 | null | UTF-8 | Python | false | false | 19,803 | py | """
``newsumtdriver.py`` - Driver for the NEWSUMT optimizer.
"""
# disable complaints about Module 'numpy' has no 'array' member
# pylint: disable-msg=E1101
# Disable complaints Invalid name "setUp" (should match [a-z_][a-z0-9_]{2,30}$)
# pylint: disable-msg=C0103
# Disable complaints about not being able to import modules that Python
# really can import
# pylint: disable-msg=F0401,E0611
# Disable complaints about Too many arguments (%s/%s)
# pylint: disable-msg=R0913
# Disable complaints about Too many local variables (%s/%s) Used
# pylint: disable-msg=R0914
#public symbols
__all__ = ['NEWSUMTdriver']
import logging
try:
    from numpy import zeros, ones
    # BUG FIX: ``from numpy import int`` breaks on numpy >= 1.24, where the
    # long-deprecated ``numpy.int`` alias was removed.  It had always been an
    # alias for the builtin ``int``, which numpy accepts as a dtype, so bind
    # the builtin directly -- identical behaviour on old and new numpy.
    numpy_int = int
except ImportError as err:
    # logging.warn() is a deprecated alias of logging.warning()
    logging.warning("In %s: %r" % (__file__, err))
from openmdao.lib.datatypes.api import Array, Float, Int
from openmdao.main.api import Case, ExprEvaluator
from openmdao.main.exceptions import RunStopped
from openmdao.main.hasparameters import HasParameters
from openmdao.main.hasconstraints import HasIneqConstraints
from openmdao.main.hasobjective import HasObjective
from openmdao.main.driver_uses_derivatives import DriverUsesDerivatives
from openmdao.util.decorators import add_delegate, stub_if_missing_deps
from openmdao.main.interfaces import IHasParameters, IHasIneqConstraints, \
IHasObjective, implements, IOptimizer
import newsumt.newsumtinterruptible as newsumtinterruptible
# code for redirecting unit stderr and stdout
# output from newsumt Fortran code
# Not using it now
# save = None
# null_fds = None
# def redirect_fortran_stdout_to_null():
# '''
# capture the output intended for
# stdout and just send it to dev/null
# '''
# global save, null_fds
# sys.stdout.flush()
# #sys.stdout = open(os.devnull, 'w')
# #sys.stdout = WritableObject()
# # open 2 fds
# null_fds = [os.open(os.devnull, os.O_RDWR), os.open(os.devnull, os.O_RDWR)]
# # save the current file descriptors to a tuple
# save = os.dup(1), os.dup(2)
# # put /dev/null fds on 1 and 2
# os.dup2(null_fds[0], 1)
# os.dup2(null_fds[1], 2)
# def restore_fortran_stdout():
# '''
# restore stdout to the
# value it has before the call to
# redirect_fortran_stdout_to_null
# '''
# global save, null_fds
# sys.stdout.flush()
# #sys.stdout == sys.__stdout__
# # restore file descriptors so I can print the results
# os.dup2(save[0], 1)
# os.dup2(save[1], 2)
# # close the temporary fds
# os.close(null_fds[0])
# os.close(null_fds[1])
# Disable complaints about Unused argument
# pylint: disable-msg=W0613
def user_function(info, x, obj, dobj, ddobj, g, dg, n2, n3, n4, imode, driver):
"""
Calculate the objective functions, constraints,
and gradients of those. Call back to the driver
to get the values that were plugged
in.
Note, there is some evidence of loss of precision on the output of
this function.
"""
# evaluate objective function or constraint function
if info in [1, 2]:
if imode == 1:
# We are in a finite difference step drive by NEWSUMT
# However, we still take advantage of a component's
# user-defined gradients via Fake Finite Difference.
# Note, NEWSUMT estimates 2nd-order derivatives from
# the first order differences.
# Save baseline states and calculate derivatives
if driver.baseline_point:
driver.calc_derivatives(first=True, savebase=True)
driver.baseline_point = False
# update the parameters in the model
driver.set_parameters(x)
# Run model under Fake Finite Difference
driver.ffd_order = 1
super(NEWSUMTdriver, driver).run_iteration()
driver.ffd_order = 0
else:
# Optimization step
driver.set_parameters(x)
super(NEWSUMTdriver, driver).run_iteration()
driver.baseline_point = True
# evaluate objectives
if info == 1:
obj = driver.eval_objective()
# evaluate constraint functions
if info == 2:
for i, v in enumerate(driver.get_ineq_constraints().values()):
val = v.evaluate(driver.parent)
if '>' in val[2]:
g[i] = val[0]-val[1]
else:
g[i] = val[1]-val[0]
# save constraint values in driver if this isn't a finite difference
if imode != 1:
driver.constraint_vals = g
elif info == 3 :
# evaluate the first and second order derivatives
# of the objective function
# NEWSUMT bug: sometimes we end up here when ifd=-4
if not driver.differentiator:
return obj, dobj, ddobj, g, dg
driver.ffd_order = 1
driver.differentiator.calc_gradient()
driver.ffd_order = 2
driver.differentiator.calc_hessian(reuse_first=True)
driver.ffd_order = 0
obj_name = driver.get_objectives().keys()[0]
dobj = driver.differentiator.get_gradient(obj_name)
i_current = 0
for row, name1 in enumerate(driver.get_parameters().keys()):
for name2 in driver.get_parameters().keys()[0:row+1]:
ddobj[i_current] = driver.differentiator.get_2nd_derivative(obj_name, wrt=(name1, name2))
i_current += 1
elif info in [4, 5]:
# evaluate gradient of nonlinear or linear constraints.
# Linear gradients are only called once, at startup
if info == 5:
# NEWSUMT bug - During initial run, NEWSUMT will ask for analytic
# derivatives of the linear constraints even when ifd=-4. The only
# thing we can do is return zero.
if not driver.differentiator:
return obj, dobj, ddobj, g, dg
driver.ffd_order = 1
driver.differentiator.calc_gradient()
driver.ffd_order = 0
i_current = 0
for param_name in driver.get_parameters().keys():
for con_name in driver.get_ineq_constraints().keys():
dg[i_current] = -driver.differentiator.get_derivative(con_name, wrt=param_name)
i_current += 1
return obj, dobj, ddobj, g, dg
# pylint: enable-msg=W0613
class _contrl(object):
"""Just a primitive data structure for storing contrl common block data.
We save the common blocks to prevent collision in the case where there are
multiple instances of NEWSUMT running in our model."""
def __init__(self):
self.clear()
def clear(self):
""" Clear values. """
# pylint: disable-msg=W0201
self.c = 0.0
self.epsgsn = 0.0
self.epsodm = 0.0
self.epsrsf = 0.0
self.fdch = 0.0
self.g0 = 0.0
self.ifd = 0
self.iflapp = 0
self.iprint = 0
self.jsigng = 0
self.lobj = 0
self.maxgsn = 0
self.maxodm = 0
self.maxrsf = 0
self.mflag = 0
self.ndv = 0
self.ntce = 0
self.p = 0.0
self.ra = 0.0
self.racut = 0.0
self.ramin = 0.0
self.stepmx = 0.0
self.tftn = 0.0
# pylint: enable-msg=W0201
class _countr(object):
"""Just a primitive data structure for storing countr common block data.
We save the common blocks to prevent collision in the case where there are
multiple instances of NEWSUMT running in our model."""
def __init__(self):
self.clear()
def clear(self):
""" Clear values. """
# pylint: disable-msg=W0201
self.iobjct = 0
self.iobapr = 0
self.iobgrd = 0
self.iconst = 0
self.icongr = 0
self.inlcgr = 0
self.icgapr = 0
# pylint: enable-msg=W0201
# pylint: disable-msg=R0913,R0902
@stub_if_missing_deps('numpy')
@add_delegate(HasParameters, HasIneqConstraints, HasObjective)
class NEWSUMTdriver(DriverUsesDerivatives):
""" Driver wrapper of Fortran version of NEWSUMT.
.. todo:: Check to see if this itmax variable is needed.
NEWSUMT might handle it for us.
"""
implements(IHasParameters, IHasIneqConstraints, IHasObjective, IOptimizer)
itmax = Int(10, iotype='in', desc='Maximum number of iterations before \
termination.')
default_fd_stepsize = Float(0.01, iotype='in', desc='Default finite ' \
'difference stepsize. Parameters with ' \
'specified values override this.')
ilin = Array(dtype=numpy_int, default_value=zeros(0,'i4'), iotype='in',
desc='Array designating whether each constraint is linear.')
# Control parameters for NEWSUMT.
# NEWSUMT has quite a few parameters to give the user control over aspects
# of the solution.
epsgsn = Float(0.001, iotype='in', desc='Convergence criteria \
of the golden section algorithm used for the \
one dimensional minimization.')
epsodm = Float(0.001, iotype='in', desc='Convergence criteria \
of the unconstrained minimization.')
epsrsf = Float(0.001, iotype='in', desc='Convergence criteria \
for the overall process.')
g0 = Float(0.1, iotype='in', desc='Initial value of the transition \
parameter.')
ra = Float(1.0, iotype='in', desc='Penalty multiplier. Required if mflag=1')
racut = Float(0.1, iotype='in', desc='Penalty multiplier decrease ratio. \
Required if mflag=1.')
ramin = Float(1.0e-13, iotype='in', desc='Lower bound of \
penalty multiplier. \
Required if mflag=1.')
stepmx = Float(2.0, iotype='in', desc='Maximum bound imposed on the \
initial step size of the one-dimensional \
minimization.')
iprint = Int(0, iotype='in', desc='Print information during NEWSUMT \
solution. Higher values are more verbose. If 0,\
print initial and final designs only.', high=4, low=0)
lobj = Int(0, iotype='in', desc='Set to 1 if linear objective function.')
maxgsn = Int(20, iotype='in', desc='Maximum allowable number of golden \
section iterations used for 1D minimization.')
maxodm = Int(6, iotype='in', desc='Maximum allowable number of one \
dimensional minimizations.')
maxrsf = Int(15, iotype='in', desc='Maximum allowable number of \
unconstrained minimizations.')
mflag = Int(0, iotype='in', desc='Flag for penalty multiplier. \
If 0, initial value computed by NEWSUMT. \
If 1, initial value set by ra.')
def __init__(self):
super(NEWSUMTdriver, self).__init__()
self.iter_count = 0
# Save data from common blocks into the driver
self.contrl = _contrl()
self.countr = _countr()
# define the NEWSUMTdriver's private variables
# note, these are all resized in config_newsumt
# basic stuff
self.design_vals = zeros(0, 'd')
self.constraint_vals = []
# temp storage
self.__design_vals_tmp = zeros(0, 'd')
self._ddobj = zeros(0)
self._dg = zeros(0)
self._dh = zeros(0)
self._dobj = zeros(0)
self._g = zeros(0)
self._gb = zeros(0)
self._g1 = zeros(0)
self._g2 = zeros(0)
self._g3 = zeros(0)
self._s = zeros(0)
self._sn = zeros(0)
self._x = zeros(0)
self._iik = zeros(0, dtype=int)
self._lower_bounds = zeros(0)
self._upper_bounds = zeros(0)
self._iside = zeros(0)
self.fdcv = zeros(0)
# Just defined here. Set elsewhere
self.n1 = self.n2 = self.n3 = self.n4 = 0
# Ready inputs for NEWSUMT
self._obj = 0.0
self._objmin = 0.0
self.isdone = False
self.resume = False
self.uses_Hessians = False
def start_iteration(self):
"""Perform the optimization."""
# Flag used to figure out if we are starting a new finite difference
self.baseline_point = True
# set newsumt array sizes and more...
self._config_newsumt()
self.iter_count = 0
# get the values of the parameters
# check if any min/max constraints are violated by initial values
for i, val in enumerate(self.get_parameters().values()):
value = val.evaluate(self.parent)
self.design_vals[i] = value
# next line is specific to NEWSUMT
self.__design_vals_tmp[i] = value
# Call the interruptible version of SUMT in a loop that we manage
self.isdone = False
self.resume = False
def continue_iteration(self):
"""Returns True if iteration should continue."""
return not self.isdone and self.iter_count < self.itmax
def pre_iteration(self):
"""Checks or RunStopped and evaluates objective."""
super(NEWSUMTdriver, self).pre_iteration()
if self._stop:
self.raise_exception('Stop requested', RunStopped)
def run_iteration(self):
""" The NEWSUMT driver iteration."""
self._load_common_blocks()
try:
( fmin, self._obj, self._objmin, self.design_vals,
self.__design_vals_tmp, self.isdone, self.resume) = \
newsumtinterruptible.newsuminterruptible(user_function,
self._lower_bounds, self._upper_bounds,
self._ddobj, self._dg, self._dh, self._dobj,
self.fdcv, self._g,
self._gb, self._g1, self._g2, self._g3,
self._obj, self._objmin,
self._s, self._sn, self.design_vals, self.__design_vals_tmp,
self._iik, self.ilin, self._iside,
self.n1, self.n2, self.n3, self.n4,
self.isdone, self.resume, analys_extra_args = (self,))
except Exception, err:
self._logger.error(str(err))
raise
self._save_common_blocks()
self.iter_count += 1
# Update the parameters and run one final time with what it gave us.
# This update is needed because I obeserved that the last callback to
# user_function is the final leg of a finite difference, so the model
# is not in sync with the final design variables.
if not self.continue_iteration():
dvals = [float(val) for val in self.design_vals]
self.set_parameters(dvals)
super(NEWSUMTdriver, self).run_iteration()
self.record_case()
def _config_newsumt(self):
"""Set up arrays for the Fortran newsumt routine, and perform some
validation and make sure that array sizes are consistent.
"""
params = self.get_parameters().values()
ndv = len( params )
if ndv < 1:
self.raise_exception('no parameters specified', RuntimeError)
# Create some information arrays using our Parameter data
self._lower_bounds = zeros(ndv)
self._upper_bounds = zeros(ndv)
self._iside = zeros(ndv)
self.fdcv = ones(ndv)*self.default_fd_stepsize
for i, param in enumerate(params):
self._lower_bounds[i] = param.low
self._upper_bounds[i] = param.high
# The way Parameters presently work, we always specify an
# upper and lower bound
self._iside[i] = 3
if param.fd_step:
self.fdcv[i] = param.fd_step
if self.differentiator:
ifd = 0
else:
ifd = -4
self.n1 = ndv
ncon = len( self.get_ineq_constraints() )
if ncon > 0:
self.n2 = ncon
else:
self.n2 = 1
self.n3 = ( ndv * ( ndv + 1 )) / 2
if ncon > 0:
self.n4 = ndv * ncon
else:
self.n4 = 1
self.design_vals = zeros(ndv)
self.constraint_vals = zeros(ncon)
# Linear constraint setting
if len(self.ilin) == 0 :
if ncon > 0:
self.ilin = zeros(ncon, dtype=int)
else:
self.ilin = zeros(1, dtype=int)
elif len(self.ilin) != ncon:
msg = "Dimension of NEWSUMT setting 'ilin' should be equal to " + \
"the number of constraints."
self.raise_exception(msg, RuntimeError)
# Set initial values in the common blocks
self.countr.clear()
self.contrl.clear()
self.contrl.c = 0.2
self.contrl.epsgsn = self.epsgsn
self.contrl.epsodm = self.epsodm
self.contrl.epsrsf = self.epsrsf
self.contrl.fdch = 0.05
self.contrl.g0 = self.g0
self.contrl.ifd = ifd
self.contrl.iflapp = 0
self.contrl.jprint = self.iprint - 1
self.contrl.jsigng = 1
self.contrl.lobj = self.lobj
self.contrl.maxgsn = self.maxgsn
self.contrl.maxodm = self.maxodm
self.contrl.maxrsf = self.maxrsf
self.contrl.mflag = self.mflag
self.contrl.ndv = ndv
self.contrl.ntce = ncon
self.contrl.p = 0.5
self.contrl.ra = self.ra
self.contrl.racut = self.racut
self.contrl.ramin = self.ramin
self.contrl.stepmx = self.stepmx
self.contrl.tftn = 0.0
# work arrays
self.__design_vals_tmp = zeros(self.n1,'d')
self._ddobj = zeros( self.n3 )
self._dg = zeros( self.n4 )
self._dh = zeros( self.n1 )
self._dobj = zeros( self.n1 )
self._g = zeros( self.n2 )
self._gb = zeros( self.n2 )
self._g1 = zeros( self.n2 )
self._g2 = zeros( self.n2 )
self._g3 = zeros( self.n2 )
self._s = zeros( self.n1 )
self._sn = zeros( self.n1 )
self._iik = zeros( self.n1, dtype=int )
def _load_common_blocks(self):
""" Reloads the common blocks using the intermediate info saved in the
class.
"""
for name, value in self.contrl.__dict__.items():
setattr( newsumtinterruptible.contrl, name, value )
for name, value in self.countr.__dict__.items():
setattr( newsumtinterruptible.countr, name, value )
def _save_common_blocks(self):
        """Copy the Fortran common-block contents onto this instance so that
        other NEWSUMT instances cannot trample the shared state.
        """
        for blk_name in ('contrl', 'countr'):
            local_blk = getattr(self, blk_name)
            fortran_blk = getattr(newsumtinterruptible, blk_name)
            for field, old_val in local_blk.__dict__.items():
                # Cast through the cached value's type so e.g. ints stay ints.
                new_val = type(old_val)(getattr(fortran_blk, field))
                setattr(local_blk, field, new_val)
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
8eedd8a9491c2e95bc52cac13f9f3589f01bcf3b | c33ddce47e635e1e560f5bd3daadc0c694c0d4ce | /lib/exceptions.py | dd7176baf9476f9f503e4f998a5bc57009860979 | [
"Apache-2.0"
class InvalidConfigFile(Exception):
    """Raised when an OpenVPN configuration file cannot be parsed.

    The offending file content is kept on ``self.content`` so callers can
    inspect or log it.
    """

    def __init__(self, content):
        # Forward to Exception so e.args is populated and pickling works.
        super().__init__(content)
        self.content = content

    def __str__(self):
        # The original returned repr(...) which wrapped the message in
        # spurious quotes; return the plain message instead.
        return "Invalid configuration file"
| [
"hzengin99@gmail.com"
] | hzengin99@gmail.com |
73608aa0ec56957d6d4f1a5aacb52173748a78d9 | 75b25a926c164bd0ea705485a9abdd988e0ef596 | /fbad/dockerutils.py | a12ff7e8e4bb132b9f6a46e55c52f3c92dfd2294 | [
"MIT"
] | permissive | bennr01/fbad | 39edf5790bcdecca47f75acc1ced4cfc641fce22 | a872aa6e077931743a8cbce3328ce4cbc509745c | refs/heads/master | 2020-03-06T19:37:58.889646 | 2018-10-31T14:35:55 | 2018-10-31T14:35:55 | 127,033,219 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | """utilities for interacting with docker."""
import subprocess
def in_swarm():
    """
    Check whether docker is running in swarm mode.

    :return: True when running in swarm mode, False otherwise.
    :rtype: bool
    """
    # check_output() returns bytes on Python 3; decode before the substring
    # test below, otherwise `str in bytes` raises TypeError.
    output = subprocess.check_output(["docker", "info"]).decode("utf-8", "replace")
    swarm_enabled = ("Swarm: active" in output)
    return swarm_enabled
| [
"benjamin99.vogt@web.de"
] | benjamin99.vogt@web.de |
2246efedd5629640d5a477e86ce72ebaa71edb1e | 58023de0289f4e5dc62b3e91a798e03423ac1b8e | /app/dngadmin_formkeydemo.py | 6dcc9516b7bf2146908f2bef9e37bfd5c0801a8e | [] | no_license | BeLinKang/DngAdmin | 5b1466cb7c24292200ae860be3689b43478d5160 | f78d90b93ab509f83018cc5bbe922568ab2b1fd7 | refs/heads/master | 2023-07-15T05:56:47.665627 | 2021-08-27T12:54:23 | 2021-08-27T12:54:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,006 | py |
# Create your views here.
from django.shortcuts import render #视图渲染模块
from django.http import HttpResponse #请求模块
from . import models #数据库操作模块
from django.db.models import Q #数据库逻辑模块
from django.db.models import Avg,Max,Min,Sum #数据库聚合计算模块
from datetime import datetime,timedelta #Cookie 模块
from django.http import HttpResponse, HttpResponseRedirect #重定向模块
from django.shortcuts import render
import os
import sys
import json
from urllib import parse#转码
import re #正则模块
import random#随机模块
import hashlib# 加密模块
from django.utils import timezone #时间处理模块
import datetime#时间
import time# 日期模块
from . import dngadmin_common #公共模块
from . import dngadmin_formcommon #表单组件模块
from django.forms.models import model_to_dict
def formkeydemo(request):
    """Render the form-key demo page.

    Authenticates the visitor through signed cookies, enforces the
    menu/route requirements configured for this page (menu membership,
    points, balance, cumulative spend, referrals), and passes the CRUD
    permission flags of the user's group to the template.
    """
    # ----------------------------------------------------------
    # Resolve the menu (route) id from the request path >>> start
    # ----------------------------------------------------------
    dngroute_uid = dngadmin_common.dng_ckurl(request)[0]
    get_url = dngadmin_common.dng_ckurl(request)[1]
    # ----------------------------------------------------------
    # Access logging, cookie validation and permissions >>> start
    # ----------------------------------------------------------
    ip = request.META.get('HTTP_X_FORWARDED_FOR')  # client IP from proxy header (may be None)
    liulanqi = request.META.get('HTTP_USER_AGENT')  # browser user-agent string
    yuming_url = request.META.get('HTTP_HOST')  # host currently being visited
    geturl = request.META.get('QUERY_STRING')  # raw query string
    mulu_url = request.path  # mapped path, without the query string
    tishi = request.GET.get('tishi')  # info message to display
    jinggao = request.GET.get('jinggao')  # warning message to display
    yes = request.GET.get('yes')  # confirmation message to display
    if "dnguser_uid" in request.COOKIES:  # only proceed when auth cookies exist
        cookie_user_uid = request.get_signed_cookie(key="dnguser_uid", default=None,
                                                    salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_user_name = request.get_signed_cookie(key="dnguser_name", default=None,
                                                     salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_user_cookie_echo = request.get_signed_cookie(key="dnguser_cookie_echo", default=None,
                                                            salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_user_cookie = request.get_signed_cookie(key="dnguser_cookie", default=None,
                                                       salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_pr = dngadmin_common.dng_yanzheng(cookie_user_uid, cookie_user_name, cookie_user_cookie,
                                                 cookie_user_cookie_echo)
        if cookie_pr:
            dnguser_uid =cookie_pr.uid_int  # user id
            dnguser_name = cookie_pr.username_str  # username
            dnguser_cookie=cookie_pr.cookie_str  # stored cookie token
        else:
            return HttpResponseRedirect('/dngadmin/tips/?jinggao=' + parse.quote('检测到非法登录'))
        if dngadmin_common.dng_anquan().tongshi_bool == False:  # optionally forbid concurrent logins
            if dngadmin_common.dng_tongshi(uid=dnguser_uid, cookie=dnguser_cookie) == False:
                return HttpResponseRedirect('/dngadmin/tips/?jinggao=' + parse.quote('不允许同时登录账号'))
    else:
        return HttpResponseRedirect('/dngadmin/tips/?jinggao=' + parse.quote('您需要重新登录'))
    # ----------------------------------------------------------
    # Access logging, cookie validation and permissions <<< end
    # ----------------------------------------------------------
    # ----------------------------------------------------------
    # Page-level permission checks >>> start
    # ----------------------------------------------------------
    dnguser =dngadmin_common.dng_dnguser(dnguser_uid)
    group = dngadmin_common.dng_usergroup(gid=dnguser.group_int)  # the user's permission group
    dngroute = models.dngroute.objects.filter(uid_int=dngroute_uid).first()  # menu record for this page
    dngadmin_common.dng_dngred(uid=dnguser_uid, title=dngroute.name_str, url=mulu_url, user=liulanqi, ip=ip)  # write an access-log entry
    if not dngroute.url_str in mulu_url:  # requested path must match the menu mapping
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您的访问与菜单映射不匹配</h1></center><div>""")
    elif not '|'+str(dngroute_uid)+'|'in group.menu_text:  # group must grant this menu id
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您没有访问这个栏目的权限</h1></center><div>""")
    elif not dnguser.integral_int >= dngroute.integral_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您积分"""+str(dnguser.integral_int)+""",访问需要达到"""+str(dngroute.integral_int)+"""积分!</h1></center><div>""")
    elif not dnguser.money_int >= dngroute.money_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您余额"""+str(dnguser.money_int)+""",访问需要达到"""+str(dngroute.money_int)+"""余额!</h1></center><div>""")
    elif not dnguser.totalmoney_int >= dngroute.totalmoney_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您累计充值""" + str(dnguser.totalmoney_int) + """,访问需要累计充值达到""" + str(dngroute.totalmoney_int) + """!</h1></center><div>""")
    elif not dnguser.totalspend_int >= dngroute.totalspend_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您累计消费""" + str(dnguser.totalspend_int) + """,访问需要累计消费达到""" + str(dngroute.totalspend_int) + """!</h1></center><div>""")
    elif not dnguser.spread_int >= dngroute.spread_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您推广""" + str(dnguser.spread_int) + """人,访问需要推广""" + str(dngroute.spread_int) + """人!</h1></center><div>""")
    added =False  # create permission
    delete = False  # delete permission
    update =False  # update permission
    see =False  # read permission
    if '|' + str(dngroute_uid) + '|' in group.added_text:  # group grants create here
        added =True
    if '|' + str(dngroute_uid) + '|' in group.delete_text:  # group grants delete here
        delete =True
    if '|' + str(dngroute_uid) + '|' in group.update_text:  # group grants update here
        update =True
    if '|' + str(dngroute_uid) + '|' in group.see_text:  # group grants read here
        see =True
    # ----------------------------------------------------------
    # Page-level permission checks <<< end
    # ----------------------------------------------------------
    return render(request,"dngadmin/formkeydemo.html",{
        "title":dngroute.name_str,
        "edition": dngadmin_common.dng_setup().edition_str,  # version string
        "file": dngadmin_common.dng_setup().file_str,  # ICP filing number
        "tongue": dngadmin_common.dng_setup().statistics_text,  # analytics snippet
        "added": added,  # create
        "delete": delete,  # delete
        "update": update,  # update
        "see": see,  # read
        "tishi": tishi,
        "jinggao": jinggao,
        "yes": yes,
        "yuming_url": yuming_url,
    })
def formkeydemo_post(request):
    """Handle the POST endpoint of the form-key demo page.

    Repeats the cookie authentication and page-permission checks of
    ``formkeydemo`` and redirects with a warning when the visitor lacks
    the required read permission.

    NOTE(review): when ``see`` is granted, no response is returned and
    the view implicitly returns ``None`` — confirm whether the actual
    write logic was intentionally removed from this demo.
    """
    # ----------------------------------------------------------
    # Resolve the menu (route) id from the request path >>> start
    # ----------------------------------------------------------
    dngroute_uid = dngadmin_common.dng_ckurl(request)[0]
    get_url = dngadmin_common.dng_ckurl(request)[1]
    # ----------------------------------------------------------
    # Access logging, cookie validation and permissions >>> start
    # ----------------------------------------------------------
    ip = request.META.get('HTTP_X_FORWARDED_FOR')  # client IP from proxy header (may be None)
    liulanqi = request.META.get('HTTP_USER_AGENT')  # browser user-agent string
    geturl = request.META.get('QUERY_STRING')  # raw query string
    mulu_url = request.path  # mapped path, without the query string
    if "dnguser_uid" in request.COOKIES:  # only proceed when auth cookies exist
        cookie_user_uid = request.get_signed_cookie(key="dnguser_uid", default=None,
                                                    salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_user_name = request.get_signed_cookie(key="dnguser_name", default=None,
                                                     salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_user_cookie_echo = request.get_signed_cookie(key="dnguser_cookie_echo", default=None,
                                                            salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_user_cookie = request.get_signed_cookie(key="dnguser_cookie", default=None,
                                                       salt=dngadmin_common.dng_anquan().salt_str, max_age=None)
        cookie_pr = dngadmin_common.dng_yanzheng(cookie_user_uid, cookie_user_name, cookie_user_cookie,
                                                 cookie_user_cookie_echo)
        if cookie_pr:
            dnguser_uid =cookie_pr.uid_int  # user id
            dnguser_name = cookie_pr.username_str  # username
            dnguser_cookie=cookie_pr.cookie_str  # stored cookie token
        else:
            return HttpResponseRedirect('/dngadmin/tips/?jinggao=' + parse.quote('检测到非法登录'))
        if dngadmin_common.dng_anquan().tongshi_bool == False:  # optionally forbid concurrent logins
            if dngadmin_common.dng_tongshi(uid=dnguser_uid, cookie=dnguser_cookie) == False:
                return HttpResponseRedirect('/dngadmin/tips/?jinggao=' + parse.quote('不允许同时登录账号'))
    else:
        return HttpResponseRedirect('/dngadmin/tips/?jinggao=' + parse.quote('您需要重新登录'))
    # ----------------------------------------------------------
    # Access logging, cookie validation and permissions <<< end
    # ----------------------------------------------------------
    # ----------------------------------------------------------
    # Page-level permission checks >>> start
    # ----------------------------------------------------------
    dnguser =dngadmin_common.dng_dnguser(dnguser_uid)
    group = dngadmin_common.dng_usergroup(gid=dnguser.group_int)  # the user's permission group
    dngroute = models.dngroute.objects.filter(uid_int=dngroute_uid).first()  # menu record for this page
    dngadmin_common.dng_dngred(uid=dnguser_uid, title=dngroute.name_str, url=mulu_url, user=liulanqi, ip=ip)  # write an access-log entry
    if not dngroute.url_str in mulu_url:  # requested path must match the menu mapping
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您的访问与菜单映射不匹配</h1></center><div>""")
    elif not '|'+str(dngroute_uid)+'|'in group.menu_text:  # group must grant this menu id
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您没有访问这个栏目的权限</h1></center><div>""")
    elif not dnguser.integral_int >= dngroute.integral_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您积分"""+str(dnguser.integral_int)+""",访问需要达到"""+str(dngroute.integral_int)+"""积分!</h1></center><div>""")
    elif not dnguser.money_int >= dngroute.money_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您余额"""+str(dnguser.money_int)+""",访问需要达到"""+str(dngroute.money_int)+"""余额!</h1></center><div>""")
    elif not dnguser.totalmoney_int >= dngroute.totalmoney_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您累计充值""" + str(dnguser.totalmoney_int) + """,访问需要累计充值达到""" + str(dngroute.totalmoney_int) + """!</h1></center><div>""")
    elif not dnguser.totalspend_int >= dngroute.totalspend_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您累计消费""" + str(dnguser.totalspend_int) + """,访问需要累计消费达到""" + str(dngroute.totalspend_int) + """!</h1></center><div>""")
    elif not dnguser.spread_int >= dngroute.spread_int:
        return HttpResponse("""<BR><BR><BR><BR><BR><center><h1>您推广""" + str(dnguser.spread_int) + """人,访问需要推广""" + str(dngroute.spread_int) + """人!</h1></center><div>""")
    added =False  # create permission
    delete = False  # delete permission
    update =False  # update permission
    see =False  # read permission
    if '|' + str(dngroute_uid) + '|' in group.added_text:  # group grants create here
        added =True
    if '|' + str(dngroute_uid) + '|' in group.delete_text:  # group grants delete here
        delete =True
    if '|' + str(dngroute_uid) + '|' in group.update_text:  # group grants update here
        update =True
    if '|' + str(dngroute_uid) + '|' in group.see_text:  # group grants read here
        see =True
    else:
        urlstr = parse.quote('您没有修改权限')
        response = HttpResponseRedirect('/dngadmin/formkeydemo/?jinggao=' + urlstr)
        return response
| [
"455873983@qq.com"
] | 455873983@qq.com |
163d7c44a7e018cae6d6ff4a03b364723f15cc08 | 487c45df5fcbe7fdf6df5a348f6fe163bbb22033 | /leetcode/875_koko_eating_bananas.py | 20b4f0c350be2d1c309eb1f272a208f5b384aa40 | [
"Unlicense"
] | permissive | leetcode-notes/daily-algorithms-practice | dba03ac1c55262f6bae7d5aa4dac590c3c067e75 | 2a03499ed0b403d79f6c8451c9a839991b23e188 | refs/heads/master | 2023-06-18T14:14:58.770797 | 2021-07-12T05:27:32 | 2021-07-12T05:27:32 | 264,057,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | class Solution:
def minEatingSpeed(self, piles, H: int) -> int:
        """Return the minimum integer eating speed that finishes every
        pile within H hours (binary search on the answer)."""
        lo, hi = 1, max(piles)
        while lo < hi:
            speed = (lo + hi) // 2
            # Hours needed at this speed: ceil(pile / speed) per pile.
            hours = sum((pile + speed - 1) // speed for pile in piles)
            if hours > H:
                # Too slow to finish in time; speed must be higher.
                lo = speed + 1
            else:
                hi = speed
        return lo
"""
Success
Details
Runtime: 500 ms, faster than 53.72% of Python3 online
submissions for Koko Eating Bananas.
Memory Usage: 15.4 MB, less than 76.05% of Python3 online
submissions for Koko Eating Bananas.
Next challenges:
Minimize Max Distance to Gas Station
"""
| [
"leetcode.notes@gmail.com"
] | leetcode.notes@gmail.com |
def factorial(n):
    """Return n! computed iteratively; any n below 2 yields 1."""
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
| [
"mateusz.szymanski@profitroom.net"
] | mateusz.szymanski@profitroom.net |
2fecbef126f0a4c1540866aad4fb08b2a77da923 | 8942242d001a53a353f643787c1e745465e961c7 | /leetcode/wiggle-sort.py | eb1f233362d9141981a8984bc2dbaec882fe9e92 | [] | no_license | brcsomnath/competitive-programming | 7b5277b455acaec8a729cca3576ad2aa669ba04f | 49326c577d7fa343a2c803efb3e81d00df523d7d | refs/heads/master | 2020-05-18T20:18:55.741482 | 2019-05-29T17:29:46 | 2019-05-29T17:32:45 | 184,628,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | '''
Given an unsorted array nums, reorder it in-place
such that nums[0] <= nums[1] >= nums[2] <= nums[3]....
For example, given nums = [3, 5, 2, 1, 6, 4],
one possible answer is [1, 6, 2, 5, 3, 4].
'''
def process(array):
    """Rearrange *array* in place so that
    array[0] <= array[1] >= array[2] <= array[3] ... and return it.

    Single left-to-right pass: odd positions must be local peaks, even
    positions local valleys; swap adjacent elements whenever the
    required relation is violated.
    """
    for idx in range(1, len(array)):
        wants_peak = idx % 2 == 1
        if wants_peak:
            out_of_order = array[idx] < array[idx - 1]
        else:
            out_of_order = array[idx] > array[idx - 1]
        if out_of_order:
            array[idx - 1], array[idx] = array[idx], array[idx - 1]
    return array
def main():
    """Read whitespace-separated integers from stdin, wiggle-sort them
    with process(), and print the result."""
    values = [int(token) for token in input().split()]
    print(process(values))
if __name__ == "__main__":
main() | [
"somnath@Somnaths-Personal-MacBook-Pro-2.local"
] | somnath@Somnaths-Personal-MacBook-Pro-2.local |
e2be22d23ef0a492a068ef4aa2428993698da3bd | e0188f937248e5aa82e62e86b59944c93b09f256 | /fairy/tail/ft_list.py | 308d9266138236cdea3a7aea98ff22c7b02e42f0 | [
"MIT"
] | permissive | qilutong/fairy | de8c9115eeb91cf2a6a95fb8880ae9f3784d2045 | e4d40c5d3d6d431875a49c4f708ee278ab795901 | refs/heads/master | 2020-04-09T05:36:35.342483 | 2019-09-04T12:32:17 | 2019-09-04T12:32:17 | 160,071,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | # -*- coding: utf-8 -*-
"""
@FileName : ft_list.py
@Description : None
@Author : 齐鲁桐
@Email : qilutong@yahoo.com
@Time : 2019-05-08 14:57
@Modify : None
"""
from __future__ import absolute_import, division, print_function
def list_insert(raw_list, index, data):
    """Insert *data* into *raw_list* in place at *index*.

    Negative indices are shifted to match intuition: -1 appends at the
    very end, -2 inserts just before the last element, and so on
    (plain ``list.insert`` can never append with a negative index).

    :param raw_list: the list to modify
    :param index: target position, possibly negative
    :param data: the value to insert
    """
    if index == -1:
        # -1 means "after the last element", i.e. a plain append.
        raw_list.append(data)
    elif index < 0:
        # Shift the remaining negative indices toward the end by one.
        raw_list.insert(index + 1, data)
    else:
        raw_list.insert(index, data)
| [
"qilutong@yahoo.com"
] | qilutong@yahoo.com |
a71a8a0ac4477cae9a18a321c9ca800b069b634c | 040adb3acaf8b5811a1fcb79db03bbd04a73d25a | /figures.py | 23009c1da9b72dbc6c3bd5250cb260acd782eb76 | [] | no_license | xwl-xwl/disentangled | a8fdabfb0477286a96c3dae2c870de24305ad3fc | 37024bdf851a00888de57b494e6b59b58b2207db | refs/heads/main | 2023-08-25T16:02:49.094606 | 2021-10-21T16:38:42 | 2021-10-21T16:38:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,036 | py |
import numpy as np
import scipy.stats as sts
import functools as ft
import sklearn.decomposition as skd
import sklearn.svm as skc
import scipy.linalg as spla
import general.plotting as gpl
import general.plotting_styles as gps
import general.paper_utilities as pu
import general.utility as u
import disentangled.data_generation as dg
import disentangled.disentanglers as dd
import disentangled.characterization as dc
import disentangled.aux as da
import disentangled.theory as dt
import disentangled.multiverse_options as dmo
config_path = 'disentangled/figures.conf'
colors = np.array([(127,205,187),
(65,182,196),
(29,145,192),
(34,94,168),
(37,52,148),
(8,29,88)])/256
tuple_int = lambda x: (int(x),)
def _make_cgp_ax(ax):
    # Format an axis for classifier generalization: chance level is .5,
    # marked with a horizontal line, and the y-range is [.5, 1].
    ax.set_yticks([.5, 1])
    ax.set_ylabel('classifier')
    gpl.add_hlines(.5, ax)
    ax.set_ylim([.5, 1])
def _make_rgp_ax(ax):
    # Format an axis for regression generalization: baseline is 0,
    # marked with a horizontal line, and the y-range is [0, 1].
    ax.set_yticks([0, .5, 1])
    ax.set_ylabel('regression')
    gpl.add_hlines(0, ax)
    ax.set_ylim([0, 1])
def plot_cgp(results, ax, **kwargs):
    # Plot classifier generalization results and format the axis for them.
    plot_single_gen(results, ax, **kwargs)
    _make_cgp_ax(ax)
def plot_rgp(results, ax, **kwargs):
    # Plot regression generalization results and format the axis for them.
    plot_single_gen(results, ax, **kwargs)
    _make_rgp_ax(ax)
def plot_bgp(res_c, res_r, ax_c, ax_r, **kwargs):
    # Plot classifier (res_c) and regression (res_r) generalization on
    # their respective axes with shared styling kwargs.
    plot_cgp(res_c, ax_c, **kwargs)
    plot_rgp(res_r, ax_r, **kwargs)
def plot_multi_bgp(res_list_c, res_list_r, ax_c, ax_r, legend_labels=None,
                   **kwargs):
    # Plot lists of classifier and regression generalization results
    # side by side on two axes.
    # NOTE(review): legend_labels is only forwarded to the regression
    # panel, so the legend appears there alone — confirm this is intended.
    plot_multi_gen(res_list_c, ax_c, **kwargs)
    plot_multi_gen(res_list_r, ax_r, legend_labels=legend_labels, **kwargs)
    _make_cgp_ax(ax_c)
    _make_rgp_ax(ax_r)
def plot_single_gen(results, ax, xs=None, color=None,
                    labels=('standard', 'gen'), legend_label=''):
    """Violin-plot one results array on *ax* and return the axis.

    *results* has one column per condition (by default standard vs
    generalization, plotted at x positions ``xs``); column means are
    overlaid as points.
    """
    if xs is None:
        xs = [0, 1]
    gpl.violinplot(results.T, xs, ax=ax, color=(color, color),
                   showextrema=False)
    ax.plot(xs, np.mean(results, axis=0), 'o', color=color,
            label=legend_label)
    ax.set_xticks(xs)
    ax.set_xticklabels(labels)
    gpl.clean_plot(ax, 0)
    gpl.clean_plot_bottom(ax, keeplabels=True)
    return ax
def plot_multi_gen(res_list, ax, xs=None, labels=('standard', 'gen'),
                   sep=.2, colors=None, legend_labels=None):
    """Plot several generalization result arrays side by side on *ax*.

    Each entry of *res_list* is plotted with ``plot_single_gen``; the
    entries are spread symmetrically around each x position in *xs*
    (spacing *sep*) so they do not overlap. Returns the axis.

    (Fix: the original computed an unused local ``start_xs``.)
    """
    if xs is None:
        xs = np.array([0, 1])
    if colors is None:
        colors = (None,)*len(res_list)
    if legend_labels is None:
        legend_labels = ('',)*len(res_list)
    # symmetric per-entry offsets around each x position
    n_seps = (len(res_list) - 1)/2
    use_xs = np.linspace(-sep*n_seps, sep*n_seps, len(res_list))
    for i, rs in enumerate(res_list):
        plot_single_gen(rs, ax, xs=xs + use_xs[i], color=colors[i],
                        legend_label=legend_labels[i])
    ax.set_xticks(xs)
    ax.set_xticklabels(labels)
    ax.legend(frameon=False)
    gpl.clean_plot(ax, 0)
    gpl.clean_plot_bottom(ax, keeplabels=True)
    return ax
def train_eg_bvae(dg, params):
    """Train a single example beta-VAE disentangler on data generator *dg*.

    All hyperparameters (beta, latent dimension, layer spec, epochs,
    training set size, batch size) are read from the *params* config
    section. Returns the output of ``dc.train_multiple_models``.
    """
    beta_eg = params.getfloat('beta_eg')
    latent_dim = params.getint('latent_dim')
    n_epochs = params.getint('n_epochs')
    n_train_eg = params.getint('n_train_eg')
    layer_spec = params.getlist('layers', typefunc=tuple_int)
    batch_size = params.getint('batch_size')
    hide_print = params.getboolean('hide_print')
    eg_model = (ft.partial(dd.BetaVAE, beta=beta_eg),)
    # single model, single repetition; multiprocessing enabled
    out = dc.train_multiple_models(dg, eg_model,
                                   layer_spec, epochs=n_epochs,
                                   input_dim=latent_dim,
                                   n_train_samps=n_train_eg,
                                   use_mp=True, n_reps=1,
                                   batch_size=batch_size,
                                   hide_print=hide_print)
    return out
def train_eg_fd(dg, params, offset_var=True, **kwargs):
    """Train a single example FlexibleDisentanglerAE on data generator *dg*.

    Hyperparameters come from the *params* config section. When
    *offset_var* is True, partition offsets are drawn from a normal
    distribution with variance ``offset_var_eg``; otherwise no offset
    distribution is used. Extra kwargs are forwarded to the model
    constructor. Returns the output of ``dc.train_multiple_models``.
    """
    n_part = params.getint('n_part_eg')
    latent_dim = params.getint('latent_dim')
    n_epochs = params.getint('n_epochs')
    n_train_eg = params.getint('n_train_eg')
    layer_spec = params.getlist('layers', typefunc=tuple_int)
    batch_size = params.getint('batch_size')
    hide_print = params.getboolean('hide_print')
    no_autoenc = params.getboolean('no_autoencoder')
    if offset_var:
        offset_var_eg = params.getfloat('offset_var_eg')
        offset_distr = sts.norm(0, offset_var_eg)
    else:
        offset_distr = None
    eg_model = (ft.partial(dd.FlexibleDisentanglerAE,
                           true_inp_dim=dg.input_dim,
                           n_partitions=n_part,
                           offset_distr=offset_distr,
                           no_autoenc=no_autoenc, **kwargs),)
    # single model, single repetition; multiprocessing enabled
    out = dc.train_multiple_models(dg, eg_model,
                                   layer_spec, epochs=n_epochs,
                                   input_dim=latent_dim,
                                   n_train_samps=n_train_eg,
                                   use_mp=True, n_reps=1,
                                   batch_size=batch_size,
                                   hide_print=hide_print)
    return out
def explore_autodisentangling_layers(latents, layers, inp_dim, dims, **kwargs):
    """Run the latent-dimension sweep for every prefix of *layers*
    (including the empty prefix) and return a dict keyed by the layer
    tuple used for each run."""
    results = {}
    for depth in range(len(layers) + 1):
        prefix = layers[:depth]
        results[prefix] = explore_autodisentangling_latents(
            latents, dims, inp_dim, prefix, **kwargs)
    return results
def explore_autodisentangling_latents(latents, *args, n_class=10, **kwargs):
    """Run ``explore_autodisentangling`` once per latent dimensionality in
    *latents*, stacking the classification and regression results into
    arrays of shape (len(latents), n_class, 2).
    """
    classes = np.zeros((len(latents), n_class, 2))
    regrs = np.zeros_like(classes)
    for i, latent in enumerate(latents):
        # the latent dimensionality is appended as the final positional arg
        full_args = args + (latent,)
        out = explore_autodisentangling(*full_args, n_class=n_class, **kwargs)
        classes[i], regrs[i] = out
    return classes, regrs
def explore_autodisentangling(dims, inp_dim, layers, latent, n_samps=10000,
                              epochs=200, n_class=10, ret_m=False,
                              use_rf=False, low_thr=.001, rf_width=3):
    """Train a pure autoencoder (0 partitions) on an RF or kernel data
    generator and quantify how well its representation generalizes.

    Returns (classification, regression) generalization results for the
    trained model; with ``ret_m=True`` the data generator and model are
    returned as a third element.
    """
    if use_rf:
        rbf_dg = dg.RFDataGenerator(dims, inp_dim, total_out=True,
                                    low_thr=low_thr, input_noise=0,
                                    noise=0, width_scaling=rf_width)
    else:
        rbf_dg = dg.KernelDataGenerator(dims, None, inp_dim,
                                        low_thr=low_thr)
    print(rbf_dg.representation_dimensionality(participation_ratio=True))
    # n_partitions=0 makes this a plain autoencoder
    fdae = dd.FlexibleDisentanglerAE(rbf_dg.output_dim, layers, latent,
                                     n_partitions=0)
    y, x = rbf_dg.sample_reps(n_samps)
    fdae.fit(x, y, epochs=epochs, verbose=False)
    # NOTE(review): class_p/regr_p (identity-model baseline) are computed
    # but never used or returned — confirm whether they can be dropped.
    class_p, regr_p = characterize_generalization(rbf_dg,
                                                  dd.IdentityModel(),
                                                  n_class)
    class_m, regr_m = characterize_generalization(rbf_dg, fdae, n_class)
    if ret_m:
        out = class_m, regr_m, (rbf_dg, fdae)
    else:
        out = (class_m, regr_m)
    return out
def characterize_generalization(dg, model, c_reps, train_samples=1000,
                                test_samples=500, bootstrap_regr=True,
                                n_boots=1000, norm=True, cut_zero=True,
                                repl_mean=None):
    """Quantify classifier and regression generalization of *model* on *dg*.

    For each of *c_reps* repetitions, a random half-space partition of the
    source distribution is drawn; column 0 of the returned arrays holds
    standard (train == test distribution) performance and column 1 holds
    generalization (train and test on opposite halves).

    Returns ``(results_class, results_regr)``, each of shape (c_reps, 2).
    """
    results_class = np.zeros((c_reps, 2))
    results_regr = np.zeros((c_reps, 2))
    for i in range(c_reps):
        if norm:
            train_distr = da.HalfMultidimensionalNormal.partition(
                dg.source_distribution)
        else:
            train_distr = dg.source_distribution.make_partition()
        # test on the complementary half of the source distribution
        test_distr = train_distr.flip()
        results_class[i, 0] = dc.classifier_generalization(
            dg, model, n_train_samples=train_samples,
            n_test_samples=test_samples,
            n_iters=1, repl_mean=repl_mean)[0]
        results_class[i, 1] = dc.classifier_generalization(
            dg, model, train_distrib=train_distr,
            test_distrib=test_distr, n_train_samples=train_samples,
            n_test_samples=test_samples, n_iters=1, repl_mean=repl_mean)[0]
        results_regr[i, 0] = dc.find_linear_mapping_single(
            dg, model, half=False, n_samps=train_samples,
            repl_mean=repl_mean)[1]
        results_regr[i, 1] = dc.find_linear_mapping_single(
            dg, model, n_samps=train_samples,
            repl_mean=repl_mean)[1]
    if cut_zero:
        # negative regression scores are floored at chance (0)
        results_regr[results_regr < 0] = 0
    # NOTE(review): this branch is deliberately disabled via `False and`;
    # bootstrap_regr currently has no effect — confirm before removing.
    if False and bootstrap_regr:
        results_regr_b = np.zeros((n_boots, 2))
        results_regr_b[:, 0] = u.bootstrap_list(results_regr[:, 0],
                                                np.mean, n=n_boots)
        results_regr_b[:, 1] = u.bootstrap_list(results_regr[:, 1],
                                                np.mean, n=n_boots)
        results_regr = results_regr_b
    return results_class, results_regr
class DisentangledFigure(pu.Figure):
    """Base class for the paper figures, adding a cached functional data
    generator and a reusable manifold-plus-summary panel."""

    def make_fdg(self, retrain=False):
        """Return the figure's FunctionalDataGenerator, training and
        caching it on first use (or when *retrain* is True).
        """
        # NOTE(review): bare except — the assert/AttributeError trick forces
        # retraining, but it also swallows unrelated errors.
        try:
            assert not retrain
            fdg = self.fdg
        except:
            inp_dim = self.params.getint('inp_dim')
            dg_dim = self.params.getint('dg_dim')
            dg_epochs = self.params.getint('dg_epochs')
            dg_noise = self.params.getfloat('dg_noise')
            dg_regweight = self.params.getlist('dg_regweight', typefunc=float)
            dg_layers = self.params.getlist('dg_layers', typefunc=int)
            dg_source_var = self.params.getfloat('dg_source_var')
            dg_train_egs = self.params.getint('dg_train_egs')
            dg_pr_reg = self.params.getboolean('dg_pr_reg')
            dg_bs = self.params.getint('dg_batch_size')
            # isotropic normal latent-variable distribution
            source_distr = sts.multivariate_normal(np.zeros(inp_dim),
                                                   dg_source_var)
            fdg = dg.FunctionalDataGenerator(inp_dim, dg_layers, dg_dim,
                                             noise=dg_noise,
                                             use_pr_reg=dg_pr_reg,
                                             l2_weight=dg_regweight)
            fdg.fit(source_distribution=source_distr, epochs=dg_epochs,
                    train_samples=dg_train_egs, batch_size=dg_bs)
            self.fdg = fdg
        return fdg

    def _standard_panel(self, fdg, model, run_inds, f_pattern, folder, axs,
                        labels=None, rep_scale_mag=5, source_scale_mag=.5,
                        x_label=True, y_label=True, colors=None, view_init=None,
                        multi_num=1, **kwargs):
        """Draw the standard panel: source/representation manifolds on the
        leading axes, then recon/gen summaries (one per run index) on the
        remaining two axes.
        """
        model = model[0, 0]
        if labels is None:
            labels = ('',)*len(run_inds)
        # with 3 axes, one manifold plot; with 4, two manifold plots
        if len(axs) == 3:
            ax_break = 1
        else:
            ax_break = 2
        manifold_axs = axs[:ax_break]
        res_axs = np.expand_dims(axs[ax_break:], 0)
        rs = self.params.getlist('manifold_radii', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        vis_3d = self.params.getboolean('vis_3d')
        # print(characterize_generalization(fdg, model, 10))
        dc.plot_source_manifold(fdg, model, rs, n_arcs,
                                source_scale_mag=source_scale_mag,
                                rep_scale_mag=rep_scale_mag,
                                markers=False, axs=manifold_axs,
                                titles=False, plot_model_3d=vis_3d,
                                model_view_init=view_init)
        if colors is None:
            colors = (None,)*len(run_inds)
        if multi_num > 1:
            # replicate run indices/labels/colors for multi-panel summaries
            double_inds = np.concatenate(list((i,)*len(run_inds)
                                              for i in range(multi_num)))
            run_inds = run_inds*multi_num
            labels = labels*multi_num
            colors=colors*multi_num
        else:
            double_inds = (None,)*len(run_inds)
        for i, ri in enumerate(run_inds):
            dc.plot_recon_gen_summary(ri, f_pattern, log_x=False,
                                      collapse_plots=False, folder=folder,
                                      axs=res_axs, legend=labels[i],
                                      print_args=False, set_title=False,
                                      color=colors[i], double_ind=double_inds[i],
                                      **kwargs)
        res_axs[0, 0].set_yticks([.5, 1])
        res_axs[0, 1].set_yticks([0, .5, 1])
class Figure1(DisentangledFigure):
    """Figure 1: task/partition schematics, linear vs nonlinear
    representation-geometry schematics, and a visualization of the
    example (entangled) input encoder with its generalization metrics."""

    def __init__(self, fig_key='figure1', colors=colors, **kwargs):
        fsize = (6, 5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.fig_key = fig_key
        self.panel_keys = ('partition_schematic',
                           'representation_schematic',
                           'encoder_schematic',
                           'encoder_visualization', 'metric_schematic')
        super().__init__(fsize, params, colors=colors, **kwargs)

    def make_gss(self):
        """Lay out the gridspec axes for each panel, keyed by panel name
        in ``self.panel_keys``."""
        gss = {}
        part_schem_grid = self.gs[:80, :30]
        gss[self.panel_keys[0]] = self.get_axs((part_schem_grid,))
        metric_schem_grid = self.gs[:80, 36:55]
        gss[self.panel_keys[4]] = self.get_axs((metric_schem_grid,))
        rep_schem_grid = pu.make_mxn_gridspec(self.gs, 2, 2,
                                              25, 100, 0, 55,
                                              0, 0)
        gss[self.panel_keys[1]] = self.get_axs(rep_schem_grid, all_3d=True)
        encoder_schem_grid = self.gs[:40, 70:]
        gss[self.panel_keys[2]] = self.get_axs((encoder_schem_grid,))
        # only the model-representation axis is 3D, and only if configured
        plot_3d_axs = np.zeros((2, 2), dtype=bool)
        plot_3d_axs[0, 1] = self.params.getboolean('vis_3d')
        ev1_grid = pu.make_mxn_gridspec(self.gs, 1, 2,
                                        50, 70, 65, 100,
                                        8, 0)
        ev2_grid = pu.make_mxn_gridspec(self.gs, 1, 2,
                                        79, 100, 65, 100,
                                        8, 15)
        ev_grid = np.concatenate((ev1_grid, ev2_grid), axis=0)
        gss[self.panel_keys[3]] = self.get_axs(ev_grid,
                                               plot_3ds=plot_3d_axs)
        self.gss = gss

    def _make_nonlin_func(self, cents, wids=2):
        """Return a Gaussian-bump nonlinearity: each output unit responds
        to squared distance from one row of *cents*.
        """
        def f(x):
            cs = np.expand_dims(cents, 0)
            xs = np.expand_dims(x, 1)
            d = np.sum(-(xs - cs)**2, axis=2)
            # assumes wids scales the exponent directly — TODO confirm the
            # intended width parameterization (vs dividing by 2*wids**2)
            r = np.exp(d/2*wids)
            return r
        return f

    def _plot_schem(self, pts, f, ax, corners=None, corner_color=None,
                    **kwargs):
        """Plot the curve f(pts) in 3D; optionally mark transformed
        *corners* as points, in *corner_color* if given."""
        pts_trs = f(pts)
        l = ax.plot(pts_trs[:, 0], pts_trs[:, 1], pts_trs[:, 2],
                    **kwargs)
        if corners is not None:
            if corner_color is not None:
                kwargs['color'] = corner_color
            corners_trs = f(corners)
            ax.plot(corners_trs[:, 0], corners_trs[:, 1],
                    corners_trs[:, 2], 'o', **kwargs)

    def _plot_hyperplane(self, pts, lps, f, ax):
        """Fit a linear classifier separating the two transformed label
        points *lps* and draw its decision plane on the 3D axis."""
        pts_f = f(pts)
        lps_f = f(lps)
        cats = [0, 1]
        # high C: essentially a hard-margin fit on the two label points
        c = skc.SVC(kernel='linear', C=1000)
        c.fit(lps_f, cats)
        # the decision plane is spanned by the null space of the weights
        n_vecs = spla.null_space(c.coef_)
        v1 = np.linspace(-1, 1, 2)
        v2 = np.linspace(-1, 1, 2)
        x, y = np.meshgrid(v1, v2)
        x_ns = np.expand_dims(x, 0)*np.expand_dims(n_vecs[:, 0], (1, 2))
        y_ns = np.expand_dims(y, 0)*np.expand_dims(n_vecs[:, 1], (1, 2))
        offset = np.expand_dims(c.coef_[0]*c.intercept_,
                                (1, 2))
        proj_pts = x_ns + y_ns
        proj_pts = proj_pts - offset
        ax.plot_surface(*proj_pts, alpha=1)

    def panel_representation_schematic(self):
        """Draw the linear vs nonlinear embedding schematics (top row) and
        the corresponding half-space classification schematics with fitted
        hyperplanes (bottom row)."""
        key = self.panel_keys[1]
        ax_lin, ax_nonlin = self.gss[key][0]
        ax_lin_h, ax_nonlin_h = self.gss[key][1]
        rpt = 1
        lpt = -1
        pts, corners = dc.make_square(100, lpt=lpt, rpt=rpt)
        pts_h1, corners_h1 = dc.make_half_square(100, lpt=lpt, rpt=rpt)
        pts_h2, corners_h2 = dc.make_half_square(100, lpt=rpt, rpt=lpt)
        # linear embedding: project onto three fixed unit vectors
        trs = u.make_unit_vector(np.array([[1, 1],
                                           [-1, 1],
                                           [-1, .5]]))
        lin_func = lambda x: np.dot(x, trs.T)
        # nonlinear embedding: Gaussian bumps at three centers
        cents = np.array([[rpt, rpt],
                          [lpt, lpt],
                          [.5*rpt, 0]])
        nonlin_func = self._make_nonlin_func(cents)
        rads = self.params.getlist('manifold_radii', typefunc=float)
        grey_col = self.params.getcolor('grey_color')
        pt_color = self.params.getcolor('berry_color')
        h1_color = self.params.getcolor('train_color')
        h2_color = self.params.getcolor('test_color')
        alpha = self.params.getfloat('schem_alpha')
        ms = 3
        # 3D view angles for the linear and nonlinear axes
        elev_lin = 20
        az_lin = -10
        elev_nonlin = 50
        az_nonlin = -120
        colors = (grey_col,)*(len(rads) - 1) + (grey_col,)
        alphas = (alpha,)*(len(rads) - 1) + (1,)
        for i, r in enumerate(rads):
            # corners are only marked on the outermost radius
            if i == len(rads) - 1:
                corners_p = r*corners
            else:
                corners_p = None
            self._plot_schem(r*pts, lin_func, ax_lin, corners=corners_p,
                             color=colors[i], corner_color=pt_color,
                             alpha=alphas[i], markersize=ms)
            self._plot_schem(r*pts, nonlin_func, ax_nonlin, corners=corners_p,
                             color=colors[i], corner_color=pt_color,
                             alpha=alphas[i], markersize=ms)
            self._plot_schem(r*pts_h1, lin_func, ax_lin_h, corners=corners_p,
                             color=h1_color, corner_color=pt_color,
                             alpha=alphas[i], markersize=ms)
            self._plot_schem(r*pts_h2, lin_func, ax_lin_h, corners=None,
                             color=h2_color, corner_color=pt_color,
                             alpha=alphas[i], markersize=ms)
            self._plot_schem(r*pts_h1, nonlin_func, ax_nonlin_h,
                             corners=corners_p,
                             color=h1_color, corner_color=pt_color,
                             alpha=alphas[i], markersize=ms)
            self._plot_schem(r*pts_h2, nonlin_func, ax_nonlin_h,
                             corners=None,
                             color=h2_color, corner_color=pt_color,
                             alpha=alphas[i], markersize=ms)
        # decision hyperplanes use the outermost radius' corner points
        self._plot_hyperplane(r*pts_h1, corners_p[:2], lin_func,
                              ax_lin_h)
        self._plot_hyperplane(r*pts_h1, corners_p[:2], nonlin_func,
                              ax_nonlin_h)
        ax_lin.view_init(elev_lin, az_lin)
        ax_nonlin.view_init(elev_nonlin, az_nonlin)
        ax_lin_h.view_init(elev_lin, az_lin)
        ax_nonlin_h.view_init(elev_nonlin, az_nonlin)
        gpl.set_3d_background(ax_nonlin)
        gpl.set_3d_background(ax_lin)
        gpl.remove_ticks_3d(ax_nonlin)
        gpl.remove_ticks_3d(ax_lin)
        gpl.set_3d_background(ax_nonlin_h)
        gpl.set_3d_background(ax_lin_h)
        gpl.remove_ticks_3d(ax_nonlin_h)
        gpl.remove_ticks_3d(ax_lin_h)

    def panel_encoder_visualization(self):
        """Visualize the trained data-generator manifold (via an identity
        'model') and plot its classifier/regression generalization."""
        key = self.panel_keys[3]
        axs = self.gss[key]
        vis_axs = axs[0]
        class_ax, regr_ax = axs[1]
        if self.data.get(key) is None:
            # train (or fetch cached) data generator and score it once
            fdg = self.make_fdg()
            exp_dim = fdg.representation_dimensionality(
                participation_ratio=True)
            pass_model = dd.IdentityModel()
            c_reps = self.params.getint('dg_classifier_reps')
            gen_perf = characterize_generalization(fdg, pass_model,
                                                   c_reps)
            self.data[key] = (fdg, pass_model, exp_dim, gen_perf)
        fdg, pass_model, exp_dim, gen_perf = self.data[key]
        print('PR = {}'.format(exp_dim))
        rs = self.params.getlist('manifold_radii', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        vis_3d = self.params.getboolean('vis_3d')
        dc.plot_source_manifold(fdg, pass_model, rs, n_arcs,
                                source_scale_mag=.5,
                                rep_scale_mag=.03,
                                markers=False, axs=vis_axs,
                                titles=False, plot_model_3d=vis_3d,
                                l_axlab_str='latent dim {} (au)')
        dg_color = self.params.getcolor('dg_color')
        plot_bgp(gen_perf[0], gen_perf[1], class_ax, regr_ax, color=dg_color)
        # plot_single_gen(gen_perf[0], class_ax, color=dg_color)
        # plot_single_gen(gen_perf[1], regr_ax, color=dg_color)
        # class_ax.set_ylabel('classifier\ngeneralization')
        # regr_ax.set_ylabel('regression\ngeneralization')
        # gpl.add_hlines(.5, class_ax)
        # gpl.add_hlines(0, regr_ax)
        # class_ax.set_ylim([.5, 1])
        # regr_ax.set_ylim([0, 1])
class Figure2(DisentangledFigure):
    """Figure 2: emergence of abstract representations with multi-tasking.

    Panels:
      * ``order_disorder`` -- geometry of binary task targets (PCA spectrum,
        2D projection, and hypothesized high-D structure).
      * ``training_rep`` -- training loss and learned representations as the
        number of partition tasks varies.
      * ``rep_summary`` -- generalization summary across simulation runs.
    """

    def __init__(self, fig_key='figure2', colors=colors, **kwargs):
        fsize = (6, 5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.fig_key = fig_key
        self.panel_keys = ('order_disorder', 'training_rep', 'rep_summary')
        super().__init__(fsize, params, colors=colors, **kwargs)

    def make_gss(self):
        """Create the gridspec axes for all three panels; store in self.gss."""
        gss = {}
        # ordered_rep_grid = self.gs[:25, :30]
        # class_perf_grid = self.gs[75:, :15]
        # regr_perf_grid = self.gs[75:, 30:45]
        inp_grid = pu.make_mxn_gridspec(self.gs, 1, 2,
                                        50, 68, 10, 50,
                                        5, 10)
        # high_d_grid = pu.make_mxn_gridspec(self.gs, 1, 3,
        #                                    75, 100, 0, 10,
        #                                    5, 2)
        high_d_grid = (self.gs[75:, :5],)
        hypoth_grids = pu.make_mxn_gridspec(self.gs, 1, 2,
                                            75, 100, 10, 50,
                                            5, 5)
        gss[self.panel_keys[0]] = (self.get_axs(inp_grid),
                                   self.get_axs(high_d_grid),
                                   self.get_axs(hypoth_grids))
        train_grid = self.gs[:15, 35:55]
        train_ax = self.get_axs((train_grid,))[0]
        # one row of representation axes per number-of-tasks condition
        n_parts = len(self.params.getlist('n_parts'))
        rep_grids = pu.make_mxn_gridspec(self.gs, n_parts, 2,
                                         0, 65, 60, 100,
                                         5, 0)
        plot_3d_axs = np.zeros((n_parts, 2), dtype=bool)
        # right column (model representation) may be rendered in 3D
        plot_3d_axs[:, 1] = self.params.getboolean('vis_3d')
        rep_axs = self.get_axs(rep_grids, sharex='vertical',
                               sharey='vertical', plot_3ds=plot_3d_axs)
        gss[self.panel_keys[1]] = train_ax, rep_axs
        rep_classifier_grid = self.gs[75:, 60:75]
        rep_regression_grid = self.gs[75:, 85:]
        gss[self.panel_keys[2]] = self.get_axs((rep_classifier_grid,
                                                rep_regression_grid))
        self.gss = gss

    def panel_order_disorder(self):
        """Plot task-target geometry: PCA spectrum vs linear theory, a 2D PC
        projection, a high-D schematic, and close/far source manifolds."""
        key = self.panel_keys[0]
        (ax_inp, ax_hd, axs) = self.gss[key]
        if self.data.get(key) is None:
            fdg = self.make_fdg()
            exp_dim = fdg.representation_dimensionality(
                participation_ratio=True)
            pass_model = dd.IdentityModel()
            self.data[key] = (fdg, pass_model, exp_dim)
        fdg, pass_model, exp_dim = self.data[key]
        map_dims = self.params.getint('map_dims')
        map_parts = self.params.getint('map_parts')
        # binary task targets and their scalar (linear-theory) counterparts
        samps, targs, targs_scal, _ = dt.generate_binary_map(map_dims,
                                                             map_parts)
        p = skd.PCA()
        p.fit(targs)
        targs_dim = p.transform(targs)
        p_scal = skd.PCA()
        p_scal.fit(targs_scal)
        partition_color = self.params.getcolor('partition_color')
        theory_color = self.params.getcolor('theory_color')
        ax_inp[0, 0].plot(p.explained_variance_ratio_, 'o', label='actual',
                          color=partition_color)
        ax_inp[0, 0].plot(p_scal.explained_variance_ratio_, 'o',
                          label='linear theory', color=theory_color)
        ax_inp[0, 0].legend(frameon=False)
        ax_inp[0, 0].set_xlabel('PC number')
        ax_inp[0, 0].set_ylabel('proportion\nexplained')
        gpl.clean_plot(ax_inp[0, 0], 0)
        ax_inp[0, 1].plot(targs_dim[:, 0], targs_dim[:, 1], 'o',
                          color=partition_color)
        gpl.clean_plot(ax_inp[0, 1], 0)
        gpl.make_yaxis_scale_bar(ax_inp[0, 1], .8)
        gpl.make_xaxis_scale_bar(ax_inp[0, 1], .8)
        ax_inp[0, 1].set_xlabel('PC 1')
        ax_inp[0, 1].set_ylabel('PC 2')
        # jittered point pairs suggest the +-1 cluster structure along PC P
        eps = [-.1, -.05, 0, .05, .1]
        for i, eps_i in enumerate(eps):
            ax_hd[0].plot([0, 0], [1 + eps_i, -1 - eps_i], 'o')
        gpl.clean_plot(ax_hd[0], 0)
        gpl.clean_plot_bottom(ax_hd[0])
        gpl.make_yaxis_scale_bar(ax_hd[0], .8)
        ax_hd[0].set_ylabel('PC P')
        # for i, eps_i in enumerate(eps):
        #     ax_hd[0, 0].plot([0, 0], [1 + eps_i, -1 - eps_i], 'o')
        #     ax_hd[0, 2].plot([0, 0], [1 + eps_i, -1 - eps_i], 'o')
        # gpl.clean_plot(ax_hd[0, 1], 1)
        # gpl.clean_plot(ax_hd[0, 0], 0)
        # gpl.clean_plot(ax_hd[0, 2], 0)
        # gpl.clean_plot_bottom(ax_hd[0, 1])
        # gpl.clean_plot_bottom(ax_hd[0, 0])
        # gpl.clean_plot_bottom(ax_hd[0, 2])
        # gpl.make_yaxis_scale_bar(ax_hd[0, 0], .8)
        # ax_hd[0, 0].set_ylabel('PC 1')
        # gpl.make_yaxis_scale_bar(ax_hd[0, 2], .8)
        # ax_hd[0, 2].set_ylabel('PC P')
        rs_close = self.params.getlist('manifold_radii_close', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        dc.plot_diagnostics(fdg, pass_model, rs_close, n_arcs, plot_source=True,
                            dim_red=False, square=False,
                            scale_mag=.2, markers=False, ax=axs[0, 0])
        axs[0, 0].set_xlabel('PC 1')
        axs[0, 0].set_ylabel('PC 2')
        rs = self.params.getlist('manifold_radii', typefunc=float)
        dc.plot_diagnostics(fdg, pass_model, rs, n_arcs, plot_source=True,
                            dim_red=False,
                            scale_mag=.2, markers=False, ax=axs[0, 1])
        axs[0, 1].set_xlabel('PC 1')

    def panel_training_rep(self):
        """Train multi-tasking models across task counts (cached) and plot the
        loss curves plus source/representation manifolds for each condition."""
        key = self.panel_keys[1]
        train_ax, rep_axs = self.gss[key]
        if self.data.get(key) is None:
            fdg = self.make_fdg()
            n_parts = self.params.getlist('n_parts', typefunc=int)
            latent_dim = self.params.getint('latent_dim')
            n_reps = self.params.getint('n_reps')
            dg_epochs = self.params.getint('dg_epochs')
            n_epochs = self.params.getint('n_epochs')
            n_train_bounds = self.params.getlist('n_train_eg_bounds',
                                                 typefunc=float)
            n_train_diffs = self.params.getint('n_train_eg_diffs')
            layer_spec = self.params.getlist('layers', typefunc=tuple_int)
            no_autoencoder = self.params.getboolean('no_autoencoder')
            # one model constructor per number-of-partitions condition
            model_kinds = list(ft.partial(dd.FlexibleDisentanglerAE,
                                          true_inp_dim=fdg.input_dim,
                                          n_partitions=num_p,
                                          no_autoenc=no_autoencoder)
                               for num_p in n_parts)
            out = dc.test_generalization_new(
                dg_use=fdg, layer_spec=layer_spec,
                est_inp_dim=latent_dim,
                inp_dim=fdg.output_dim,
                dg_train_epochs=dg_epochs,
                model_n_epochs=n_epochs,
                n_reps=n_reps, model_kinds=model_kinds,
                models_n_diffs=n_train_diffs,
                models_n_bounds=n_train_bounds,
                p_mean=False, plot=False)
            self.data[key] = (out, (n_parts, n_epochs))
        fdg, (models, th), (p, _), (_, scrs, _), _ = self.data[key][0]
        n_parts, n_epochs = self.data[key][1]
        rs = self.params.getlist('manifold_radii', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        npart_signifier = self.params.get('npart_signifier')
        mid_i = np.floor(len(n_parts)/2)
        vis_3d = self.params.getboolean('vis_3d')
        # NOTE(review): assumes at most three n_parts conditions -- confirm
        view_inits = (None, (50, 30), (40, -20))
        for i, num_p in enumerate(n_parts):
            hist = th[0, i, 0].history['loss']
            epochs = np.arange(1, len(hist) + 1)
            # bug fix: the raw-string prefix belongs outside the quotes;
            # 'r$...' previously rendered a literal "r$" in the legend
            train_ax.plot(epochs, hist,
                          label=r'${} = {}$'.format(npart_signifier,
                                                    num_p))
            dc.plot_source_manifold(fdg, models[0, i, 0], rs, n_arcs,
                                    source_scale_mag=.5,
                                    rep_scale_mag=10, plot_model_3d=vis_3d,
                                    markers=False, axs=rep_axs[i],
                                    titles=False, model_view_init=view_inits[i])
            # keep y-labels only on the middle row and x-labels only on the
            # bottom row to avoid clutter
            if mid_i != i:
                rep_axs[i, 0].set_ylabel('')
                rep_axs[i, 1].set_ylabel('')
            if i < len(n_parts) - 1:
                rep_axs[i, 0].set_xlabel('')
                rep_axs[i, 1].set_xlabel('')
        gpl.clean_plot(train_ax, 0)
        train_ax.set_yscale('log')

    def panel_rep_summary(self):
        """Summarize generalization across runs from saved simulation files."""
        key = self.panel_keys[2]
        axs = self.gss[key]
        run_ind = self.params.get('rep_summary_run')
        f_pattern = self.params.get('f_pattern')
        path = self.params.get('mp_simulations_path')
        part_color = self.params.getcolor('partition_color')
        pv_mask = np.array([False, True, False])
        axs = np.expand_dims(axs, 0)
        dc.plot_recon_gen_summary(run_ind, f_pattern, log_x=False,
                                  collapse_plots=True, folder=path,
                                  axs=axs, print_args=False,
                                  pv_mask=pv_mask,
                                  set_title=False, color=part_color)
class Figure4Beta(DisentangledFigure):
    """Supplementary figure: beta-VAE schematic and performance panels."""
    def __init__(self, fig_key='figure4beta', colors=colors, **kwargs):
        fsize = (5.5, 3.5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.panel_keys = ('bvae_schematic', 'bvae_performance')
        super().__init__(fsize, params, colors=colors, **kwargs)
    def make_gss(self):
        """Lay out axes: left half for the schematic, 2x2 performance grid."""
        gss = {}
        bvae_schematic_grid = self.gs[:, :45]
        bv1_perf = pu.make_mxn_gridspec(self.gs, 1, 2, 0, 44, 55, 100,
                                        8, 0)
        bv2_perf = pu.make_mxn_gridspec(self.gs, 1, 2, 56, 100, 55, 100,
                                        8, 15)
        bv_perf = np.concatenate((bv1_perf, bv2_perf), axis=0)
        vis_3d = self.params.getboolean('vis_3d')
        axs_3ds = np.zeros((2, 2), dtype=bool)
        # only the top-right axis may be a 3D projection
        axs_3ds[0, 1] = vis_3d
        gss[self.panel_keys[0]] = self.get_axs((bvae_schematic_grid,))
        gss[self.panel_keys[1]] = self.get_axs(bv_perf, plot_3ds=axs_3ds)
        self.gss = gss
    def panel_bvae_performance(self):
        """Train (or reuse cached) example beta-VAE and plot its performance
        alongside the saved simulation runs via ``_standard_panel``."""
        key = self.panel_keys[1]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            out = train_eg_bvae(fdg, self.params)
            c_reps = self.params.getint('dg_classifier_reps')
            m = out[0]
            gen_perf = characterize_generalization(fdg, m[0, 0], c_reps)
            self.data[key] = (fdg, m, gen_perf)
        fdg, m, gen_perf = self.data[key]
        run_inds = (self.params.get('beta_eg_ind'),)
        f_pattern = self.params.get('beta_f_pattern')
        folder = self.params.get('beta_simulations_path')
        labels = (r'$\beta$VAE',)
        bvae_color = self.params.getcolor('bvae_color')
        colors = (bvae_color,)
        # NOTE(review): clearing these presumably suppresses partition-vector
        # plotting for the beta-VAE (which has no tasks) -- confirm
        m[0, 0].p_vectors = []
        m[0, 0].p_offsets = []
        pv_mask = np.array([False, True, False])
        axs_flat = np.concatenate((axs[0], axs[1]))
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs_flat,
                             labels=labels, pv_mask=pv_mask,
                             xlab=r'$\beta$', colors=colors,
                             rep_scale_mag=.01)
class Figure3(DisentangledFigure):
    """Figure 3: robustness of disentangling to task manipulations.

    Three panel rows: unbalanced partitions, contextual partitions, and
    training with partial (missing) target information.
    """
    def __init__(self, fig_key='figure3prf', colors=colors, **kwargs):
        fsize = (5.5, 3.5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.panel_keys = ('unbalanced_partitions', 'contextual_partitions',
                           'partial_information')
        super().__init__(fsize, params, colors=colors, **kwargs)
    def make_gss(self):
        """Build one row of axes per panel (schematic pair + results pair)."""
        gss = {}
        unbalanced_latent_grid = self.gs[:30, :20]
        unbalanced_rep_grid = self.gs[:30:, 30:45]
        unbalanced_class_grid = self.gs[:30, 55:70]
        unbalanced_regress_grid = self.gs[:30, 80:]
        axs_3d = np.zeros(4, dtype=bool)
        # second axis in each row (representation) may be 3D
        axs_3d[1] = self.params.getboolean('vis_3d')
        axs_left = pu.make_mxn_gridspec(self.gs, 3, 2, 0, 100, 0, 40,
                                        3, 0)
        axs_right = pu.make_mxn_gridspec(self.gs, 3, 2, 0, 100, 54, 100,
                                         5, 15)
        gss[self.panel_keys[0]] = self.get_axs(np.concatenate((axs_left[0],
                                                               axs_right[0])),
                                               plot_3ds=axs_3d)
        gss[self.panel_keys[1]] = self.get_axs(np.concatenate((axs_left[1],
                                                               axs_right[1])),
                                               plot_3ds=axs_3d)
        gss[self.panel_keys[2]] = self.get_axs(np.concatenate((axs_left[2],
                                                               axs_right[2])),
                                               plot_3ds=axs_3d)
        self.gss = gss
    def panel_unbalanced_partitions(self):
        """Compare balanced vs (very) unbalanced partition training runs."""
        key = self.panel_keys[0]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            out = train_eg_fd(fdg, self.params)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('unbalanced_eg_inds')
        f_pattern = self.params.get('f_pattern')
        folder = self.params.get('mp_simulations_path')
        labels = ('balanced', 'unbalanced', 'very unbalanced')
        part_color = self.params.getcolor('partition_color')
        unbal1_color = self.params.getcolor('unbalance_color1')
        unbal2_color = self.params.getcolor('unbalance_color2')
        colors = (part_color, unbal1_color, unbal2_color)
        rep_scale_mag = 20
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors)
        # labels are shared across panel rows, so strip them here
        for ax in axs:
            ax.set_xlabel('')
            ax.set_ylabel('')
    def panel_contextual_partitions(self):
        """Compare full tasks against contextual and offset-contextual tasks."""
        key = self.panel_keys[1]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            out = train_eg_fd(fdg, self.params, contextual_partitions=True,
                              offset_var=True)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('contextual_eg_inds')
        f_pattern = self.params.get('f_pattern')
        folder = self.params.get('mp_simulations_path')
        rep_scale_mag = 20
        part_color = self.params.getcolor('partition_color')
        context_color = self.params.getcolor('contextual_color')
        context_offset_color = self.params.getcolor('contextual_offset_color')
        colors = (part_color, context_color, context_offset_color)
        labels = ('full tasks', 'contextual tasks',
                  'offset contextual tasks')
        # pv_mask = np.array([False, False, False, False, True, False, False,
        #                     False])
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors,
                             view_init=(45, -30))
        for ax in axs[:2]:
            ax.set_xlabel('')
    def panel_partial_information(self):
        """Compare full information vs 50% missing vs single-task training."""
        key = self.panel_keys[2]
        axs = self.gss[key]
        nan_salt_eg = self.params.getfloat('nan_salt_eg')
        if not key in self.data.keys():
            fdg = self.make_fdg()
            out = train_eg_fd(fdg, self.params, nan_salt=nan_salt_eg,
                              offset_var=True)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('partial_eg_inds')
        f_pattern = self.params.get('f_pattern')
        folder = self.params.get('mp_simulations_path')
        rep_scale_mag = 20
        part_color = self.params.getcolor('partition_color')
        partial_color1 = self.params.getcolor('partial_color1')
        partial_color2 = self.params.getcolor('partial_color2')
        colors = (part_color, partial_color1, partial_color2)
        labels = ('full information', '50% missing', 'single task')
        # pv_mask = np.array([False, False, False, True, False])
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors)
        for ax in axs[:2]:
            ax.set_xlabel('')
class Figure3Grid(DisentangledFigure):
    """Figure 3 (grid-task variant): task manipulations, irrelevant variables,
    task-alignment correlation decay, grid-only and mixed task training."""
    def __init__(self, fig_key='figure3grid', colors=colors, **kwargs):
        fsize = (5.5, 4.8)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.panel_keys = ('task_manipulations', 'irrel_variables',
                           'correlation_decay', 'grid_only',
                           'mixed')
        super().__init__(fsize, params, colors=colors, **kwargs)
        # reuse a previously-constructed data generator if one was cached
        self.fdg = self.data.get('fdg')
    def make_gss(self):
        """Assemble a 4x3 layout: schematic column plus two results columns
        per panel row; the bottom-left schematic is the correlation panel."""
        gss = {}
        gs_schem = pu.make_mxn_gridspec(self.gs, 4, 2, 0, 100, 0, 40,
                                        3, 0)
        axs_3d = np.zeros((4, 2), dtype=bool)
        axs_3d[:, 1] = self.params.getboolean('vis_3d')
        axs_schem = self.get_axs(gs_schem, plot_3ds=axs_3d)
        gs_res = pu.make_mxn_gridspec(self.gs, 4, 2, 0, 100, 54, 100,
                                      8, 12)
        axs_res = self.get_axs(gs_res)
        # second schematic column joins the results columns for each row
        axs_res2 = np.concatenate((axs_schem[:, 1:], axs_res), axis=1)
        axs_schem2 = axs_schem[3, 0]
        gss[self.panel_keys[0]] = axs_res2[0]
        gss[self.panel_keys[1]] = axs_res2[1]
        gss[self.panel_keys[2]] = axs_schem2
        gss[self.panel_keys[3]] = axs_res2[2]
        gss[self.panel_keys[4]] = axs_res2[3]
        self.gss = gss
    def panel_task_manipulations(self):
        """Compare unbalanced, contextual, and single-task-example training."""
        key = self.panel_keys[0]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            out = train_eg_fd(fdg, self.params, contextual_partitions=True,
                              offset_var=True)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('manip_eg_inds')
        f_pattern = self.params.get('f_pattern')
        folder = self.params.get('mp_simulations_path')
        rep_scale_mag = 20
        unbal_color = self.params.getcolor('unbalance_color1')
        context_color = self.params.getcolor('contextual_color')
        partial_color = self.params.getcolor('partial_color2')
        colors = (unbal_color, context_color, partial_color)
        labels = ('unbalanced tasks', 'contextual tasks',
                  'single task examples')
        # pv_mask = np.array([False, False, False, False, True, False, False,
        #                     False])
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors,
                             view_init=(45, -30))
        for ax in axs[:2]:
            ax.set_xlabel('')
    def panel_irrel_variables(self):
        """Compare generalization along trained vs untrained latent dims."""
        key = self.panel_keys[1]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            irrel_dims = self.params.getlist('irrel_dims', typefunc=int)
            # boolean mask selecting the latent dims excluded from training
            irrel_dims = np.array(irrel_dims).astype(bool)
            out = train_eg_fd(fdg, self.params, offset_var=False,
                              no_learn_lvs=irrel_dims)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('no_learn_eg_ind')
        f_pattern = self.params.get('f_pattern')
        folder = self.params.get('mp_simulations_path')
        multi_num = self.params.getint('multi_num')
        rep_scale_mag = 20
        grid2_color = self.params.getcolor('partition_color')
        grid3_color = self.params.getcolor('untrained_color')
        colors = (grid2_color, grid3_color)
        labels = ('trained dimensions', 'untrained dimensions')
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors,
                             multi_num=multi_num,
                             view_init=(45, -30))
        # for ax in axs:
        #     ax.set_xlabel('')
        #     ax.set_xticks([])
    def panel_correlation_decay(self):
        """Histogram task-alignment (dot products) for partition vs grid tasks."""
        key = self.panel_keys[2]
        ax = self.gss[key]
        eg_dim = self.params.getint('inp_dim')
        n_samples = self.params.getint('n_corr_samples')
        partition_color = self.params.getcolor('partition_color')
        grid2_color = self.params.getcolor('grid2_color')
        grid3_color = self.params.getcolor('grid3_color')
        part_corr = dt.norm_dot_product(eg_dim)
        grid2_corr = dt.binary_dot_product(2, eg_dim)
        grid3_corr = dt.binary_dot_product(3, eg_dim)
        ax.hist(part_corr, histtype='step', color=partition_color,
                label='partition tasks')
        ax.hist(grid2_corr, histtype='step', color=grid2_color,
                label=r'$N_{C} = 2^{D}$')
        ax.hist(grid3_corr, histtype='step', color=grid3_color,
                label=r'$N_{C} = 3^{D}$')
        ax.legend(frameon=False)
        gpl.clean_plot(ax, 0)
        ax.set_xlabel('task alignment')
    def panel_grid_only(self):
        """Compare training with grid-coloring tasks of granularity 2 vs 3."""
        key = self.panel_keys[3]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            # NOTE(review): n_grids is read but never passed to train_eg_fd
            # below (cf. panel_mixed, which does pass it) -- confirm intent
            n_grids = self.params.getint('n_grid_eg')
            out = train_eg_fd(fdg, self.params, offset_var=False,
                              grid_coloring=True, n_granules=3)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('grid_eg_inds')
        f_pattern = self.params.get('f_pattern')
        folder = self.params.get('mp_simulations_path')
        rep_scale_mag = 20
        grid2_color = self.params.getcolor('grid2_color')
        grid3_color = self.params.getcolor('grid3_color')
        grid_style = self.params.get('grid_style')
        colors = (grid2_color, grid3_color)
        labels = ('grid = 2', 'grid = 3')
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors,
                             view_init=(45, -30), linestyle=grid_style)
        for ax in axs:
            ax.set_xlabel('')
            # ax.set_xticks([])
    def panel_mixed(self):
        """Compare training with a mixture of partition and grid tasks."""
        key = self.panel_keys[4]
        axs = self.gss[key]
        if not key in self.data.keys():
            fdg = self.make_fdg()
            n_grids = self.params.getint('n_grid_eg')
            out = train_eg_fd(fdg, self.params, n_grids=n_grids)
            self.data[key] = (fdg, out)
        fdg, out = self.data[key]
        m, _ = out
        run_inds = self.params.getlist('mixed_eg_inds')
        f_pattern = self.params.get('f_mixed_pattern')
        folder = self.params.get('mp_simulations_path')
        rep_scale_mag = 20
        mixed2_color = self.params.getcolor('grid2_color')
        mixed3_color = self.params.getcolor('grid3_color')
        colors = (mixed2_color, mixed3_color)
        marker_color = self.params.getcolor('marker_color')
        labels = (r'$N_{C} = 2^{D}$',
                  r'$N_{C} = 3^{D}$')
        pv_mask = np.array([False, False, True])
        self._standard_panel(fdg, m, run_inds, f_pattern, folder, axs,
                             labels=labels, pv_mask=pv_mask,
                             rep_scale_mag=rep_scale_mag, colors=colors,
                             view_init=(45, -30), distr_parts='n_grids',
                             plot_hline=False)
        for ax in axs[1:]:
            ax.set_xlabel('grid tasks')
            gpl.add_vlines(15, ax=ax, linestyle='dashed')
class Figure4(DisentangledFigure):
    """Figure 4: receptive-field (RF) structured input and a comparison of the
    multi-tasking model against the beta-VAE on that input."""
    def __init__(self, fig_key='figure4', colors=colors, **kwargs):
        fsize = (6, 4)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.panel_keys = ('rf_input', 'disentangling_comparison')
        super().__init__(fsize, params, colors=colors, **kwargs)
        # reuse a previously-cached RF data generator if one exists
        self.rfdg = self.data.get('rfdg')
    def make_rfdg(self, retrain=False, kernel=False):
        """Return (building and caching if needed) the RF data generator.

        With ``kernel=True`` a KernelDataGenerator is used instead of the
        RF-based one; ``retrain=True`` forces reconstruction.
        """
        if self.rfdg is not None and not retrain:
            rfdg = self.rfdg
        else:
            inp_dim = self.params.getint('inp_dim')
            dg_dim = self.params.getint('dg_dim')
            in_noise = self.params.getfloat('in_noise')
            out_noise = self.params.getfloat('out_noise')
            width_scaling = self.params.getfloat('width_scaling')
            dg_source_var = self.params.getfloat('dg_source_var')
            source_distr = sts.multivariate_normal(np.zeros(inp_dim),
                                                   dg_source_var)
            if not kernel:
                rfdg = dg.RFDataGenerator(inp_dim, dg_dim, total_out=True,
                                          input_noise=in_noise, noise=out_noise,
                                          width_scaling=width_scaling,
                                          source_distribution=source_distr,
                                          low_thr=.01)
            else:
                rfdg = dg.KernelDataGenerator(inp_dim, None, dg_dim,
                                              low_thr=.01)
            self.rfdg = rfdg
            self.data['rfdg'] = rfdg
        return rfdg
    def make_gss(self):
        """Lay out RF-input axes (left) and model comparison axes (right)."""
        gss = {}
        rf_schematic_grid = self.gs[:40, :20]
        rf_projection_grid = self.gs[:50, 28:45]
        rf_dec_grid = pu.make_mxn_gridspec(self.gs, 1, 2, 60, 100,
                                           0, 45, 5, 14)
        axs_3ds = np.zeros(4, dtype=bool)
        # only the projection axis may be a 3D plot
        axs_3ds[1] = self.params.getboolean('vis_3d')
        gss[self.panel_keys[0]] = self.get_axs((rf_schematic_grid,
                                                rf_projection_grid,
                                                rf_dec_grid[0, 0],
                                                rf_dec_grid[0, 1]),
                                               plot_3ds=axs_3ds)
        rep_grids = pu.make_mxn_gridspec(self.gs, 2, 2, 0, 100,
                                         55, 100, 5, 5)
        axs_3ds = np.zeros((2, 2), dtype=bool)
        axs_3ds[0, :] = self.params.getboolean('vis_3d')
        gss[self.panel_keys[1]] = self.get_axs(rep_grids, plot_3ds=axs_3ds)
        self.gss = gss
    def panel_rf_input(self, kernel=False):
        """Plot RF schematic, latent projection, and generalization of the raw
        RF input (through an identity model)."""
        key = self.panel_keys[0]
        schem_ax, proj_ax, dec_c_ax, dec_r_ax = self.gss[key]
        rfdg = self.make_rfdg(kernel=kernel)
        rf_eg_color = self.params.getcolor('rf_eg_color')
        if not kernel:
            rfdg.plot_rfs(schem_ax, color=rf_eg_color, thin=5)
        pass_model = dd.IdentityModel()
        rs = self.params.getlist('manifold_radii', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        vis_3d = self.params.getboolean('vis_3d')
        dc.plot_diagnostics(rfdg, pass_model, rs, n_arcs,
                            scale_mag=.5, markers=False, ax=proj_ax,
                            plot_3d=vis_3d)
        if not key in self.data.keys():
            c_reps = self.params.getint('dg_classifier_reps')
            out = characterize_generalization(rfdg, pass_model,
                                              c_reps)
            self.data[key] = out
        results_class, results_regr = self.data[key]
        color = self.params.getcolor('dg_color')
        plot_bgp(results_class, results_regr, dec_c_ax, dec_r_ax,
                 color=color)
        # plot_single_gen(results_class, dec_c_ax, color=color)
        # dec_c_ax.set_ylim([.5, 1])
        # plot_single_gen(results_regr, dec_r_ax, color=color)
        # dec_r_ax.set_ylim([0, 1])
        # dec_c_ax.set_ylabel('classifier\ngeneralization')
        # dec_r_ax.set_ylabel('regression\ngeneralization')
    def panel_disentangling_comparison(self, kernel=None):
        """Train example multi-tasking and beta-VAE models on the RF input
        (cached) and plot their representations plus run summaries."""
        key = self.panel_keys[1]
        axs = self.gss[key]
        rfdg = self.make_rfdg(kernel=kernel)
        if not key in self.data.keys():
            out_fd = train_eg_fd(rfdg, self.params)
            out_bvae = train_eg_bvae(rfdg, self.params)
            m_fd = out_fd[0][0, 0]
            m_bvae = out_bvae[0][0, 0]
            fd_gen = characterize_generalization(rfdg, m_fd, 10)
            bvae_gen = characterize_generalization(rfdg, m_bvae, 10)
            self.data[key] = (out_fd, out_bvae, fd_gen, bvae_gen)
        # older caches stored only (out_fd, out_bvae); support both formats
        if len(self.data[key]) > 2:
            out_fd, out_bvae, fd_gen, bvae_gen = self.data[key]
        else:
            out_fd, out_bvae = self.data[key]
        m_fd = out_fd[0][0, 0]
        m_bvae = out_bvae[0][0, 0]
        rs = self.params.getlist('manifold_radii', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        vis_3d = self.params.getboolean('vis_3d')
        # print(np.mean(fd_gen[0], axis=0))
        # print(np.mean(fd_gen[1], axis=0))
        # print(np.mean(bvae_gen[0], axis=0))
        # print(np.mean(bvae_gen[1], axis=0))
        run_ind_fd = self.params.get('run_ind_fd')
        run_ind_beta = self.params.get('run_ind_beta')
        f_pattern = self.params.get('f_pattern')
        beta_f_pattern = self.params.get('beta_f_pattern')
        folder = self.params.get('mp_simulations_path')
        beta_folder = self.params.get('beta_simulations_path')
        dc.plot_diagnostics(rfdg, m_fd, rs, n_arcs,
                            scale_mag=20, markers=False,
                            ax=axs[0, 0], plot_3d=vis_3d)
        dc.plot_diagnostics(rfdg, m_bvae, rs, n_arcs,
                            scale_mag=.01, markers=False,
                            ax=axs[0, 1], plot_3d=vis_3d)
        res_axs = axs[1:]
        pv_mask = np.array([False, True, False])
        part_color = self.params.getcolor('partition_color')
        bvae_color = self.params.getcolor('bvae_color')
        xlab = r'tasks / $\beta$'
        dc.plot_recon_gen_summary(run_ind_fd, f_pattern, log_x=False,
                                  collapse_plots=False, folder=folder,
                                  axs=res_axs, legend='multi-tasking model',
                                  print_args=False, pv_mask=pv_mask,
                                  set_title=False, color=part_color,
                                  xlab=xlab)
        dc.plot_recon_gen_summary(run_ind_beta, beta_f_pattern, log_x=False,
                                  collapse_plots=False, folder=beta_folder,
                                  axs=res_axs, legend=r'$\beta$VAE',
                                  print_args=False, pv_mask=pv_mask,
                                  set_title=False, color=bvae_color,
                                  xlab=xlab)
class Figure5(DisentangledFigure):
    """Figure 5: 2D-shape image dataset.

    Panels: example images, representation geometry of pretrained
    multi-tasking and beta-VAE models, and latent-traversal comparisons.
    """

    def __init__(self, fig_key='figure5', colors=colors, **kwargs):
        fsize = (5.5, 3.5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.panel_keys = ('img_egs', 'rep_geometry', 'traversal_comparison')
        super().__init__(fsize, params, colors=colors, **kwargs)

    def make_shape_dg(self, retrain=False):
        """Return (loading and caching if needed) the 2D-shape image generator.

        ``retrain=True`` forces a reload from disk.
        """
        try:
            assert not retrain
            shape_dg = self.shape_dg
        # narrowed from a bare except: only the cache-miss cases are expected
        # (AssertionError from retrain, AttributeError when not yet cached)
        except (AssertionError, AttributeError):
            twod_file = self.params.get('shapes_path')
            img_size = self.params.getlist('img_size', typefunc=int)
            shape_dg = dg.TwoDShapeGenerator(twod_file, img_size=img_size,
                                             max_load=np.inf,
                                             convert_color=False)
            self.shape_dg = shape_dg
        return shape_dg

    def make_gss(self):
        """Lay out image-example, geometry, and traversal axes; store in gss."""
        gss = {}
        img_grids = pu.make_mxn_gridspec(self.gs, 2, 2, 0, 30,
                                         0, 40, 3, 1)
        gss[self.panel_keys[0]] = self.get_axs(img_grids)
        rep_geom_fd = self.gs[30:70, :18]
        rep_geom_bvae = self.gs[30:70, 22:40]
        rep_geom_class_perf = self.gs[75:, :15]
        rep_geom_regr_perf = self.gs[75:, 25:40]
        axs_3d = np.zeros(4, dtype=bool)
        # the two geometry axes may be 3D; the performance axes are always 2D
        axs_3d[0:2] = self.params.getboolean('vis_3d')
        gss[self.panel_keys[1]] = self.get_axs((rep_geom_fd, rep_geom_bvae,
                                                rep_geom_class_perf,
                                                rep_geom_regr_perf),
                                               plot_3ds=axs_3d)
        recon_grids = pu.make_mxn_gridspec(self.gs, 5, 6, 0, 100,
                                           45, 100, 3, 1)
        gss[self.panel_keys[2]] = self.get_axs(recon_grids)
        self.gss = gss

    def panel_img_egs(self):
        """Show a grid of example images sampled from the shape generator."""
        key = self.panel_keys[0]
        axs = self.gss[key]
        shape_dg = self.make_shape_dg()
        cm = self.params.get('img_colormap')
        # np.prod replaces np.product, which was removed in NumPy 2.0
        out = shape_dg.sample_reps(sample_size=np.prod(axs.shape))
        _, sample_imgs = out
        for i, ind in enumerate(u.make_array_ind_iterator(axs.shape)):
            axs[ind].imshow(sample_imgs[i], cmap=cm)
            axs[ind].set_xticks([])
            axs[ind].set_yticks([])

    def _get_eg_models(self, reload_=False):
        """Return (loading and caching if needed) the pretrained example
        multi-tasking and beta-VAE convolutional models."""
        try:
            assert not reload_
            m_fd, m_bvae = self._eg_models
        # narrowed from a bare except (see make_shape_dg)
        except (AssertionError, AttributeError):
            path_fd = self.params.get('fd_eg_path')
            path_bvae = self.params.get('bvae_eg_path')
            m_fd = dd.FlexibleDisentanglerAEConv.load(path_fd)
            m_bvae = dd.BetaVAEConv.load(path_bvae)
            self._eg_models = (m_fd, m_bvae)
        return m_fd, m_bvae

    def panel_rep_geometry(self):
        """Plot representation manifolds for both models and their
        generalization performance alongside the raw input."""
        key = self.panel_keys[1]
        rep_fd_ax, rep_bvae_ax, class_ax, regr_ax = self.gss[key]
        shape_dg = self.make_shape_dg()
        rs = self.params.getlist('manifold_radii', typefunc=float)
        n_arcs = self.params.getint('manifold_arcs')
        m_fd, m_bvae = self._get_eg_models()
        if not key in self.data.keys():
            self.data[key] = {}
            fd_red_func, bvae_red_func = None, None
            c_reps = self.params.getint('dg_classifier_reps')
            ident_model = dd.IdentityModel(flatten=True)
            repl_mean = (2,)
            res_ident = characterize_generalization(shape_dg, ident_model,
                                                    c_reps, norm=False,
                                                    repl_mean=repl_mean)
            res_fd = characterize_generalization(shape_dg, m_fd,
                                                 c_reps, norm=False,
                                                 repl_mean=repl_mean)
            res_bvae = characterize_generalization(shape_dg, m_bvae,
                                                   c_reps, norm=False,
                                                   repl_mean=repl_mean)
            self.data[key]['gen'] = (res_ident, res_fd, res_bvae)
        # reuse cached dimensionality-reduction functions when available so
        # the projections are consistent across calls
        if 'dr' in self.data[key].keys():
            fd_red_func, bvae_red_func = self.data[key]['dr']
        vis_3d = self.params.getboolean('vis_3d')
        out_f = dc.plot_diagnostics(shape_dg, m_fd, rs, n_arcs, n_dim_red=1000,
                                    ax=rep_fd_ax, set_inds=(3, 4),
                                    scale_mag=20,
                                    dim_red_func=fd_red_func, ret_dim_red=True,
                                    plot_3d=vis_3d)
        out_b = dc.plot_diagnostics(shape_dg, m_bvae, rs, n_arcs, n_dim_red=1000,
                                    ax=rep_bvae_ax, set_inds=(3, 4),
                                    dim_red_func=bvae_red_func, scale_mag=.2,
                                    ret_dim_red=True, plot_3d=vis_3d, view_init=(60, 20))
        if 'dr' not in self.data[key].keys():
            self.data[key]['dr'] = (out_f[1], out_b[1])
        res_ident, res_fd, res_bvae = self.data[key]['gen']
        dg_col = self.params.getcolor('dg_color')
        bvae_col = self.params.getcolor('bvae_color')
        fd_col = self.params.getcolor('partition_color')
        colors = (dg_col, fd_col, bvae_col)
        labels = ('input', 'multi-tasking model', r'$\beta$VAE')
        plot_multi_bgp((res_ident[0], res_fd[0], res_bvae[0]),
                       (res_ident[1], res_fd[1], res_bvae[1]),
                       class_ax, regr_ax, colors=colors,
                       legend_labels=labels)

    def _get_img_traversal(self, img_dg, dim, n):
        """Return ``n`` images traversing latent dimension ``dim`` of
        ``img_dg`` around its central value.

        The parameter was renamed from ``dg`` to avoid shadowing the
        module-level ``dg`` (data-generator module) alias; callers in this
        class pass it positionally.
        """
        cent = img_dg.get_center()
        unique_inds = np.unique(img_dg.data_table[img_dg.img_params[dim]])
        cent_ind = int(np.floor(len(unique_inds)/2))
        x = np.zeros((n, len(cent)))
        off_ind = int(np.floor(n/2))
        x[:, dim] = unique_inds[cent_ind - off_ind:cent_ind + off_ind]
        imgs = img_dg.get_representation(x)
        return imgs

    def panel_traversal_comparison(self):
        """Compare latent traversals/reconstructions of both models against
        the true image traversal (one row per condition)."""
        key = self.panel_keys[2]
        axs = self.gss[key]
        shape_dg = self.make_shape_dg()
        m_fd, m_bvae = self._get_eg_models()
        traverse_dim = self.params.getint('traverse_dim')
        learn_dim = self.params.getint('learn_dim')
        n_pts = self.params.getint('training_pts')
        n_perts = axs.shape[1]
        fd_perturb = self.params.getfloat('fd_perturb')
        bvae_perturb = self.params.getfloat('bvae_perturb')
        eps_d = self.params.getfloat('eps_d')
        cm = self.params.get('img_colormap')
        out = dc.plot_traversal_plot(shape_dg, m_fd, full_perturb=fd_perturb,
                                     trav_dim=traverse_dim, n_pts=n_pts,
                                     eps_d=eps_d, learn_dim=learn_dim,
                                     n_dense_pts=n_pts, n_perts=n_perts)
        recs, _, dl, dr, lr = out
        di = self._get_img_traversal(shape_dg, traverse_dim, len(axs[0]))
        dc.plot_img_series(di, title='', axs=axs[0], cmap=cm)
        dc.plot_img_series(dr, title='', axs=axs[1], cmap=cm)
        dc.plot_img_series(recs, title='', axs=axs[2], cmap=cm)
        out = dc.plot_traversal_plot(shape_dg, m_bvae,
                                     full_perturb=bvae_perturb,
                                     trav_dim=traverse_dim, n_pts=n_pts,
                                     eps_d=eps_d, n_dense_pts=n_pts,
                                     learn_dim=learn_dim, n_perts=n_perts)
        recs, di, dl, dr, lr = out
        dc.plot_img_series(dr, title='', axs=axs[3], cmap=cm)
        dc.plot_img_series(recs, title='', axs=axs[4], cmap=cm)
class SIFigureMultiverse(DisentangledFigure):
    """Supplementary figure: multiverse analysis of hyperparameter influence
    on classifier and regression generalization for both model families."""
    def __init__(self, fig_key='sifigure_multi', colors=colors, **kwargs):
        fsize = (5.5, 5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.fig_key = fig_key
        self.panel_keys = ('panel_multiverse',)
        super().__init__(fsize, params, colors=colors, **kwargs)
    def make_gss(self):
        """Create two rows of eight shared-y axes (one per hyperparameter)."""
        gss = {}
        m1_grid = pu.make_mxn_gridspec(self.gs, 1, 8,
                                       0, 48, 0, 100,
                                       20, 3)
        m1_axs = self.get_axs(m1_grid, sharey=True)
        m2_grid = pu.make_mxn_gridspec(self.gs, 1, 8,
                                       52, 100, 0, 100,
                                       20, 3)
        m2_axs = self.get_axs(m2_grid, sharey=True)
        gss[self.panel_keys[0]] = (m1_axs[0], m2_axs[0])
        self.gss = gss
    def panel_multiverse(self):
        """Load multiverse results (cached) and plot per-hyperparameter model
        coefficients for classifier (row 1) and regression (row 2)
        generalization, for multi-tasking model and beta-VAE."""
        key = self.panel_keys[0]
        axs = self.gss[key]
        fd_manifest_path = self.params.get('fd_manifest_path')
        fd_pattern = self.params.get('fd_pattern')
        bv_manifest_path = self.params.get('bv_manifest_path')
        bv_pattern = self.params.get('bv_pattern')
        results_folder = self.params.get('results_folder')
        fd_color = self.params.getcolor('partition_color')
        bv_color = self.params.getcolor('bvae_color')
        colors = (fd_color, bv_color)
        if self.data.get(key) is None:
            fd_manifest = {'fd':fd_manifest_path}
            mv_fd = dmo.load_multiverse(results_folder, fd_manifest,
                                        run_pattern=fd_pattern)
            bv_manifest = {'bv':bv_manifest_path}
            mv_bv = dmo.load_multiverse(results_folder, bv_manifest,
                                        run_pattern=bv_pattern)
            # model explanations for both performance measures and both models
            out_fd1 = dmo.model_explanation(mv_fd, 'class_gen')
            out_fd2 = dmo.model_explanation(mv_fd, 'regr_gen')
            out_bv1 = dmo.model_explanation(mv_bv, 'class_gen')
            out_bv2 = dmo.model_explanation(mv_bv, 'regr_gen')
            self.data[key] = (mv_fd, mv_bv, (out_fd1, out_fd2), (out_bv1,
                                                                 out_bv2))
        mv_fd, mv_bv, out_fd, out_bv = self.data[key]
        # human-readable axis titles for each varied hyperparameter
        title_dict = {'layer_spec':'depth', 'train_eg':'training data',
                      'use_tanh':'act function',
                      'input_dims':'latent variables',
                      'no_autoencoder':'autoencoder',
                      'betas':r'tasks / $\beta$',
                      'source_distr':'latent variable\ndistribution',
                      'partitions':r'tasks / $\beta$',
                      'latent_dims':'rep width'}
        for i, (r_fd_i, s_fd_i, lh_fd_i, l_fd_i, lv_fd_i) in enumerate(out_fd):
            r_bv_i, s_bv_i, lh_bv_i, l_bv_i, lv_bv_i = out_bv[i]
            axs_i = axs[i]
            # map each hyperparameter to its axis; tasks and betas share one
            axd_i = {'partitions':axs_i[0], 'betas':axs_i[0],
                     'layer_spec':axs_i[1], 'train_eg':axs_i[2],
                     'use_tanh':axs_i[3], 'input_dims':axs_i[4],
                     'source_distr':axs_i[5], 'latent_dims':axs_i[6],
                     'no_autoencoder':axs_i[7]}
            if i == len(out_fd) - 1:
                labels = True
            else:
                labels = False
            model_names = ('multi-tasking model', r'$\beta$VAE')
            dmo.plot_multiple_model_coefs((l_fd_i, l_bv_i), (r_fd_i, r_bv_i),
                                          (lh_fd_i, lh_bv_i), ax_dict=axd_i,
                                          title_dict=title_dict, colors=colors,
                                          labels=labels, model_names=model_names,
                                          v_dicts=(lv_fd_i, lv_bv_i))
            for i, ax in enumerate(axs_i):
                gpl.clean_plot(ax, i)
            # categorical hyperparameters get explicit tick labels
            axd_i['no_autoencoder'].set_xticks([0, 1])
            axd_i['no_autoencoder'].set_xticklabels(['with', 'without'],
                                                    rotation='vertical')
            axd_i['use_tanh'].set_xticks([0, 1])
            axd_i['use_tanh'].set_xticklabels(['ReLU', 'tanh'],
                                              rotation='vertical')
            axd_i['layer_spec'].set_xticklabels([3, 4, 5])
            axd_i['source_distr'].set_xticklabels(['normal', 'uniform'],
                                                  rotation='vertical')
            axd_i['train_eg'].set_xticks([10000, 100000])
            axd_i['train_eg'].set_xticklabels([r'$10^{4}$', r'$10^{5}$'])
            axd_i['input_dims'].set_xticks([2, 5, 8])
            axd_i['partitions'].legend(frameon=False)
        axs[0][0].set_ylabel('classifier generalization\ninfluence')
        axs[1][0].set_ylabel('regression generalization\ninfluence')
class SIFigureDim(DisentangledFigure):
    """Supplementary figure: model performance as latent dimensionality varies.

    For each latent dimensionality in the config entry 'dims', plots the
    multi-tasking (partition) model and the betaVAE summaries side by side
    (one row per dimensionality, two columns).
    """
    def __init__(self, fig_key='sifigure_dim', colors=colors, **kwargs):
        # Figure size in inches; all other parameters come from the config
        # section named by fig_key.
        fsize = (4, 5)
        cf = u.ConfigParserColor()
        cf.read(config_path)
        params = cf[fig_key]
        self.fig_key = fig_key
        self.panel_keys = ('dim_dependence',)
        super().__init__(fsize, params, colors=colors, **kwargs)
    def make_gss(self):
        """Create a len(dims) x 2 axes grid for the single panel."""
        gss = {}
        dims = self.params.getlist('dims', typefunc=int)
        dims_grid = pu.make_mxn_gridspec(self.gs, len(dims), 2,
                                         0, 100, 0, 100,
                                         5, 20)
        gss[self.panel_keys[0]] = self.get_axs(dims_grid)
        self.gss = gss
    def panel_dim_dependence(self):
        """Plot recon/generalization summaries for both models at each dim."""
        key = self.panel_keys[0]
        axs = self.gss[key]
        dims = self.params.getlist('dims', typefunc=int)
        # Run indices for the multi-tasking (fd) and betaVAE (bv) simulations,
        # one per dimensionality.
        fd_inds = self.params.getlist('fd_dims_inds')
        bv_inds = self.params.getlist('bv_dims_inds')
        f_pattern = self.params.get('f_pattern')
        beta_f_pattern = self.params.get('beta_f_pattern')
        folder = self.params.get('mp_simulations_path')
        beta_folder = self.params.get('beta_simulations_path')
        part_color = self.params.getcolor('partition_color')
        bvae_color = self.params.getcolor('bvae_color')
        xlab = r'tasks / $\beta$'
        # Select which panel-value entries to plot (only the middle one).
        pv_mask = np.array([False, True, False])
        for i, dim in enumerate(dims):
            fd_ri = fd_inds[i]
            bv_ri = bv_inds[i]
            # Only the top row carries legend entries.
            if i == 0:
                fd_legend = 'partition'
                bv_legend = r'$\beta$VAE'
            else:
                fd_legend = ''
                bv_legend = ''
            dc.plot_recon_gen_summary(fd_ri, f_pattern, log_x=False,
                                      collapse_plots=False, folder=folder,
                                      axs=axs[i:i+1], legend=fd_legend,
                                      print_args=False, pv_mask=pv_mask,
                                      set_title=False, color=part_color,
                                      xlab=xlab)
            dc.plot_recon_gen_summary(bv_ri, beta_f_pattern, log_x=False,
                                      collapse_plots=False, folder=beta_folder,
                                      axs=axs[i:i+1], legend=bv_legend,
                                      print_args=False, pv_mask=pv_mask,
                                      set_title=False, color=bvae_color,
                                      xlab=xlab, plot_hline=False)
            # Annotate the row with its dimensionality.
            axs[i, 0].text(35, .8, r'$D = {}$'.format(dim))
            # Hide x labels/ticks on every row but the last.
            if i < len(dims) - 1:
                axs[i, 0].set_xlabel('')
                axs[i, 1].set_xlabel('')
                axs[i, 0].set_xticklabels([])
                axs[i, 1].set_xticklabels([])
| [
"wjeffreyjohnston@gmail.com"
] | wjeffreyjohnston@gmail.com |
e8cf404787724511a0b29b4cdd94c6d79eb99f88 | 789497c626e92eccfa102572384cade211576a97 | /scrapysina/spiders/cankaoxiaoxi.py | 5d8bb9fe7daf100449c8fa06655c71f83776ed08 | [] | no_license | JimmyLsc/spiders-for-tan | 9ddad68e331a6b2e9738c558d4ce89acd21f61b2 | da91ec0caaa0d9cb2e1d93690179c48700043200 | refs/heads/main | 2023-01-29T04:14:36.109782 | 2020-12-16T07:56:30 | 2020-12-16T07:56:30 | 321,901,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import datetime
import scrapy
from scrapysina.items import ScrapyItem
class CankaoxiaoxiSpider(scrapy.Spider):
    """Spider for the mil.cankaoxiaoxi.com index page.

    Scrapes the article list and yields one ScrapyItem per article whose
    publication month and day match today's date.
    """
    name = 'cankaoxiaoxi'
    allowed_domains = ['mil.cankaoxiaoxi.com']
    start_urls = ['http://mil.cankaoxiaoxi.com/']
    custom_settings = {
        'ITEM_PIPELINES':{
            'scrapysina.pipelines.ScrapycankaoxiaoxiPipeline': 300
        }
    }

    def parse(self, response):
        """Extract title/link/date triples and yield today's articles."""
        print('======================================')
        headlines = response.xpath("//div[@class='listCon']/div/div[@class='listBody']/div/div/div[@class='news_pic_info']/p/a/text()").extract()
        urls = response.xpath("//div[@class='listCon']/div/div[@class='listBody']/div/div/div[@class='news_pic_info']/p/a/@href").extract()
        pub_dates = response.xpath("//div[@class='listCon']/div/div[@class='listBody']/div/div/div[@class='news_pic_info']/div/span[@class='date_tag']/text()").extract()
        for pub_date, headline, url in zip(pub_dates, headlines, urls):
            article = ScrapyItem()
            article['link'] = url
            article['date_time'] = pub_date
            article['title'] = headline
            # Compare the month (chars 5:7) and day (chars 8:10) of the
            # article date against today's ISO date string.
            today = str(datetime.date.today())
            if pub_date[5:7] == today[5:7] and pub_date[8:10] == today[8:10]:
                yield article
        print('======================================')
| [
"45891479+JimmyLsc@users.noreply.github.com"
] | 45891479+JimmyLsc@users.noreply.github.com |
bcc91c66c1a46421cc90bf345566fae7477c3ed5 | c8620bc8f41b9c2da42ba19caa4efe34d80f8e23 | /Application/migrations/0002_auto_20200421_0007.py | a2abb0232b918fe4533d48d5a85c12420e7c96c7 | [] | no_license | doniamezghani/ElitechProjet | 26cb77e1c90ed93032a92ad093d6c8f9a1ef7578 | 20caa7eaf7234735a395f03a42b8366a135bab75 | refs/heads/master | 2022-12-10T20:54:54.732054 | 2020-06-03T22:50:33 | 2020-06-03T22:50:33 | 293,299,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-04-21 00:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Set ``verbose_name_plural`` to 'courses' on the ``course`` model."""
    dependencies = [
        # Must run after the app's initial schema migration.
        ('Application', '0001_initial'),
    ]
    operations = [
        # AlterModelOptions is a state-only change; no database schema
        # is modified.
        migrations.AlterModelOptions(
            name='course',
            options={'verbose_name_plural': 'courses'},
        ),
    ]
| [
"donia.mezghenni@gmail.com"
] | donia.mezghenni@gmail.com |
31029cb219a6d9233dc24ca5576b136098cb5c29 | e48e3f58e87d9702a7b7bc986a649b8a37393829 | /n_grams.py | a1c61ad4bc7c9553697b80bc703d091fc0d907b4 | [] | no_license | ITerJXP/Phishing_detection | a662b33d944689719573a66237a32cd83e2dc607 | 55788764d13869a834868322622448c15f208e9a | refs/heads/master | 2021-08-31T13:31:06.944793 | 2017-12-21T13:47:30 | 2017-12-21T13:47:30 | 112,483,597 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,534 | py | # -*- coding:utf-8 -*-
import sys
reload(sys)
from nltk.util import ngrams
import collections
def make_str(url_part_list):
    """Join URL parts into one comma-separated training string.

    BUG FIX: the original hand-rolled loop prepended the separator before the
    first element as well, producing a spurious leading ',' in the training
    corpus; ``str.join`` inserts the separator only between elements.
    """
    return ','.join(url_part_list)
def train_grams(_str_, n):
    """Count all character n-grams of ``_str_``.

    :param _str_: concatenated URL corpus
    :param n: n-gram order
    :return: ``collections.Counter`` mapping each n-gram tuple to its count

    Example: for ``'www.paypal.com'`` with n=1 the counter contains
    ``('w',): 3``, ``('.',): 2``, ``('a',): 2``, ``('p',): 2``, ...
    """
    return collections.Counter(ngrams(_str_, n))
def search(item, pre_delta_clt):
    """Return the trained count for ``item`` (0 when it was never seen).

    Equivalent to the original linear scan over the counter's keys, done as a
    direct mapping lookup.
    """
    return pre_delta_clt[item] if item in pre_delta_clt else 0


def unigram(url, this_delta_clt):
    """Mean unigram probability of ``url`` under the trained counts.

    BUG FIX: under Python 2 (this module uses Python 2 syntax elsewhere),
    ``this_delta_clt[item] / _sum`` performed integer division and truncated
    every probability to 0; the numerator is now converted to float first.
    """
    _sum = sum(this_delta_clt.values())
    sum_p = 0.0
    for item in this_delta_clt:
        sum_p = sum_p + float(this_delta_clt[item]) / _sum
    return float(sum_p) / len(url)


def n_grams(url, this_delta_clt, pre_delta_clt, n_gram):
    """Similarity score of one URL part under an n-gram model.

    :param url: the URL part being scored (sets the normalising length)
    :param this_delta_clt: trained n-gram counts
    :param pre_delta_clt: trained (n-1)-gram counts
    :param n_gram: the order n
    :return: similarity, or 0 when normalisation divides by zero

    BUG FIXES: the per-item division suffered the same Python 2 integer
    truncation as ``unigram``; ``except Exception, e`` was Python 2-only
    syntax and over-broad — only ZeroDivisionError is expected here.
    """
    sum_p = 0.0
    length = len(url) - n_gram + 1
    for item in this_delta_clt:
        # Look up the count of the (n-1)-gram prefix (the previous state).
        count = search(item[0:-1], pre_delta_clt)
        sum_p = sum_p + float(this_delta_clt[item]) / count
    try:
        sim = float(sum_p) / length
    except ZeroDivisionError:
        sim = 0
    return sim
if __name__ == '__main__':
    # Smoke test: train 1- to 4-gram counters on a toy corpus.
    trainset = ['aaaa', 'aaabbb', 'aaaabbbbbcccc']
    _str_ = make_str(trainset)
    uni_delta_clt = train_grams(_str_, 1)
    bi_delta_clt = train_grams(_str_, 2)
    tri_delta_clt = train_grams(_str_, 3)
    qua_delta_clt = train_grams(_str_, 4)
    # print sum(uni_delta_clt.values())
    # print bi_delta_clt
    # NOTE(review): url is an empty placeholder here, which gives n_grams a
    # degenerate (negative) normalising length — fill in a real URL part.
    url = ''
    Sim = n_grams(url, this_delta_clt=bi_delta_clt, pre_delta_clt=uni_delta_clt, n_gram=2)
    # print Sim
"jxptaylover@163.com"
] | jxptaylover@163.com |
29a7d8eac5c1f6058b997122e7ec0d1f6e72753a | d2eb9236fff1e38a00355203fcc78e71767603d7 | /day4/ATM/atm/core/accounts.py | aa128a8dfdaadf5dde4beac8c5e808f3f303753b | [] | no_license | Oldby141/learning | c0449040df53ec36c2038cdcb66d4de717402de7 | 1cdea2353dec95f722ea6fca80647b0037a74306 | refs/heads/master | 2020-05-01T03:58:05.611893 | 2019-04-12T13:56:06 | 2019-04-12T13:56:06 | 177,260,516 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #!_*_coding:utf-8_*_
import json
import time
from core import db_handler
from conf import settings
def load_current_balance(account_id):
    """Load the full account row for ``account_id`` through the db handler.

    WARNING: the SQL is built with %-interpolation; if ``account_id`` can come
    from user input this is an SQL-injection risk — prefer a parameterized
    query if the db_handler API supports one.
    """
    db_api = db_handler.db_handler()  # obtain the storage backend accessor (called like a function below)
    data = db_api("select * from accounts where account=%s"%account_id)
    #print(data)
    return data
def dump_account(account_data):
    """Persist ``account_data`` for the account given by ``account_data['id']``.

    Always returns True.  NOTE(review): the result of the update call is
    discarded, so failures are presumably signalled by exceptions raised
    inside db_api — confirm.  The interpolated SQL carries the same injection
    risk noted on load_current_balance.
    """
    db_api = db_handler.db_handler()
    data = db_api("update accounts where account=%s" % account_data['id'],account_data=account_data)
    return True
"1194475412@qq.com"
] | 1194475412@qq.com |
fbab5560e9894901c5617e613add83c277d25710 | 8e8acc57b63a66cb1450fa4d015d4ddcd74cce85 | /liaoxuefengLessons/ObjectOrientedProgramming/ENUM.py | 5e50eaa8c9becb7d3b84f7e8a321feb1a34f2cb0 | [] | no_license | indeyo/PythonStudy | fc2241db7cec8075a59a307ff47c9de83494844b | 099feb4e4c8dec9e68887cedd95705d831e51b0f | refs/heads/master | 2021-03-29T19:04:24.553848 | 2020-06-05T15:07:33 | 2020-06-05T15:07:33 | 247,978,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
@Project : StudyPython0-100
@File : ENUM.py
@Time : 2019-08-05 22:57:52
@Author : indeyo_lin
@Version :
@Remark :
"""
"""
练习:
把Student的gender属性改造为枚举类型,可以避免使用字符串:
"""
# from enum import Enum, unique
#
# class Gender(Enum):
# Male = 0
# Female = 1
#
# class Student():
#
# def __init__(self, name, gender):
# self.name = name
# self.gender = gender
#
# # 测试:
# # 这道题完全不需要改嘛!!!直接通过
# bart = Student('Bart', Gender.Male)
# if bart.gender == Gender.Male:
# print('测试通过!')
# else:
# print('测试失败!')
# BUG FIX: ``unique`` was not imported, so the @unique decorator below raised
# NameError at import time.
from enum import Enum, unique
# Functional API: members are auto-numbered starting from 1.
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
for name, member in Month.__members__.items():
    print(name, '=>', member, ',', member.value)
@unique  # reject duplicate member values
class Weekday(Enum):
    Sun = 0 # Sun's value is explicitly set to 0
    Mon = 1
    Tue = 2
    Wed = 3
    Thu = 4
    Fri = 5
    Sat = 6
"indeyo@git.com"
] | indeyo@git.com |
3b6e664d5482c257c0400170a43bf6b823377024 | 5cf7f81791a9d66ba495512f0b1d2c8b6cccbd3d | /python/test/test_rhomb_H_and_R.py | ac268c603a510df1fc1881d48b3b0bc262075ef6 | [
"BSD-3-Clause"
] | permissive | odidev/spglib | 9e0eecbb77b20e09f1affec42af48dc6a1c60e82 | e807f1193ad57af8b916245fc397e4667baaaf92 | refs/heads/develop | 2023-07-03T03:03:37.592891 | 2021-05-17T08:45:07 | 2021-05-17T08:45:07 | 390,248,634 | 0 | 0 | BSD-3-Clause | 2021-07-28T11:44:17 | 2021-07-28T07:01:36 | null | UTF-8 | Python | false | false | 2,280 | py | import unittest
import numpy as np
from spglib import get_symmetry_dataset, find_primitive
from vasp import read_vasp
import os
data_dir = os.path.dirname(os.path.abspath(__file__))
dirnames = ('trigonal', )
rhomb_numbers = (146, 148, 155, 160, 161, 166, 167)
tmat = [[0.6666666666666666, -0.3333333333333333, -0.3333333333333333],
[0.3333333333333333, 0.3333333333333333, -0.6666666666666666],
[0.3333333333333333, 0.3333333333333333, 0.3333333333333333]]
class TestRhombSettingHR(unittest.TestCase):
    """Check that H- and R-setting primitive cells agree for rhombohedral groups."""

    def setUp(self):
        """Collect the rhombohedral test structures (space groups 146..167)."""
        self._filenames = []
        for sub in dirnames:
            full_dir = os.path.join(data_dir, "data", sub)
            candidates = os.listdir(full_dir)
            selected = []
            for number in rhomb_numbers:
                selected += [name for name in candidates
                             if str(number) in name]
            self._filenames += [os.path.join(full_dir, name)
                                for name in selected]

    def tearDown(self):
        pass

    def test_rhomb_prim_agreement_over_settings(self):
        """find_primitive, the H setting, and the R setting must all give the
        same primitive lattice for every test structure."""
        tolerance = 1e-5
        for fname in self._filenames:
            cell = read_vasp(fname)
            dataset_h = get_symmetry_dataset(cell, symprec=tolerance)
            # The R (rhombohedral) description is the hall number right after
            # the H (hexagonal) one.
            dataset_r = get_symmetry_dataset(cell,
                                             hall_number=dataset_h['hall_number'] + 1,
                                             symprec=tolerance)
            plat, _, _ = find_primitive(cell)
            plat_h = np.dot(dataset_h['std_lattice'].T, tmat).T
            plat_r = dataset_r['std_lattice']
            for left, right in ((plat, plat_h), (plat_r, plat_h), (plat_r, plat)):
                np.testing.assert_allclose(left, right,
                                           atol=1e-5, err_msg="%s" % fname)
if __name__ == '__main__':
    # Run just this TestCase with verbose output (instead of unittest.main()).
    suite = unittest.TestLoader().loadTestsFromTestCase(TestRhombSettingHR)
    unittest.TextTestRunner(verbosity=2).run(suite)
    # unittest.main()
| [
"atz.togo@gmail.com"
] | atz.togo@gmail.com |
af8638c194cc6aa49ff9907afb883eb041065fb8 | 4f46b4c5b2454a3ad1aea97c8cf0585414243f98 | /Table_cxlx_all_ajlx/cxlx.py | e7aa3b3f5b7fce03ced4a1b4264992f85631d6da | [] | no_license | dingyuzhu/LAW_INFO_extraction | 716431eeb008efb9f6fad703082fba6a9acf2075 | 2356cd52dae87d581b2af2e2d290171dd03da4aa | refs/heads/main | 2023-05-21T16:23:26.547741 | 2021-06-16T10:26:32 | 2021-06-16T10:26:32 | 377,108,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,517 | py | # -*- coding: UTF-8 -*-
import re
import pandas as pd
import rule_3
'''民事一审、二审诉讼记录分类'''
class CXLX_CLASSIFICATION():
    """Classify civil first/second-instance litigation records.

    Input rows are (id, party_info, ssjl, ss_ssqq, spcx_id); the output is a
    DataFrame with one row per document flagging retrial / remand /
    counterclaim status, the procedure type, party absence, and the filing and
    trial dates.  (A block of superseded, commented-out legacy logic for the
    counterclaim rule was removed from this class.)
    """
    def get_cxlx_data(self, all_data):
        """Index raw rows by document id: {id: [party_info, ssjl, ss_ssqq, spcx_id]}."""
        dict_cxlx_data = {}
        for data in all_data:  # data = (id, party_info, ssjl, ss_ssqq, spcx_id)
            dict_cxlx_data[data[0]] = [data[1], data[2], data[3], data[4]]
        return dict_cxlx_data

    def absence(self, ssjl):
        """Scan the trial record for absence of plaintiff/defendant/third party.

        Returns [plaintiff, defendant, third_party]; 1 = an absence phrase was
        matched near that party's title, 0/None = nothing found.
        """
        yg_absence_ = 0
        bg_absence_ = 0
        _3rd_absence_ = None
        absence_pattern = '([未不]{1}[到出]{1}庭|不履行到庭义务|拒绝到庭|传唤未到|未能到庭|未按[时期]{1}[到出]{1}庭|未参加|离开法庭|缺席|传唤|传票)'
        if re.search(absence_pattern, ssjl):
            # Strip fullwidth-parenthesised asides, then split into clauses.
            ssjl = re.sub('(.*?)', '', ssjl)
            ssjl_arr = re.split(',|。|,|/\.', ssjl)
            for j in range(len(ssjl_arr)):
                for i in range(len(rule_3.yg_title)):
                    if re.search(absence_pattern, ssjl_arr[j]):
                        # Plaintiff: title at the clause start, or in the
                        # previous clause when that clause itself reports
                        # neither absence nor attendance.
                        if re.search('^{yg}'.format(yg=rule_3.yg_title[i]), ssjl_arr[j]):
                            yg_absence_ = 1
                        else:
                            if re.search('^{yg}'.format(yg=rule_3.yg_title[i]), ssjl_arr[j - 1]):
                                if not re.search(absence_pattern, ssjl_arr[j - 1]) and not re.search('到庭参加|出庭', ssjl_arr[j - 1]):
                                    yg_absence_ = 1
                        # Defendant: title anywhere in the clause.
                        if re.search('{bg}'.format(bg=rule_3.bg_title[i]), ssjl_arr[j]):
                            bg_absence_ = 1
                        else:
                            if re.search('{bg}'.format(bg=rule_3.bg_title[i]), ssjl_arr[j - 1]):
                                if not re.search(absence_pattern, ssjl_arr[j - 1]) and not re.search('到庭参加|出庭', ssjl_arr[j - 1]):
                                    bg_absence_ = 1
                        # Third party.
                        if re.search('第三人', ssjl_arr[j]):
                            _3rd_absence_ = 1
                        else:
                            if re.search('第三人', ssjl_arr[j - 1]) and not re.search('到庭参加|出庭', ssjl_arr[j - 1]):
                                _3rd_absence_ = 1
        return [yg_absence_, bg_absence_, _3rd_absence_]

    def file_trial_time(self, ssjl):
        """Extract (filing date, trial date) strings from the trial record.

        Either value is '' when no matching clause is found; later matches
        overwrite earlier ones.
        """
        file_time = ''
        trial_time = ''
        ssjl_arr = re.split(',|。|,|/\.|、', ssjl)
        for i in range(len(ssjl_arr)):
            if re.search('[\d]{0,4}年[\d]{0,2}月[\d]{0,2}日.*?立案', ssjl_arr[i]):
                file_time = re.search('[\d]{0,4}年[\d]{0,2}月[\d]{0,2}日', ssjl_arr[i]).group()
            elif re.search('[\d]{0,4}年[\d]{0,2}月[\d]{0,2}日.*?审理', ssjl_arr[i]):
                trial_time = re.search('[\d]{0,4}年[\d]{0,2}月[\d]{0,2}日', ssjl_arr[i]).group()
        return file_time, trial_time

    def cxlx_process(self, dict_cxlx_data):
        """Derive per-document classification flags from the raw fields.

        Result keys (kept in Chinese because downstream code reads them):
          是否再审案件  retrial case (1/0/None)
          是否重审案件  remanded-for-retrial case (1/0/None)
          是否反诉案件  counterclaim case (1/0/None; decided for first
                        instance cases only, spcx_id == 30100000000000000)
          案件所处程序  procedure: 1 simplified, 2 ordinary, 3 small-claims,
                        4 unstated, None when there is no trial record
          plus absence flags and filing/trial dates.
        """
        dict_cxlx_result = {}  # v = [party_info, ssjl, ss_ssqq, spcx_id]
        # Case-number pattern marking a retrial docket ((YYYY)X..民再/申/抗/监..号);
        # hoisted here because it was duplicated four times in the original.
        zs_pattern = '(/\(|()[\d]+(/\)|))[\D]{1}[\d]+民(再|申|抗|监).{0,3}[\d]+.{0,3}号.{0,3}'
        for k, v in dict_cxlx_data.items():
            v_temp = {'是否再审案件': None, '是否重审案件': None, '是否反诉案件': None, '案件所处程序': None, '原告缺席情况': None, '被告缺席情况': None, '第三人缺席情况': None, '庭审时间': None, '立案时间': None, '审判程序': None}
            # --- Retrial: check the trial record and/or the claims text,
            # whichever is present (None when neither is).
            zs_sources = [s for s in (v[1], v[2]) if s]
            if zs_sources:
                v_temp['是否再审案件'] = 1 if any(re.search(zs_pattern, s) for s in zs_sources) else 0
            # --- Remand retrial: only the trial record is consulted.
            if v[1]:
                v_temp['是否重审案件'] = 1 if re.search('重审|重新审', v[1]) else 0
            # --- Counterclaim: party_info only, first-instance cases only.
            if v[3] == 30100000000000000:
                if v[0]:
                    if re.search('反诉原告|反诉被告|反诉人|原告(被告)|被告(原告)', v[0]):
                        # BUG FIX: the key was mistyped as '是反诉审案件', which
                        # silently left '是否反诉案件' at None for matches.
                        v_temp['是否反诉案件'] = 1
                    else:
                        v_temp['是否反诉案件'] = 0
            else:
                if not v[0]:
                    v_temp['是否反诉案件'] = None  # BUG FIX: key was mistyped here too
                else:
                    v_temp['是否反诉案件'] = 0
            # --- Procedure type: ordered pattern cascade over the trial record.
            if v[1] != None and re.search('^((?!(普通[诉讼]{0,2}程序|小额[诉讼]{0,2}))[\S\s])*(简易[诉讼]{0,2}程序)((?!(普通[诉讼]{0,2}程序|小额[诉讼]{0,2}))[\S\s])*$', v[1]) != None:
                v_temp['案件所处程序'] = 1
            elif v[1] != None and re.search('^((?!(简易[诉讼]{0,2}程序|小额[诉讼]{0,2}))[\S\s])*(普通[诉讼]{0,2}程序)((?!(简易[诉讼]{0,2}程序|小额[诉讼]{0,2}))[\S\s])*$', v[1]):
                v_temp['案件所处程序'] = 2
            elif v[1] != None and re.search('^((?!(简易[诉讼]{0,2}程序|普通[诉讼]{0,2}程序))[\S\s])*(小额[诉讼]{0,2})((?!(简易[诉讼]{0,2}程序|普通[诉讼]{0,2}程序))[\S\s])*$', v[1]):
                v_temp['案件所处程序'] = 3
            elif v[1] != None and re.search('(转[为入换成]{0,1}|变更为)普通[诉讼]{0,2}程序', v[1]):
                v_temp['案件所处程序'] = 2
            elif v[1] != None and re.search('(转[为入换成]{0,1}|变更为)小额[诉讼]{0,2}程序', v[1]):
                v_temp['案件所处程序'] = 3
            elif v[1] != None and re.search('(转[为入换成]{0,1}|变更为)简易[诉讼]{0,2}程序', v[1]):
                v_temp['案件所处程序'] = 1
            elif v[1] != None and re.search('普通程序(√)', v[1]) != None:
                v_temp['案件所处程序'] = 2
            elif v[1] != None and re.search('简易程序(√)', v[1]) != None:
                v_temp['案件所处程序'] = 1
            elif v[1] != None and re.search('简易[诉讼]{0,2}程序.*?普通[诉讼]{0,2}程序', v[1]) != None:
                v_temp['案件所处程序'] = 2
            elif v[1] != None and re.search('普通[诉讼]{0,2}程序.*?简易[诉讼]{0,2}程序', v[1]) != None:
                v_temp['案件所处程序'] = 1
            elif v[1] != None and re.search('简易[诉讼]{0,2}程序.*?小额[诉讼]{0,2}程序|适用简易.*?(小额诉讼)|小额诉讼简易程序|简易程序.*?小额诉讼', v[1]) != None:
                v_temp['案件所处程序'] = 3
            elif v[1] != None and re.search('小额[诉讼]{0,2}程序.*?简易[诉讼]{0,2}程序|依法适用简易.*?审理|小额(贷款|借款).*?适用简易程序', v[1]) != None:
                v_temp['案件所处程序'] = 1
            elif v[1] != None and re.search('小额[诉讼]{0,2}程序.*?普通[诉讼]{0,2}程序|小额(贷款|借款).*?适用普通程序', v[1]) != None:
                v_temp['案件所处程序'] = 2
            elif v[1] != None and re.search('简易[诉讼]{0,2}程序|普通[诉讼]{0,2}程序|小额[诉讼]{0,2}程序', v[1]) == None:
                v_temp['案件所处程序'] = 4
            elif v[1] == None:
                v_temp['案件所处程序'] = None
            # --- Party absence and filing/trial dates (need a trial record).
            if v[1]:
                # Computed once each; the original re-ran absence() three
                # times and file_trial_time() twice on the same text.
                absence_flags = self.absence(v[1])
                v_temp['原告缺席情况'] = absence_flags[0]
                v_temp['被告缺席情况'] = absence_flags[1]
                v_temp['第三人缺席情况'] = absence_flags[2]
                file_time, trial_time = self.file_trial_time(v[1])
                v_temp['立案时间'] = file_time
                v_temp['庭审时间'] = trial_time
            dict_cxlx_result[k] = v_temp
        return dict_cxlx_result

    def dict_to_df(self, dict_cxlx_result):
        """Flatten the per-document result dicts into a DataFrame."""
        v_temp = {'id': [], 'wenshu_id': [], 'is_zs': [], 'is_cs': [], 'is_fs': [], 'procedure_': [], 'yg_absence': [], 'bg_absence': [], '_3rd_absence': [], 'trial_time': [], 'file_time': []}
        for k, v in dict_cxlx_result.items():
            if k:
                v_temp['id'].append(k)
                v_temp['wenshu_id'].append(k)
                v_temp['is_zs'].append(v['是否再审案件'])
                v_temp['is_cs'].append(v['是否重审案件'])
                v_temp['is_fs'].append(v['是否反诉案件'])
                v_temp['procedure_'].append(v['案件所处程序'])
                v_temp['yg_absence'].append(v['原告缺席情况'])
                v_temp['bg_absence'].append(v['被告缺席情况'])
                v_temp['_3rd_absence'].append(v['第三人缺席情况'])
                v_temp['trial_time'].append(v['庭审时间'])
                v_temp['file_time'].append(v['立案时间'])
        df = pd.DataFrame(v_temp)
        return df

    def run(self, all_data):
        """End-to-end pipeline: raw rows -> classification dict -> DataFrame."""
        dict_cxlx_data = self.get_cxlx_data(all_data)
        dict_cxlx_result = self.cxlx_process(dict_cxlx_data)
        df = self.dict_to_df(dict_cxlx_result)
        return df
| [
"695164075@qq.com"
] | 695164075@qq.com |
b4a4ca55d2231e169f1b67dec313d0b491bd3655 | 8d5c6908cfaafb720ac4e1b6ea6f135b17e16cc3 | /lib/helper.py | 421402cf2dc6a631ce3709dc176187eb065c31db | [] | no_license | hhirsch/launcher | e81d5b8b3804864cd0bcb6bc5254be6b9e8b4f25 | d575658a0a71330a94021a3652b8581ade364fab | refs/heads/master | 2023-01-06T16:32:36.009832 | 2020-10-16T21:01:05 | 2020-11-01T16:54:23 | 268,054,143 | 0 | 2 | null | 2020-11-01T16:54:24 | 2020-05-30T10:06:27 | Python | UTF-8 | Python | false | false | 1,039 | py | from os import path
from assetexception import AssetException
from shutil import copytree
def gameIsInRepository(game):
    """True when the game's folder exists in the local repository."""
    return path.exists(getRepositoryPath(game))


def gameIsInCache(game):
    """True when the game has already been copied into the cache."""
    return path.exists(getCachePath(game))


def getCachePath(game):
    """Normalized path of the cached copy of ``game``."""
    cache_location = 'games/' + game
    return path.normpath(cache_location)


def getRepositoryPath(game):
    """Normalized path of ``game`` inside the repository."""
    repo_location = 'repository/' + game
    return path.normpath(repo_location)


def getImagePath(game):
    """Path of the game's .ppm image; raises AssetException when missing."""
    base = path.normpath('data/images/' + game)
    candidate = base + '.ppm'
    if path.exists(candidate):
        return candidate
    raise AssetException("Image not found!")


def getPilImagePath(game):
    """Path of the game's PIL-loadable image (.png preferred, then .jpg).

    Raises AssetException when neither file exists.
    """
    base = path.normpath('data/images/' + game)
    for extension in ('.png', '.jpg'):
        candidate = base + extension
        if path.exists(candidate):
            return candidate
    raise AssetException("Image not found!")


def copyToCache(game):
    """Copy the game's repository folder into the cache unless already cached."""
    if not path.exists(getCachePath(game)) and path.isdir(getRepositoryPath(game)):
        copytree(getRepositoryPath(game), getCachePath(game))
| [
"henry@w3-net.de"
] | henry@w3-net.de |
aa516d2e2960317bd46795e9eb2cf98e9171382d | 5b047dc237b3a9bcd8f33c8df5acbd98929cabb0 | /hello.py | 69c5351d6bbc09f94e08baefb0514040b2bb3e50 | [] | no_license | Aeternam/server_status | 62a04dfa9c295c1753446536b51da9108551e4f8 | 981df07d97464d92251814d281360918802f8ae8 | refs/heads/master | 2021-01-25T07:34:03.161187 | 2014-12-14T17:00:34 | 2014-12-14T17:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from bottle import Bottle, run
from bottle import template
from bottle import static_file
app = Bottle()
@app.route('/hello')
def hello():
    """Fixed greeting used to smoke-test the server."""
    return "Hello world!"
@app.route('/')
@app.route('/hello/<name>')
def greet(name='Stranger'):
    """Greet by name; the bare root URL falls back to 'Stranger'."""
    return template('Hello {{name}}, how are you?', name=name)
#@route('/wiki/<pagename>')
#def show_wiki_page(pagename):
# BUG FIX: the handlers below used the bare ``@route`` decorator, which was
# never imported (NameError at import time) and would in any case register on
# bottle's module-level default app instead of the ``app`` instance that
# run() serves below.  They now use ``@app.route``.
@app.route('/object/<id:int>')
def callback(id):
    # The <id:int> route filter converts the URL segment to int before dispatch.
    assert isinstance(id, int)
@app.route('/show/<name:re:[a-z]+>')
def callback(name):
    assert name.isalpha()
@app.route('/static/<path:path>')
def callback(path):
    # NOTE(review): ``...`` passes an Ellipsis object as static_file's second
    # argument — a placeholder copied from the docs; supply a real
    # root directory (e.g. root='./static') before using this route.
    return static_file(path,...)
run(app, host='0.0.0.0', port=8080, debug=True)
| [
"justfan.b@gmail.com"
] | justfan.b@gmail.com |
5903d8b50e2c112c29503ef04044290df4c209e0 | 49413293d6d7a956f0cda72548e79161ca2e2e6c | /renren/renren/items.py | 58d1b42de2bfbadde9ef85d32508bbad7f7c95a2 | [] | no_license | aonephy/python | c26a2c2728d7ffbc700693be63a8c0a9743b4d84 | 8eda2cfcb75b0cc67c8ea3c9e1b7425349c1d732 | refs/heads/master | 2020-03-28T19:47:39.855862 | 2018-10-15T05:53:53 | 2018-10-15T05:53:53 | 149,010,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class RenrenItem(scrapy.Item):
    """Item holding one renren.com profile scraped by the spider.

    Field meanings below are inferred from the names only — confirm against
    the spider code that populates them.
    """
    visit = scrapy.Field() # profile visit count (presumably)
    share_friend = scrapy.Field() # shared/common friends (presumably)
    has_friend = scrapy.Field() # friend presence/count (presumably)
    img = scrapy.Field() # avatar image URL (presumably)
    name = scrapy.Field() # display name
    url = scrapy.Field() # profile URL
    school = scrapy.Field() # school listed on the profile
    work = scrapy.Field() # workplace
    gender = scrapy.Field() # gender
    birthday = scrapy.Field() # birthday
    hometown = scrapy.Field() # hometown
    address = scrapy.Field() # current address
| [
"117656041@qq.com"
] | 117656041@qq.com |
61bbe9c6a03dc155f5c1f6a09c732284f2f3acdf | 0d9c964fd7644395a3f0763f484e485fcc67f762 | /new/src/21.03.2020/list_of_business.py | 2e49049cc01217aba3b71d33a8cc65d4af44bb18 | [
"Apache-2.0"
] | permissive | VladBaryliuk/my_start_tasks | eaa2e6ff031f2f504be11f0f64f5d99bd1a68a0e | bf387543e6fa3ee303cbef04d2af48d558011ed9 | refs/heads/main | 2023-04-14T14:00:08.415787 | 2021-04-24T13:47:38 | 2021-04-24T13:47:38 | 354,538,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from tkinter import *
# Minimal to-do list window: an entry box, an "enter" button that appends the
# typed text to the text area, and a (currently inert) "save" button.
root = Tk()
root.geometry('300x400')
btn2 = Button(text = 'save')  # NOTE(review): no command= is wired up, so this button does nothing
btn2.pack()
text = Entry()  # single-line input for a new to-do entry
text.pack()
list = Text()  # NOTE(review): shadows the builtin `list`; rename if the script grows
list.pack()
def add ():
    # Append the entry's current text (plus a newline) to the text area.
    todo = text.get() + '\n'
    list.insert (END, todo)
btn = Button(text = 'enter',command=add)
btn.pack()
root.mainloop()
| [
"vladmain9@gmail.com"
] | vladmain9@gmail.com |
610a43592532b1432d27feb959a4f8be1037779f | f060182c25b7da8294e54bbf8a97e6f6b5fa6c22 | /bulletPointAdder.py | 61ff040ec7367f83329ddf9723ff1ebc93929cc6 | [] | no_license | jennymhkao/automate-boring-stuff | e7a0e129f9123778594f72dc8f2511de00f241f1 | 00614c82e4a2ac39a17e394bd848c7c7cad0da17 | refs/heads/master | 2023-03-05T00:26:27.271558 | 2021-02-14T20:14:43 | 2021-02-14T20:14:43 | 286,122,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | #! python3
# bulletPointAdder.py - add star bullet point and space at beginning of each line.
import pyperclip
# Read the clipboard, prefix every line with "* ", and write the result back.
text = pyperclip.paste()
newText = text.split('\n')
for i in range(len(newText)): # loop through all indexes in the "newText" list
    newText[i] = '* ' + newText[i] # add star to each string in "newText" list
new = '\n'.join(newText)
# BUG FIX: the original copied the unmodified `text` back to the clipboard,
# discarding the bulleted version (`new`) it had just built.
pyperclip.copy(new)
"k.jennymh@gmail.com"
] | k.jennymh@gmail.com |
8fc886a306f419925d2ee225a17ed2480a5d4cef | 69fc90ecc891ff34d74e38a83a3936baddfd7ed6 | /reminders/migrations/0018_auto_20150825_0017.py | 6ac27643f2c938c8017180c7098ba51b5b9dab5a | [] | no_license | cep-15-cocos-bl/cep2015sem2 | 9624b058b7a99d6a4d297076b6ce060d4f3185d3 | b79809d4e2ec405da3ffa36918c4f0f8ee42e935 | refs/heads/master | 2021-01-15T09:09:52.180205 | 2015-09-29T07:31:30 | 2015-09-29T07:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a nullable ``owner`` FK (to accounts.UserProfile) on Task and TaskTag."""
    dependencies = [
        # accounts must be migrated first so the ForeignKey target resolves.
        ('accounts', '0001_initial'),
        ('reminders', '0017_auto_20150818_1620'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='owner',
            # null=True/blank=True lets existing rows migrate without an owner.
            field=models.ForeignKey(to='accounts.UserProfile', null=True, blank=True),
        ),
        migrations.AddField(
            model_name='tasktag',
            name='owner',
            field=models.ForeignKey(to='accounts.UserProfile', null=True, blank=True),
        ),
    ]
| [
"llinhong.2301@gmail.com"
] | llinhong.2301@gmail.com |
c43628058f0ac9589e5b5c5e12579f9f3ef2ea58 | 501952b4c60182ab0f2c4332fe23c7d1d3a81078 | /ChurnPrediction.py | 15704ebe67ff8e272c04ea43c5f37ac295332267 | [] | no_license | AIhmed/ChurnPrediction | 1d09097d885fa05bd6e53909d7ef375ca37e18e2 | 139d708e2fb21dc232dfe3f99b4830de3bcb6e6d | refs/heads/master | 2023-07-07T22:09:00.166003 | 2021-08-16T00:54:26 | 2021-08-16T00:54:26 | 386,054,344 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,525 | py | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as fn
from torch.utils.data import Dataset , DataLoader
# Load the pre-split churn datasets (80% train / 20% test).
trainSet=pd.read_csv('sample_data/churn-bigml-80.csv')
testSet=pd.read_csv('sample_data/churn-bigml-20.csv')
print(testSet.columns)
# Exploratory: 'International plan' value counts split by churn label.
valc=trainSet.loc[trainSet['Churn']==True,'International plan'].value_counts().values
valn=trainSet.loc[trainSet['Churn']==False,'International plan'].value_counts().values
np.array([valc,valn])  # NOTE(review): result is discarded — notebook leftover
group=trainSet.groupby('Churn')
# Per-churn-group summary statistics; results are discarded outside a notebook.
# NOTE(review): group['a', 'b'] tuple indexing is deprecated pandas usage.
group['Account length', 'Area code'].describe()
group['Total night charge','Total night minutes'].describe()
group['Total eve charge','Total eve minutes'].describe()
group['Total intl charge','Total intl calls'].describe()
def churn_to_num(churn):
    """Encode the boolean churn flag as a long tensor (False -> 0, else 1)."""
    label = 0 if churn == False else 1
    return torch.tensor(label, dtype=torch.long)
def category_to_num(val):
    """Encode a Yes/No categorical value as an int8 tensor ('No' -> 0, else 1)."""
    encoded = 0 if val == 'No' else 1
    return torch.tensor(encoded, dtype=torch.int8)
def get_correct_pred(pred, target):
    """Count rows of ``pred`` (class scores) whose argmax matches ``target``."""
    predicted_labels = pred.argmax(dim=1)
    return (predicted_labels == target).sum()
# Slice the four usage-period feature groups (minutes/calls/charge each).
night=trainSet[['Total night minutes','Total night calls','Total night charge']]
day=trainSet[['Total day minutes','Total day calls','Total day charge']]
evening=trainSet[['Total eve minutes','Total eve calls','Total eve charge']]
intl=trainSet[['Total intl minutes','Total intl calls','Total intl charge']]
# Build (3, n_samples) tensors: rows are minutes, calls, charge.
day_tensor=torch.tensor([day['Total day minutes'].values, day['Total day calls'].values , day['Total day charge'].values])
night_tensor=torch.tensor([night['Total night minutes'].values, night['Total night calls'].values , night['Total night charge'].values])
evening_tensor=torch.tensor([evening['Total eve minutes'].values, evening['Total eve calls'].values , evening['Total eve charge'].values])
intl_tensor=torch.tensor([intl['Total intl minutes'].values, intl['Total intl calls'].values , intl['Total intl charge'].values])
# Per-row 0/1 long-tensor labels.
target=trainSet['Churn'].apply(churn_to_num)
print(target)
# Exploratory: customer-service-call distributions per churn group
# (results discarded outside a notebook).
churners=trainSet[trainSet['Churn']==True]
noneChurners=trainSet[trainSet['Churn']==False]
churners['Customer service calls'].value_counts(normalize=True)
noneChurners['Customer service calls'].value_counts(normalize=True)
class ChurnClassifierDataset(Dataset):
    """Dataset pairing 12 per-period usage features with the churn label.

    Each of day/evening/night/intl is a (3, n_samples) tensor whose rows are
    minutes, calls and charge; __getitem__ stacks the four groups (three
    values each) into one 12-element feature vector.
    """
    def __init__(self, day, evening, night, intl, target):
        self.day = day
        self.evening = evening
        self.night = night
        self.intl = intl
        self.churn = target

    def __len__(self):
        return len(self.churn)

    def __getitem__(self, index):
        feature_values = []
        # Order matters: day, evening, night, intl — three rows each.
        for block in (self.day, self.evening, self.night, self.intl):
            for row in range(3):
                feature_values.append(block[row][index])
        return {
            'input_features': torch.tensor(feature_values),
            'target': self.churn[index],
        }
train_set=ChurnClassifierDataset(day_tensor,evening_tensor,night_tensor,intl_tensor,target)
dataLoader=DataLoader(train_set,shuffle=True)
# Pull one batch to learn the input shape used to size the model layers.
first=next(iter(dataLoader))
shape=first['input_features'].shape
input=first['input_features']  # NOTE(review): shadows the builtin `input`
input_target=first['target']
print(shape)
# Smoke test of a single linear layer on the flattened batch; the result
# is discarded.
layer1=nn.Linear(shape[0]*shape[1],24)
fn.leaky_relu(layer1(input.reshape(1,-1).float()))
class ChurnPrediction(nn.Module):
    """Four-layer MLP that flattens a whole (rows, features) batch into one
    vector and emits softmax scores of shape (rows, nbr_classes).

    BUG FIX: ``forward`` previously read the module-level global ``shape`` and
    hard-coded 2 classes in its final reshape; both now come from the
    constructor arguments (identical behavior for the shapes this script
    uses, but the model no longer depends on a global).
    """
    def __init__(self, shape, nbr_classes):
        super(ChurnPrediction, self).__init__()
        # Stored so forward() does not rely on module-level globals.
        self._rows = shape[0]
        self._classes = nbr_classes
        self.layer1 = nn.Linear(shape[0]*shape[1], shape[0]*24)
        self.layer2 = nn.Linear(shape[0]*24, shape[0]*48)
        self.layer3 = nn.Linear(shape[0]*48, shape[0]*shape[1])
        self.layer4 = nn.Linear(shape[0]*shape[1], shape[0]*nbr_classes)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, t):
        # Flatten the whole batch into a single (1, rows*features) vector.
        t = fn.leaky_relu(self.layer1(t.reshape(1, -1).float()))
        t = fn.leaky_relu(self.layer2(t.float()))
        t = fn.leaky_relu(self.layer3(t.float()))
        t = fn.leaky_relu(self.layer4(t.float()))
        # Reshape back to one score row per input sample, then normalise.
        return self.softmax(t.reshape(self._rows, self._classes))
# One manual optimisation step as a sanity check before the training loop.
classifier=ChurnPrediction(shape,2)
# NOTE(review): nn.NLLLoss expects log-probabilities, but the model outputs
# raw softmax probabilities — consider log_softmax + NLLLoss (or
# CrossEntropyLoss on the pre-softmax logits).
lossfn=nn.NLLLoss()
optimizer= torch.optim.Adam(classifier.parameters(),0.001)
preds=classifier(input)
optimizer.zero_grad()
classifier.layer1.weight.grad  # inspect: grads are None before backward()
loss=lossfn(preds,input_target)
loss.item()
loss.backward()
optimizer.step()
classifier.layer1.weight.grad  # inspect: grads populated after backward()
# Training loop: two epochs over the shuffled training loader.
for epoch in range(2):
    correct_pred=0.0
    for sample in dataLoader:
        input=sample['input_features']
        print(input.shape)
        #print(input)
        target=sample['target']
        #print(target)
        preds=classifier(input)
        print(preds)
        # Running count of correct predictions this epoch.
        correct_pred=get_correct_pred(preds,target)+correct_pred
        print(f'number of correct prediction is { correct_pred} \n\n\n out {len(trainSet)}')
        loss=lossfn(preds,target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f'{epoch} is done \n\n')
# Persist the trained weights next to the input data.
torch.save(classifier.state_dict(),'sample_data/saved_params.pth')
print('done with the training')
# Build the same feature tensors for the held-out 20% test split.
tnight=testSet[['Total night minutes','Total night calls','Total night charge']]
tday=testSet[['Total day minutes','Total day calls','Total day charge']]
tevening=testSet[['Total eve minutes','Total eve calls','Total eve charge']]
tintl=testSet[['Total intl minutes','Total intl calls','Total intl charge']]
tday_tensor=torch.tensor([tday['Total day minutes'].values, tday['Total day calls'].values , tday['Total day charge'].values])
tnight_tensor=torch.tensor([tnight['Total night minutes'].values, tnight['Total night calls'].values , tnight['Total night charge'].values])
tevening_tensor=torch.tensor([tevening['Total eve minutes'].values, tevening['Total eve calls'].values , tevening['Total eve charge'].values])
tintl_tensor=torch.tensor([tintl['Total intl minutes'].values, tintl['Total intl calls'].values , tintl['Total intl charge'].values])
ttarget=testSet['Churn'].apply(churn_to_num)
test_set=ChurnClassifierDataset(tday_tensor,tevening_tensor,tnight_tensor,tintl_tensor,ttarget)
test_set[0]['input_features'].reshape(1,-1).shape  # shape check; result discarded
testLoader=DataLoader(test_set,shuffle=True)
# Evaluate sample by sample and accumulate the number of correct predictions.
correct_pred=0.0
for sample in testLoader:
    input=sample['input_features'].reshape(1,-1)
    print(input)
    print(f'the dimensions the the input {input.shape}\n\n')
    target=sample['target']
    print(input.shape)
    preds=classifier(input)
    correct_pred=get_correct_pred(preds,target)+correct_pred
print(f'numbre of correct prediction {correct_pred} out of {len(test_set)}')
"bouliche.ahmed.2@gmail.com"
] | bouliche.ahmed.2@gmail.com |
525b70fa2854ab0628bdc743b96501d0475c36c0 | 0ad1e5559c2f475ffac77b10275c2c64d2a36a9e | /hideandseek/hideandseek.py | 96ab4e39e4855f760aa208d58501ca365c864121 | [] | no_license | csyouk/python-playground | 3d165c9462427525b538ceb8de746a62e9190500 | 4bd8ffcb7ca46cda1e9f6f3f2e1e613954a9f58a | refs/heads/master | 2020-09-24T03:54:00.770078 | 2017-11-12T01:56:22 | 2017-11-12T01:56:22 | 66,607,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | # coding: utf-8
import urllib2
urllink = 'http://hideandseek.net/projects/'
fileformat = '.html'
# projectnamelist = ['searchlight','99-tiny-games','the-building-is']
projectnamelist = ['the-sandpit','the-hideseek-weekender','last-will','drunk-dungeon','would-anyone-miss-you','consultancy','tiny-christmas-games','the-new-year-games','dreams-of-your-life','the-show-must-go-on','tiny-games','hinterland','green-lantern','the-boardgame-remix-kit','playstation-game-runners','battlefield','the-wonderlab','the-london-poetry-game-2','tate-trumps','221b','ntw-05-–-the-beach','international-sandpit-project','va-lates-playgrounds','playmakers']
for project in projectnamelist:
link = urllink + project
filename = project + fileformat
request = urllib2.Request(link)
request.add_header("User-Agent","Python Crawler")
opener = urllib2.build_opener()
response = opener.open(request)
html = response.read()
f = open(filename,'w')
f.write(html)
f.close()
| [
"csyouk@hanmail.net"
] | csyouk@hanmail.net |
4d7886f416baba1c84d182a66f20391da7c27df2 | 0d5c77661f9d1e6783b1c047d2c9cdd0160699d1 | /python/paddle/fluid/tests/unittests/test_row_conv_op.py | 07dcd108689ae6069e30fe22029258d192215549 | [
"Apache-2.0"
] | permissive | xiaoyichao/anyq_paddle | ae68fabf1f1b02ffbc287a37eb6c0bcfbf738e7f | 6f48b8f06f722e3bc5e81f4a439968c0296027fb | refs/heads/master | 2022-10-05T16:52:28.768335 | 2020-03-03T03:28:50 | 2020-03-03T03:28:50 | 244,155,581 | 1 | 0 | Apache-2.0 | 2022-09-23T22:37:13 | 2020-03-01T13:36:58 | C++ | UTF-8 | Python | false | false | 3,441 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
def row_conv_forward(x, lod, wt):
out = np.zeros_like(x)
num_sequences = len(lod[0])
seq_info = [0]
for seq_len in lod[0]:
seq_info.append(seq_info[-1] + seq_len)
context_length = wt.shape[0]
for i in range(num_sequences): # loop over number of sequences
start = seq_info[i]
end = seq_info[i + 1]
curinput = x[start:end, :]
curoutput = out[start:end, :]
cur_timesteps = end - start
for j in range(cur_timesteps): # loop over different timesteps
for k in range(context_length):
if j + k >= cur_timesteps:
continue
curoutput[j, :] += curinput[j + k, :] * wt[k, :]
return out
class TestRowConvOp1(OpTest):
def setUp(self):
self.op_type = "row_conv"
lod = [[2, 3, 2]]
T = sum(lod[0])
D = 16
context_length = 2
x = np.random.random((T, D)).astype("float32")
wt = np.random.random((context_length, D)).astype("float32")
self.inputs = {'X': (x, lod), 'Filter': wt}
out = row_conv_forward(x, lod, wt)
self.outputs = {'Out': (out, lod)}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.05)
def test_check_grad_ignore_x(self):
self.check_grad(
['Filter'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
def test_check_grad_ignore_wt(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Filter'))
class TestRowConvOp2(OpTest):
def setUp(self):
self.op_type = "row_conv"
lod = [[20, 30, 50]]
T = sum(lod[0])
D = 35
context_length = 35
x = np.random.random((T, D)).astype("float32")
wt = np.random.random((context_length, D)).astype("float32")
self.inputs = {'X': (x, lod), 'Filter': wt}
out = row_conv_forward(x, lod, wt)
self.outputs = {'Out': (out, lod)}
def test_check_output(self):
self.check_output()
#max_relative_error is increased from 0.05 to 0.06 as for higher
#dimensional input, the dX on CPU for some values has max_rel_error
#slightly more than 0.05
def test_check_grad_normal(self):
self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.06)
def test_check_grad_ignore_x(self):
self.check_grad(
['Filter'], 'Out', max_relative_error=0.06, no_grad_set=set('X'))
def test_check_grad_ignore_wt(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Filter'))
if __name__ == '__main__':
unittest.main()
| [
"xiaoyichao@haohaozhu.com"
] | xiaoyichao@haohaozhu.com |
e7846284c7e134592127b48bc185fe593b0949ec | fe7b700cfe3c06d89d18ffad3eeeb3b8220c1759 | /pipeline/feature-classification/exp-3/selection-extraction/pca/pipeline_classifier_mrsi.py | 30793e60571a5f7f0342ae4b772cf21d2691ce80 | [
"MIT"
] | permissive | DivyaRavindran007007/mp-mri-prostate | 928684a607cf03a2d76ea3e3e5b971bbd3a1dd01 | bd420534b4b5c464e5bbb4a07eabdc8724831f8a | refs/heads/master | 2021-06-08T21:09:15.850708 | 2016-10-20T16:08:57 | 2016-10-20T16:08:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,823 | py | """This pipeline is intended to make the classification of MRSI modality
features."""
from __future__ import division
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from protoclass.data_management import GTModality
# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# Define a list of the path where the feature are kept
mrsi_features = ['mrsi-spectra']
ext_features = ['_spectra_mrsi.npy']
# Define the path of the balanced data
path_balanced = '/data/prostate/balanced/mp-mri-prostate/exp-3/smote'
ext_balanced = '_mrsi.npz'
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
# Append for the GT data - Note that we need a list of gt path
path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
data = []
data_bal = []
label = []
label_bal = []
for idx_pat in range(len(id_patient_list)):
print 'Read patient {}'.format(id_patient_list[idx_pat])
# For each patient we nee to load the different feature
patient_data = []
for idx_feat in range(len(mrsi_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
ext_features[idx_feat])
path_data = os.path.join(path_features, mrsi_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Check if this is only one dimension data
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data.append(single_feature_data)
# Concatenate the data in a single array
patient_data = np.concatenate(patient_data, axis=1)
print 'Imbalanced feature loaded ...'
# Load the dataset from each balancing method
data_bal_meth = []
label_bal_meth = []
pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
ext_balanced)
filename = os.path.join(path_balanced, pat_chg)
npz_file = np.load(filename)
data_bal.append(npz_file['data_resampled'])
label_bal.append(npz_file['label_resampled'])
print 'Balanced data loaded ...'
# Create the corresponding ground-truth
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt,
path_patients_list_gt[idx_pat])
print 'Read the GT data for the current patient ...'
# Concatenate the training data
data.append(patient_data)
# Extract the corresponding ground-truth for the testing data
# Get the index corresponding to the ground-truth
roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
# Get the label of the gt only for the prostate ROI
gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
label.append(gt_cap[roi_prostate])
print 'Data and label extracted for the current patient ...'
# Define the different level of sparsity
sparsity_level = [2, 4, 8, 16, 24, 32, 36]
results_sp = []
for sp in sparsity_level:
result_cv = []
# Go for LOPO cross-validation
for idx_lopo_cv in range(len(id_patient_list)):
# Display some information about the LOPO-CV
print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
# Get the testing data
testing_data = data[idx_lopo_cv]
testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
print 'Create the testing set ...'
# Create the training data and label
# We need to take the balanced data
training_data = [arr for idx_arr, arr in enumerate(data_bal)
if idx_arr != idx_lopo_cv]
training_label = [arr for idx_arr, arr in enumerate(label_bal)
if idx_arr != idx_lopo_cv]
# Concatenate the data
training_data = np.vstack(training_data)
training_label = np.ravel(label_binarize(
np.hstack(training_label).astype(int), [0, 255]))
print 'Create the training set ...'
# Learn the PCA projection
pca = PCA(n_components=sp, whiten=True)
training_data = pca.fit_transform(training_data)
testing_data = pca.transform(testing_data)
# Perform the classification for the current cv and the
# given configuration
crf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
pred_prob = crf.fit(training_data,
np.ravel(training_label)).predict_proba(
testing_data)
result_cv.append([pred_prob, crf.classes_])
results_sp.append(result_cv)
# Save the information
path_store = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/pca/mrsi'
if not os.path.exists(path_store):
os.makedirs(path_store)
joblib.dump(results_sp, os.path.join(path_store,
'results.pkl'))
| [
"glemaitre@visor.udg.edu"
] | glemaitre@visor.udg.edu |
56c1eb3345ca5730e376f0f94bff64c4c7ab0f63 | f614e8567f9458e298c651d0be166da9fc72b4bf | /Students/Theo/Django/Lab2/lab2_project/wsgi.py | 326cd9e222d5bbcbf4647d57ed5562f67c3dc3cb | [] | no_license | PdxCodeGuild/class_Binary_Beasts | 458c5be00b7bce3bb4ac9b7ab485c47f72be4294 | b1298cb5d74513873f82be4ed37676f8b0de93dd | refs/heads/master | 2023-06-28T07:05:21.703491 | 2021-07-29T03:44:09 | 2021-07-29T03:44:09 | 344,980,863 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for lab2_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lab2_project.settings')
application = get_wsgi_application()
| [
"theo@Theos-MacBook-Air.local"
] | theo@Theos-MacBook-Air.local |
d630012bff87fda4be1257cfdfb3e5f27ef5b28b | c65ebae44586bde4052190c9d5a8476ff06e2b86 | /ICONRepClassification_Py/src/com/prj/bundle/modelling/BiLSTMDimModel.py | c6c08ac133f8697656d7739ea7b6a74fe750f78f | [] | no_license | warikoone/LpGBoost | 432f99fc2c28e504c2b1ab54fa36a56db1c4a048 | eb2f263ea0970ee9f698d89c80cbb8f413e5f345 | refs/heads/master | 2020-09-15T04:06:25.905608 | 2019-11-22T14:46:19 | 2019-11-22T14:46:19 | 223,344,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | '''
Created on Dec 21, 2018
@author: iasl
'''
import sys
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Dense, LSTM, Input, Flatten
from keras.optimizers import Adam
from keras.layers.embeddings import Embedding
from keras.layers.advanced_activations import LeakyReLU, PReLU
from tensorflow import set_random_seed
from numpy.random import seed
class BiLSTMSequenceDimModel:
def __init__(self):
self.instanceSpan = 0
self.featureDimension = 0
self.x_train = np.array([])
def lstmModelConfiguration(self):
hybridFeedDimension = self.featureDimension
sequentialLayeredLSTM = Sequential()
sequentialLayeredLSTM.add(LSTM(hybridFeedDimension, return_sequences=True, input_shape=(self.instanceSpan, self.featureDimension)))
# sequentialLayeredLSTM.add(Flatten())
sequentialLayeredLSTM.add(Dense(1, activation='relu'))
# sequentialLayeredLSTM.add(Dense(1, activation='sigmoid'))
# sequentialLayeredLSTM.add(Dense(1, activation='tanh'))
# sequentialLayeredLSTM.add(Dense(1, activation='linear'))
# sequentialLayeredLSTM.add(LeakyReLU(alpha=0.001))
#
#
# print("prior input shape>>",self.x_train.shape)
transientStateScores = np.array(sequentialLayeredLSTM.predict(self.x_train))
# print("output transient shape>>",transientStateScores.shape)
return(transientStateScores)
seed(1)
set_random_seed(2)
| [
"noreply@github.com"
] | noreply@github.com |
ba214d989e2726501079dcc571ad87592b704bf7 | ee30942018813203a23d33e8a5c871c73d51092e | /DNS/myproject/myproject/urls.py | fdffd138bc6384e277b9962a1fefe0530c630784 | [] | no_license | vedavidhbudimuri/distribued-files-storage | 637328aac89064415d795db4dc6fd873731aa7cb | 908c97fa540c9e697982335d31d967dfd325f8fa | refs/heads/master | 2021-01-01T04:27:06.741953 | 2016-04-16T16:12:57 | 2016-04-16T16:12:57 | 56,390,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import RedirectView
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
# url(r'^mainserver/',include('mainserver.urls')),
url(r'^dnsserver/',include('dnsserver.urls')),
# url(r'^backupserver/',include('backupserver.urls')),
# url(r'^$', RedirectView.as_view(url='/mainserver/savefile/', permanent=True)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"vedavidhbudimuri@gmail.com"
] | vedavidhbudimuri@gmail.com |
53f871e50d2de10422c10dea7f85c339c8de2718 | 40e0d194edd83673d75694d81e3bc8c277d1ec18 | /funkcije.py | c48f87ceb253228c8323966e76b394c3d198ed90 | [] | no_license | MarkoGlavas78/Zadaca | 3c1aa9fce21d2bc1b5182ec03deb75bee74d3b16 | b48ac04ba0a8d4fb4fc77101b4482d8d4ca55513 | refs/heads/master | 2022-07-17T08:40:00.580843 | 2020-05-09T15:52:22 | 2020-05-09T15:52:22 | 262,602,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import likovi
from math import pi
def opseg(lik):
if isinstance(lik, likovi.Kruznica):
return 2*lik.radius()*pi
if isinstance(lik, likovi.Kvadrat):
return 4*lik.stranica()
def povrsina(lik):
if isinstance(lik, likovi.Kruznica):
return lik.radius()*lik.radius()*pi
if isinstance(lik, likovi.Kvadrat):
return lik.stranica()*lik.stranica()
if __name__=="__main__":
print('*** test funkcije ***')
print(opseg.__name__)
print(povrsina.__name__)
| [
"noreply@github.com"
] | noreply@github.com |
5b5156cbef313f7ca0e43f0c83256fc08a1596fe | 85f1908ef3c6629d71fa821ff9f76a9fe78393a8 | /check_open.py | 8c0c0787431ff5915bb482afc92514fe96357955 | [] | no_license | PRANAVI2402/prachan | a575d0d1386e76a942f1a4ba45a0ede5bcbb8e21 | 4c112ca71e423e07f47b5d08f682971a41607eaf | refs/heads/master | 2020-08-01T15:49:50.732156 | 2019-09-27T07:02:15 | 2019-09-27T07:02:15 | 211,038,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | '''fo = open("foo.txt", "w")
print ("Name of the file: ", fo.name)
print ("Closed or not : ", fo.closed)
print ("Opening mode : ", fo.mode)
fo.close()'''
# Open a file
'''fo = open("foo.txt", "r+")
#print ("Name of the file: ", fo.name)
#fo.write( "Python is a great language.\nYeah its great!!\n")
str1 = fo.read(10)
print ("Read String is : ", str1)
# Close opened file
fo.close()'''
#import os
# Rename a file from test1.txt to test2.txt
#os.rename( "foo.txt", "foo1.txt" )
#os.remove("foo1.txt")
#sero-dision error
#d=10/0
#print(d)
'''
try:
d=10/0
except ZeroDivisionError as e :
#print("you cant divide by zero {0}".format(e))
print("you cant divide by 0",e)
print("proceed")
l=[]
try:
print(l[0])
except IndexError as e :
#print("you cant divide by zero {0}".format(e))
print("Not assigned any value to the index".format(e))
#index error
#l=[]
#print(l[1])
#attribute Error
#l=[]
#print(l.get())'''
#name error
#l=9
#print(l)
import math
def sqrt(x):
if not isinstance(x, (int,float)):
raise TypeError('x must be integer')
elif x < 0 :
raise ValueError(' x cannot be negative')
else:
return math.sqrt(x)
'''
try:
print(sqrt("9"))
print(sqrt(12))
except Exception as e:
print("Error:", e)
'''
#print(sqrt("9"))
print(sqrt(12)) | [
"pranavi2402@gmail.com"
] | pranavi2402@gmail.com |
c1f0f56f1f31047cfc5c943b9b8cb27094c83a27 | 69bb1d0e824625876207d492722adfdb9d959ad1 | /Codeforces/antonAndDanik.py | c059ac795188e2be373516cbb3ff30f3a2ece7af | [] | no_license | domiee13/dungcaythuattoan | 8e2859264515e0fac3e9f0642a8b79ce5d966fff | 7e95d037d47d6e4777e9cf56b9827c3e42f556b3 | refs/heads/master | 2023-03-28T03:58:44.225136 | 2021-03-29T10:32:52 | 2021-03-29T10:32:52 | 277,798,242 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | # A. Anton and Danik
# time limit per test1 second
# memory limit per test256 megabytes
# inputstandard input
# outputstandard output
# Anton likes to play chess, and so does his friend Danik.
# Once they have played n games in a row. For each game it's known who was the winner — Anton or Danik. None of the games ended with a tie.
# Now Anton wonders, who won more games, he or Danik? Help him determine this.
# Input
# The first line of the input contains a single integer n (1 ≤ n ≤ 100 000) — the number of games played.
# The second line contains a string s, consisting of n uppercase English letters 'A' and 'D' — the outcome of each of the games. The i-th character of the string is equal to 'A' if the Anton won the i-th game and 'D' if Danik won the i-th game.
# Output
# If Anton won more games than Danik, print "Anton" (without quotes) in the only line of the output.
# If Danik won more games than Anton, print "Danik" (without quotes) in the only line of the output.
# If Anton and Danik won the same number of games, print "Friendship" (without quotes).
# Examples
# inputCopy
# 6
# ADAAAA
# outputCopy
# Anton
# inputCopy
# 7
# DDDAADA
# outputCopy
# Danik
# inputCopy
# 6
# DADADA
# outputCopy
# Friendship
t = int(input())
s = input()
if s.count('A')>s.count('D'):
print("Anton")
elif s.count('A')<s.count('D'):
print("Danik")
else:
print("Friendship") | [
"dungngocmd@gmail.com"
] | dungngocmd@gmail.com |
63e07fc1ff6aecff93869fb170be7e3769d99142 | 30145c279ee8b657215720a109a6fb4e5d33ba1e | /helperFunctions.py | cc5f73a47533335ce5c507e0555650cee799fe7a | [] | no_license | pcoh/SDCND_VehicleDetection | 30107ddcc4eb6be5348641ddf0dfde02929a2f2a | fd58712ccfc158e71b038d29ddcc7d4c0f707d10 | refs/heads/master | 2021-01-21T09:53:43.236928 | 2017-03-05T01:40:15 | 2017-03-05T01:40:15 | 83,347,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,099 | py | import matplotlib.image as mpimg
import numpy as np
import cv2
import time
from skimage.feature import hog
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
    """Compute HOG features for one image channel.

    Parameters
    ----------
    img : 2-D single-channel image.
    orient : number of gradient orientation bins.
    pix_per_cell : cell size in pixels (square cells).
    cell_per_block : block size in cells (square blocks).
    vis : when True, additionally return a HOG visualization image.
    feature_vec : when True, return the features flattened to a 1-D vector.

    Returns
    -------
    features, or (features, hog_image) when vis is True.
    """
    # Build the keyword set once so both call paths stay in sync.
    hog_params = dict(
        orientations=orient,
        pixels_per_cell=(pix_per_cell, pix_per_cell),
        cells_per_block=(cell_per_block, cell_per_block),
        transform_sqrt=False,
        visualise=vis,
        feature_vector=feature_vec,
    )
    if vis:
        # skimage returns a (features, visualization) pair in this mode.
        features, hog_image = hog(img, **hog_params)
        return features, hog_image
    return hog(img, **hog_params)
def bin_spatial(img, size=(32, 32)):
    """Spatially-binned color features.

    Each of the three color channels is downsampled to `size` and
    flattened; the three flat vectors are stacked into one 1-D array.
    """
    flattened = [cv2.resize(img[:, :, ch], size).ravel() for ch in range(3)]
    return np.hstack(flattened)
def color_hist(img, nbins=32):
    """Color-histogram feature vector.

    Histograms each of the three color channels separately with `nbins`
    bins and concatenates the three count arrays (3 * nbins values).
    """
    # Keep only the bin counts ([0]); bin edges are not part of the features.
    per_channel_counts = [np.histogram(img[:, :, ch], bins=nbins)[0] for ch in range(3)]
    return np.concatenate(per_channel_counts)
# Extract hog features and/or spacially binned color features and/or color-histogram features from a list of images
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32), hist_bins=32, orient=9, pix_per_cell=8, cell_per_block=2, hog_channel=0, spatial_feat=True, hist_feat=True, hog_feat=True):
    """Extract a feature vector for every image file in `imgs`.

    Reads each file with matplotlib (so .png files come in as 0-1 floats)
    and delegates the per-image work to extract_features_singleImg(), which
    previously was duplicated here verbatim.  Keeping a single implementation
    guarantees training-time and prediction-time features stay identical.

    Parameters
    ----------
    imgs : list of image file paths.
    color_space : 'RGB', 'HSV', 'LUV', 'HLS', 'YUV' or 'YCrCb'.
    spatial_size, hist_bins : color-feature parameters.
    orient, pix_per_cell, cell_per_block, hog_channel : HOG parameters
        (hog_channel is a channel index or 'ALL').
    spatial_feat, hist_feat, hog_feat : toggles for each feature family.

    Returns
    -------
    List of 1-D numpy feature vectors, one per input file, each ordered
    [spatial | histogram | HOG] (for the enabled families).
    """
    features = []
    for file in imgs:
        image = mpimg.imread(file)
        features.append(extract_features_singleImg(
            image,
            color_space=color_space,
            spatial_size=spatial_size,
            hist_bins=hist_bins,
            orient=orient,
            pix_per_cell=pix_per_cell,
            cell_per_block=cell_per_block,
            hog_channel=hog_channel,
            spatial_feat=spatial_feat,
            hist_feat=hist_feat,
            hog_feat=hog_feat))
    return features
# Extract hog features and/or spacially binned color features and/or color-histogram features from a single images
def extract_features_singleImg(img, color_space='RGB', spatial_size=(32, 32), hist_bins=32, orient=9, pix_per_cell=8, cell_per_block=2, hog_channel=0, spatial_feat=True, hist_feat=True, hog_feat=True):
    """Extract the combined feature vector for a single image.

    Depending on the toggles, concatenates in this fixed order:
      1. spatially binned color features (bin_spatial),
      2. color histogram features (color_hist),
      3. HOG features (get_hog_features) for one channel or 'ALL'.
    The same order is assumed by find_cars() and by any fitted scaler.

    Parameters
    ----------
    img : RGB image array.
    color_space : 'RGB', 'HSV', 'LUV', 'HLS', 'YUV' or 'YCrCb'.
    hog_channel : channel index, or 'ALL' for all three channels.

    Returns
    -------
    1-D numpy array with the concatenated features.

    Raises
    ------
    ValueError for an unrecognized color_space.  (The previous elif chain
    left `feature_image` unbound for unknown values and crashed with a
    NameError; failing explicitly is clearer.)
    """
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
    }
    if color_space == 'RGB':
        # Copy so downstream feature code never mutates the caller's image.
        feature_image = np.copy(img)
    elif color_space in conversions:
        feature_image = cv2.cvtColor(img, conversions[color_space])
    else:
        raise ValueError('Unsupported color space: {}'.format(color_space))

    img_features = []
    if spatial_feat:
        img_features.append(bin_spatial(feature_image, size=spatial_size))
    if hist_feat:
        img_features.append(color_hist(feature_image, nbins=hist_bins))
    if hog_feat:
        if hog_channel == 'ALL':
            # Flattened HOG vectors of all three channels, back to back.
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel], orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True)
        img_features.append(hog_features)
    return np.concatenate(img_features)
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with every bounding box drawn on it.

    Each box in `bboxes` is ((x1, y1), (x2, y2)); the input image is
    never modified.
    """
    annotated = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(annotated, top_left, bottom_right, color, thick)
    return annotated
def convert_color(img, conv='RGB2YCrCb'):
    """Convert `img` between color spaces.

    Supported conversions: 'RGB2YCrCb', 'BGR2YCrCb', 'RGB2LUV'.

    Raises
    ------
    ValueError for an unsupported `conv` string.  (The previous if-chain
    silently fell through and returned None, which only surfaced later as
    a confusing error in the caller.)
    """
    conversions = {
        'RGB2YCrCb': cv2.COLOR_RGB2YCrCb,
        'BGR2YCrCb': cv2.COLOR_BGR2YCrCb,
        'RGB2LUV': cv2.COLOR_RGB2LUV,
    }
    try:
        code = conversions[conv]
    except KeyError:
        raise ValueError('Unsupported color conversion: {}'.format(conv))
    return cv2.cvtColor(img, code)
def trainClassifier1(cars, notcars, color_space, spatial_size, hist_bins, orient, pix_per_cell, cell_per_block, hog_channel, spatial_feat, hist_feat, hog_feat):
    """Train a LinearSVC car / not-car classifier.

    Features are extracted from both file lists with extract_features(),
    normalized with a per-column StandardScaler, split 80/20 into
    train/test sets, and fit with a linear SVM.  Training time and test
    accuracy are printed.

    Parameters
    ----------
    cars, notcars : lists of image file paths for the positive / negative class.
    remaining args : feature-extraction parameters, passed straight through
        to extract_features(); the SAME values must be reused at prediction
        time or the feature layout will not match.

    Returns
    -------
    (svc, X_scaler) : the fitted classifier and the fitted scaler.  The
    scaler must be applied to every feature vector before calling predict().
    """
    car_features = extract_features(cars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
    notcar_features = extract_features(notcars, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)

    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    # Fit a per-column scaler and normalize the feature matrix.
    X_scaler = StandardScaler().fit(X)
    scaled_X = X_scaler.transform(X)

    # Labels: 1 for cars, 0 for non-cars (order matches the vstack above).
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))

    # Split up data randomly into training and test sets.
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)

    print('Using:',orient,'orientations',pix_per_cell, 'pixels per cell and', cell_per_block,'cells per block')
    print('Feature vector length:', len(X_train[0]))

    # Train the linear SVM and report timing and held-out accuracy.
    svc = LinearSVC()
    t=time.time()
    svc.fit(X_train, y_train)
    t2 = time.time()
    print(round(t2-t, 2), 'Seconds to train SVC...')
    print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
    return svc, X_scaler
# Define a single function that can extract features using hog sub-sampling and make predictions:
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
    """Search one horizontal band of `img` for cars using HOG sub-sampling.

    HOG features are computed ONCE for the whole (scaled) search region and
    sub-sampled per sliding window, which is far cheaper than recomputing
    HOG for every window.  Each window additionally gets spatially-binned
    color and color-histogram features; the combined vector is scaled with
    `X_scaler` and classified with `svc`.  All feature parameters must match
    the ones used at training time.

    Parameters
    ----------
    img : RGB frame with 0-255 integer values.
    ystart, ystop : vertical bounds of the search band in pixels.
    scale : window scale relative to the 64x64 training patch size.
    svc : trained classifier exposing predict().
    X_scaler : fitted scaler matching the [spatial|hist|HOG] feature layout.

    Returns
    -------
    (draw_img, box_list) : a copy of `img` with positive windows drawn, and
    the positive windows as ((x1, y1), (x2, y2)) in original image coords.
    """
    box_list = []
    draw_img = np.copy(img)
    # Training images were PNGs read as 0-1 floats; normalize the 0-255
    # frame so features match the training distribution.
    img = img.astype(np.float32)/255

    img_tosearch = img[ystart:ystop,:,:]
    ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
    if scale != 1:
        # Shrinking the search region by 1/scale is equivalent to growing
        # the 64x64 window by `scale`.
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))

    ch1 = ctrans_tosearch[:,:,0]
    ch2 = ctrans_tosearch[:,:,1]
    ch3 = ctrans_tosearch[:,:,2]

    # Define blocks and steps across the search region.
    nxblocks = (ch1.shape[1] // pix_per_cell)-1
    nyblocks = (ch1.shape[0] // pix_per_cell)-1
    # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell)-1
    cells_per_step = 2 # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step

    # Compute individual channel HOG features for the entire image
    hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)

    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb*cells_per_step
            xpos = xb*cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

            xleft = xpos*pix_per_cell
            ytop = ypos*pix_per_cell

            # Extract the image patch, resized back to the 64x64 training size.
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))

            # Get color features
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)

            # Scale features and make a prediction; the hstack order must
            # match the order used when fitting X_scaler.
            test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
            test_prediction = svc.predict(test_features)

            if test_prediction == 1:
                # Map the window back to original image coordinates.
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
                box_list.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)))
                cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6)

    return draw_img, box_list
def add_heat(heatmap, bbox_list):
    """Increment `heatmap` (in place) inside every box and return it.

    Each box is ((x1, y1), (x2, y2)); the pixel region
    heatmap[y1:y2, x1:x2] gains +1 per box, so overlapping detections
    accumulate higher heat.
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def draw_labeled_bboxes(img, labels):
    """Draw a thick blue rectangle around each labeled region.

    `labels` is a (label_array, n_labels) pair — presumably the output of
    scipy.ndimage.label, where regions are numbered 1..n_labels.  Returns
    the (mutated) image.
    """
    label_array, n_labels = labels[0], labels[1]
    for car_number in range(1, n_labels + 1):
        # Pixel coordinates belonging to this region.
        rows, cols = (label_array == car_number).nonzero()
        nonzeroy = np.array(rows)
        nonzerox = np.array(cols)
        # Bounding box from the min/max x and y of the region.
        top_left = (np.min(nonzerox), np.min(nonzeroy))
        bottom_right = (np.max(nonzerox), np.max(nonzeroy))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
# # Define a function that takes an image, start and stop positions in both x and y, window size (x and y dimensions), and overlap fraction (for both x and y)
# def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# # If x and/or y start/stop positions not defined, set to image size
# if x_start_stop[0] == None:
# x_start_stop[0] = 0
# if x_start_stop[1] == None:
# x_start_stop[1] = img.shape[1]
# if y_start_stop[0] == None:
# y_start_stop[0] = 0
# if y_start_stop[1] == None:
# y_start_stop[1] = img.shape[0]
# # Compute the span of the region to be searched
# xspan = x_start_stop[1] - x_start_stop[0]
# yspan = y_start_stop[1] - y_start_stop[0]
# # Compute the number of pixels per step in x/y
# nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))
# ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))
# # Compute the number of windows in x/y
# # nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))
# # ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))
# # nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step)
# # ny_windows = np.int((yspan-ny_buffer)/ny_pix_per_step)
# nx_windows = np.int(xspan/nx_pix_per_step) - 1
# ny_windows = np.int(yspan/ny_pix_per_step) - 1
# # Initialize a list to append window positions to
# window_list = []
# # Loop through finding x and y window positions
# # Note: you could vectorize this step, but in practice
# # you'll be considering windows one by one with your
# # classifier, so looping makes sense
# for ys in range(ny_windows):
# for xs in range(nx_windows):
# # Calculate window position
# startx = xs*nx_pix_per_step + x_start_stop[0]
# endx = startx + xy_window[0]
# starty = ys*ny_pix_per_step + y_start_stop[0]
# endy = starty + xy_window[1]
# # Append window position to list
# window_list.append(((startx, starty), (endx, endy)))
# # Return the list of windows
# return window_list
# # Define a function you will pass an image and the list of windows to be searched (output of slide_windows())
# def search_windows(img, windows, clf, scaler, color_space='RGB', spatial_size=(32, 32), hist_bins=32, hist_range=(0, 256), orient=9, pix_per_cell=8, cell_per_block=2, hog_channel=0, spatial_feat=True, hist_feat=True, hog_feat=True):
# #1) Create an empty list to receive positive detection windows
# on_windows = []
# #2) Iterate over all windows in the list
# for window in windows:
# #3) Extract the test window from original image
# test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
# #4) Extract features for that window using extract_features_singleImg()
# features = extract_features_singleImg(test_img, color_space=color_space, spatial_size=spatial_size, hist_bins=hist_bins, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, hog_channel=hog_channel, spatial_feat=spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
# #5) Scale extracted features to be fed to classifier
# test_features = scaler.transform(np.array(features).reshape(1, -1))
# #6) Predict using your classifier
# prediction = clf.predict(test_features)
# #7) If positive (prediction == 1) then save the window
# if prediction == 1:
# on_windows.append(window)
# #8) Return windows for positive detections
# return on_windows
# def unison_shuffled_copies(a, b):
# assert len(a) == len(b)
# p = np.random.permutation(len(a))
# return a[p], b[p]
# def trainClassifier2(cars_train, cars_test, notcars_train, notcars_test, color_space, spatial_size, hist_bins, orient, pix_per_cell, cell_per_block, hog_channel, spatial_feat, hist_feat, hog_feat):
# car_features_train = extract_features(cars_train, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
# car_features_test = extract_features(cars_test, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
# notcar_features_train = extract_features(notcars_train, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
# notcar_features_test = extract_features(notcars_test, color_space=color_space,
# spatial_size=spatial_size, hist_bins=hist_bins,
# orient=orient, pix_per_cell=pix_per_cell,
# cell_per_block=cell_per_block,
# hog_channel=hog_channel, spatial_feat=spatial_feat,
# hist_feat=hist_feat, hog_feat=hog_feat)
# X_train = np.vstack((car_features_train, notcar_features_train)).astype(np.float64)
# X_test = np.vstack((car_features_test, notcar_features_test)).astype(np.float64)
# # Fit a per-column scaler
# X_scaler = StandardScaler().fit(np.append(X_train,X_test, axis=0))
# # Apply the scaler to X
# scaled_X_train = X_scaler.transform(X_train)
# scaled_X_test = X_scaler.transform(X_test)
# # Define the labels vector
# y_train = np.hstack((np.ones(len(car_features_train)), np.zeros(len(notcar_features_train))))
# y_test = np.hstack((np.ones(len(car_features_test)), np.zeros(len(notcar_features_test))))
# # Shuffle training data:
# # scaled_X_train, y_train = unison_shuffled_copies(scaled_X_train, y_train)
# # Split up data into randomized training and test sets
# # rand_state = np.random.randint(0, 100)
# # X_train, X_test, y_train, y_test = train_test_split(
# # scaled_X, y, test_size=0.2, random_state=rand_state)
# print('Using:',orient,'orientations',pix_per_cell,
# 'pixels per cell and', cell_per_block,'cells per block')
# print('Feature vector length:', len(X_train[0]))
# # Use a linear SVC
# svc = LinearSVC()
# # svc = LinearSVC(penalty='l2', loss='hinge', C=0.08)
# # Check the training time for the SVC
# t=time.time()
# svc.fit(X_train, y_train)
# t2 = time.time()
# print(round(t2-t, 2), 'Seconds to train SVC...')
# # Check the score of the SVC
# print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# # Check the prediction time for a single sample
# t=time.time()
# return svc, X_scaler
| [
"patrick.cohen.at@gmail.com"
] | patrick.cohen.at@gmail.com |
06e9af435b48d5945c4ae92e1b4270ba096357cc | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /iBqJcagS56wmDpe4x_7.py | 3acaa1ddc25b89eab9db4328cabbfff41f70461d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | """
The volume of a spherical shell is the difference between the enclosed volume
of the outer sphere and the enclosed volume of the inner sphere:

Create a function that takes `r1` and `r2` as arguments and returns the volume
of a spherical shell rounded to the nearest thousandth.

### Examples
vol_shell(3, 3) ➞ 0
vol_shell(7, 2) ➞ 1403.245
vol_shell(3, 800) ➞ 2144660471.753
### Notes
The inputs are always positive numbers. `r1` could be the inner radius or the
outer radius, don't return a negative number.
"""
from math import pi
def vol_shell(r1, r2):
    """Return the volume of the spherical shell between radii r1 and r2.

    Either argument may be the inner radius; the result is rounded to the
    nearest thousandth and is never negative.
    """
    inner, outer = sorted((r1, r2))
    shell_volume = 4 / 3 * pi * (outer ** 3 - inner ** 3)
    return round(shell_volume, 3)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
088eedf92a656f4be82b91f4262a65b418fdb70b | 1d80afe967a6c44e84f9689b221f2cf52681b948 | /Star2/urls.py | 412486e15e57694d2ef7e24700a680ad0a237091 | [] | no_license | Jddanan/Intro-to-Django---Star-Ex2---Colors-New | ede3e4ac79c4b1fde2a97be19d5d0b6f6b5cfb53 | bd83802b18d615c3e95c499bccb56c14f1868aa3 | refs/heads/master | 2020-04-15T05:54:45.936305 | 2019-01-07T15:53:43 | 2019-01-07T15:53:43 | 164,441,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.conf.urls import url, include
from django.contrib import admin
# Project URL routes: delegate /colors/ to the colors app's URLconf and
# mount the built-in admin site at /admin/.
urlpatterns = [
    url(r'^colors/', include('colors.urls')),
    url(r'^admin/', admin.site.urls),
] | [
"jddanan@gmail.com"
] | jddanan@gmail.com |
7341475b0e14d6e3b4fbacb119a45a981acbfb0a | 80e5fad060bb66b1985660f83299e9039ae715ca | /aidaijia_coupon/admin.py | b761ee350eaceb1eeee25361062c4fb47840e1f0 | [
"MIT"
] | permissive | sebastianlan/wedfairy-api | 838ea192fc1711027820ace1a20298d28be27794 | 3df532f282568148003b394dc8ea5ed00aea1632 | refs/heads/master | 2020-05-30T15:55:45.916937 | 2015-10-22T06:31:20 | 2015-10-22T06:31:20 | 41,350,100 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.contrib import admin
import models
@admin.register(models.CouponCandidate)
class CouponCandidateAdmin(admin.ModelAdmin):
    """Admin list view for CouponCandidate: show id, url and used flag."""
    list_display = ['id', 'url', 'used']
@admin.register(models.Coupon)
class CouponAdmin(admin.ModelAdmin):
    """Admin list view for Coupon: show id, url and creation date."""
    list_display = ['id', 'url', 'created_date'] | [
"sebastianlan.original@gmail.com"
] | sebastianlan.original@gmail.com |
60e7e6a4ff1e0a5d8fbcd8c79df4b3bdbd4ec063 | 079cde666810d916c49a9ac49189a929ad19e72f | /qiche/qiche/pipelines.py | 053e07626aa94cf4206aea50d4b46eb566fc3674 | [] | no_license | jokerix/demo | 82d01582a9882ac361766516d07c9ad700053768 | 7c0d7666f82ec78e9562956bb0f4482af8531ebb | refs/heads/master | 2021-07-14T02:47:28.686827 | 2020-07-03T15:16:32 | 2020-07-03T15:16:32 | 179,784,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from urllib import request
from scrapy.pipelines.images import ImagesPipeline
from qiche import settings
class QichePipeline(object):
    """Scrapy item pipeline that downloads each item's image URLs into an
    ``images/<category>/`` directory next to the project package.
    """

    def __init__(self):
        # Root download directory: <project root>/images.
        self.path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images')
        # makedirs(exist_ok=True) replaces the previous exists()/mkdir()
        # pair: no TOCTOU race, and missing parent dirs are created too.
        os.makedirs(self.path, exist_ok=True)

    def process_item(self, item, spider):
        """Download every URL in item['urls'] into a per-category folder.

        The saved filename is the part of the URL after the last '_'
        (site-specific naming convention).  Returns the item unchanged so
        any later pipelines still receive it.
        """
        category = item['category']
        urls = item['urls']
        category_path = os.path.join(self.path, category)
        os.makedirs(category_path, exist_ok=True)
        for url in urls:
            image_name = url.split('_')[-1]
            request.urlretrieve(url, os.path.join(category_path, image_name))
        return item
class BMWImagespeline(ImagesPipeline):
    """ImagesPipeline subclass that stores downloaded images under
    IMAGES_STORE/<category>/ instead of the default full/ folder."""
    def get_media_requests(self, item, info):
        # Called before the download requests are sent;
        # this is what actually issues the requests.
        request_objs = super(BMWImagespeline, self).get_media_requests(item, info)
        for request_obj in request_objs:
            # Attach the item so file_path() can read its category later.
            request_obj.item = item
        return request_objs
    def file_path(self, request, response=None, info=None):
        # Called when the image is about to be stored;
        # returns the storage path for the image.
        path = super(BMWImagespeline, self).file_path(request, response, info)
        category = request.item.get('category')
        images_store = settings.IMAGES_STORE
        category_path = os.path.join(images_store, category)
        if not os.path.exists(category_path):
            os.makedirs(category_path)
        # Default paths look like "full/<hash>.jpg"; strip the prefix so the
        # file lands directly inside the category directory.
        image_name = path.replace('full/', '')
        images_path = os.path.join(category_path, image_name)
        return images_path
| [
"1215774897@qq.com"
] | 1215774897@qq.com |
b02e4a047d216973392b32334bb53900a55a4fb9 | 3634a283149740a566352a204add30d1c09f3deb | /InvestmentGame/storedata.py | 5305b1116f4df6c682ba619addcc33e3dad34264 | [] | no_license | NTimmerhuis/InvestmentGame | 84d495453bcdef346ae2646f39cddce29363af8e | e521771a3a242c97fb9446a97b0efd815b1a595e | refs/heads/master | 2020-07-31T10:27:47.860355 | 2019-09-26T14:38:44 | 2019-09-26T14:38:44 | 210,573,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | import pandas as pd
import numpy as np
df = pd.read_csv("data.csv")
# df.to_csv('hrdata_modified.csv') | [
"rutger.jansen@hotmail.com"
] | rutger.jansen@hotmail.com |
5f5c03bcd52eb2348ea2bfae56c4eb554064760a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_210/263.py | 07aad036673e87dff6e60957731771366d880485 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | import operator
# Google Code Jam solver (small input): reads test cases from
# 'B-small-attempt2.in' and writes "Case #i: answer" lines to 'output.out'.
# Each case has two counts (ac, aj) followed by that many [start, end]
# intervals.  720 below is the number of minutes in half a day (12 h).
fin = open('B-small-attempt2.in', 'r')
fout = open('output.out', 'w')
tcs = int(fin.readline())
for tc in range(0, tcs):
    # First line of a case: the two interval counts.
    inptemp = fin.readline().split(' ')
    ac = int(inptemp[0])
    aj = int(inptemp[1])
    acs = list()
    ajs = list()
    # Read the intervals for each of the two people.
    for i in range(0, ac):
        acinp = fin.readline().split(' ')
        acs.append([int(acinp[0]), int(acinp[1])])
    for i in range(0, aj):
        ajinp = fin.readline().split(' ')
        ajs.append([int(ajinp[0]), int(ajinp[1])])
    # Sort intervals by start time so acs[0]/acs[1] are in order.
    acs.sort(key=operator.itemgetter(0))
    ajs.sort(key=operator.itemgetter(0))
    result = -1
    # Only small-input cases (at most 2 intervals total) are handled below.
    if ac == 2 and aj == 0:
        time1 = acs[1][1] - acs[0][0]
        time2 = acs[1][0] - acs[0][1]
        print("time1, 2",time1, time2)
        # 2 exchanges if both intervals fit in one half-day span,
        # otherwise 4.
        if time1 <= 720 or time2 >= 720:
            result = 2
        else:
            result = 4
    if ac == 0 and aj == 2:
        time1 = ajs[1][1] - ajs[0][0]
        time2 = ajs[1][0] - ajs[0][1]
        print("time1, 2", time1, time2)
        if time1 <= 720 or time2 >= 720:
            result = 2
        else:
            result = 4
    # With at most one interval per person the answer is always 2.
    if ac == 1 and aj == 0:
        result = 2
    if ac == 0 and aj == 1:
        result = 2
    if ac == 1 and aj == 1:
        result = 2
    print("Case #%d: %d" %(tc+1, result))
    fout.write("Case #%d: %d\n" %(tc+1, result))
fin.close()
fout.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
344734125bb7c7899ca6cc6c2558fd173da78d68 | 279ed7207ac2c407487416b595e12f573049dd72 | /pybvk/bvk/bvkmodels/ni_676.py | 8e2c8f20a537ec5b2eaa574c6f66b29f2b1de7de | [] | no_license | danse-inelastic/pybvk | 30388455e211fec69130930f2925fe16abe455bd | 922c8c0a8c50a9fabd619fa06e005cacc2d13a15 | refs/heads/master | 2016-09-15T22:21:13.131688 | 2014-06-25T17:12:34 | 2014-06-25T17:12:34 | 34,995,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | # ni_676.py
# BvK force constants
# Born-von Karman lattice-dynamics parameters for nickel at 676 K.
element = "Ni"
lattice_type = "fcc"
temperature = 676 # Units: K
reference = "De Wit, G.A., Brockhouse, B.N.: J. Appl. Phys. 39 (1968) 451"
details = "All fits use the measured elastic constants. This fit uses general force up to fourth neighbour, axially symmetric force for fifth neighbour."
a = 3.52 # lattice parameters in angstroms
# Units: N m^-1
# Outer keys are neighbour-shell direction indices "lmn"; inner keys are
# the non-zero force-constant tensor components for that shell.
force_constants = { "110": { "xx": 16.250,
                             "zz": -0.970,
                             "xy": 19.390 },
                    "200": { "xx": 1.070,
                             "yy": 0.056 },
                    "211": { "xx": 0.963,
                             "yy": 0.449,
                             "yz": -0.391,
                             "xz": 0.458 },
                    "220": { "xx": 0.115,
                             "zz": -0.457,
                             "xy": 0.222 },
                    "310": { "xx": -0.256,
                             "yy": -0.063,
                             "zz": -0.040,
                             "xy": -0.072 } }
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
37999837ff6c3f69f482505a80570f97ef47569b | d43040239e2fc3733210519f0d639e6540d87b02 | /api.py | 29f87b86a01e64359f68c216046b39d92ccea917 | [] | no_license | austintip/flasql-1214 | 37314317d12621245ea99fa96fefd25c0e649372 | f7c0cb7dbd56f2c3df2e4589749bdfbaecd06842 | refs/heads/main | 2023-03-13T07:26:23.532643 | 2021-03-03T22:45:51 | 2021-03-03T22:45:51 | 344,265,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | from models import app, db, User
from flask import jsonify, request
@app.route("/")
def home():
    """Root endpoint: return a JSON welcome message."""
    return jsonify(message="Welcome to my api")
@app.route("/users", methods=["GET", "POST"])
def user_index_create():
    """List all users (GET) or create one from form data (POST).

    GET returns a JSON array of serialized users; POST creates a user from
    the `name`, `email` and `bio` form fields and echoes it back as JSON.
    """
    if request.method == "GET":
        users = User.query.all()
        # Serialize every user.  (The previous debug line
        # `print(type(users[0]))` raised IndexError whenever the table was
        # empty; an empty table now simply yields [].)
        results = [user.to_dict() for user in users]
        return jsonify(results)
    if request.method == "POST":
        new_user = User(name=request.form["name"], email=request.form["email"], bio=request.form["bio"])
        db.session.add(new_user)
        db.session.commit()
        return jsonify(new_user.to_dict())
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run() | [
"austin.d.tipograph@gmail.com"
] | austin.d.tipograph@gmail.com |
0c34f38a5975e04f51eafd628a2120d9d8be371c | f6ab9a25c3e21a940a7564655b35255965277586 | /Season-One-lesson-Two.py | 7e0e9c530855a90c3d78819521dee6f1e8d9f998 | [] | no_license | YousefRashidi/Season-One-lesson-Two | 659de7b3caa5289504c80c4e9aa409c28b84b296 | 60152e0018984783d5482a9147f5a85e8e47b7ed | refs/heads/master | 2020-06-09T03:13:56.278110 | 2019-06-28T12:20:33 | 2019-06-28T12:20:33 | 193,359,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,928 | py | # review
# Beginner Python lesson script (variables, strings, lists, tuples,
# conditions).  NOTE(review): two lines below raise TypeError on purpose
# (`var = text + a` and `tup3[-2] = 1000`); run as-is, execution stops at
# the first of them, so everything after only runs once they are removed.
a = 1
b = 1.2
z = 1 + 5j
text = "hello"
cond = True
lst = [1 ,True ,1.2 ,"hello" , 1000]
print(lst[3])
# a-z A-Z 0-9 _
#
# my_var = 10
# snake_case_naming
# camelCaseNaming
# PascalCaseNaming
# NOTE: raises TypeError — cannot add str and int (intentional demo).
var = text + a
# -------------------------------------------------------
# str_methods
text = "heLLo"
text_up = text.upper()
text_lo = text.lower()
print(text)
print(text.capitalize())
print(text_up)
print(text_lo)
print(text.center(100))
print(text.ljust(100))
print(text.rjust(100))
print(text.center(100, '-'))
print(text.ljust(100, '0'))
print(text.rjust(100, '*'))
poem = "nothing can ever happen twice"
res = poem.count("e")
# text.capitalize
# text.upper
# text.lower
# text.center
# text.ljust
# text.rjust
#-------------------------------------------------------------------------
# string_manipulation
poem = "nothing can ever happen twice"
word = "ever"
poem_2 = poem.replace("ever", "EVER")
print(poem_2)
# Same replacement done manually with index/slicing:
indx_start = poem.index(word)
indx_stop = indx_start + len(word)
new_poem = poem[:indx_start] + word.upper() + poem[indx_stop:]
print(new_poem)
#-------------------------------------------------------------------------
# test list
lst1 = [1, 2, 3, 4, 5]
lst2 = [6, 7]
lst3 = lst1 + lst2
print(lst3[-2])
print(lst3)
lst3[-2] = 1000 # mutable object
print(lst3)
# test_str
text = "hello world! whatever"
print(text[1])
length_of_text = len(text)
# print(length_of_text )
out = text[6] + text[7] + text[8] + text[9] + text[10]
print(out)
#-------------------------------------------------------------------------
# test_tuple
tup1 = (1, 2, 3, 4, 5)
tup2 = (6, 7)
tup3 = tup1 + tup2
print(tup3[-2])
print(tup3)
# NOTE: raises TypeError — tuples do not support item assignment.
tup3[-2] = 1000 # immutable object
print(tup3)
#-------------------------------------------------------------------------
# tuple_methods
tup = (1, 2, 3, 4, 5, 1, 1, 1)
repeats = tup.count(1)
indx = tup.index(3)
print(indx)
print(repeats)
#-------------------------------------------------------------------------
# tuple_slice_and_concate
tup = (0, 1, 2 ,3, 4 ,5, 6, 7, 8, 9, 10,)
tup_new = tup[:6] + (1000,) + tup[7:]
print(tup_new)
#-------------------------------------------------------------------------
# conditions
num1 = 100
num2 = 200
n = 200
cond = (n > num1) and (n < num2)
print(cond)
# Equivalent check via De Morgan's law:
cond = not ((n <= num1) or (n >= num2))
print(cond)
text1 = "HELLO"
text2 = "hello"
text1.isupper() and text2.islower()
#-------------------------------------------------------------------------
# convert
tup = (1, 2, 3, 4)
lst = list(tup)
lst[1] = 1000
print(lst)
tup_new = tuple(lst)
print(tup_new)
#-------------------------------------------------------------------------
# list_methods.
lst = [1, "whatever", 2, 3, 4, 5]
lst[1] = "hello"
#-------------------------------------------------------------------------
# mutable_objects_assignment
# Both names refer to the SAME list object, so mutating via list_b is
# visible through list_a.
list_a = [1, 2, 3, 4]
list_b = list_a
list_b[1] = 1000
print(list_a) # [1, 1000, 3, 4]
| [
"yusef.r.r@gmail.com"
] | yusef.r.r@gmail.com |
1edf039f04ef58d6ac247f9e811950868dfc6c32 | f4e97a269a35acc73d01ebede7ab0d17d306cf2d | /images.py | a6e444df7f10779a5f94b2b402d65df9362b6ec7 | [] | no_license | rameshrawalr2/EmotionRecognition | 2ce8eea173dff0f1561bdcb88535bcee7e5ba490 | 9e9a503caefb56a0732f4774ecc506ed1fc7d94e | refs/heads/master | 2020-03-15T05:58:42.355454 | 2018-05-19T05:01:04 | 2018-05-19T05:01:04 | 131,997,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 22:40:25 2018
@author: ramesh
"""
import numpy as np
import csv
#import tensorflow as tf
#import cv2
from PIL import Image
# Load the source image and show it in the default viewer.
img=Image.open("result3.png")
img.show()
width, height=img.size
# NOTE(review): `format` shadows the builtin of the same name.
format=img.format
mode=img.mode
# Convert to 8-bit greyscale ('L' mode), save and display it.
img_grey=img.convert('L')
print(width," ", height)
img_grey.save('result.jpg')
img_grey.show()
imgarray=np.array(img_grey, dtype=int)
# NOTE(review): list(list(x)) only converts the outer dimension — the
# result is a list of 1-D numpy row arrays, not nested Python lists.
imgarray=list(list(imgarray))
print (imgarray)
# Append one CSV row whose cells are the row arrays' string reprs —
# presumably a flat pixel row was intended; TODO confirm expected format.
with open("img_pixels.csv", 'a') as f:
    writer = csv.writer(f)
    writer.writerow(imgarray)
| [
"noreply@github.com"
] | noreply@github.com |
ddfdd2f0efe461b056235acb80be18b8c1228721 | 34165333483426832b19830b927a955649199003 | /publish/library_app/reports/library_book_report.py | f1b54fc8a5cc254109a6f75a8fa9f9b3ecad1cee | [] | no_license | terroristhouse/Odoo12 | 2d4315293ac8ca489d9238f464d64bde4968a0a9 | d266943d7affdff110479656543521889b4855c9 | refs/heads/master | 2022-12-02T01:09:20.160285 | 2020-08-16T13:56:07 | 2020-08-16T13:56:07 | 283,630,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | from odoo import fields,models
class BookReport(models.Model):
    """Read-only reporting model backed by a SQL view over active books."""
    _name = 'library.book.report'
    _description = 'Book Report'
    # _auto = False: Odoo must not create a table; init() builds a view.
    _auto = False
    name = fields.Char('Title')
    publisher_id = fields.Many2one('res.partner')
    date_published = fields.Date()
    def init(self):
        # (Re)create the backing SQL view on module install/update;
        # only active books are exposed.
        self.env.cr.execute("""
            CREATE OR REPLACE VIEW library_book_report AS
            (SELECT * FROM library_book WHERE active=True)
            """)
| [
"867940410@qq.com"
] | 867940410@qq.com |
7e3a1740255a8eafecb0e0d349e31173cc621957 | 29ebdf0a6b73a33933ee62cb727c1828c7004bbe | /Ecom/urls.py | ea8b3252d0761eba5a2247a098a5678c600c1f13 | [] | no_license | Fahad0907/Ecommerce_with_drf | 68d8099e849338ef24e86e89820c7168486730a7 | f5f48c8a829f789851ad61c414814e9298b6e42a | refs/heads/master | 2023-08-03T23:28:21.624113 | 2021-09-12T15:08:28 | 2021-09-12T15:08:28 | 398,205,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | """Ecom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from Order.models import Order
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from Product.views import CategoryList, ShowProduct, ShowProductDetails
from Order.views import ShowCart,IncrementQuantity, DecrementQuantity, ApplyCoupon, CheckOut
from Account.views import Registration, OrderList, UserInformation
from rest_framework.authtoken.views import obtain_auth_token
from Adminsite.views import Addproduct, UpdateProduct
# API routes: product browsing, token login, cart/checkout operations,
# account endpoints, and admin-site product management views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',CategoryList.as_view()),
    path('showproduct/<int:id>/', ShowProduct.as_view()),
    path('productDetails/<int:id>/', ShowProductDetails.as_view()),
    path('login/', obtain_auth_token),
    path('cart/', ShowCart.as_view()),
    path('plus/', IncrementQuantity.as_view()),
    path('minus/',DecrementQuantity.as_view()),
    path('coupon/',ApplyCoupon.as_view()),
    path('checkout/',CheckOut.as_view()),
    path('registration/', Registration.as_view()),
    path('orderlist/', OrderList.as_view()),
    path('userinfo/', UserInformation.as_view()),
    path('addproduct/',Addproduct.as_view()),
    path('updateproduct/',UpdateProduct.as_view()),
    path('updateproduct/<int:id>/',UpdateProduct.as_view()),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
"fahad.aust09@gmail.com"
] | fahad.aust09@gmail.com |
a46bb397830f9b590558e4c74bce17ec6435f80a | ba781c7044289ad7b4df8baa4c910cdaf99325df | /srtMacro.py | 312ff3209d21cd72c533e1248a57f2785587f9f4 | [] | no_license | morningM00N/bg | 8751f10012698abc71cd1426dda48b060517b332 | 7b3fe39e787f8a1ae363289718578db71f7bc7e1 | refs/heads/master | 2023-08-17T21:35:39.101390 | 2023-08-03T12:04:57 | 2023-08-03T12:04:57 | 237,815,283 | 0 | 4 | null | 2021-09-30T12:43:56 | 2020-02-02T18:21:01 | JavaScript | UTF-8 | Python | false | false | 7,475 | py | import datetime
from time import sleep
from pytest import ExitCode
from selenium import webdriver
#from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.common.alert import Alert
import chromedriver_autoinstaller
import telegram
import sys
#import chromedriver_autoinstaller
path = chromedriver_autoinstaller.install(cwd=True)
from tkinter.simpledialog import *
# Global configuration for the SRT (Korean high-speed rail) booking macro.
# debugMode=True skips the tkinter prompts and uses the hard-coded trip.
debugMode = False
# Valid SRT station names (Korean) accepted as origin/destination.
cities = ['수서','동탄','평택지제','천안아산','오송','대전','김천','동대구','서대구','신경주','울산(통도사)','부산','익산','정읍','광주송정','나주','목포']
# Korean weekday abbreviations, Monday first (matches datetime.weekday()).
weekdayArr = ['월','화','수','목','금','토','일']
browser = None
targetDate = None
srcLoc = None
descLoc = None
searchTime = None
# Collect credentials and trip details (via tkinter dialogs unless in
# debug mode), then attempt to log in to the SRT site.  The loop repeats
# until login succeeds: a JS alert appears only on FAILED login, so the
# `except` branch (no alert found) means success and breaks out.
while True:
    if debugMode == True:
        phoneNumber = '010-8998-9272'
        password = ""
        srcLoc = '대전'
        descLoc = '동탄'
        targetDate = '2022-06-13'
        searchTime = 8
    else:
        # Phone number: re-prompt until it matches 010-XXXX-XXXX.
        phoneNumber = None
        while True:
            phoneNumber = askstring('ID','전화번호를 입력하세요. (010-1345-6789)')
            if phoneNumber == None:
                sys.exit()
            if len(phoneNumber)!=13:
                continue
            if phoneNumber[0:3] != '010':
                continue
            if phoneNumber[3]!='-' or phoneNumber[8] != '-':
                continue
            if phoneNumber[4:8].isdecimal() == False:
                continue
            if phoneNumber[9:].isdecimal() == False:
                continue
            break
        password = askstring('Password','비밀번호를 입력하세요.')
        if password == None:
            sys.exit()
        # Travel date: re-prompt until it matches YYYY-MM-DD, year >= 2022.
        if targetDate == None:
            while True:
                targetDate = askstring('날짜','예약할 날짜를 입력하세요. (2022-05-07)')
                if targetDate == None:
                    sys.exit()
                if len(targetDate)!=10:
                    continue
                # NOTE(review): `.isdecimal == False` compares the bound
                # method, not its result — this check never rejects input.
                if targetDate[0:4].isdecimal == False:
                    continue
                if int(targetDate[0:4]) < 2022:
                    continue
                if targetDate[4]!='-' or targetDate[7] != '-':
                    continue
                if targetDate[5:7].isdecimal() == False:
                    continue
                if int(targetDate[5:7])>12 or int(targetDate[5:7]) < 1:
                    continue
                if targetDate[8:].isdecimal() == False:
                    continue
                if int(targetDate[8:])>31 or int(targetDate[8:]) < 1:
                    continue
                break
        # Earliest departure hour (0-24) to search from.
        if searchTime == None:
            while True:
                searchTime = askinteger('시간','검색할 시간을 입력하세요.')
                if searchTime == None:
                    sys.exit()
                if searchTime > 24 or searchTime <0:
                    continue
                break
        # Origin station: must be a known SRT station name.
        if srcLoc == None:
            while True:
                srcLoc= askstring('출발지','출발 장소를 입력하세요.')
                if srcLoc == None:
                    sys.exit()
                if srcLoc not in cities:
                    continue
                break
        # Destination station: known station and different from the origin.
        if descLoc == None:
            while True:
                descLoc= askstring('도착지','도착 장소를 입력하세요.')
                if descLoc == None:
                    sys.exit()
                if descLoc not in cities:
                    continue
                if srcLoc == descLoc :
                    continue
                break
    # XPath matching the date <option> label, e.g. '2022/05/07(토)'.
    dateXpath = "//option[. = '" + targetDate.replace('-','/') + "(" + \
                weekdayArr[datetime.datetime.fromisoformat(targetDate).weekday()] +\
                ")']"
    #chromeOptions.add_argument("headless")
    # Start Chrome once and reuse it on subsequent login attempts.
    if browser == None:
        chromeOptions = webdriver.ChromeOptions()
        browser = webdriver.Chrome( executable_path=path,
                                options=chromeOptions)
        wait = WebDriverWait(browser, 10)
    #browser = webdriver.Chrome(ChromeDriverManager().install())
    ### log in routine start
    browser.get("https://etk.srail.kr/cmc/01/selectLoginForm.do?pageId=TK0701000000")
    wait.until(EC.element_to_be_clickable((By.ID, 'srchDvCd3')))
    # Select "login by phone number", then fill the form fields via JS.
    radioBoxPhone = browser.find_element_by_id('srchDvCd3')
    radioBoxPhone.click()
    strCmd = "document.getElementById('srchDvNm03').value='"+phoneNumber+"'"
    browser.execute_script(strCmd)
    strCmd = "document.getElementById('hmpgPwdCphd03').value='"+password+"'"
    browser.execute_script(strCmd)
    ### log in routine end
    try :
        browser.execute_script("document.getElementsByClassName('submit')[2].click()")
        # log in failed
        result = browser.switch_to.alert
        result.accept()
    except:
        # log in success
        break
# Booking phase: repeatedly search the schedule page until the first-row
# seat cell is no longer sold out ('매진'), then click through to reserve
# and notify via Telegram.
debugCount = 0
targetTd = None
while True:
    try:
        while True:
            try:
                debugCount+=1
                browser.get("https://etk.srail.kr/hpg/hra/01/selectScheduleList.do?pageId=TK0101010000")
                wait.until(EC.presence_of_element_located((By.ID,"dptRsStnCdNm")))
                # Fill departure/arrival station fields.
                browser.find_element(By.ID, "dptRsStnCdNm").clear()
                browser.find_element(By.ID, "dptRsStnCdNm").send_keys(srcLoc)
                browser.find_element(By.ID, "arvRsStnCdNm").clear()
                browser.find_element(By.ID, "arvRsStnCdNm").send_keys(descLoc)
                # Pick the travel date from the <select> via the XPath
                # built during login.
                dropdown = browser.find_element(By.ID, "dptDt")
                dropdown.find_element(By.XPATH, dateXpath).click()
                # if debugCount == 3:
                #     searchTime = 13
                # Set the departure-time <select> to HH0000 (zero-padded
                # below 10) via JS, then submit the search.
                cmd = "document.getElementById('dptTm').selectedIndex=0;document.getElementById('dptTm').children[0].value='"+str(searchTime)+"0000'"
                if searchTime < 10:
                    cmd = "document.getElementById('dptTm').selectedIndex=0;document.getElementById('dptTm').children[0].value='0"+str(searchTime)+"0000'"
                browser.execute_script(cmd)
                browser.find_element(By.CSS_SELECTOR, ".btn_large").click()
                wait.until(EC.presence_of_element_located((By.XPATH, "//tr[1]/td[7]")))
                # 7th cell of the first result row: seat availability;
                # '매진' means sold out, so keep polling until it changes.
                targetTd = browser.find_element(By.XPATH, "//tr[1]/td[7]")
                if targetTd.text != '매진':
                    break
            except:
                print('redo')
        # A seat appeared: click the reserve link, then confirm.
        wait.until(EC.element_to_be_clickable((By.XPATH, "//tr[1]/td[7]/a")))
        browser.find_element(By.XPATH, "//tr[1]/td[7]/a").click()
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn_blue_dark > span')))
        browser.find_element(By.CSS_SELECTOR, '.btn_blue_dark > span').click()
        break
    except:
        print("redo out")
# Notify via Telegram that payment ('결제') is now needed.
# NOTE(review): hard-coded bot token and chat id — a leaked credential;
# should be moved to configuration/environment.
chat_token = "942328115:AAFDAj7ghqSH2izU12fkYHtV7PMDhxrGnhc"
chat = telegram.Bot(token = chat_token)
chat_id = 763073279
chat.sendMessage(chat_id = chat_id, text="결제")
| [
"abcdeei888@gmail.com"
] | abcdeei888@gmail.com |
8ff8c60155eca0198afd7158b8f4dbb5f00a51d5 | 163cb8cae7d364a090565710ee9f347e5cdbf38f | /new_deeplab/utils/get_dataset_colormap_test.py | 90005ebbf542c89e44a7dd4783811474cc59853d | [
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | abhineet123/river_ice_segmentation | 2b671f7950aac6ab2b1185e3288490bc5e079bc1 | df694107be5ad6509206e409f5cde4428a715654 | refs/heads/master | 2023-05-01T11:52:10.897922 | 2023-04-25T22:55:04 | 2023-04-25T22:55:04 | 179,993,952 | 15 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for get_dataset_colormap.py."""
import numpy as np
import tensorflow as tf
from new_deeplab.utils import get_dataset_colormap
class VisualizationUtilTest(tf.test.TestCase):
  def testBitGet(self):
    """Test that if the returned bit value is correct."""
    # 9 == 0b1001: bits 0 and 3 are set, bits 1 and 2 are clear.
    self.assertEqual(1, get_dataset_colormap.bit_get(9, 0))
    self.assertEqual(0, get_dataset_colormap.bit_get(9, 1))
    self.assertEqual(0, get_dataset_colormap.bit_get(9, 2))
    self.assertEqual(1, get_dataset_colormap.bit_get(9, 3))
def testPASCALLabelColorMapValue(self):
"""Test the getd color map value."""
colormap = get_dataset_colormap.create_pascal_label_colormap()
# Only test a few sampled entries in the color map.
self.assertTrue(np.array_equal([128., 0., 128.], colormap[5, :]))
self.assertTrue(np.array_equal([128., 192., 128.], colormap[23, :]))
self.assertTrue(np.array_equal([128., 0., 192.], colormap[37, :]))
self.assertTrue(np.array_equal([224., 192., 192.], colormap[127, :]))
self.assertTrue(np.array_equal([192., 160., 192.], colormap[175, :]))
def testLabelToPASCALColorImage(self):
"""Test the value of the converted label value."""
label = np.array([[0, 16, 16], [52, 7, 52]])
expected_result = np.array([
[[0, 0, 0], [0, 64, 0], [0, 64, 0]],
[[0, 64, 192], [128, 128, 128], [0, 64, 192]]
])
colored_label = get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
self.assertTrue(np.array_equal(expected_result, colored_label))
def testUnExpectedLabelValueForLabelToPASCALColorImage(self):
"""Raise ValueError when input value exceeds range."""
label = np.array([[120], [300]])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
def testUnExpectedLabelDimensionForLabelToPASCALColorImage(self):
"""Raise ValueError if input dimension is not correct."""
label = np.array([120])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
def testGetColormapForUnsupportedDataset(self):
with self.assertRaises(ValueError):
get_dataset_colormap.create_label_colormap('unsupported_dataset')
def testUnExpectedLabelDimensionForLabelToADE20KColorImage(self):
label = np.array([250])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_ade20k_name())
def testFirstColorInADE20KColorMap(self):
label = np.array([[1, 3], [10, 20]])
expected_result = np.array([
[[120, 120, 120], [6, 230, 230]],
[[4, 250, 7], [204, 70, 3]]
])
colored_label = get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_ade20k_name())
self.assertTrue(np.array_equal(colored_label, expected_result))
def testMapillaryVistasColorMapValue(self):
colormap = get_dataset_colormap.create_mapillary_vistas_label_colormap()
self.assertTrue(np.array_equal([190, 153, 153], colormap[3, :]))
self.assertTrue(np.array_equal([102, 102, 156], colormap[6, :]))
# Run the test suite when executed directly.
if __name__ == '__main__':
    tf.test.main()
| [
"asingh1@ualberta.ca"
] | asingh1@ualberta.ca |
833948d32526164c6d4daf30a7f26eb56483ba44 | 0a9d3510cb62b16be2eef756bfd4bd3dcd9ee867 | /Final submission/fill-in-the-blanks 2.py | bfebc9d706d8c56948ff0f921c3c02f3a747b3ce | [] | no_license | nyi11a/Reverse-Madlibs | 1a61fa84fea482ece1d7284fef33fe84e00a76e6 | 53392e9c04fa9ffe87ef7a68777433eb5e1f3bf5 | refs/heads/master | 2021-01-10T05:47:24.088016 | 2016-01-28T08:45:48 | 2016-01-28T08:45:48 | 50,571,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,971 | py | import textwrap
# Placeholder markers that appear in each quiz text; replaced by answers as
# the player gets them right.
nums_to_fill= ["-1-", "-2-", "-3-", "-4-"] #the blanks in each text
# Available difficulty levels (order here is not significant).
levels= ["easy", "hard", "medium"]
# Active answer key; repopulated by pick_answer_key() for the chosen level.
answers= []
# Per-level answer keys: answers1 = easy, answers2 = medium, answers3 = hard.
answers2= ["string", "indexing", "subsequence", "method"]
answers3= ["procedures", "calling", "defining", "parameters"]
answers1= ["numbers", "assignment", "variables", "string"]
# Quiz texts. NOTE: wording (including typos such as "Variabls") is quiz
# content shown to the player and is preserved verbatim.
text1= """ Variables are names that stand in for -1- . -2- statements are how we
introduce variables and they tend to look something like this: Name = Expression, where the
name refers to the value that the expression has. -3- can vary, they don't always stay
constant. Variabls can also be letters in which case they form a -4- , a sequence of
characters surrounded by single or double quotes."""
text2= """ Each character in a -1- is numbered starting from zero, with the number
increasing all the way to the end of the string. For example, in 'udacity' [0], the
number zero corresponds to the letter "u." To find a particular character or set of
characters in a string you need to become familiar with string -2-. Indexing looks
like this: <string> [< start- expression (number)>:< stop- expression(number>]. The
result will be a string that is a -3- of all the characters in the string,
starting from position start and ending with position stop but not including that
character (so up through stop minus 1). The find -4- is a more straightforward way
to to find a string within a string."""
text3="""Functions are also called -1- or methods. They take input, do something
with it and return an output. We use functions by passing in values for the parameters
of the inputs in the parenthesis. This process is what is meant by -2- the
function: Calling a function is the act of executing it. But before a function can be
called, it must be declared. -3- a function is the process of declaring the
function that you will call and the arguments/operants that you will pass into it.
On the first line of your function definition, you must begin with "def". After "def"
you must assign a function name. Next, you must have a set of parentheses
with the required -4- inside.The line must end with a colon. """
def process_madlib1(mad_lib):
    """Quiz the player on the four blanks of ``mad_lib``, filling them in.

    For each blank, prompts (via raw_input) until the player types the exact
    answer from the module-global ``answers`` key, then substitutes it into
    the text and prints the progressively-filled text.

    Bug fix: the original replaced blanks in a throwaway copy of ``mad_lib``
    each round, so every printed text showed only the *current* blank filled.
    The replacement is now accumulated so the final print shows all answers.
    """
    for i in range(4):
        user_input = raw_input("Give the answer for number" + " " + str(i + 1) + " ")
        while user_input != answers[i]:
            print("Sorry, wrong answer. Try again")
            user_input = raw_input("Give the answer for number" + " " + str(i + 1) + " ")
        # Carry the substitution forward so earlier answers stay filled in.
        mad_lib = mad_lib.replace(nums_to_fill[i], answers[i])
        print(textwrap.fill(mad_lib, 75))
    print("You are done with this level. Nice Job!")
def pick_answer_key(num):
    """Select the answer key for the chosen level and store it globally.

    Args:
        num: 1 for easy, 2 for medium, anything else for hard.

    Returns:
        The selected answer-key list (also bound to the module-global
        ``answers``). Returning it is new but backward-compatible: the
        original returned None and callers ignore the return value.

    The original redundantly re-created answers1/2/3 as locals on every call;
    the keys are now defined inline in a single if/elif chain.
    """
    global answers
    if num == 1:
        answers = ["numbers", "assignment", "variables", "string"]
    elif num == 2:
        answers = ["string", "indexing", "subsequence", "method"]
    else:
        answers = ["procedures", "calling", "defining", "parameters"]
    return answers
def pick_level():
    """Prompt for a difficulty level and run the matching madlib quiz.

    Bug fix: the original re-prompted only once on invalid input; a second
    invalid entry fell through and the function silently returned None.
    The prompt now loops until the player types easy, medium or hard.
    """
    level_selection = raw_input("Type Your level: Easy, Medium or Hard" + " ")
    while level_selection not in ["easy", "medium", "hard"]:
        level_selection = raw_input("Please Type Your Level: Easy, Medium or Hard" + " ")
    if level_selection == "easy":
        pick_answer_key(1)
        print(textwrap.fill(text1, 75))
        return process_madlib1(text1)
    elif level_selection == "medium":
        # Changing the answer key to reflect the level of difficulty.
        pick_answer_key(2)
        print(textwrap.fill(text2, 75))
        return process_madlib1(text2)
    else:
        pick_answer_key(3)
        print(textwrap.fill(text3, 75))
        return process_madlib1(text3)
# Entry point: start the game immediately when the script is run.
pick_level()
"ms.yilla@gmail.com"
] | ms.yilla@gmail.com |
fc8cf13f7df2944b5e908ef9041ecdfea4749e91 | c4be966cc6d4aeaa4c950bcbc5a3cfc5540f533d | /p12p3.py | 90741afc9e5811ecd1881e88f0acf8765da313f3 | [] | no_license | ojhermann-ucd/comp10280 | cabc865361928565c9618250bcb2c20a4b17411e | eae44e373b4c19550860381d90c4a96f8190eb7d | refs/heads/master | 2020-05-20T18:48:06.064953 | 2016-11-25T12:26:59 | 2016-11-25T12:26:59 | 68,710,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | """
Write a function that takes as its two arguments a number and a tolerance and,
using the technique exposed in lectures, returns an approximation of the square root of the number that is within the tolerance.
Write a program that prompts the user for a floating-point number and checks that the number entered is non-negative.
If it is, it calls the function defined in part (a) with the number and a tolerance defined in the program and prints out the square root of the number;
if not, it prints out an appropriate error message.
Pseudocode
def sroot(n, e):
function will, using user input number n and margin of error e check that difference of n and an assumed root (starting at zero) are within e
if not within e, increment root by e^2
continue this process until either an acceptable approximation is found or root^2 is larger than n
print according message
Error Checking Notes
num: garbage, negative, zero, OK
epsilon: garbage, negative, zero, OK
(n, e) to check:
.- g,g
.- g,n
.- g,z
.- g,o
.- n,g
.- n,n
.- n,z
.- n,o
.- z,g
.- z,n
.- z,z
.- z,o
.- o,g
.- o,n
.- o,z
.- o,o
"""
import sys
def sroot(num, epsilon):
    """Exhaustively search for an approximate square root of ``num``.

    Advances a candidate root from 0 in steps of ``epsilon ** 2`` until the
    candidate squared is within ``epsilon`` of ``num``, or overshoots ``num``.
    Progress and the outcome are printed; nothing is returned.
    """
    increment = epsilon ** 2
    guess = 0
    attempts = 0
    while epsilon <= abs(num - guess ** 2) and guess ** 2 <= num:
        guess += increment
        attempts += 1
        # Periodic heartbeat so long searches look alive.
        if attempts % 100000 == 0:
            print("Still running. Number of guesses: " + str(attempts))
    print("Number of guesses: " + str(attempts))
    if abs(num - guess ** 2) < epsilon:
        print("The approximate square root of " + str(num) + " is " + str(guess))
    else:
        print("Failed to find a square root of " + str(num))
    print("Finished!")
# Read the number whose root we want; reject garbage and negatives by
# printing a message and exiting (the loop only repeats on success-break).
while True:
    try:
        num = float(input("Enter a floating point value that you would like to know the square root of: "))
        if num < 0:
            print("Restart the program and enter a positive floating point value if you wish to continue.")
            sys.exit()
        break
    except ValueError:
        # Non-numeric input: float() raised, so bail out.
        print("Restart the program and enter a floating point value if you wish to continue.")
        sys.exit()
# Read the tolerance; additionally reject zero, since the search can never
# terminate with a zero margin of error on floats.
while True:
    try:
        tol = float(input("Enter a floating point value that you would like to use as your tolerance margin: "))
        if tol < 0:
            print("Restart the program and enter a positive floating point value if you wish to continue.")
            sys.exit()
        elif tol == 0:
            print("We're using floating point numbers, so you cannot have 0 margin of error. Please start over again.")
            sys.exit()
        break
    except ValueError:
        print("Restart the program and enter a floating point value if you wish to continue.")
        sys.exit()
# Both inputs validated: run the search.
sroot(num, tol)
"noreply@github.com"
] | noreply@github.com |
f0156b76cf3099ac72b52fb98f007b69dbf16a39 | 798ad9e31ce14218a2f8fe9c4b7562f9d85da38d | /p071080702.py | f7d39238296152af92f9aab6776c320dbdeb114c | [] | no_license | Cynventria/1076LAB | e5cb31a56aede69504da9751485fa61a83894736 | ba910e8d4c80e7d82ed5298d3f389257ef6de6b1 | refs/heads/master | 2023-02-14T01:21:59.777276 | 2021-01-06T12:04:36 | 2021-01-06T12:04:36 | 298,172,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,256 | py | import tkinter
top = tkinter.Tk()
f1 = tkinter.Frame(top)
f2 = tkinter.Frame(top)
isEmpty = True
isOperator = False
isError = False
operates = 0
var = tkinter.StringVar()
var.set("0")
def SetValue():  # refresh the display Label bound to ``var``
    """Create a Label showing ``var`` and place it on the display frame f1.

    Bug fix: the original wrote ``Screen = tkinter.Label(...).grid(...)`` —
    ``grid()`` returns None, so ``Screen`` was always None. Creation and
    placement are now separate statements.

    NOTE(review): a new Label is created on every call; since it is gridded
    into the same cell this appears cosmetically fine, but old widgets are
    never destroyed — confirm this is acceptable for the app's lifetime.
    """
    screen = tkinter.Label(f1, textvariable=var)
    screen.grid(row=0, column=1)
#請完成function內的code
global isEmpty, isOperator, operates
#var.set(var.get() + " " + x)
if(x.isdigit()):
if(isEmpty):
var.set(x)
elif(isOperator):
var.set(var.get() + " " + x)
else:
var.set(var.get() + x)
isEmpty = False
isOperator = False
else:
if(isOperator):
var.set(var.get()[:-1] + x)
return
elif(isEmpty and x == "-"):
var.set(var.get()[:-1] + x)
isEmpty = False
return
isOperator = True
isEmpty = False
operates += 1
var.set(var.get() + " " + x)
def Clear():
    """Reset the display and all entry state back to the initial "0".

    Bug fix: the original reset only ``isEmpty``, leaking ``isOperator`` and
    ``operates`` from the cleared expression. A stale ``operates`` count made
    the next Calculate() spin forever in its ``while(operates > 0)`` loop
    once the fresh token list had fewer operators than the stale count.
    """
    global isEmpty, isOperator, isError, operates
    isEmpty = True
    isOperator = False
    isError = False
    operates = 0
    var.set("0")
def Calculate():
    """Evaluate the space-separated expression in ``var`` and show the result.

    Repeatedly scans the token list, reducing one operator per scan: 'x' and
    '/' are reducible while Stage == 0; once a scan touches the last token,
    Stage flips to 1 and '+'/'-' become reducible too. ``operates`` (counted
    by Click) drives the outer loop.

    NOTE(review): Stage is never reset between scans, so after the first full
    scan '+'/'-' may reduce before a later 'x'/'/' — confirm this precedence
    quirk is intended before changing it.
    """
    global isError, isEmpty, isOperator, operates
    print("cal")
    Stage = 0
    x = var.get().split()
    var.set("CALCULATING")
    print(x)
    while(operates > 0):
        for v, op in enumerate(x):
            print(v, op, len(x))
            if(v == len(x)-1):
                # End of a scan reached: allow +/- from here on.
                Stage = 1
                print("ns")
            if(Stage == 0 and (op == "x" or op == "/")):
                if(op == "x"):
                    x[v-1] = str(float(x[v-1]) * float(x[v+1]))
                    print(x[v - 1])
                    del x[v]
                    del x[v]
                    operates -= 1
                    break
                elif(op == "/"):
                    # Bug fix: intermediate results are float strings such as
                    # "6.0", and int("6.0") raises ValueError — the divide-by-
                    # zero guard must parse the divisor as a float.
                    if(float(x[v+1]) == 0):
                        var.set("ERROR")
                        isEmpty = True
                        isError = True
                        return
                    x[v-1] = str(float(x[v-1]) / float(x[v+1]))
                    print(x[v - 1])
                    del x[v]
                    del x[v]
                    operates -= 1
                    break
            elif(Stage == 1 and(op == "+" or op == "-")):
                if(op == "+"):
                    x[v-1] = str(float(x[v-1]) + float(x[v+1]))
                    #print(x[v-1])
                    del x[v]
                    del x[v]
                    operates -= 1
                    break
                elif(op == "-"):
                    x[v-1] = str(float(x[v-1]) - float(x[v+1]))
                    print(x[v-1])
                    del x[v]
                    del x[v]
                    operates -= 1
                    break
    print(x[0])
    print("fin")
    var.set(x[0])
    isOperator = False
    SetValue()
# Button layout: set each button's row and column on frame f2.
# (was: "fill in the ??? and complete rows one, two and three" -- translated)
# NOTE(review): every ``btn* = tkinter.Button(...).grid(...)`` binds None,
# since grid() returns None; the names are never used afterwards, so this is
# harmless, but keep it in mind before reusing them.
btn7 = tkinter.Button(f2,text = "7",borderwidth = 5,width = 5,height = 5, command = lambda : Click("7")).grid(row = 1,column = 0)
btn8 = tkinter.Button(f2,text = "8",borderwidth = 5,width = 5,height = 5, command = lambda : Click("8")).grid(row = 1,column= 1)
btn9 = tkinter.Button(f2,text = "9",borderwidth = 5,width = 5,height = 5, command = lambda : Click("9")).grid(row = 1,column= 2)
btnPlus = tkinter.Button(f2,text = "+",borderwidth = 5,width = 5,height = 5, command = lambda : Click("+")).grid(row = 1,column= 3)
btn4 = tkinter.Button(f2,text = "4",borderwidth = 5,width = 5,height = 5, command = lambda : Click("4")).grid(row = 2,column= 0)
btn5 = tkinter.Button(f2,text = "5",borderwidth = 5,width = 5,height = 5, command = lambda : Click("5")).grid(row = 2,column= 1)
btn6 = tkinter.Button(f2,text = "6",borderwidth = 5,width = 5,height = 5, command = lambda : Click("6")).grid(row = 2,column= 2)
btnMinus = tkinter.Button(f2,text = "-",borderwidth = 5,width = 5,height = 5, command = lambda : Click("-")).grid(row = 2,column= 3)
btn1 = tkinter.Button(f2,text = "1",borderwidth = 5,width = 5,height = 5, command = lambda : Click("1")).grid(row = 3,column= 0)
btn2 = tkinter.Button(f2,text = "2",borderwidth = 5,width = 5,height = 5, command = lambda : Click("2")).grid(row = 3,column= 1)
btn3 = tkinter.Button(f2,text = "3",borderwidth = 5,width = 5,height = 5, command = lambda : Click("3")).grid(row = 3,column= 2)
btnX = tkinter.Button(f2,text = "x",borderwidth = 5,width = 5,height = 5, command = lambda : Click("x")).grid(row = 3,column= 3)
btn0 = tkinter.Button(f2,text = "0",borderwidth = 5,width = 5,height = 5, command = lambda : Click("0")).grid(row = 4,column= 0)
btnClear = tkinter.Button(f2,text = "C",borderwidth = 5,width = 5,height = 5, command = Clear).grid(row = 4,column= 1)
btnEqual = tkinter.Button(f2,text = "=",borderwidth = 5,width = 5,height = 5, command = lambda : Calculate()).grid(row = 4,column= 2)
btnDiv = tkinter.Button(f2,text = "/",borderwidth = 5,width = 5,height = 5, command = lambda : Click("/")).grid(row = 4,column= 3)
f1.pack()
f2.pack()
#windows.mainloop()
# Enter the Tk event loop; blocks until the window is closed.
top.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
9ebd4f11813ec6c732064f8de6662f201382af4c | b36d29a441af7e05f5307a69042db6a42639fbe5 | /bookmarks/models.py | 343478f908069092148d92c87cf762063d73e737 | [] | no_license | VishalTaj/iFound | 90f0bb82cf9300b39e8f177aefef3f1401afa741 | b81b750e956d5ef51f4c46b2ad7016a0e60e01e1 | refs/heads/master | 2021-01-10T15:23:24.563510 | 2015-12-22T12:52:31 | 2015-12-22T12:52:31 | 48,431,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from django.db import models
from django.template.defaultfilters import slugify
class Bookmark(models.Model):
    """A saved URL whose slug is derived from the URL on save."""
    url = models.URLField(max_length=150)
    slug = models.SlugField(max_length=150)
    name = models.CharField(max_length=50)
    description = models.TextField()
    thumbnail = models.FileField(upload_to='documents/%Y/%m/%d')
    def save(self, *args, **kwargs):
        """Populate ``slug`` from ``url``, then delegate to Django's save().

        Bug fix: the original called ``super(test, self).save(...)`` where
        ``test`` is an undefined name, so every save() raised NameError.

        NOTE(review): slugify(url) can exceed slug's max_length=150 for long
        URLs — confirm input URLs stay short or truncate here.
        """
        self.slug = slugify(self.url)
        super(Bookmark, self).save(*args, **kwargs)
| [
"vishaltajpm@gmail.com"
] | vishaltajpm@gmail.com |
6fe53663c261f3ac799f8c34f29b111d263d3c73 | e2d9a6572046a61e8b0372c899ae178621d990e0 | /myblog/myweb/templatetags/blog_tags.py | 96958d8cd9567d0f90b5ea0567a3943ad93dcd73 | [] | no_license | luxinwang/myblog | 273b73e6887f7b74d5506b2ce8bf1028743343c5 | 0f9c0285794fecf7d11efdc295bd46aaed90694e | refs/heads/master | 2022-12-11T22:43:56.114871 | 2018-05-07T10:44:07 | 2018-05-07T10:44:07 | 132,446,132 | 0 | 0 | null | 2022-12-08T00:59:52 | 2018-05-07T10:40:44 | CSS | UTF-8 | Python | false | false | 695 | py | from django import template
from ..models import Post,Category,Tag
from django.db.models.aggregates import Count
register = template.Library()
# Latest-posts template tag (was: Chinese comment, translated).
@register.simple_tag
def get_recent_posts(num=5):
    """Return the ``num`` most recently created posts."""
    return Post.objects.all().order_by('-create_time')[:num]
# Archive template tag (was: Chinese comment, translated).
@register.simple_tag
def archives():
    """Return the distinct months that have posts, newest first."""
    return Post.objects.dates('create_time','month',order='DESC')
# Category template tag (was: Chinese comment, translated).
@register.simple_tag
def get_categories():
    """Return categories holding at least one post, annotated with num_posts."""
    return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
# Tag-cloud template tag (was: Chinese comment, translated).
@register.simple_tag
def get_tags():
    """Return tags holding at least one post, annotated with num_posts."""
    return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
"noreply@github.com"
] | noreply@github.com |
66d48cedc859b5320c861c0f2e4941dccf9152ec | 7facaf55129c8de415ffa1630adbf8ff525af448 | /portfolioapp/models.py | e06118e409f56139d3d3fa1c7c2cc8da0d2fbdef | [] | no_license | lornakamau/old-portfolio | 16424aa30b3c956f0fc130298d4cde253b0ae332 | 4e9c3cde8217c69c8a6728ef2f8a8ad85c5f2520 | refs/heads/master | 2023-03-29T07:30:03.426605 | 2020-08-08T12:02:28 | 2020-08-08T12:02:28 | 358,271,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from django.db import models
from cloudinary.models import CloudinaryField
class Project(models.Model):
    """A portfolio project entry with a Cloudinary-hosted screenshot."""
    name = models.CharField(max_length = 30)
    screenshot = CloudinaryField('Project screenshot')
    languages = models.TextField()
    short_description = models.TextField(default="short description")
    long_description = models.TextField(default="long description")
    link = models.URLField()
    # auto_now_add stamps creation time once; null=True tolerates rows
    # created before this field existed.
    post_date = models.DateTimeField(auto_now_add=True, null = True)
    def __str__(self):
        """Human-readable representation (admin, shell, templates)."""
        return self.name
    def save_project(self):
        # Thin wrapper around Model.save(); presumably called from views — confirm.
        self.save()
    def delete_project(self):
        # Thin wrapper around Model.delete(); presumably called from views — confirm.
        self.delete()
    class Meta:
        # Newest projects first.
        ordering = ['-post_date']
| [
"kamaulorna@gmail.com"
] | kamaulorna@gmail.com |
515272629218fe375177a125fdc060910ee2c669 | 8baec8070446821ed6292a4faea253b424349b8b | /tags.py | f8d16db25bd035395d0a101178f90a20233de82f | [] | no_license | TeresaAye/SQL_Wrangle_OpenStreetMap_Data_Final_Project | 3269094a85686fbe18ffe71518fc1540d36b84e5 | f7f6a10f3951dfd08f5e4f1cc5044c2a9bfbf0e5 | refs/heads/master | 2020-03-12T06:47:27.245800 | 2018-04-21T17:10:23 | 2018-04-21T17:10:23 | 130,493,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 26 17:45:40 2017
@author: TA2761
this one is ready for project
"""
# From DAND P3 Problem Set "case study" quiz "Tag Types"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
"""
Your task is to explore the data a bit more.
Before you process the data and add it into your database, you should check the
"k" value for each "<tag>" and see if there are any potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data
model and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with
problematic characters.
Please complete the function 'key_type', such that we have a count of each of
four tag categories in a dictionary:
"lower", for tags that contain only lowercase letters and are valid,
"lower_colon", for otherwise valid tags with a colon in their names,
"problemchars", for tags with problematic characters, and
"other", for other tags that do not fall into the other three categories.
See the 'process_map' and 'test' functions for examples of the expected format.
"""
#filename = 'nashville_tennessee.osm' # Too big for file submission
filename = 'sample_100.osm'
# Pre-compiled key classifiers:
#   lower        -- only lowercase letters / underscores
#   lower_colon  -- two lowercase parts joined by a single colon
#   problemchars -- any character that is unsafe in a tag key
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
    """Classify a <tag> element's "k" attribute and bump the matching counter.

    Args:
        element: an ElementTree element; only elements whose tag is "tag"
            are inspected, everything else is ignored.
        keys: dict with counters "lower", "lower_colon", "problemchars",
            "other"; mutated in place.

    Returns:
        The same ``keys`` dict (for convenient chaining).
    """
    if element.tag == "tag":
        k = element.attrib['k']
        # Idiom: call .search on the compiled pattern and rely on the
        # truthiness of the Match object instead of re.search(pattern, ...).
        if lower.search(k):
            keys["lower"] += 1
        elif lower_colon.search(k):
            keys["lower_colon"] += 1
        elif problemchars.search(k):
            keys["problemchars"] += 1
        else:
            keys["other"] += 1
    return keys
def process_map(filename):
    """Stream-parse the OSM file at ``filename`` and tally tag-key categories."""
    counts = dict.fromkeys(("lower", "lower_colon", "problemchars", "other"), 0)
    for _, element in ET.iterparse(filename):
        counts = key_type(element, counts)
    return counts
def test():
    """Run process_map on the module-level ``filename`` and pretty-print the counts."""
    keys = process_map(filename)
    pprint.pprint(keys)
# Executed at import time: runs the audit whenever this module is run or imported.
test()
'''
tags.py code returns:
runfile('C:/DA/DA P3/DA P3 Project/tags.py', wdir='C:/DA/DA P3/DA P3 Project')
{'lower': 380629, 'lower_colon': 518638, 'other': 24732, 'problemchars': 7}
'''
| [
"noreply@github.com"
] | noreply@github.com |
929a5f3d621e47e2687b81ab050783331e6f6cf7 | 4520ce2f35605048db290c767c620bf629735295 | /Compiler/parser.py | e2813f377445f68648d471fa89b9c5b2db414db8 | [] | no_license | william-nguyen128/PPL-Project | 66bf25056ef0440631785e850c63d099dc97272c | c07bd3e416e5de9f99db96ddb252201af29dc12b | refs/heads/main | 2023-04-21T18:23:15.754934 | 2021-05-19T14:01:34 | 2021-05-19T14:01:34 | 368,231,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,968 | py | from rply import ParserGenerator
from .JSONparsedTree import Node
from .AbstractSyntaxTree import *
from .errors import *
# State instance which gets passed to parser !
class ParserState(object):
    """Mutable parse-time state handed to every production handler.

    Holds the globally declared variables and functions, each a dict keyed
    by identifier name. (The trailing ``pass`` of the original constructor
    was dead code and has been removed.)
    """
    def __init__(self):
        # Fresh, per-parse registries of declared names.
        self.variables = {}
        self.functions = {}
class Parser:
    def __init__(self, syntax=False):
        """Configure the rply parser generator and register all productions.

        Args:
            syntax: when True, production handlers build JSONparsedTree Node
                lists (a visualizable parse tree) instead of AST objects.
        """
        self.pg = ParserGenerator(
            # A list of all token names accepted by the parser.
            ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'PI', 'E',
             'PRINT', 'ABSOLUTE', 'SIN', 'COS', 'TAN', 'POWER',
             'CONSOLE_INPUT', '(', ')', ';', ',', '{', '}',
             'LET', 'AND', 'OR', 'NOT', 'IF', 'ELSE',
             '=', '==', '!=', '>=', '>', '<', '<=',
             'SUM', 'SUB', 'MUL', 'DIV', 'IDENTIFIER', 'FUNCTION'
             ],
            # A list of precedence rules with ascending precedence, to
            # disambiguate ambiguous production rules.
            precedence=(
                ('left', ['FUNCTION']),
                ('left', ['LET']),
                ('left', ['=']),
                ('left', ['IF', 'ELSE', ';']),
                ('left', ['AND', 'OR']),
                ('left', ['NOT']),
                ('left', ['==', '!=', '>=', '>', '<', '<=']),
                ('left', ['SUM', 'SUB']),
                ('left', ['MUL', 'DIV']),
                ('left', ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'PI', 'E'])
            )
        )
        self.syntax = syntax
        # Register every production handler with self.pg.
        self.parse()
        pass # End Parser's constructor !
def parse(self):
@self.pg.production("main : program")
def main_program(state, p):
if self.syntax is True:
return [Node("program", p[0])]
return Main(p[0])
@self.pg.production('program : statement_full')
def program_statement(state, p):
if self.syntax is True:
return [Node("statement_full", p[0])]
return Program(p[0], None, state)
@self.pg.production('program : statement_full program')
def program_statement_program(state, p):
if self.syntax is True:
return [Node("statement_full", p[0]), Node("program", p[1])]
return Program(p[0], p[1], state)
@self.pg.production('expression : ( expression )')
def expression_parenthesis(state, p):
# In this case we need parenthesis only for precedence
# so we just need to return the inner expression
if self.syntax is True:
return [Node("("), Node("expression", p[1]), Node(")")]
return ExpressParenthesis(p[1])
@self.pg.production('statement_full : IF ( expression ) { block }')
def expression_if(state, p):
if self.syntax is True:
return [Node("IF"), Node("("), Node("expression", p[2]), Node(")"), Node("{"), Node("block", p[5]), Node("}")]
return If(condition=p[2], body=p[5], state=state)
@self.pg.production('statement_full : IF ( expression ) { block } ELSE { block }')
def expression_if_else(state, p):
if self.syntax is True:
return [Node("IF"), Node("("), Node("expression", p[2]), Node(")"), Node("{"), Node("block", p[5]), Node("}"), Node("ELSE"), Node("{"),
Node("block", p[9]), Node("}")]
return If(condition=p[2], body=p[5], else_body=p[9], state=state)
@self.pg.production('block : statement_full')
def block_expr(state, p):
if self.syntax is True:
return [Node("statement_full", p[0])]
return Block(p[0], None, state)
@self.pg.production('block : statement_full block')
def block_expr_block(state, p):
if self.syntax is True:
return [Node("statement_full", p[0]), Node("block", p[1])]
return Block(p[0], p[1], state)
@self.pg.production('statement_full : statement ;')
def statement_full(state, p):
if self.syntax is True:
return [Node("statement", p[0]), Node(";")]
return StatementFull(p[0])
@self.pg.production('statement : expression')
def statement_expr(state, p):
if self.syntax is True:
return [Node("expression", p[0])]
return Statement(p[0])
@self.pg.production('statement : LET IDENTIFIER = expression')
def statement_assignment(state, p):
if self.syntax is True:
return [Node("LET"), Node("IDENTIFIER", p[1]), Node("="), Node("expression", p[3])]
return Assignment(Variable(p[1].getstr(), state), p[3], state)
@self.pg.production('statement_full : FUNCTION IDENTIFIER ( ) { block }')
def statement_func_noargs(state, p):
if self.syntax is True:
return [Node("FUNCTION"), Node("IDENTIFIER", p[1]), Node("("), Node(")"), Node("{"), Node("block", p[5]), Node("}")]
return FunctionDeclaration(name=p[1].getstr(), args=None, block=p[5], state=state)
@self.pg.production('expression : NOT expression')
def expression_not(state, p):
if self.syntax is True:
return [Node("NOT"), Node("expression", p[1])]
return Not(p[1], state)
@self.pg.production('expression : expression SUM expression')
@self.pg.production('expression : expression SUB expression')
@self.pg.production('expression : expression MUL expression')
@self.pg.production('expression : expression DIV expression')
def expression_binary_operator(state, p):
if p[1].gettokentype() == 'SUM':
if self.syntax is True:
return [Node("expression", p[0]), Node("+"), Node("expression", p[2])]
return Sum(p[0], p[2], state)
elif p[1].gettokentype() == 'SUB':
if self.syntax is True:
return [Node("expression", p[0]), Node("-"), Node("expression", p[2])]
return Sub(p[0], p[2], state)
elif p[1].gettokentype() == 'MUL':
if self.syntax is True:
return [Node("expression", p[0]), Node("*"), Node("expression", p[2])]
return Mul(p[0], p[2], state)
elif p[1].gettokentype() == 'DIV':
if self.syntax is True:
return [Node("expression", p[0]), Node("/"), Node("expression", p[2])]
return Div(p[0], p[2], state)
else:
raise LogicError('Oops, this should not be possible!')
@self.pg.production('expression : expression != expression')
@self.pg.production('expression : expression == expression')
@self.pg.production('expression : expression >= expression')
@self.pg.production('expression : expression <= expression')
@self.pg.production('expression : expression > expression')
@self.pg.production('expression : expression < expression')
@self.pg.production('expression : expression AND expression')
@self.pg.production('expression : expression OR expression')
def expression_equality(state, p):
if p[1].gettokentype() == '==':
if self.syntax is True:
return [Node("expression", p[0]), Node("=="), Node("expression", p[2])]
return Equal(p[0], p[2], state)
elif p[1].gettokentype() == '!=':
if self.syntax is True:
return [Node("expression", p[0]), Node("!="), Node("expression", p[2])]
return NotEqual(p[0], p[2], state)
elif p[1].gettokentype() == '>=':
if self.syntax is True:
return [Node("expression", p[0]), Node(">="), Node("expression", p[2])]
return GreaterThanEqual(p[0], p[2], state)
elif p[1].gettokentype() == '<=':
if self.syntax is True:
return [Node("expression", p[0]), Node("<="), Node("expression", p[2])]
return LessThanEqual(p[0], p[2], state)
elif p[1].gettokentype() == '>':
if self.syntax is True:
return [Node("expression", p[0]), Node(">"), Node("expression", p[2])]
return GreaterThan(p[0], p[2], state)
elif p[1].gettokentype() == '<':
if self.syntax is True:
return [Node("expression", p[0]), Node("<"), Node("expression", p[2])]
return LessThan(p[0], p[2], state)
elif p[1].gettokentype() == 'AND':
if self.syntax is True:
return [Node("expression", p[0]), Node("AND"), Node("expression", p[2])]
return And(p[0], p[2], state)
elif p[1].gettokentype() == 'OR':
if self.syntax is True:
return [Node("expression", p[0]), Node("OR"), Node("expression", p[2])]
return Or(p[0], p[2], state)
else:
raise LogicError("Shouldn't be possible")
@self.pg.production('expression : CONSOLE_INPUT ( )')
def program(state, p):
if self.syntax is True:
return [Node("CONSOLE_INPUT"), Node("("), Node(")")]
return Input()
@self.pg.production('expression : CONSOLE_INPUT ( expression )')
def program(state, p):
if self.syntax is True:
return [Node("CONSOLE_INPUT"), Node("("), Node("expression", p[2]), Node(")")]
return Input(expression=p[2], state=state)
@self.pg.production('statement : PRINT ( )')
def program(state, p):
if self.syntax is True:
return [Node("PRINT"), Node("("), Node(")")]
return Print()
@self.pg.production('statement : PRINT ( expression )')
def program(state, p):
if self.syntax is True:
return [Node("PRINT"), Node("("), Node("expression", p[2]), Node(")")]
return Print(expression=p[2], state=state)
@self.pg.production('expression : ABSOLUTE ( expression )')
def expression_absolute(state, p):
if self.syntax is True:
return [Node("ABSOLUTE"), Node("("), Node("expression", p[2]), Node(")")]
return Absolute(p[2], state)
@self.pg.production('expression : SIN ( expression )')
def expression_absolute(state, p):
if self.syntax is True:
return [Node("SIN"), Node("("), Node("expression", p[2]), Node(")")]
return Sin(p[2], state)
@self.pg.production('expression : COS ( expression )')
def expression_absolute(state, p):
if self.syntax is True:
return [Node("COS"), Node("("), Node("expression", p[2]), Node(")")]
return Cos(p[2], state)
@self.pg.production('expression : TAN ( expression )')
def expression_absolute(state, p):
if self.syntax is True:
return [Node("TAN"), Node("("), Node("expression", p[2]), Node(")")]
return Tan(p[2], state)
@self.pg.production('expression : POWER ( expression , expression )')
def expression_absolute(state, p):
if self.syntax is True:
return [Node("POWER"), Node("("), Node("expression", p[2]), Node(","), Node("expression", p[4]), Node(")")]
return Pow(p[2], p[4], state)
@self.pg.production('expression : IDENTIFIER')
def expression_variable(state, p):
# Cannot return the value of a variable if it isn't yet defined
if self.syntax is True:
return [Node("IDENTIFIER", p[0])]
return Variable(p[0].getstr(), state)
@self.pg.production('expression : IDENTIFIER ( )')
def expression_call_noargs(state, p):
# Cannot return the value of a function if it isn't yet defined
if self.syntax is True:
return [Node("IDENTIFIER", p[0]), Node("("), Node(")")]
return CallFunction(name=p[0].getstr(), args=None, state=state)
@self.pg.production('expression : const')
def expression_const(state, p):
if self.syntax is True:
return [Node("const", p[0])]
return p[0]
@self.pg.production('const : FLOAT')
def constant_float(state, p):
if self.syntax is True:
return [Node("FLOAT", p[0])]
return Float(p[0].getstr(), state)
@self.pg.production('const : BOOLEAN')
def constant_boolean(state, p):
if self.syntax is True:
return [Node("BOOLEAN", p[0])]
return Boolean(p[0].getstr(), state)
@self.pg.production('const : INTEGER')
def constant_integer(state, p):
if self.syntax is True:
return [Node("INTEGER", p[0])]
return Integer(p[0].getstr(), state)
@self.pg.production('const : STRING')
def constant_string(state, p):
if self.syntax is True:
return [Node("STRING", p[0])]
return String(p[0].getstr().strip('"\''), state)
@self.pg.production('const : PI')
def constant_pi(state, p):
if self.syntax is True:
return [Node("PI", p[0])]
return ConstantPI(p[0].getstr(), state)
@self.pg.production('const : E')
def constant_e(state, p):
if self.syntax is True:
return [Node("E", p[0])]
return ConstantE(p[0].getstr(), state)
@self.pg.error
def error_handle(state, token):
raise ValueError(token)
    def build(self):
        """Finalize and return the rply parser built from the registered productions."""
        return self.pg.build()
| [
"quannguyen.flagship@gmail.com"
] | quannguyen.flagship@gmail.com |
bc8268af631131bd13dd39a95f902b0502e60268 | 5cd925e6e430f776831187f0f17173c5499b69ce | /VAE/helper.py | 4108e94810fda4309584cc60bbb0a21ee1786d5f | [] | no_license | AI-Composer/AIComposer | 8e51dfb0eb5f2f9c144d81ad8650375a80d9a826 | 7b97fa833fc6a878d69d61162d6013114691613d | refs/heads/master | 2022-11-06T10:53:54.326030 | 2020-06-21T15:05:05 | 2020-06-21T15:05:05 | 259,961,954 | 1 | 0 | null | 2020-05-31T02:57:53 | 2020-04-29T15:16:59 | Python | UTF-8 | Python | false | false | 1,575 | py | from VAE.model import VAENet
def train(model, loader, epoch_num=10, batch_num=600, save=None,
          summary=False):
    """Train a VAENet on batches produced by `loader`.

    The batch at index `batch_num` is held out and handed to the model as
    its evaluation batch; batches 0..batch_num-1 are used for training.

    Args:
        model: VAENet instance to train.
        loader: loader defined in data.py; must provide getBatches_1().
        epoch_num: number of training epochs.
        batch_num: number of training batches; the loader must yield at
            least batch_num + 1 batches (enforced by the asserts below).
        save: checkpoint destination forwarded to model.train_inputs
            (None disables saving).
        summary: whether to emit training summaries.
    Returns:
        None
    """
    assert isinstance(
        model,
        VAENet), "VAE train method requires a VAE network, got {}".format(
            model.__class__.__name__)
    inputs, targets = loader.getBatches_1()
    # Hold out the batch at index batch_num for evaluation.
    assert len(inputs) > batch_num
    t_inputs = inputs[batch_num]
    inputs = inputs[:batch_num]
    assert len(targets) > batch_num
    t_targets = targets[batch_num]
    targets = targets[:batch_num]
    # control=[1, 0, 0]: semantics defined by VAENet.train_inputs — TODO confirm.
    model.train_inputs(inputs,
                       targets,
                       t_inputs,
                       t_targets,
                       control=[1, 0, 0],
                       epoch_num=epoch_num,
                       save=save,
                       summary=summary)
def compose(model, section):
    """Generate a composition from a seed section.

    Args:
        model: trained VAENet used for generation.
        section: seed tensor of shape [section_length, input_depth].
    Returns:
        Tensor of shape [section_length*repeat, input_depth].
    """
    assert isinstance(model, VAENet), \
        "VAE compose method requires a VAE network, got {}".format(
            model.__class__.__name__)
    expected_shape = (model.section_length, model.input_depth)
    assert section.size() == expected_shape
    # Insert batch and channel axes: [L, D] -> [L, 1, D, 1].
    batched = section.unsqueeze(dim=1).unsqueeze(dim=-1)
    _, generated = model.forward(batched, control=[1, 0, 0])
    return generated.squeeze()
| [
"liwei17@mails.tsinghua.edu.cn"
] | liwei17@mails.tsinghua.edu.cn |
4c0dfbd50ab31226a2bc8333c10ef07d96a180cf | e4d31420d7f79309adbbfce49151ffbf0660ed9c | /project/sla_dashboard/geit.py | 24e113041db6edcc7ed1e5afd6ab5b20cc0834be | [] | no_license | visionguo/python | c56321af5759e5c43d25871ab835850d5d91ea82 | 2de82e898524194b3b735835c044f078cc6ee5ba | refs/heads/master | 2022-12-18T17:21:25.899038 | 2021-08-26T12:30:47 | 2021-08-26T12:30:47 | 135,961,970 | 0 | 0 | null | 2022-12-08T01:56:56 | 2018-06-04T02:44:18 | HTML | UTF-8 | Python | false | false | 2,666 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Auther:VisionGuo
# Date:2018/08/29
# Brief:
# geit
# Globals:
# None
# Returns:
# succ:0
# fail:1
import requests
import json
from elasticsearch import Elasticsearch
from urllib import urlencode
from urllib import quote
import re
import shutil
import time
import logging
import ConfigParser
import datetime
import sys
import os
reload(sys)
sys.setdefaultencoding('utf-8')
def get_subbusiness_mem_all(business):
    """Return total memory per sub-business line (Python 2 module).

    Queries the Prometheus HTTP API, summing node_memory_MemTotal_bytes
    grouped by (business, subbusiness).

    Args:
        business: business-line name, regex-matched in the PromQL selector.
    Returns:
        Decoded JSON payload of the Prometheus query response.
    """
    api = "http://p8s.xxx-int.com/api/v1/query?query="  # Prometheus query endpoint
    # Sum total memory over every node, grouped per sub-business.
    query = "sum by (business,subbusiness) (node_memory_MemTotal_bytes {business=~\""+str(business)+"\"})"
    # NOTE(review): the PromQL is not URL-encoded; relies on requests/server leniency.
    response = requests.get(api + query)
    return response.json()
def get_subbusiness_mem_maxuse(business,subbusiness):
"""
获取子产品线内存最大值
"""
result=[]
print subbusiness
api="http://p8s.xxx-intp.com/api/v1/query?query=" # 从p8s源获取监控数据
query="sum by (business,subbusiness) (node_memory_MemTotal_bytes {business=~\""+str(business)+"\",subbusiness=~\""+subbusiness+"\"}) - sum by (business,subbusiness) (min_over_time(node_memory_MemAvailable_bytes {business=~\""+str(business)+"\",subbusiness=~\""+subbusiness+"\"} [30d]) )" # 使用promsql获取最大值
respose = requests.get(api+query)
return respose.json()
def get_subbusiness_mem_prentuse(business):
    """Return peak memory usage *percentage* per sub-business (30-day window).

    PromQL: (total - min available over 30d) * 100 / total, grouped by
    (business, subbusiness).

    Args:
        business: business-line name (regex-matched).
    Returns:
        Decoded JSON payload of the Prometheus query response.
    """
    api = "http://p8s.xxx-int.com/api/v1/query?query="
    query = "(sum by (business,subbusiness) (node_memory_MemTotal_bytes {business=~\""+str(business)+"\"}) - sum by (business,subbusiness) (min_over_time(node_memory_MemAvailable_bytes {business=~\""+str(business)+"\"} [30d]) )) *100 / sum by (business,subbusiness) (node_memory_MemTotal_bytes {business=~\""+str(business)+"\"})"
    response = requests.get(api + query)
    return response.json()
def get_subbusiness_cpu_max_prentuse(business):
"""
获取cpu当前最大值
"""
result=[]
api="http://p8s.xxx-int.com/api/v1/query_range?query="
query="avg by (business,subbusiness)( irate (node_cpu_seconds_total {business=~\""+str(business)+"\",mode=\"idle\"}[30m] ) )"
timerange="&start=2018-10-02T20:10:30.781Z&end=2018-10-09T20:11:00.781Z&step=30m"
respose = requests.get(api+query+timerange)
for r in respose.json()["data"]["result"]:
print r["metric"]["subbusiness"],float(min(r["values"], key=lambda a: a[1])[1])*100
return respose.json()
# Ad-hoc smoke test executed at import time.
print "prent_use-----"
# NOTE(review): `b` is never defined in this module, so this line raises
# NameError when the script runs — likely a leftover from interactive use.
a=get_subbusiness_cpu_max_prentuse(str(b))
"30895025+visionguo@users.noreply.github.com"
] | 30895025+visionguo@users.noreply.github.com |
602df5b5ae465bb4f6010ed5a2706665ddb9270e | 5c12bfe2c4b661986d2740aa48a402fd2fd01703 | /idlak-voice-build/pythonlib/example.py | a5bb488cea822f594fbc2031ed430ad0fb225db9 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | oplatek/idlak | fea41d797b862f5382bd17c0ca8349b346cb44d7 | 02b24dc6f79b84779e423bfbb17bdf8e70c95aec | refs/heads/import-svn-idlak | 2021-01-11T04:41:39.367915 | 2016-09-14T18:13:52 | 2016-09-14T18:13:52 | 71,112,898 | 0 | 0 | NOASSERTION | 2020-02-20T13:09:34 | 2016-10-17T07:48:11 | C++ | UTF-8 | Python | false | false | 2,121 | py | # Uses python wrappers to print out a context dependency tree
import os, sys
# Directory containing this script; used to locate the bundled wrapper lib.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Make the idlakapi wrapper importable before importing it.
sys.path += [os.path.join(SCRIPT_DIR, 'pythonlib')]
import idlakapi
class KaldiTree:
    """Placeholder wrapper for a Kaldi decision tree (not used by main)."""
    def __init__(self, treedata):
        # Stub: treedata is accepted but not yet stored or parsed.
        pass
# only works for binary trees
def printevent(event, eventvector, buf):
    """Recursively print an event-map subtree in Kaldi's ascii tree format.

    CE (constant event) lines are leaves carrying an answer value; SE
    (split event) lines print the split key and yes-set, then recurse into
    both children. `eventvector` and `buf` are reusable scratch objects
    owned by the caller.
    """
    # get the children of this node into the scratch vector
    idlakapi.IDLAK_eventmap_getchildren(event, eventvector)
    # terminal node: no children means a leaf
    if not idlakapi.IDLAK_eventmapvector_size(eventvector):
        print 'CE', idlakapi.IDLAK_eventmap_answer(event),
    else:
        # binary tree assumed: child 0 is the yes branch, child 1 the no branch
        yes = idlakapi.IDLAK_eventmapvector_at(eventvector, 0)
        no = idlakapi.IDLAK_eventmapvector_at(eventvector, 1)
        idlakapi.IDLAK_eventmap_yesset(event, buf)
        yesset = '[ ' + idlakapi.IDLAK_string_val(buf) + ']'
        print 'SE', idlakapi.IDLAK_eventmap_key(event), yesset
        print '{',
        printevent(yes, eventvector, buf)
        printevent(no, eventvector, buf)
        print '} '
def main():
    """Parse CLI options, load a Kaldi context-dependency tree and print it."""
    from optparse import OptionParser
    usage="Usage: %prog [-h] -t kalditree\n\nPrint kaldi decision tree in ascii"
    parser = OptionParser(usage=usage)
    # Options
    parser.add_option("-t", "--kalditree", default=None,
                      help="Kaldi tree")
    opts, args = parser.parse_args()
    if not opts.kalditree:
        parser.error("Require input kaldi tree")
    # convert to ascii and load as a string
    context_tree = idlakapi.IDLAK_read_contextdependency_tree(opts.kalditree)
    # Header: context width and central position of the loaded tree.
    print "ContextDependency",
    print idlakapi.IDLAK_contextdependency_tree_contextwidth(context_tree),
    print idlakapi.IDLAK_contextdependency_tree_centralposition(context_tree),
    print "ToPdf",
    root = idlakapi.IDLAK_contextdependency_tree_root(context_tree)
    # Scratch objects reused throughout the recursion, freed afterwards.
    eventvector = idlakapi.IDLAK_eventmapvector_new()
    buf = idlakapi.IDLAK_string_new()
    printevent(root, eventvector, buf)
    idlakapi.IDLAK_eventmapvector_delete(eventvector)
    idlakapi.IDLAK_string_delete(buf)
    print "EndContextDependency ",
# Standard script entry point.
if __name__ == '__main__':
    main()
| [
"blaise@cereproc.com"
] | blaise@cereproc.com |
30341627603cc0e51fee56d6cecd002a5c6b866b | b86f9b0bb909d5f809acb776f270afbcf49fffe5 | /AOC_2019_4/2.py | 51968f2dbce473a4a0821a161efc5e76bea75b2c | [] | no_license | ternovandarius/Advent-of-Code-2019 | 58a4686c8c3f3b8a9255131341f26008f75b0783 | cb1273b9d32e44d37f51da15d11cbd30f3c53804 | refs/heads/master | 2021-02-06T22:57:18.814370 | 2020-02-29T11:21:33 | 2020-02-29T11:21:33 | 243,955,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | def verify(nr):
prev_digit=nr//100000
digit_count=[0]*10
for i in range(4,-1,-1):
x=10**i
curr_digit=nr//x%10
if prev_digit<curr_digit:
prev_digit=curr_digit
else:
if prev_digit==curr_digit:
digit_count[prev_digit]+=1
else:
return False
for i in digit_count:
if i==1:
return True
return False
def main():
    """Count valid passwords in the puzzle input range and print the total."""
    total = sum(1 for candidate in range(402328, 864248) if verify(candidate))
    print(total)
# Run the count only when executed as a script, so importing this module
# for reuse does not trigger the full scan.
if __name__ == "__main__":
    main()
"52597957+ternovandarius@users.noreply.github.com"
] | 52597957+ternovandarius@users.noreply.github.com |
9dfbbe965c519f29355baf5a53a6bbd69ffcd861 | 29a57a4df2e7b005f102030d4e517c0171104ce7 | /20210724/TofPandas.py | c43ec87cc4a2fa6a9e5ee2e9d7910543e795cbea | [] | no_license | altroy2554/DB | 26abdae31983d4640e77a585c54930fe6ecf2cce | 24ba6348e68aad66543d08699d30b6f3b5a2968b | refs/heads/main | 2023-07-07T07:22:24.721761 | 2021-08-07T07:49:15 | 2021-08-07T07:49:15 | 389,016,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import numpy as np
import pandas as pd
# Plain Series example (not used in the DataFrame below).
a =pd.Series([1,2,3])
# One Series per column of the student table.
nameSer = pd.Series(['name1','name2','name3'])
age = pd.Series([10,20,30])
gender=pd.Series(['남','여','남'])
grade = pd.Series(['A','A','A'])
# Assemble the Series into a DataFrame; dict keys become column labels.
df = pd.DataFrame({'이름':nameSer,
                   '나이':age,
                   '성별':gender,
                   '학점':grade})
print(df)
| [
"noreply@github.com"
] | noreply@github.com |
2ebb0b0a02c0f0f1bcd8b3987a74b01fa6e79e70 | 003543c7ef10ef82dcea4c5da89a225a9fcf16a5 | /tests/data/multipoint_beam/i_comp_base.py | fb8cc409f44d2e903eab5641b3f8531f86f932e4 | [
"Apache-2.0"
] | permissive | relf/WhatsOpt-CLI | 20dda7a71fd59faa1203ee4022610e1cfe3a1e14 | a8779638763681b4f4f85df532a9de8813a705ca | refs/heads/master | 2023-07-14T02:50:02.648427 | 2023-06-27T13:41:09 | 2023-06-27T13:41:09 | 216,215,138 | 0 | 0 | Apache-2.0 | 2019-10-19T14:00:40 | 2019-10-19T14:00:39 | null | UTF-8 | Python | false | false | 580 | py | # -*- coding: utf-8 -*-
"""
i_comp_base.py generated by WhatsOpt 1.8.2
"""
# DO NOT EDIT unless you know what you are doing
# whatsopt_url: https://ether.onera.fr/whatsopt
# analysis_id: 4
import numpy as np
from numpy import nan
from os import path
from importlib import import_module
from openmdao.api import ExplicitComponent
class ICompBase(ExplicitComponent):
"""An OpenMDAO base component to encapsulate IComp discipline"""
def setup(self):
self.add_input("h", val=np.ones((1, 50)), desc="")
self.add_output("I", val=np.ones((50,)), desc="")
| [
"remi.lafage@onera.fr"
] | remi.lafage@onera.fr |
656d64575a9287e7b63813595a7bc26636cdd961 | d3e00bc976e0afe6bd36a9129a3a23e65854b400 | /Data/Code/scrapeDisease.py | e16855e7c176c609e6fd62fc8345f0f9d31a9015 | [] | no_license | cs3285/p.ai | fbd6507e74c88374c15e382916c3cc3b00627156 | 3a229609344db21cbce80284f3d7e9c002edc231 | refs/heads/master | 2021-01-01T15:53:20.794944 | 2017-12-17T23:43:53 | 2017-12-17T23:43:53 | 97,725,022 | 0 | 3 | null | 2017-10-03T02:50:10 | 2017-07-19T14:19:45 | Python | UTF-8 | Python | false | false | 5,543 | py | from selenium import webdriver
#from selenium.webdriver.common.keys import Keys
#from selenium.webdriver import ActionChains
import csv
#import json
import time
#from pdb import set_trace as bp
def saveTxt(content, animal, d_name):
    """Persist a page's HTML source as UTF-8 text (Python 2 module).

    Args:
        content: unicode page source to write.
        animal: animal category, used in the file name.
        d_name: disease name, used in the file name.
    """
    path = "C:\\Users\\IrisTang\\Documents\\zzz\\aip\\diseaseData\\html\\"+animal+"_"+d_name+".txt"
    # `with` guarantees the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open(path, "w") as text_file:
        text_file.write(content.encode('utf-8'))
def saveData(data,animal):
    """Write the scraped disease rows for one animal to a CSV file.

    Args:
        data: list of rows (lists) as collected by the main loop.
        animal: animal category, used in the output file name.
    """
    path = "C:\\Users\\IrisTang\\Documents\\zzz\\aip\\diseaseData\\"+animal+"Disease.csv"
    with open(path,"wb") as f:
        writer = csv.writer(f)
        writer.writerows(data)
def getDiseaseName(driver):
    """Return link elements for every disease on the current catalog page.

    Returns [] when the site served its "page not found" page instead.
    """
    if "We can\'t seem to find the page" in driver.page_source:
        return []
    else:
        # 'charcoal' appears to be the CSS class used for disease links.
        diseaseName=driver.find_elements_by_class_name('charcoal')
        return diseaseName
def getPage(driver):
    """Return the pagination elements on the current page, or [] when the
    site served its "page not found" page."""
    if "We can\'t seem to find the page" in driver.page_source:
        return []
    return driver.find_elements_by_class_name('pager-item')
def nextPage(driver,pager):
    """Navigate to the next page and consume one entry from `pager`.

    Mutates `pager` in place (pops the first element), so repeated calls
    walk through the pagination links until the list is empty.
    """
    if (len(pager)!=0):
        temp_ul=pager[0].find_element_by_class_name('active').get_attribute('href')
        driver.get(temp_ul)
        pager.pop(0)
    else:
        pass
def getContentDict(driver):
    """Scrape the article body into {heading text: [block texts]}.

    Returns {} when the site served its "page not found" page. Only h2/h3
    headings and p/ul blocks are considered; everything else is dropped.
    """
    if "We can\'t seem to find the page" in driver.page_source:
        content_dict = {}
    else:
        content = driver.find_elements_by_css_selector('#content-content > div > div.content > *')
        # Keep only headings and text blocks, dropping page-number widgets.
        content_1=[]
        for c in content:
            if c.tag_name in ['h2','h3','p','ul']:
                content_1.append(c)
            else:
                pass
        # Collect the indices of heading elements inside content_1.
        title_indext=[]
        for i in range(len(content_1)):
            if (content_1[i].tag_name in ['h2','h3']):
                title_indext.append(i)
            else:
                pass
        # Sentinel index so the slicing loop below covers the last section.
        # NOTE(review): the sentinel is len-1, so the final element's text is
        # excluded from the last section — confirm this is intended.
        title_indext.append(len(content_1)-1)
        content_text=[]
        for c in content_1:
            content_text.append(c.text)
        # Map each heading to the texts between it and the next heading.
        content_dict={}
        for i in range(len(title_indext)-1):
            content_dict[content_text[title_indext[i]]] = content_text[title_indext[i]+1:title_indext[i+1]]
    return content_dict
def openLinkInNewWindow(driver, url):
    """Open `url` in a fresh browser tab and switch the driver to it.

    Assumes only one other window is open (the new tab is handle [1]).
    """
    driver.execute_script("window.open('');")
    driver.switch_to.window(driver.window_handles[1])
    driver.get(url)
if __name__ == "__main__":
    driver = webdriver.Chrome('C:/Users/IrisTang/Documents/zzz/aip/chromedriver_win32/chromedriver.exe')
    animal_list=["cat","dog","bird","horse","fish","exotic","rabbit","ferret","reptile"]
    for animal in animal_list:
        time_animal_start=time.time()
        # Catalog page listing every disease for this animal.
        web_catalog = ("http://www.petmd.com/"+animal+"/conditions")
        driver.get(web_catalog)
        diseaseElement = getDiseaseName(driver)
        diseaseElement.pop(0)  # first 'charcoal' element is not a disease link
        url_list = []
        disease_name = []
        data = []
        # Collect (name, url) pairs before navigating away from the catalog.
        for i in range(len(diseaseElement)):
            url_temp=diseaseElement[i].get_attribute('href')
            name_temp=diseaseElement[i].text
            url_list.append((name_temp,url_temp))
            #disease_name.append(name_temp)
        for url in url_list:
            row = []
            find = False
            openLinkInNewWindow(driver,url[1])
            time_disease_start=time.time()
            # Retry loop: reload the tab if one disease takes over 40 s.
            while(not find):
                if(time.time()-time_disease_start > 40):
                    driver.close()
                    driver.switch_to.window(driver.window_handles[0])
                    openLinkInNewWindow(driver,url[1])
                    time_disease_start=time.time()
                    print "Refreshing..."
                try:
                    content_dict = getContentDict(driver)
                    page_n = getPage(driver)
                    html_source=driver.page_source
                    # Follow the pagination, merging every page's sections.
                    while (len(page_n)!=0):
                        nextPage(driver,page_n)
                        dict_temp=getContentDict(driver)
                        content_dict.update(dict_temp)
                        html_source=html_source+driver.page_source
                except Exception:
                    continue  # transient selenium failure: retry this disease
                find = True
                # Row layout: disease name, then heading/texts pairs.
                row.append(url[0])
                for d in content_dict:
                    row.append(d)
                    row.append(content_dict[d])
                data.append(row)
                saveTxt(html_source,animal,url[0])
                driver.close()
                driver.switch_to.window(driver.window_handles[0])
                print url[0]
                print time.time()-time_disease_start
        saveData(data,animal)
        print animal
        print time.time()-time_animal_start
'''
web_catalog = ("http://www.petmd.com/"+animal_list[0]+"/conditions")
driver.get(web_catalog)
diseaseElement = getDiseaseName(driver)
diseaseElement.pop(0)
url_list = []
for i in range(len(diseaseElement)):
url_temp=diseaseElement[i].get_attribute('href')
url_list.append(url_temp)
#--------------------------------test----------------------------------------
ul=url_list[9]
driver.get(ul)
content_dict = getContentDict(driver)
page_n = getPage(driver)
while (len(page_n)!=0):
nextPage(driver,page_n)
dict_temp=getContentDict(driver)
content_dict.update(dict_temp)
for a in content_dict:
print a
print content_dict[a]'''
| [
"noreply@github.com"
] | noreply@github.com |
a54dba4d3ebcdf78eb1020f011bb1570ffd11720 | 3595d51ff2499bb990b87a25b17516edf6907696 | /boards/models.py | a9fc0f9597a2654f291a202c6c60a21410fac535 | [
"Apache-2.0"
] | permissive | maxlipsky/infomate.club | 01fa55b3dfd318212b0c328dd48019b585d3ef9d | 9729b568622120f8cba3d22fefdcfba81d1b5b88 | refs/heads/master | 2020-12-08T19:53:37.231403 | 2020-01-16T19:04:14 | 2020-01-16T19:04:14 | 233,079,581 | 1 | 0 | Apache-2.0 | 2020-01-10T15:49:08 | 2020-01-10T15:49:07 | null | UTF-8 | Python | false | false | 6,435 | py | import uuid
from datetime import datetime, timedelta
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.db import models
from slugify import slugify
from boards.icons import DOMAIN_ICONS
class Board(models.Model):
    """A curated board: a page of feeds assembled by one curator."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    slug = models.SlugField(unique=True)  # URL identifier, derived from name
    name = models.CharField(max_length=120, db_index=True)
    avatar = models.URLField(max_length=512, null=True)
    curator_name = models.CharField(max_length=120)
    curator_title = models.CharField(max_length=120)
    curator_url = models.URLField(null=True)
    curator_bio = models.CharField(max_length=120, null=True)
    curator_footer = models.TextField(null=True)
    schema = models.TextField(null=True)
    created_at = models.DateTimeField(db_index=True)
    updated_at = models.DateTimeField()
    refreshed_at = models.DateTimeField(null=True)  # last feed refresh, if any
    is_visible = models.BooleanField(default=True)
    is_private = models.BooleanField(default=True)
    index = models.PositiveIntegerField(default=0)  # manual sort position

    class Meta:
        db_table = "boards"
        ordering = ["index", "name"]

    def save(self, *args, **kwargs):
        """Fill in created_at/slug on first save and bump updated_at."""
        if not self.created_at:
            self.created_at = datetime.utcnow()
        if not self.slug:
            self.slug = slugify(self.name).lower()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)

    def board_name(self):
        """Display name: the board's own name, else the curator's name."""
        return self.name or self.curator_name

    def natural_refreshed_at(self):
        """Human-readable refresh time; 'now...' while never refreshed."""
        if not self.refreshed_at:
            return "now..."
        return naturaltime(self.refreshed_at)
class BoardBlock(models.Model):
    """A named section of a board that groups several feeds."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    board = models.ForeignKey(Board, related_name="blocks", on_delete=models.CASCADE, db_index=True)
    name = models.CharField(max_length=512, null=True)
    slug = models.SlugField()  # derived from name on first save
    created_at = models.DateTimeField(db_index=True)
    updated_at = models.DateTimeField()
    index = models.PositiveIntegerField(default=0)  # manual sort position

    class Meta:
        db_table = "board_blocks"
        ordering = ["index"]

    def save(self, *args, **kwargs):
        """Fill in created_at/slug on first save and bump updated_at."""
        if not self.created_at:
            self.created_at = datetime.utcnow()
        if not self.slug:
            self.slug = slugify(self.name).lower()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)
class BoardFeed(models.Model):
    """One RSS source placed inside a board block."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    board = models.ForeignKey(Board, related_name="feeds", on_delete=models.CASCADE, db_index=True)
    block = models.ForeignKey(BoardBlock, related_name="feeds", on_delete=models.CASCADE, db_index=True)
    name = models.CharField(max_length=512, null=True)
    comment = models.TextField(null=True)
    url = models.URLField(max_length=512)
    icon = models.URLField(max_length=512, null=True)
    rss = models.URLField(max_length=512, null=True)
    created_at = models.DateTimeField(db_index=True)
    last_article_at = models.DateTimeField(null=True)
    refreshed_at = models.DateTimeField(null=True)
    frequency = models.FloatField(default=0.0)  # per week
    columns = models.SmallIntegerField(default=1)
    articles_per_column = models.SmallIntegerField(default=15)
    index = models.PositiveIntegerField(default=0)  # manual sort position

    class Meta:
        db_table = "board_feeds"
        ordering = ["index"]

    def save(self, *args, **kwargs):
        """Fill in created_at on first save and bump updated_at."""
        if not self.created_at:
            self.created_at = datetime.utcnow()
        # NOTE(review): BoardFeed declares no updated_at column — this only
        # sets a transient attribute; kept for parity with the other models.
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)

    def last_articles(self):
        """Newest articles for this feed, as many as the layout displays.

        Uses articles_per_column (previously a hard-coded 15, which
        silently disagreed with articles_by_column for customized feeds).
        """
        return self.articles.all()[:self.articles_per_column * self.columns]

    def articles_by_column(self):
        """Split the newest articles into (column_index, articles) pairs."""
        articles = self.articles.all()[:self.articles_per_column * self.columns]
        return [
            (column, articles[column * self.articles_per_column:self.articles_per_column * (column + 1)])
            for column in range(self.columns)
        ]

    def natural_last_article_at(self):
        """Human-readable time of the latest article, or None when unset."""
        if not self.last_article_at:
            return None
        return naturaltime(self.last_article_at)
class Article(models.Model):
    """A single item fetched from a feed and shown on a board."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    uniq_id = models.TextField(db_index=True)  # de-duplication key
    board = models.ForeignKey(Board, related_name="articles", on_delete=models.CASCADE, db_index=True)
    feed = models.ForeignKey(BoardFeed, related_name="articles", on_delete=models.CASCADE, db_index=True)
    url = models.URLField(max_length=2048)
    type = models.CharField(max_length=16)
    domain = models.CharField(max_length=256, null=True)
    title = models.CharField(max_length=256)
    image = models.URLField(max_length=512, null=True)
    description = models.TextField(null=True)
    summary = models.TextField(null=True)
    created_at = models.DateTimeField(db_index=True)
    updated_at = models.DateTimeField()

    class Meta:
        db_table = "articles"
        ordering = ["-created_at"]

    def save(self, *args, **kwargs):
        """Fill in created_at on first save and bump updated_at."""
        if not self.created_at:
            self.created_at = datetime.utcnow()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)

    def icon(self):
        """HTML snippet for the article's domain icon ('' when unknown).

        'fa:'-prefixed entries in DOMAIN_ICONS are treated as Font Awesome
        classes; anything else as an image URL.
        """
        article_icon = DOMAIN_ICONS.get(self.domain)
        if not article_icon:
            return ""
        if article_icon.startswith("fa:"):
            return f"""<i class="{article_icon[3:]}"></i> """
        return f"""<img src="{article_icon}" alt="{self.domain}" class="icon"> """

    def natural_created_at(self):
        """Human-readable article age, or None when created_at is unset."""
        if not self.created_at:
            return None
        return naturaltime(self.created_at)

    def is_fresh(self):
        """Whether the article should be highlighted as new.

        The freshness window shrinks as the feed posts more often
        (frequency is posts per week).
        """
        frequency = self.feed.frequency
        now = datetime.utcnow()
        if frequency <= 1:
            # low frequency feed — any post this week is new
            return self.created_at > now - timedelta(days=7)
        elif frequency <= 20:
            # average frequency — mark today posts
            return self.created_at > now - timedelta(days=1)
        elif frequency >= 100:
            # extra high frequency — mark newest posts
            return self.created_at > now - timedelta(hours=3)
        # normal frequency - mark 6-hour old posts
        return self.created_at > now - timedelta(hours=6)
| [
"me@vas3k.ru"
] | me@vas3k.ru |
20965d57bf76e26a205182ffc8240ddad375cf2b | 0db97db08743783019efe022190f409d22ff95bd | /aliyun/api/rest/Rds20140815DescribeModifyParameterLogRequest.py | a738e55de39ed911b27aa2c242f097a771646719 | [
"Apache-2.0"
] | permissive | snowyxx/aliyun-python-demo | 8052e2a165f1b869affe632dda484d6ca203bd9b | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | refs/heads/master | 2021-01-10T03:37:31.657793 | 2016-01-21T02:03:14 | 2016-01-21T02:03:14 | 49,921,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | '''
Created by auto_sdk on 2015.06.02
'''
from aliyun.api.base import RestApi
class Rds20140815DescribeModifyParameterLogRequest(RestApi):
    """Auto-generated request wrapper for the Aliyun RDS
    DescribeModifyParameterLog API (version 2014-08-15)."""

    def __init__(self,domain='rds.aliyuncs.com',port=80):
        """Initialize with endpoint defaults and empty query parameters
        (callers set the attributes below before sending)."""
        RestApi.__init__(self,domain, port)
        self.DBInstanceId = None
        self.EndTime = None
        self.PageNumber = None
        self.PageSize = None
        self.StartTime = None

    def getapiname(self):
        """Return the fully-qualified API name used for request routing."""
        return 'rds.aliyuncs.com.DescribeModifyParameterLog.2014-08-15'
| [
"snowyxx@126.com"
] | snowyxx@126.com |
4f60904ee1b4d115733e252787995abcd5e5a4a9 | 1a255b72d424be9bb557f2bf1a3d58bf41408571 | /0x0B-python-input_output/11-student.py | ce30c049801682d2bc56630aea11cb773ce75735 | [] | no_license | wassimbel/holbertonschool-higher_level_programming | b5cbde2a3d0fd37bf934f23554be05af0f5380bd | 301af526ea2e664fd4aea82b64c8940b7e9fd6a4 | refs/heads/master | 2023-02-28T09:33:06.017399 | 2021-02-08T23:49:22 | 2021-02-08T23:49:22 | 259,407,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/python3
""" module - class Student """
class Student():
    """Represent a student with a name and an age."""

    def __init__(self, first_name, last_name, age):
        """Store the student's first name, last name and age."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def to_json(self):
        """Return the dictionary representation of this instance."""
        return vars(self)
| [
"wassim.belhedi1@gmail.com"
] | wassim.belhedi1@gmail.com |
7623abdd14407d403d71964e37efedcc8550f929 | 17277a741082f276a490558fcc735fa529b45a0b | /src/python/rpc_lib/util/headers.py | 8490320b16d1c84dbfe3d33b98194d727996f6a7 | [] | no_license | divar-ir/rpc-lib | 7a11f0a8ed3df9e229bc5944277c9586b5673374 | f32bdc1efb1dca90010f8fdd6fab6b7538007695 | refs/heads/master | 2023-01-12T06:56:31.803993 | 2019-06-19T11:34:52 | 2019-06-19T11:34:52 | 192,557,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from rpc_lib import call_info_pb2
from rpc_lib.common import specialization
from rpc_lib.util.proto_encoding import decode_from_base64
# Names of the internal HTTP headers; the '-bin' suffix marks values that
# carry base64-encoded binary protobufs (decoded by the helpers below).
HTTP_INTERNAL_STATE_HEADER = 'x-internal-state-bin'
HTTP_INTERNAL_TRACE_INFO_HEADER = 'x-internal-trace-info-bin'
def get_state(headers):
    """Decode the specialization state proto from its internal header.

    Args:
        headers: mapping of HTTP header name -> value; the header may be
            absent (handling of None is delegated to decode_from_base64).
    """
    encoded_state = headers.get(HTTP_INTERNAL_STATE_HEADER)
    return decode_from_base64(specialization.create_state_proto(), encoded_state)
def get_trace_info(headers):
    """Decode the TraceInfo proto from its internal header.

    Args:
        headers: mapping of HTTP header name -> value; the header may be
            absent (handling of None is delegated to decode_from_base64).
    """
    encoded_trace_info = headers.get(HTTP_INTERNAL_TRACE_INFO_HEADER)
    return decode_from_base64(call_info_pb2.TraceInfo(), encoded_trace_info)
| [
"mirzazadeh@divar.ir"
] | mirzazadeh@divar.ir |
99831b86797def2356ed377f20ea20834b08bcec | 94d4ccd443a37c8090a84d730d006edef869b816 | /recursion_and_backtracking/rat_in_maze.py | 8fde6e9bcdff326a7021e497c7d29b0c638e1f3d | [] | no_license | sudo-hemant/CP_CipherSchools | e0fc51d6f77f80709a88a7711ef6360f1fdd13e3 | 4f741f5f6fbbb182bd03135fb3180f5a40acbb1e | refs/heads/master | 2023-03-09T20:59:59.704014 | 2021-02-19T15:27:15 | 2021-02-19T15:27:15 | 338,825,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py |
# https://practice.geeksforgeeks.org/problems/rat-maze-with-multiple-jumps-1587115621/1/?track=DSASP-Backtracking&batchId=154
from collections import deque
def solve(n, maze):
    """Solve the jump-maze: print a 0/1 path matrix, or -1 when unreachable.

    Args:
        n: grid dimension.
        maze: n x n grid; maze[i][j] is the maximum jump length from (i, j)
            and 0 marks a blocked cell.
    """
    res = [ [0 for i in range(n)] for j in range(n)]
    if is_path(0, 0, res, n, maze):
        print_sol(n, res)
    else:
        print(-1)
def is_path(i, j, res, n, maze):
    """Backtracking search for a path from (i, j) to (n-1, n-1).

    Marks visited cells in `res` with 1 and un-marks them on backtrack,
    so on success `res` holds exactly the chosen path.
    """
    # Reached the target cell.
    if i == n - 1 and j == n - 1:
        res[i][j] = 1
        return True
    if is_safe(i, j, n, maze):
        res[i][j] = 1
        # Try every allowed jump length, first rightward then downward.
        for jump in range(1, maze[i][j] + 1):
            if jump >= n:
                break
            if is_path(i, j + jump, res, n, maze):
                return True
            if is_path(i + jump, j, res, n, maze):
                return True
        res[i][j] = 0  # dead end: un-mark and backtrack
        return False
    return False
def is_safe(i, j, n, maze):
    """Return True when (i, j) lies inside the n x n grid and the cell is nonzero."""
    inside = 0 <= i < n and 0 <= j < n
    return bool(inside and maze[i][j])
def print_sol(n, sol):
    """Print the top-left n x n part of `sol`, one row per line.

    Each value is followed by one space and each row by a newline,
    matching the original cell-by-cell output exactly.
    """
    for row in sol[:n]:
        print(*row[:n], end=" \n")
if __name__ == "__main__":
    # Input format: number of test cases, then for each case the grid size
    # n followed by n rows of n integers (jump lengths, 0 = blocked).
    t = int(input())
    while(t>0):
        n = int(input())
        maze = [[0 for i in range(n)] for j in range(n)]
        for i in range(n):
            maze[i] = [int(x) for x in input().strip().split()]
        solve(n, maze)
        t=t-1
"sudohemant@gmail.com"
] | sudohemant@gmail.com |
0310f4acd6676b09b41ba7b6b8c976f8351254cd | a00464ca9ad8cca1ba6a1349be2637f287cadd40 | /myTwitter/urls.py | 53fee002f3969ee4d6a1c3b4f8afad30c8a926a1 | [] | no_license | sannee4/myTwitter | 0611e8a413971a2734b68a298e298ae91abfa789 | 12be47962a58e4312addbb32ef481b0f0115dcb7 | refs/heads/master | 2021-02-08T19:57:24.029388 | 2020-03-01T17:18:12 | 2020-03-01T17:18:12 | 244,190,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | """myTwitter URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
# Route the Django admin and delegate every other URL to the twits app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('twits.urls'))
]
| [
"mellnikovaaaa@gmail.com"
] | mellnikovaaaa@gmail.com |
63197e89258eac92b49c04a9513497c8358c6fb0 | 5eb338adeb89578b5bba38806f32fec6f0214630 | /lists/admin.py | 3db2a0f2394c2c19fb30f088d3485111bfbc8dc7 | [] | no_license | rkqhed1212/airbnb_clone | 68172ef7c65a6e768ab5d88c01a04c597636e0ce | 95b64850f0a91caeda7ead2a68ed43f7af8ec094 | refs/heads/master | 2023-05-26T09:47:19.836415 | 2021-06-09T00:18:06 | 2021-06-09T00:18:06 | 356,840,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django.contrib import admin
from . import models
@admin.register(models.List)
class ListAdmin(admin.ModelAdmin):
    """Admin configuration for List objects."""

    # Columns shown on the change list.
    list_display = ("name", "user", "count_rooms")
    search_fields = ("name", )
    # Two-pane selector widget for the many-to-many rooms field.
    filter_horizontal = ("rooms",)
"rkqehd12@gmail.com"
] | rkqehd12@gmail.com |
124a52883b2e8b845f66680efc60602a83c8ce79 | 73e76f353da63506ef2343823f4b676e77ed4bde | /bin/setup.py | 2c0b96ea08db80c83160fe4708844ac9d3605721 | [] | no_license | josecamachop/FCParser | 2b46a11957a5f071f24034e3e51282bbaf73b908 | 959c10679eefcd9479aa8522b63dc054ac5e05cd | refs/heads/master | 2023-08-07T15:10:18.652678 | 2023-07-31T05:44:49 | 2023-07-31T05:44:49 | 98,178,477 | 9 | 5 | null | 2021-06-09T11:26:15 | 2017-07-24T10:23:40 | Python | UTF-8 | Python | false | false | 449 | py | from setuptools import setup
# Package metadata and dependencies for the FCParser distribution.
setup(name='fcparser',
      version='1.0',
      description='Feature as a counter parser',
      url='https://github.com/josecamachop/FCParser',
      author='Alejandro Perez Villegas, Jose Manuel Garcia Gimenez',
      author_email='alextoni@gmail.com, jgarciag@ugr.es',
      license='GPLv3',
      packages=['fcparser','deparser'],
      install_requires=[
          'IPy', 'pyyaml'
      ],
      zip_safe=False)
| [
"josecamacho@ugr.es"
] | josecamacho@ugr.es |
80edbd1d65d545f84f4122c4822dc23a4c57785d | 70d39e4ee19154a62e8c82467ef75b601e584738 | /pyth3/mysql/just_mysql_pandas_things_.py | 84e264411f2cd3f02839fd45febb7a3e86ce9f2e | [] | no_license | babywyrm/sysadmin | 6f2724be13ae7e5b9372278856a8c072073beffb | 2a5f3d29c7529bc917d4ff9be03af30ec23948a5 | refs/heads/master | 2023-08-16T03:50:38.717442 | 2023-08-16T03:05:55 | 2023-08-16T03:05:55 | 210,228,940 | 10 | 5 | null | 2023-05-01T23:15:31 | 2019-09-22T23:42:50 | PowerShell | UTF-8 | Python | false | false | 6,634 | py | # MySQL Querying Using Pandas
# Author: Elena Adlaf
# Version 1.2, 10/16/17
# This Python file shows how to query results from table, 't', in database, 'af', stored on a local MySQL server while
# importing the values directly into a Pandas dataframe.
# The table lists details about pieces created by the custom furniture business, Artfully Functional,
# with fields for ID, size type, year built, labor hours, materials cost, sale prices (wholesale or retail,
# before or after sales tax) and potential profits. A second table, 'a', contains additional information and is
# used to demonstrate queries indexing or joining multiple tables.
# Import modules.
import mysql.connector
import pandas as pd
# Create variables for 1) a connector to the local database with user and password and 2) the read-to-pandas command
# NOTE(review): the password here is a placeholder ('...'); supply real
# credentials (ideally from the environment, not source) before running.
cnx = mysql.connector.connect(user='root', password='...', database='af')
# Short alias: g(sql, con) runs a query and returns the result as a DataFrame.
g = pd.read_sql_query
# To import the entire table, 't', into a Pandas dataframe:
df = g('SELECT * FROM t', cnx)
# Look at the shape of the dataframe and index the first five records for all of the fields.
print(df.shape)
print(df.iloc[0:5, 0:14])
print(df.iloc[0:5, 14:])
# Most tables will likely be too large to import in full, so we can import only the data of interest by
# querying the database through Pandas.
# Return the column names and column info of the table, 't'.
col_names = g('SHOW COLUMNS FROM t', cnx)
print(col_names)
# Select only Name and Retail_High columns and limit the number of records returned.
namehighretail_firstten = g('SELECT Name, Retail_High FROM t LIMIT 10', cnx)
print(namehighretail_firstten)
# Select all unique values from the Yr column.
years = g('SELECT DISTINCT Yr FROM t', cnx)
print(years)
# Return the number of records in the table.
num_tablerows = g('SELECT COUNT(*) FROM t', cnx)
print(num_tablerows)
# Return the number of non-missing values in the Labor column.
num_laborvalues = g('SELECT COUNT(Labor) FROM t', cnx)
print(num_laborvalues)
# Return the number of distinct values in Yr column.
num_years = g('SELECT COUNT(DISTINCT Yr) FROM t', cnx)
print(num_years)
# Select names of all pieces with a Retail_Low value greater than or equal to $500
over500usd = g('SELECT Name FROM t WHERE Retail_Low >= 500', cnx)
print(over500usd)
# Select the ID number of all pieces whose Sale value is null.
idprofitnull = g('SELECT ID FROM t WHERE Sale IS NULL', cnx)
print(idprofitnull)
# Return the number of items whose build year is not 2017.
num_not2017 = g('SELECT COUNT(*) FROM t WHERE Yr <> 2017', cnx)
print(num_not2017)
# Select name and location (disposition) of items with a low retail price over 100 or a low wholesale price over 50.
nameloc_price = g('SELECT Name, Disposition FROM t WHERE Retail_Low > 100 OR Wholesale_Low > 50', cnx)
print(nameloc_price)
# Select the labor hours of items built in 2015 or 2017 and located at Holloway or Art Show
laborhours_notforsale = g("SELECT Labor FROM t WHERE (Yr = 2015 OR Yr = 2017) AND (Disposition = 'Holloway' OR "
"Disposition = 'Art Show')", cnx)
print(laborhours_notforsale)
# Select the class of items whose potential profit (retail high) is between 10 and 50.
class_ptlprofit = g('SELECT Class_version FROM t WHERE Ptnlprofit_rtl_High BETWEEN 10 AND 50', cnx)
print(class_ptlprofit)
# Select the disposition, class, and potential high wholesale profit for the items with disposition as Classic Tres,
# Art Show or For Sale. Calculate the sum of the returned potential profits.
ptlprofit_forsale = g("SELECT Disposition, Class_version, Ptnlprofit_whsle_High FROM t WHERE Disposition IN "
"('Classic Tres', 'Art Show', 'For Sale') AND Ptnlprofit_whsle_High > 0", cnx)
print(ptlprofit_forsale)
print(ptlprofit_forsale.sum(axis=0, numeric_only=True))
# Select the ID, name and class_version designation of all C-class items.
c_class_items = g("SELECT ID, Name, Class_version FROM t WHERE Class_version LIKE 'C%'", cnx)
print(c_class_items)
# Select name and retail prices of all tables. Calculate the lowest and highest table prices.
tables_retail = g("SELECT Name, Retail_Low, Retail_High FROM t WHERE Name LIKE '% Table' AND Retail_Low <> 0", cnx)
print(tables_retail)
print(tables_retail.agg({'Retail_Low' : ['min'], 'Retail_High' : ['max']}))
# Select names and labor hours of tables that don't include side tables.
noside = g("SELECT Name, Labor FROM t WHERE Name LIKE '% Table' AND Name NOT LIKE '%_ide %'", cnx)
print(noside)
# Return the average retail high price.
ave_rtlhigh = g('SELECT AVG(Retail_High) FROM t', cnx)
print(ave_rtlhigh)
# Return the sum of the retail low prices minus the sum of the Materials_Base column aliased as est_profit.
rtllow_minuscost = g('SELECT SUM(Retail_Low) - SUM(Materials_Base) AS est_profit FROM t', cnx)
print(rtllow_minuscost)
# Return the maximum materials base value increased by 20% aliased as max_material.
max_material = g('SELECT MAX(Materials_Base)*1.2 AS max_material FROM t', cnx)
print(max_material)
# Select the name and price of the lowest wholesale priced cabinet that is for sale, aliased as cabinet_low.
cabinet_low = g("SELECT Name, MIN(Wholesale_Low) AS cabinet_low FROM t WHERE Name LIKE '% Cabinet' AND Disposition = "
"'For Sale'", cnx)
print(cabinet_low)
# Select names of pieces built in 2017 in descending order by retail_high price.
high_to_low_priced = g('SELECT Name FROM t WHERE Yr = 2017 ORDER BY Retail_High DESC', cnx)
print(high_to_low_priced)
# Select number of items and years built grouped by year in descending order by count.
groupyear_sortcount = g('SELECT COUNT(*), Yr FROM t GROUP BY Yr ORDER BY COUNT(*) DESC', cnx)
print(groupyear_sortcount)
# Select Class_version categories (A1, B1, C1) aliased as Size and average wholesale low price grouped by Size.
size_aveprice = g("SELECT Class_version AS Size, AVG(Wholesale_Low) FROM t WHERE Class_version IN ('A1', 'B1', "
"'C1') GROUP BY Size", cnx)
print(size_aveprice)
# The items in tables 't' and 'a' have the same ID column, so information can be queried from both simultaneously with
# the JOIN command.
# Return the column names and column info of the table, 'a'.
table_a_colnames = g('SHOW COLUMNS FROM a', cnx)
print(table_a_colnames)
# Select the ID and disposition from table 't' and the corresponding number of website photos for those items from
# table 'a'.
webphotos = g('SELECT ID, Class_version, Disposition, Website FROM t JOIN a ON ID = ID2 WHERE Website > 0', cnx)
print(webphotos)
# After querying is complete, cnx.close() closes the connection to the database.
cnx.close()
| [
"noreply@github.com"
] | noreply@github.com |
70c7e0d3b15a0a5abbbb91472d8ea1a864bf77dc | 8a1ab23b056886965fec2a3e4064c5ed55e22bfb | /домашка/advanced/lesson 4/Diorditsa Ivan/socketserver_with_threads.py | a727041d4f250dcb1c56ffc95a3aced3a8cd17f7 | [] | no_license | django-group/python-itvdn | 5d8a59f06618f993d20d2f60374c36aae4ae8ab0 | 62ef87dfac947ed4bf1f5b6b890461f56814d893 | refs/heads/master | 2021-01-07T11:16:37.996524 | 2020-02-14T13:53:47 | 2020-02-14T13:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | import socketserver
users = []  # handler instances of currently connected clients


class MyTCPHandler(socketserver.BaseRequestHandler):
    """Chat-style handler: relays each client's messages to every other client.

    One instance is created per connection (each in its own thread under a
    threading TCP server); handle() runs for the lifetime of the connection.
    """

    def handle(self):
        users.append(self)
        print(f"this self is {id(self)}")
        print(f"{self.client_address=}")
        try:
            while True:
                b = self.request.recv(1000)
                if not b:
                    # Empty read means the peer closed the connection.
                    break
                # Prefix each message with the *sender's* address and fan it
                # out to every other connected client.
                for user in users:
                    if user.client_address != self.client_address:
                        user.request.send(str(self.client_address).encode() + b': ' + b)
        finally:
            # Stop broadcasting to this (now dead) socket.
            users.remove(self)
class MyTCPServer(socketserver.ThreadingTCPServer):
    # Let bind() succeed right after a restart, even while the old socket
    # still lingers in TIME_WAIT.
    allow_reuse_address = True
    # listen() backlog: how many not-yet-accepted connections may queue up.
    request_queue_size = 10
# Run the threaded chat server on localhost:10001 until interrupted; the
# exit() keeps the unrelated stdlib example below from ever executing.
ss = MyTCPServer(('localhost', 10001), MyTCPHandler)
ss.serve_forever()
exit()
import socketserver
class MyTCPHandler(socketserver.BaseRequestHandler):
    """Per-connection request handler for the echo server.

    The server instantiates one handler per incoming connection and calls
    handle() on it; this handler reads a single message, logs it, and
    echoes it back upper-cased.
    """

    def handle(self):
        # self.request is the TCP socket connected to this client.
        self.data = self.request.recv(1024).strip()
        print(f"{self.client_address[0]} wrote:")
        print(self.data)
        # Echo the same payload back, upper-cased.
        self.request.sendall(self.data.upper())
if __name__ == "__main__":
HOST, PORT = "localhost", 9999
# Create the server, binding to localhost on port 9999
with socketserver.TCPServer((HOST, PORT), MyTCPHandler) as server:
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
| [
"ivan.diorditsa@gmail.com"
] | ivan.diorditsa@gmail.com |
4ddf466dbc7cfe2e9e293e24597ae8ecf2601c0d | b083965e04d5dbc0389c2c2ee8fbbf30d60f7647 | /blog/migrations/0001_initial.py | 1e56b680fcacdd4fae765cb00c4168f2f4bd60bd | [] | no_license | JoannaTamas/moviedb1 | 1e8cce5921302eedfba289e63e2e01c2d0cb2861 | 7ea773c5c0f81129170c0a0afc9d66126bda8171 | refs/heads/master | 2020-04-06T14:45:06.671210 | 2018-11-14T13:00:15 | 2018-11-14T13:00:15 | 157,553,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Generated by Django 2.0.9 on 2018-11-13 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.9) initial migration creating the Movies table."""
    # First migration of this app, hence no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Movies',
            fields=[
                # Implicit auto-increment primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('language', models.CharField(max_length=20)),
                ('director', models.CharField(max_length=30)),
                # Closed set of genres; stored value and human label coincide.
                ('genre', models.CharField(choices=[('Horror', 'Horror'), ('Drama', 'Drama'), ('Comedy', 'Comedy'), ('SciFi', 'SciFi'), ('Action', 'Action'), ('Period', 'Period')], max_length=10)),
            ],
        ),
    ]
| [
"tamas_yoana@yahoo.ro"
] | tamas_yoana@yahoo.ro |
ad362e3742a78615236e5dd290559259c4b7533e | d9f86239a5cac359d09bea5ca0a671994a1ab0a0 | /mars/serialization/numpy.py | 932bac2e6b52bb34b0460c57cc1730963b71b5e0 | [
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | SimonCqk/mars | 43ae0104c1d9681f7d34d3d859fb29d83856b8e0 | 167f1b70e074f01fa8dce4b771c4a42bd73a5587 | refs/heads/master | 2023-03-12T02:28:21.199968 | 2021-03-02T08:46:17 | 2021-03-02T08:46:17 | 319,231,908 | 0 | 0 | Apache-2.0 | 2020-12-07T07:01:34 | 2020-12-07T07:01:33 | null | UTF-8 | Python | false | false | 1,842 | py | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .core import Serializer, pickle_buffers, unpickle_buffers
class NDArraySerializer(Serializer):
    """Serializer for ``numpy.ndarray``.

    Object-dtype arrays fall back to pickle (with out-of-band buffers);
    numeric arrays travel as one raw contiguous byte buffer plus the
    metadata (dtype descr, shape, strides, memory order) needed to
    rebuild the array on the other side.
    """

    serializer_name = 'np_ndarray'

    def serialize(self, obj: np.ndarray):
        # Arrays holding Python objects cannot be shipped as raw bytes;
        # delegate to pickle instead.
        if obj.dtype.hasobject:
            return {'pickle': True}, pickle_buffers(obj)

        order = 'F' if obj.flags.f_contiguous else 'C'
        if order == 'C' and not obj.flags.c_contiguous:
            # Neither C- nor F-contiguous (e.g. a strided view): copy once.
            obj = np.ascontiguousarray(obj)

        header = dict(
            pickle=False,
            descr=np.lib.format.dtype_to_descr(obj.dtype),
            shape=list(obj.shape),
            strides=list(obj.strides),
            order=order,
        )
        raw = memoryview(obj.ravel(order=order).view('uint8').data)
        return header, [raw]

    def deserialize(self, header, buffers):
        if header['pickle']:
            return unpickle_buffers(buffers)
        dtype = np.lib.format.descr_to_dtype(header['descr'])
        # Rebuild the array directly over the received buffer (zero-copy).
        return np.ndarray(
            shape=tuple(header['shape']), dtype=dtype,
            buffer=buffers[0], strides=tuple(header['strides']),
            order=header['order'],
        )


NDArraySerializer.register(np.ndarray)
| [
"noreply@github.com"
] | noreply@github.com |
e30a14b57b614478ccad24bb5f5f92ebf782cfba | e0db388d3f3fef89b27875fb88bd509f5ad70927 | /strStr.py | 1a3413c5f5c2e30adc7ff7e50babd926aae4da78 | [] | no_license | AkiraXD0712/LeetCode-Exercises | 770fff5d10931272cf35776abd9ebaf0cade656a | 3b4d4963bebe7c0b04d420cb1920ad6928615e4a | refs/heads/master | 2020-03-18T19:34:32.168249 | 2018-07-05T08:15:37 | 2018-07-05T08:15:37 | 135,162,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | '''
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string.
This is consistent to C's strstr() and Java's indexOf().
'''
class Solution:
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of `needle` in
        `haystack`, or -1 if it never occurs.  An empty needle matches
        at index 0 (matching C strstr / Java indexOf).

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        width = len(needle)
        if width == 0:
            return 0
        # Only windows that fit entirely inside `haystack` can match.
        for start in range(len(haystack) - width + 1):
            if haystack[start:start + width] == needle:
                return start
        return -1
"akirachen0712@gmail.com"
] | akirachen0712@gmail.com |
b8241865b3838ea090162eb428a1c8b48348b10e | e692a9074d21c456dbdcb938ce7c74d8254f6ad9 | /Module 010/module 010.py | 815c2efdfd0e3789bf37330b86d323acc2079d0b | [] | no_license | Marius-Juston/ECE-110 | e29b08d5a305a315467635a138ef62a1a638e4ed | 962761b9bb23b02cc2a687bc691d568db8c82b31 | refs/heads/master | 2022-05-29T08:17:58.809019 | 2020-05-05T16:48:45 | 2020-05-05T16:48:45 | 261,530,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from matplotlib.figure import Figure
if __name__ == '__main__':
    with open('data-20200502-1713.circuitjs.txt', 'r') as file:
        # Header line: the 5th whitespace-separated token (index 4) is used
        # as the simulation time step, in seconds per sample.
        time_step = float(file.readline().split(' ')[4])
        # Each remaining line holds one integer voltage sample.
        lines = tuple(map(int, file.readlines()))
    x = np.arange(0, len(lines)) * time_step  # time axis for the samples
    fig: Figure = plt.figure(figsize=(11.69, 8.27))  # A4 landscape (inches)
    ax: Axes = fig.gca()
    ax.plot(x, lines)
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Voltage (V)")
    ax.set_title("Audio Output (mjuston2)")
    fig.tight_layout()
    fig.savefig("figure.png")
    # plt.show()
| [
"Marius.juston@hotmail.fr"
] | Marius.juston@hotmail.fr |
2c4e9748a4fe10c33bdca30bdba1637018100b86 | 9ec4bc3cdba9e46fe05712daeec3e35f5b6bb704 | /hallicrafter2/device/ics.py | ce1fe1212d2c1fc2085fe53bbaeb7981d6a1c3fd | [] | no_license | derekmerck/hallicrafter | 04d7d31017079fcc0c9c9361ad7e653f6e0e6418 | b9439bb9f9b311ca1f8a224ce25c64c836901381 | refs/heads/master | 2020-06-02T03:23:23.086094 | 2019-09-03T02:06:03 | 2019-09-03T02:06:03 | 191,018,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | from .device import Device
class SirenIC(Device):
    # Driver for a UM3561 siren-generator IC.
    # See https://www.instructables.com/id/Siren-Generation-using-IC-UM3561/ for the pinout:
    #
    # 1. sel1
    # 2. gnd
    # 3. out -> 10k ohm -> NPN transistor that drives speaker gnd line
    # 4. not connected (testing)
    # 5. active (3-5vin)
    # 6. sel2
    # 7. osc1
    # 8. osc2 bridge -> osc1 with a 220k ohm resistor
    #
    # Sound selection via the S1/S2 wiring:
    #   NC/NC  -> Police (default), 5v/NC -> Fire brigade,
    #   Gnd/NC -> Ambulance,        any/5v -> Machine gun

    class AlarmProfile(object):
        # Symbolic names for the four sounds the chip can produce.
        POLICE = "police"
        FIRE = "fire"
        AMBULANCE = "ambulance"
        MACHINE_GUN = "machine gun"

    def __init__(self, pin_active, pin_sel1, pin_sel2, name="ic_srn0", interval=0.1, *args, **kwargs):
        Device.__init__(self, name=name, interval=interval, *args, **kwargs)
        # CircuitPython-only dependency; imported lazily on purpose.
        import digitalio
        self.pin_active = digitalio.DigitalInOut(pin_active)
        self.pin_active.direction = digitalio.Direction.OUTPUT
        self.pin_sel1 = digitalio.DigitalInOut(pin_sel1)
        self.pin_sel1.direction = digitalio.Direction.OUTPUT
        self.pin_sel2 = digitalio.DigitalInOut(pin_sel2)
        self.pin_sel2.direction = digitalio.Direction.OUTPUT
        # Start silent, with the default (police) sound selected.
        self.data["active"] = False
        self.data["profile"] = SirenIC.AlarmProfile.POLICE

    def write(self):
        """Push the selected profile and active flag out to the chip's pins."""
        profile = self.data["profile"]
        # (sel1, sel2) logic levels selecting each sound.
        selections = {
            SirenIC.AlarmProfile.POLICE: (False, False),
            SirenIC.AlarmProfile.FIRE: (True, False),
            SirenIC.AlarmProfile.AMBULANCE: (False, True),
            SirenIC.AlarmProfile.MACHINE_GUN: (True, True),
        }
        if profile not in selections:
            raise ValueError("Unknown alarm profile {}".format(profile))
        self.pin_sel1.value, self.pin_sel2.value = selections[profile]
        self.pin_active.value = self.data["active"]
        # print("Siren is {}".format(self.pin_active.value))
| [
"derek_merck@brown.edu"
] | derek_merck@brown.edu |
1cf471f736f9047d1985610fbf89b38dffb9bb5d | aeeaf40350a652d96a392010071df8a486c6e79f | /archive/python/Python/binary_search/374.guess-number-higher-or-lower.py | 1f51d98818399ede0698e42d7d7bd0cde96a1879 | [
"MIT"
] | permissive | linfengzhou/LeetCode | 11e6c12ce43cf0053d86437b369a2337e6009be3 | cb2ed3524431aea2b204fe66797f9850bbe506a9 | refs/heads/master | 2021-01-23T19:34:37.016755 | 2018-04-30T20:44:40 | 2018-04-30T20:44:40 | 53,916,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
    def guessNumber(self, n):
        """Find the picked number in [1, n] by binary search over guess().

        The provided guess(num) API returns -1 when the picked number is
        lower than num, 1 when it is higher, and 0 on an exact hit.

        :type n: int
        :rtype: int
        """
        lo, hi = 1, n
        while lo <= hi:
            mid = (lo + hi) // 2
            verdict = guess(mid)
            if verdict == 0:
                return mid
            elif verdict == 1:
                # Picked number is higher: discard the lower half.
                lo = mid + 1
            else:
                # verdict == -1: picked number is lower.
                hi = mid - 1
        return -1  # unreachable when the picked number lies within [1, n]
"luke.zlf@gmail.com"
] | luke.zlf@gmail.com |
4ae49460f06822543fc2ff34e14d8fef115016f7 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/abc.py | 37d2a8d88679def4d589700c441407cc6fa1a0d0 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 6,294 | py | # 2015.11.10 21:32:36 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/abc.py
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
class _C:
    # Dummy class with no base: under Python 2 (this module's target, note
    # the print-statement syntax below) this is an old-style class.
    pass


_InstanceType = type(_C())  # the shared type of old-style class instances
def abstractmethod(funcobj):
    """Mark *funcobj* as an abstract method and return it unchanged.

    Only meaningful on classes whose metaclass is ABCMeta (or derived
    from it): such a class cannot be instantiated until every abstract
    method has been overridden.  The abstract methods themselves remain
    callable through the usual 'super' mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj
class abstractproperty(property):
    """A property subclass whose presence marks the attribute abstract.

    Only meaningful on classes whose metaclass is ABCMeta (or derived
    from it): such a class cannot be instantiated until every abstract
    property has been overridden.

    Read-only usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    A read-write abstract property uses the 'long' form instead:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC. An ABC can be subclassed
    directly, and then acts as a mix-in class. You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).

    NOTE(review): the decompiled __subclasscheck__ in the original file was
    broken (it returned True unconditionally once __subclasshook__ returned
    NotImplemented, leaving the MRO/registry checks unreachable); the logic
    below restores the upstream CPython implementation.
    """

    # Global counter, bumped on every register(); lets per-class negative
    # caches detect that they may be stale.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Collect abstract names declared directly in this class...
        abstracts = set((name for name, value in namespace.items() if getattr(value, '__isabstractmethod__', False)))
        # ...plus those inherited from the bases and still unimplemented here.
        for base in bases:
            for name in getattr(base, '__abstractmethods__', set()):
                value = getattr(cls, name, None)
                if getattr(value, '__isabstractmethod__', False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Per-class caches of known (non-)subclasses, held weakly.
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        # types.ClassType (old-style classes) only exists on Python 2;
        # fall back gracefully so the check also works on Python 3.
        _class_types = (type, getattr(types, 'ClassType', type))
        if not isinstance(subclass, _class_types):
            raise TypeError('Can only register classes')
        if issubclass(subclass, cls):
            return  # already a (possibly virtual) subclass -- nothing to do
        if issubclass(cls, subclass):
            raise RuntimeError('Refusing to create an inheritance cycle')
        cls._abc_registry.add(subclass)
        # Invalidate every ABC's negative cache.
        ABCMeta._abc_invalidation_counter += 1

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        import sys
        out = sys.stdout if file is None else file
        out.write('Class: %s.%s\n' % (cls.__module__, cls.__name__))
        out.write('Inv.counter: %s\n' % ABCMeta._abc_invalidation_counter)
        for name in sorted(cls.__dict__.keys()):
            if name.startswith('_abc_'):
                value = getattr(cls, name)
                out.write('%s: %r\n' % (name, value))

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache check for the common fast path.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances all share the single 'instance' type; use
        # their __class__ instead.
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            if cls._abc_negative_cache_version == ABCMeta._abc_invalidation_counter and subtype in cls._abc_negative_cache:
                return False
            return cls.__subclasscheck__(subtype)
        # __class__ and type() disagree (e.g. a proxy): accept either.
        return cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # 1. Positive cache hit.
        if subclass in cls._abc_cache:
            return True
        # 2. Negative cache; may have to be invalidated after a register().
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # 3. Ask the class hook; NotImplemented means "no opinion".
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # 4. Real subclass via the MRO.
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # 5. Subclass of a registered virtual subclass.
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # 6. Subclass of one of our (real) subclasses.
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; remember the negative result.
        cls._abc_negative_cache.add(subclass)
        return False
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\abc.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:32:36 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
9206cd0f235f493f39979d12a0205b3cff78c29e | c0e0139b500ec059931a73a150073db60a8b073c | /code/main_candidates.py | 096f9c3e6551760d2397a90e97166956f13596eb | [] | no_license | christopher-rowe/spotify_modeling | a3a1bc721694cba080b9a39f17345b0e9605f392 | f52160fbe2aaaa0f15fcef0625c53f4d96e461ce | refs/heads/master | 2022-12-29T10:06:33.248596 | 2020-10-12T01:57:28 | 2020-10-12T01:57:28 | 288,016,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,918 | py |
# File name: main_candidates.py
# Description: Identify and add candidates to data_candidates spotify playlist
# Author: Chris Rowe
import os
import random
import pandas as pd
import numpy as np
import pickle
import spotify_modeling as sm
from config import *
def _genre_start_index(feature_names, audio_features):
    # Index of the first training feature that is not a raw audio feature,
    # i.e. where the one-hot genre columns begin.
    return next(idx for idx, item in enumerate(feature_names)
                if item not in audio_features)


def main():
    """Identify new track candidates and push them to the target playlist.

    Pipeline: load the pre-trained stage-1 models, fit the stage-2 model on
    the current data playlists, then repeatedly sample random tracks from
    Spotify, score them through both stages, and add the top-scoring
    candidates to the configured playlist.
    """
    # import models
    print("Importing models...")
    os.chdir(os.path.dirname(os.getcwd()))
    # Use context managers so the model files are closed after loading.
    with open('saved_models/xgb_playlist_model_wg.sav', 'rb') as model_file:
        xgb_stage1_playlist_model = pickle.load(model_file)
    with open('saved_models/xgb_score_model_wg.sav', 'rb') as model_file:
        xgb_stage1_score_model = pickle.load(model_file)
    print("--Models imported!")
    # get spotify authentication token
    print("Getting the spotify authentication token...")
    auth, token, refresh_token = sm.get_token(username, client_id,
                                              client_secret, redirect_uri, scope)
    print("--Token recieved!")
    # Processing data playlists and fitting stage 2 model
    print("Processing data playlists and fitting stage 2 model...")
    X_data_playlists, y_data_playlists, new_ids = sm.getDataPlaylistXY(auth, token, refresh_token)
    xgb_stage2_model = sm.fitDataPlaylistModel(X_data_playlists, y_data_playlists)
    print("--Stage 2 model ready!")
    # obtain stage 1 and stage 2 training features
    stage1_features_playlist = list(pd.read_csv('training_features/stage1_playlist_training_features.csv', names=['x']).x)
    stage1_features_score = list(pd.read_csv('training_features/stage1_score_training_features.csv', names=['x']).x)
    stage2_features = list(pd.read_csv('training_features/stage2_training_features.csv', names=['x']).x)
    # obtain streaming history ids, dropping missing values
    og_ids = pd.read_csv('data/processed/track_features.csv')['id'].tolist()
    og_ids = [x for x in og_ids if not pd.isnull(x)]
    # combine old and new ids (deduplicated) so known tracks are skipped
    all_ids = list(set(og_ids + new_ids))
    # identify candidates and push to spotify playlist
    print("Obtaining Random Tracks, fitting models, and retaining top candidates...")
    n_iter = 5
    for iteration in range(n_iter):
        print('Iteration: ' + str(iteration) + ' of ' + str(n_iter))
        all_random_tracks = []
        all_random_track_genres = []
        # Sample random tracks until we have 500 unseen ones with features.
        while len(all_random_tracks) < 500:
            print(len(all_random_tracks))
            track_id, track_uri, track_name, track_artist = sm.getRandomTrack(auth, token, refresh_token)
            if track_id not in all_ids:
                features, genres = sm.get_api_features(track_id, auth, token, refresh_token)
                if isinstance(features, dict):
                    # First 11 feature values are the raw audio features.
                    new_record = [track_id, track_uri, track_name, track_artist] + list(features.values())[0:11]
                    all_random_tracks.append(new_record)
                    all_random_track_genres.append(genres)
        columns = ['id', 'uri', 'track', 'artist', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo']
        all_random_tracks = pd.DataFrame(all_random_tracks,
                                         columns=columns)
        all_random_tracks.dropna(inplace=True)
        X_random = all_random_tracks[columns[4:15]]
        # list of audio features used to locate where genre columns begin
        audio_features = ['danceability', 'energy', 'key',
                          'loudness', 'mode', 'speechiness',
                          'acousticness', 'instrumentalness',
                          'liveness', 'valence', 'tempo']
        # index of training features where genres begin, per feature set
        # (for reconciling genres between new tracks and training data)
        stage1_playlist_genre_i = _genre_start_index(stage1_features_playlist, audio_features)
        stage1_score_genre_i = _genre_start_index(stage1_features_score, audio_features)
        stage2_genre_i = _genre_start_index(stage2_features, audio_features)
        # reconcile genres so they match those used in training each model
        genre_dummies = sm.getGenreDummies(all_random_track_genres)
        genre_dummies_stage1_playlist = sm.reconcileGenres(genre_dummies, stage1_features_playlist[stage1_playlist_genre_i:])
        genre_dummies_stage1_score = sm.reconcileGenres(genre_dummies, stage1_features_score[stage1_score_genre_i:])
        genre_dummies_stage2 = sm.reconcileGenres(genre_dummies, stage2_features[stage2_genre_i:])
        # generate stage1 and stage2 X matrices with matching genre columns
        # in the same order as each model's training features
        X_random_stage1_playlist = pd.concat((X_random, genre_dummies_stage1_playlist), axis=1)
        X_random_stage1_playlist = X_random_stage1_playlist[stage1_features_playlist]
        X_random_stage1_score = pd.concat((X_random, genre_dummies_stage1_score), axis=1)
        X_random_stage1_score = X_random_stage1_score[stage1_features_score]
        X_random_stage2 = pd.concat((X_random, genre_dummies_stage2), axis=1)
        X_random_stage2 = X_random_stage2[stage2_features]
        # predict stage 1 outcomes (probability of class 1 / raw score)
        stage1_playlist_p = np.array([item[1] for item in xgb_stage1_playlist_model.predict_proba(X_random_stage1_playlist)])
        stage1_score_p = xgb_stage1_score_model.predict(X_random_stage1_score)
        # predict stage 2 outcomes
        stage2_p = np.array([item[1] for item in xgb_stage2_model.predict_proba(X_random_stage2)])
        # calculate combined 2-stage score and playlist outcomes
        all_random_tracks['stage1_playlist_p'] = stage1_playlist_p
        all_random_tracks['stage1_score_p'] = stage1_score_p
        all_random_tracks['stage2_p'] = stage2_p
        all_random_tracks['total_score'] = stage1_score_p * stage2_p
        all_random_tracks['total_playlist'] = stage1_playlist_p * stage2_p
        # select top 5 candidate URIs (column 1) for each ranking, dedupe,
        # and push them to the target playlist
        candidates_total_score = list(all_random_tracks.sort_values('total_score', ascending=False).iloc[0:5, 1])
        candidates_total_playlist = list(all_random_tracks.sort_values('total_playlist', ascending=False).iloc[0:5, 1])
        candidates_stage2 = list(all_random_tracks.sort_values('stage2_p', ascending=False).iloc[0:5, 1])
        candidates = list(set(candidates_total_score + candidates_total_playlist + candidates_stage2))
        sm.addCandidates(auth, token, refresh_token, candidates, target_playlist)
    print("Candidate search complete, playlist updated!")
if __name__ == '__main__':
main() | [
"chris.lloyd.rowe@gmail.com"
] | chris.lloyd.rowe@gmail.com |
96e13ce85156c34b4c16aa46eb26cb5774458507 | b9e5aebb49734ad47825130529bd64e59f690ecf | /chapter_9/die.py | 6c02c0f3e4e49e928c96c1881f050c10ddd4aaf1 | [] | no_license | mikegirenko/python-learning | dab0f67d990d95035f93720986c84aaf422f7a9f | db9e3f0e3897caf703169d1f14b15a9aa1901161 | refs/heads/master | 2021-07-09T08:03:40.535653 | 2020-08-05T00:13:41 | 2020-08-05T00:13:41 | 169,983,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | from random import randint
class Die:
    """A die with a configurable number of sides (six by default)."""

    def __init__(self, sides=6):
        self.sides = sides  # number of faces; rolls are uniform on 1..sides

    def roll_die(self):
        """Print a random roll and return it (callers may ignore the value)."""
        result = randint(1, self.sides)
        print('\t', result)
        return result
# Ten rolls of each die, written three ways on purpose: a for loop, a
# while loop with a counter, and a while-True loop that breaks.
print('Printing 6 sided roll:')
six_sided_roll = Die()
for i in range(1, 11):
    six_sided_roll.roll_die()
print('Printing 10 sided roll:')
ten_sided_roll = Die(10)
i = 1
while i <= 10:
    ten_sided_roll.roll_die()
    i += 1
print('Printing 20 sided roll:')
twenty_sided_roll = Die(20)
i = 1
while True:
    if i == 11:
        break
    else:
        twenty_sided_roll.roll_die()
        i += 1
| [
"mike.girenko@cybergrx.com"
] | mike.girenko@cybergrx.com |
0ee78e09a3e33d80fdaf3e6cd8ff693dad687420 | 1c92a6a5e0b1cfb57a9736ac0b723ec9ead67043 | /api/models.py | 0fe64a85788844c2d3a90cf1a52d933acf20370e | [
"MIT"
] | permissive | zlalanne/buildnumber.io | c969966e963be685b49258251ce0917407ebc364 | 206c89c26081344269eb717c3695d82337872118 | refs/heads/master | 2021-01-18T13:46:03.972621 | 2016-01-21T07:17:53 | 2016-01-21T07:17:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from __future__ import unicode_literals
import random
from django.db import models
from jsonfield import JSONField
class Account(models.Model):
    # Owning entity: emails, API keys and packages reference it via
    # foreign keys (see the related_name attributes on the other models).
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
class AccountEmail(models.Model):
    # An email address attached to an account; an account may have several,
    # but each address is globally unique.
    created_at = models.DateTimeField(auto_now_add=True)
    account = models.ForeignKey(Account, related_name="emails")
    email = models.EmailField(unique=True)
class ApiKey(models.Model):
    """API key belonging to an account.

    `key` is auto-generated on first save (unless explicitly supplied) as a
    32-hex-digit string encoding 128 random bits.
    """
    created_at = models.DateTimeField(auto_now_add=True)
    account = models.ForeignKey(Account, related_name="api_keys")
    key = models.CharField(max_length=40, unique=True) # md5

    def save(self, *args, **kwargs):
        # Generate the key only on first insert, and only if none was given.
        if not self.pk and not self.key:
            # API keys are credentials: use the OS-backed CSPRNG
            # (SystemRandom) rather than the default Mersenne Twister.
            bits = random.SystemRandom().getrandbits(128)
            self.key = "%032x" % (bits,)
        super(ApiKey, self).save(*args, **kwargs)
class Package(models.Model):
    # A named package whose build numbers are tracked per account.
    # NOTE(review): `name` is not declared unique (even per account) here;
    # presumably uniqueness is enforced elsewhere -- confirm.
    created_at = models.DateTimeField(auto_now_add=True)
    account = models.ForeignKey(Account, related_name="packages")
    name = models.CharField(max_length=255)
class Build(models.Model):
    # One build of a package. `extra` stores arbitrary JSON metadata
    # supplied by the client.
    created_at = models.DateTimeField(auto_now_add=True)
    package = models.ForeignKey(Package, related_name="builds")
    build_number = models.PositiveIntegerField()
    extra = JSONField(max_length=1024)
    class Meta:
        # A build number may appear only once per package.
        unique_together = ('package', 'build_number')
| [
"lois.diqual@gmail.com"
] | lois.diqual@gmail.com |
4a5537829c493633c4f10247ebb6978fcf02f5a0 | 772ae58698fee964c3c96cabe47a81d21faa8ed4 | /src/interpreter/preprocessor.py | 2b795daf6d5eaffbab99e096a8fafb7135454e6e | [] | no_license | pkkim/lisp_interpreter | 7659f18403b87b055ea239a0591cdfe7acadfba5 | a0a18fe87338af3201d773cb5516aa942b7f1064 | refs/heads/master | 2020-05-24T19:25:38.057140 | 2019-09-09T03:57:47 | 2019-09-09T03:57:47 | 187,434,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | def remove_comments(code):
    # Strip Lisp-style line comments: drop everything from the first ';'
    # to the end of each line, preserving line structure.
    result = []
    for line in code.splitlines():
        try:
            # str.index raises ValueError when the line has no ';'.
            comment_index = line.index(';')
        except ValueError:
            # No comment on this line: keep it whole.
            result.append(line)
        else:
            # Keep only the text before the ';'.
            result.append(line[:comment_index])
    return '\n'.join(result)
"paulkimpaul@gmail.com"
] | paulkimpaul@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.