Dataset schema (one row per source file; ranges and class counts as observed in the data):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
adeb96ac3e7ff64f2c40272b484ed1bb79ccdef2
|
787f1d2373e908347a02e87251fd0ade0972c6ce
|
/scripts/starfit
|
da034dfee283884fb071bb00bd528624aed94677
|
[
"MIT"
] |
permissive
|
nonsk131/isochrones
|
247c9b609da28305e7a5406415388f8a002c829f
|
5a18d625cb10f772fbe69fb0f1077ea4dcf88bd4
|
refs/heads/master
| 2021-01-14T08:46:27.047088
| 2016-07-15T18:05:54
| 2016-07-15T18:05:54
| 63,885,299
| 0
| 1
| null | 2016-07-21T16:29:01
| 2016-07-21T16:29:00
| null |
UTF-8
|
Python
| false
| false
| 7,717
|
#!/usr/bin/env python
"""
A command-line program to fit a StarModel using the isochrones package
Input argument is name of a folder that contains a file
called ``star.ini``, which is a config file containing all
the observed properties of the star on which the model should
be conditioned. Multiple folder names can also be passed.
"""
from __future__ import division, print_function
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import sys
import logging
import time
from configobj import ConfigObj
import argparse
from isochrones.starmodel import StarModel, BinaryStarModel, TripleStarModel
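# A hypothetical ``star.ini`` (our sketch; keys and values are illustrative
# only). Single values are parsed as floats and comma-separated pairs as
# (value, uncertainty) tuples, matching the ConfigObj loop in the main block:
#
#   Teff = 5770, 80
#   feh = 0.0, 0.1
#   logg = 4.44, 0.08
#   J = 9.763, 0.03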
def initLogging(filename, logger):
    if logger is None:
logger = logging.getLogger()
else: # wish there was a logger.close()
for handler in logger.handlers[:]: # make a copy of the list
logger.removeHandler(handler)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s: %(message)s')
fh = logging.FileHandler(filename)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
return logger
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Fit physical properties of a star conditioned on observed quantities.')
parser.add_argument('folders', nargs='*', default=['.'])
parser.add_argument('--binary', action='store_true')
parser.add_argument('--triple', action='store_true')
parser.add_argument('--all', action='store_true')
parser.add_argument('--models', default='dartmouth')
parser.add_argument('--emcee', action='store_true')
parser.add_argument('--no_local_fehprior', action='store_true')
parser.add_argument('--plot_only', action='store_true')
parser.add_argument('-o','--overwrite', action='store_true')
parser.add_argument('-v','--verbose', action='store_true')
args = parser.parse_args()
try:
import pymultinest
except ImportError:
        args.emcee = True  # the parser stores the --emcee flag as args.emcee
if args.models=='dartmouth':
from isochrones.dartmouth import Dartmouth_Isochrone
ichrone = Dartmouth_Isochrone()
elif args.models=='padova':
from isochrones.padova import Padova_Isochrone
ichrone = Padova_Isochrone()
elif args.models=='basti':
from isochrones.basti import Basti_Isochrone
ichrone = Basti_Isochrone()
else:
raise ValueError('Unknown stellar models: {}'.format(args.models))
if args.all:
multiplicities = ['single', 'binary', 'triple']
elif args.binary:
multiplicities = ['binary']
elif args.triple:
multiplicities = ['triple']
else:
multiplicities = ['single']
Models = {'single':StarModel,
'binary':BinaryStarModel,
'triple':TripleStarModel}
logger = None #dummy
for i,folder in enumerate(args.folders):
for mult in multiplicities:
Model = Models[mult]
model_filename = '{}_starmodel_{}.h5'.format(args.models, mult)
print('{} of {}: {} ({})'.format(i+1, len(args.folders), folder, mult))
#initialize logger for folder
logfile = os.path.join(folder, 'starfit.log')
logger = initLogging(logfile, logger)
name = os.path.basename(os.path.abspath(folder))
try:
start = time.time()
if args.plot_only:
try:
mod = Model.load_hdf('{}/{}'.format(folder,model_filename),
name=name)
                    except Exception:
pass
else:
# Only try to fit model if it doesn't exist, unless overwrite is set
fit_model = True
try:
mod = Model.load_hdf('{}/{}'.format(folder,model_filename),
name=name)
fit_model = False
                    except Exception:
pass
if fit_model or args.overwrite:
ini_file = os.path.join(folder, 'star.ini')
config = ConfigObj(ini_file)
props = {}
for kw in config.keys():
try:
props[kw] = float(config[kw])
                            except (TypeError, ValueError):
props[kw] = (float(config[kw][0]), float(config[kw][1]))
use_local_fehprior = not args.no_local_fehprior
mod = Model(ichrone, use_emcee=args.emcee, name=name,
**props)
mod.fit(basename='{}/chains/{}-'.format(folder,mult),
verbose=args.verbose, overwrite=args.overwrite)
mod.save_hdf(os.path.join(folder, model_filename))
else:
logger.info('{} exists. Use -o to overwrite.'.format(model_filename))
# Only make triangle plots if they are older
# than the starmodel hdf file
make_triangles = False
for x in ['physical', 'observed']:
f = os.path.join(folder,
'{}_triangle_{}_{}.png'.format(args.models, mult, x))
if not os.path.exists(f):
make_triangles = True
break
else:
t_mod = os.path.getmtime(os.path.join(folder,model_filename))
t_plot = os.path.getmtime(f)
if t_mod > t_plot:
make_triangles=True
if make_triangles or args.plot_only:
triangle_base = os.path.join(folder, '{}_triangle_{}'.format(args.models,
mult))
fig1,fig2 = mod.triangle_plots(triangle_base)
# Make mag plot if necessary.
magplot_file = os.path.join(folder, '{}_mags_{}.png'.format(args.models, mult))
make_magplot = True
if os.path.exists(magplot_file):
if os.path.getmtime(os.path.join(folder, model_filename)) > \
os.path.getmtime(magplot_file) or \
args.plot_only:
pass
else:
make_magplot = False
if make_magplot:
fig = mod.mag_plot()
plt.savefig(os.path.join(folder,'{}_mags_{}.png'.format(args.models,
mult)))
end = time.time()
if args.plot_only:
logger.info('{} starfit successful (plots only) for '.format(mult) +
'{} in {:.1f} minutes.'.format(folder, (end-start)/60))
else:
logger.info('{} starfit successful for '.format(mult) +
'{} in {:.1f} minutes.'.format(folder, (end-start)/60))
except KeyboardInterrupt:
logger.error('{} starfit calculation interrupted for {}.'.format(mult,folder))
raise
            except Exception:
logger.error('{} starfit calculation failed for {}.'.format(mult,folder),
exc_info=True)
|
[
"tim.morton@gmail.com"
] |
tim.morton@gmail.com
|
|
165cdb9d7a07544fe473999e467cd00dad60203f
|
3b7a07ad2d78e8526ad8ae0767530d98aaff9f3e
|
/backend/utils/update_ansible_hosts.py
|
e5bdb8dcfb128017aa99ed6974fcd500c079b546
|
[] |
no_license
|
ImmortalViolet/one-oms
|
ba2281acdd63f35eb907651e5aae240c97c16e8b
|
9b89e2773511cb4f9fe37c4cde79e9e3e47464fe
|
refs/heads/master
| 2022-04-12T17:38:59.052337
| 2020-02-28T04:29:34
| 2020-02-28T04:29:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
# -*- coding: utf-8 -*-
# author: timor
import time
import requests
apollo_conf = {
"pro": {
"config_server_url": "http://10.4.12.2:8080",
"appId": "samanage01-inventory",
"clusterName": "default",
"namespaceName": "pro",
"namespaceType": "txt",
"saveFilename": "/etc/ansible/pro-hosts",
},
"uat": {
"config_server_url": "http://10.4.12.2:8080",
"appId": "samanage01-inventory",
"clusterName": "default",
"namespaceName": "uat",
"namespaceType": "txt",
"saveFilename": "/etc/ansible/uat-hosts",
},
"fat": {
"config_server_url": "http://10.4.12.2:8080",
"appId": "samanage01-inventory",
"clusterName": "default",
"namespaceName": "fat",
"namespaceType": "txt",
"saveFilename": "/etc/ansible/fat-hosts",
},
"dev": {
"config_server_url": "http://10.4.12.2:8080",
"appId": "samanage01-inventory",
"clusterName": "default",
"namespaceName": "dev",
"namespaceType": "txt",
"saveFilename": "/etc/ansible/dev-hosts",
},
}
def load_conf(conf):
    print('Checking release for {}'.format(conf["namespaceName"]))
release_url = '{}/configs/{}/{}/{}.{}'.format(conf["config_server_url"], conf["appId"], conf["clusterName"], conf["namespaceName"], conf["namespaceType"])
html = requests.get(release_url)
content = html.json()['releaseKey']
if not check_file_release(content, conf["namespaceName"]):
        print('Release updated for {}'.format(conf["namespaceName"]))
        print('Loading config for {}'.format(conf["namespaceName"]))
conf_url = '{}/configfiles/json/{}/{}/{}.{}'.format(conf["config_server_url"], conf["appId"], conf["clusterName"], conf["namespaceName"], conf["namespaceType"])
html = requests.get(conf_url)
content = html.json()['content']
        print('Saving config {} ==> {}'.format(conf["namespaceName"], conf["saveFilename"]))
save_file(conf["saveFilename"], content)
else:
        print('Release unchanged for {}'.format(conf["namespaceName"]))
def save_file(filename, content):
with open(filename, 'w') as fn:
fn.write(content)
def check_file_release(content, namespaceName):
release_file = namespaceName + '.release'
try:
with open(release_file, 'r') as fn:
if content == fn.read():
return True
else:
return False
except Exception as e:
return False
finally:
with open(release_file, 'w') as fn:
fn.write(content)
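# Flow summary (our gloss on load_conf above): the /configs/... endpoint returns
# a 'releaseKey' used only for change detection; when it differs from the cached
# .release file, the /configfiles/json/... endpoint is queried for the actual
# 'content', which is written to the per-environment saveFilename.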
if __name__ == '__main__':
while True:
time.sleep(5)
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
        for conf in apollo_conf.values():
load_conf(conf)
|
[
"itimor@126.com"
] |
itimor@126.com
|
4d96020baa1dfde6710a9247ae12d7ac774a9592
|
70bdecc19c4017d5bce1cdabd2fc1c8a97b9079c
|
/hello.py
|
6c62f8b712b55051d5540d063bbb5e660a22b570
|
[] |
no_license
|
mahesh4555/sdms
|
521b9f016fa78e773caea7a603958872dc45e67d
|
e0fa480a38e1c907cdbc4412a02340efbe62c422
|
refs/heads/master
| 2020-04-14T16:28:16.100779
| 2019-01-03T10:01:58
| 2019-01-03T10:01:58
| 163,952,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
#!/usr/bin/python
import cgi
import mysql.connector
mydb=mysql.connector.connect(host="localhost", user="root", passwd="mieupro", database="student")
mycursor = mydb.cursor(dictionary=True)
mycursor.execute("SELECT User_name FROM Login_details ")
User_name = mycursor.fetchall()
print('''Content-type: text/html\r\n\r\n
<html>
<head>
<link rel='stylesheet' type='text/css' href='css_file.css'>
</head>
<body>
<h1> Welcome to Madras Institute of Technology </h1>
<div>
<form method='post' action='hello2.py'>
<label for="name">User Name</label><br>
<select id="name" name="name">''')
for i in User_name:
print('<option value="%s" > %s </option>'%(i['User_name'],i['User_name']))
print('''</select><br>
<label for="password">Password</label><br>
<input type="password" id="password" name="password" placeholder="Enter Password" required>
<br>
<input type="submit" value="LOGIN">
</form>
</div>
</body></html>''')
|
[
"noreply@github.com"
] |
mahesh4555.noreply@github.com
|
686c784efe388198fa39f36057c42d01e0e4dac4
|
ee9f924fc571066a67e972782c985ce761d07633
|
/project/settings/development.py
|
0457667aaf8ea099eb5134f44b8aadd05a7ad2e2
|
[
"MIT"
] |
permissive
|
teopeurt/ddash2013
|
56f497d61c4cfc30d59a1ecaed496bfa10f26023
|
d1fcd95311ab51955d5ab9c1a7ba3315c6df1afa
|
refs/heads/master
| 2021-01-15T11:45:42.171597
| 2013-10-21T07:09:03
| 2013-10-21T07:09:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
import os
from .common import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'sqlite3.db'),
}
}
for i, middleware in enumerate(MIDDLEWARE_CLASSES):
if 'CommonMiddleware' in middleware:
mcs = list(MIDDLEWARE_CLASSES)
mcs.insert(i + 1, 'debug_toolbar.middleware.DebugToolbarMiddleware')
MIDDLEWARE_CLASSES = tuple(mcs)
INSTALLED_APPS += ('debug_toolbar',)
break
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
INTERNAL_IPS = ('127.0.0.1',)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
[
"dima@kukushkin.me"
] |
dima@kukushkin.me
|
d67dbb696013abf76678662d5440d940cdac7d67
|
e0895ca800f8486f5d6ede6af4106b5b60ba9c86
|
/db_tools/import_category_data.py
|
c403a6cfb96b085f07149207f216497ba9f8d31f
|
[] |
no_license
|
zhanxiangyu/MxShop
|
613fcb16849ab6ff3f54d360e4a8e110701a9e2d
|
4650f5f56fa456413ddc73290db21e7dce4fad93
|
refs/heads/master
| 2022-12-12T05:17:58.749603
| 2018-03-18T16:08:18
| 2018-03-18T16:08:18
| 118,477,285
| 0
| 0
| null | 2022-12-08T00:55:22
| 2018-01-22T15:41:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
# -*- coding:utf-8 -*-
# __author__ = 'zhan'
# __date__ = '18-1-28 6:13 p.m.'
import sys
import os
import django
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(pwd, '..'))
from db_tools.data.category_data import row_data
# same settings bootstrap as in manage.py
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MxShop.settings")
# start django
django.setup()
from goods.models import GoodsCategory
for lev1_cat in row_data:
lev1_install = GoodsCategory()
lev1_install.code = lev1_cat['code']
lev1_install.name = lev1_cat['name']
lev1_install.category_type = 1
lev1_install.save()
for lev2_cat in lev1_cat['sub_categorys']:
        lev2_instance = GoodsCategory()
        lev2_instance.name = lev2_cat['name']
        lev2_instance.code = lev2_cat['code']
        lev2_instance.category_type = 2
        lev2_instance.parent_category = lev1_install
        lev2_instance.save()
for lev3_cat in lev2_cat['sub_categorys']:
lev3_install = GoodsCategory()
lev3_install.name = lev3_cat['name']
lev3_install.code = lev3_cat['code']
lev3_install.category_type = 3
            lev3_install.parent_category = lev2_instance
lev3_install.save()
print('insert data over!!!')
|
[
"1033432955@qq.com"
] |
1033432955@qq.com
|
969b24c1b1a028a1fb0d5a602d96f4e6bdd3be2b
|
c2c4133012f9cfeb361e7773a8645dd066fc3192
|
/src/imu_read.py
|
11f89e05bf2639cf3d5ebb7038e6b3a4396d5126
|
[] |
no_license
|
larsenkw/sensors
|
bf4c4c55bbf6c1f0dd22d486c97d0494a2464e1d
|
3a0f02f5ff9bc7ae0a615a4bfd79f950011b2d02
|
refs/heads/master
| 2020-03-27T18:56:20.359235
| 2019-07-16T16:59:50
| 2019-07-16T16:59:50
| 146,954,710
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,948
|
py
|
#!/usr/bin/env python
''' Node for converting the IMU (Adafruit BNO055) messages read by imu_read into
standard ROS messages. The Arduino does not have enough memory to use the
standard ROS messages, so the values are sent over as arrays. This node
exists as a separate program to allow the IMU data to be read by the Arduino
and then converted into the correct format. There is a version of that same
code on the Raspberry Pi which will still work with this node. This way we
    can seamlessly switch between using the Pi or the Arduino if need be.'''
# IMPORTANT!
# The calibration bytes need to be placed in this order:
# accel_offset_x
# accel_offset_y
# accel_offset_z
# mag_offset_x
# mag_offset_y
# mag_offset_z
# gyro_offset_x
# gyro_offset_y
# gyro_offset_z
# accel_radius
# mag_radius
import sys
import struct
import time # for sleep, time
import numpy as np
from array import array
import RPi.GPIO as gpio
import rospy
from sensors.msg import ImuArray, ImuMag, ImuCalibStatus, ImuCalibration
from Adafruit_BNO055 import BNO055
class Calibration():
def __init__(self, ax=0.0, ay=0.0, az=0.0, ar=0.0, gx=0.0, gy=0.0, \
gz=0.0, mx=0.0, my=0.0, mz=0.0, mr=0.0):
self.accel_offset_x = ax
self.accel_offset_y = ay
self.accel_offset_z = az
self.accel_radius = ar
self.gyro_offset_x = gx
self.gyro_offset_y = gy
self.gyro_offset_z = gz
self.mag_offset_x = mx
self.mag_offset_y = my
self.mag_offset_z = mz
self.mag_radius = mr
class Quat():
def __init__(self, qx=0.0, qy=0.0, qz=0.0, qw=0.0):
self.qx = qx
self.qy = qy
self.qz = qz
self.qw = qw
class Vec():
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
def bytes_from_calibration(calibration):
'''This function converts the calibration parameters into a list of
bytes as required by Adafruits set_calibration() method.'''
calibration_list = [calibration.accel_offset_x, \
calibration.accel_offset_y, \
calibration.accel_offset_z, \
calibration.mag_offset_x, \
calibration.mag_offset_y, \
calibration.mag_offset_z, \
calibration.gyro_offset_x, \
calibration.gyro_offset_y, \
calibration.gyro_offset_z, \
calibration.accel_radius, \
calibration.mag_radius]
# Convert integers into a list of 22 bytes (11 int16 values in Hex form)
int16_list = [struct.pack('h', x) for x in calibration_list]
# combine into one string to easily divide into bytes in the next line
bytes_joined = "".join(int16_list)
byte_list = list(bytes_joined)
# Convert hex character code bytes into values
cal_bytes = [ord(x) for x in byte_list]
return cal_bytes
def calibration_from_bytes(cal_bytes):
'''This function converts a list of int16 bytes back into a list of
integer values'''
# Convert numbers into unsigned byte Hex representation
byte_list = [struct.pack('B', x) for x in cal_bytes]
# Join into a single string for easily unpacking in the next line
bytes_joined = "".join(byte_list)
# Convert Hex string of 22 bytes into 11 integers
calibration_list = struct.unpack('hhhhhhhhhhh', bytes_joined)
calibration = Calibration()
calibration.accel_offset_x = calibration_list[0]
calibration.accel_offset_y = calibration_list[1]
calibration.accel_offset_z = calibration_list[2]
calibration.mag_offset_x = calibration_list[3]
calibration.mag_offset_y = calibration_list[4]
calibration.mag_offset_z = calibration_list[5]
calibration.gyro_offset_x = calibration_list[6]
calibration.gyro_offset_y = calibration_list[7]
calibration.gyro_offset_z = calibration_list[8]
calibration.accel_radius = calibration_list[9]
calibration.mag_radius = calibration_list[10]
return calibration
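# Round-trip sanity sketch (ours, not from the original source): the 11 int16
# calibration values pack into 22 bytes and unpack back unchanged, e.g.
#   cal = Calibration(ax=-24, ay=0, az=0, ar=1000, gx=0, gy=0, gz=0,
#                     mx=0, my=0, mz=0, mr=480)
#   restored = calibration_from_bytes(bytes_from_calibration(cal))
#   restored.accel_offset_x == -24 and restored.mag_radius == 480
# (struct.pack('h', ...) requires integer values, hence the ints above.)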
#======================================================================#
# Main Class
#======================================================================#
class ImuRead():
def __init__(self):
#===== Setup ROS node, publishers, and messages =====#
rospy.init_node("imu_test_pi")
self.imu_data = ImuArray()
self.imu_pub = rospy.Publisher("/imu/data_array", ImuArray, queue_size=1)
self.imu_mag = ImuMag()
self.mag_pub = rospy.Publisher("/imu/mag_array", ImuMag, queue_size=1)
self.imu_status = ImuCalibStatus()
self.status_pub = rospy.Publisher("/imu/status", ImuCalibStatus, queue_size=1)
self.imu_calib = ImuCalibration()
self.calib_pub = rospy.Publisher("/imu/calibration", ImuCalibration, queue_size=1)
self.rate = 30 # publish at 30 Hz
self.save_time = time.time()
#===== Set GPIO =====#
gpio.setmode(gpio.BOARD)
        # resetting the board causes problems, so don't
#======================================================================#
# IMU Initialization
#======================================================================#
#===== Set calibration defaults
self.calibration = Calibration()
#===== Read in configuration parameters
calibration_param_names = ["accel_offset_x", "accel_offset_y", "accel_offset_z", "accel_radius", \
"gyro_offset_x", "gyro_offset_y", "gyro_offset_z", \
"mag_offset_x", "mag_offset_y", "mag_offset_z", "mag_radius"]
for param in calibration_param_names:
if rospy.has_param("imu/calibration/" + param):
                setattr(self.calibration, param, rospy.get_param('imu/calibration/' + param))
else:
                print("No '{0}' found, using {1}".format(param, getattr(self.calibration, param)))
#===== Begin BNO055
self.serial_port = "/dev/serial0"
self.bno = BNO055.BNO055(serial_port=self.serial_port)
# Initial mode should be OPERATION_MODE_M4G so magnetometer can align
# without calibration, then after loading calibration change mode to
# OPERATION_MODE_NDOF
if not self.bno.begin(BNO055.OPERATION_MODE_M4G):
raise RuntimeError("Failed to initialize BNO055. Check sensor connection.")
#===== Upload calibration parameters
# Convert calibration to bytes
initial_cal = bytes_from_calibration(self.calibration)
# Upload to IMU
self.bno.set_calibration(initial_cal)
# Change mode to OPERATION_MODE_NDOF so sensor data is fused to give
# absolute orientation
self.bno.set_mode(BNO055.OPERATION_MODE_NDOF)
rospy.loginfo("IMU initialization successful.")
    #======================================================================#
    # Main Loop
    #======================================================================#
def loop(self):
#===== Read IMU data
quat = Quat()
ang = Vec()
lin = Vec()
mag = Vec()
quat.qx, quat.qy, quat.qz, quat.qw = self.bno.read_quaternion()
ang.x, ang.y, ang.z = self.bno.read_gyroscope()
lin.x, lin.y, lin.z = self.bno.read_accelerometer()
mag.x, mag.y, mag.z = self.bno.read_magnetometer()
#===== Convert to ROS message
# IMU data
self.imu_data.data[0] = quat.qw
self.imu_data.data[1] = quat.qx
self.imu_data.data[2] = quat.qy
self.imu_data.data[3] = quat.qz
self.imu_data.data[4] = ang.x
self.imu_data.data[5] = ang.y
self.imu_data.data[6] = ang.z
self.imu_data.data[7] = lin.x
self.imu_data.data[8] = lin.y
self.imu_data.data[9] = lin.z
# Magnetometer data
self.imu_mag.data[0] = mag.x
self.imu_mag.data[1] = mag.y
self.imu_mag.data[2] = mag.z
# Calibration Status
self.imu_status.system, self.imu_status.gyro, self.imu_status.accel, self.imu_status.mag = self.bno.get_calibration_status()
# Calibration parameters
# (if system status is 3 and last save time is >60sec)
        if ((self.imu_status.system == 3) and (time.time() - self.save_time) > 60.0):
            self.calibration = calibration_from_bytes(self.bno.get_calibration())
self.imu_calib.data[0] = self.calibration.accel_offset_x
self.imu_calib.data[1] = self.calibration.accel_offset_y
self.imu_calib.data[2] = self.calibration.accel_offset_z
self.imu_calib.data[3] = self.calibration.accel_radius
self.imu_calib.data[4] = self.calibration.gyro_offset_x
self.imu_calib.data[5] = self.calibration.gyro_offset_y
self.imu_calib.data[6] = self.calibration.gyro_offset_z
self.imu_calib.data[7] = self.calibration.mag_offset_x
self.imu_calib.data[8] = self.calibration.mag_offset_y
self.imu_calib.data[9] = self.calibration.mag_offset_z
self.imu_calib.data[10] = self.calibration.mag_radius
self.save_time = time.time()
self.calib_pub.publish(self.imu_calib)
#===== Publish
self.imu_pub.publish(self.imu_data)
self.mag_pub.publish(self.imu_mag)
self.status_pub.publish(self.imu_status)
def spin(self):
r = rospy.Rate(self.rate)
while not rospy.is_shutdown():
self.loop()
r.sleep()
if __name__ == "__main__":
try:
imu = ImuRead()
imu.spin()
except rospy.ROSInterruptException:
pass
finally:
gpio.cleanup()
|
[
"kylew.larsen@gmail.com"
] |
kylew.larsen@gmail.com
|
0ed3275e29ab2f8b2a80b6b7dcfc68dcc495a4e8
|
652b8d3ef73ebbacc68eaaf9b38840ef6d5ed7cd
|
/Code/Maze/maze.py
|
e35ee3818c503118612296754a8d5008bbaed134
|
[] |
no_license
|
qiuyue1993/Reinforcement-Learning-Practice
|
b316e9b82416397953d5be8083bb851d5417327a
|
0c298776fae23f4740b9756bbe382a040a220d62
|
refs/heads/master
| 2020-05-23T20:12:53.425094
| 2019-07-09T01:25:41
| 2019-07-09T01:25:41
| 186,925,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
from maze_env import Maze
from RL_brain import QLearningTable
def update():
for episode in range(100):
observation = env.reset()
while True:
env.render()
action = RL.choose_action(str(observation))
observation_, reward, done = env.step(action)
            RL.learn(str(observation), action, reward, str(observation_))
observation = observation_
if done:
break
print("Game Over!")
env.destroy()
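# For reference (our paraphrase; the implementation lives in RL_brain):
# QLearningTable.learn is expected to apply the standard Q-learning update
#   Q(s, a) <- Q(s, a) + lr * (r + gamma * max_a' Q(s', a') - Q(s, a))
# with the next state s' passed as the fourth argument above.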
if __name__=="__main__":
env = Maze()
RL = QLearningTable(actions=list(range(env.n_actions)))
env.after(100,update)
env.mainloop()
|
[
"noreply@github.com"
] |
qiuyue1993.noreply@github.com
|
51f715f1c45f6fcedf092efef860b6393b2c4769
|
3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2
|
/src/Week3/String Methods/String Edits.py
|
b4e0ea2ff9f46a21b11a95b3d8d9080d9d152bf4
|
[] |
no_license
|
theguyoverthere/CMU15-112-Spring17
|
b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8
|
b8287092b14e82d2a3aeac6c27bffbc95382eb34
|
refs/heads/master
| 2021-04-27T08:52:45.237631
| 2018-10-02T15:38:18
| 2018-10-02T15:38:18
| 107,882,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
print("This is nice. Yes!".lower())
print("So is this? Sure!!".upper())
print(" Strip removes leading and trailing whitespace only ".strip())
print("This is nice. Really nice.".replace("nice", "sweet"))
print("This is nice. Really nice.".replace("nice", "sweet", 1)) # count = 1
print("----------------")
s = "This is so so fun!"
t = s.replace("so ", "")
print(t)
print(s) # note that s is unmodified (strings are immutable!)
|
[
"tariqueanwer@outlook.com"
] |
tariqueanwer@outlook.com
|
86829936832555018cae2aab808bc61a9a58eb37
|
705e37c3e15825962d2ccd87608d23de58919dad
|
/prueba_archivo.py
|
bf6cfc06eb89ced33a20d8ecb6ca18f5f405dc25
|
[] |
no_license
|
dcabello/aprendizaje_con_python
|
d34cd028717c37776384acf30cd7a4db0fec4b63
|
d7d23833bf2ff7a699884085aa9edc017e3c3a90
|
refs/heads/master
| 2022-11-15T10:28:22.022780
| 2020-07-02T09:30:56
| 2020-07-02T09:30:56
| 276,597,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
"""Mi primer archivo"""
print(__doc__)
|
[
"noreply@github.com"
] |
dcabello.noreply@github.com
|
96524fe980b5c8e70660e583d71cde6c7bdce4e6
|
8fc404bdc056315f47dbc75658d3e1a1ad3d00d3
|
/test.py
|
0b90fa3f08e0fad49e47e83865cec4c4c14bf1e3
|
[] |
no_license
|
bporcel/ScheduleNotificator
|
77c5db9eb3a06547d5a992ed76f07e777962980c
|
2afe4c7ca9c29d5d3b17aaf10ff56f70ff548398
|
refs/heads/master
| 2023-02-17T04:27:37.025285
| 2021-01-19T07:30:48
| 2021-01-19T07:30:48
| 330,898,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
import subprocess
import time
import json
from datetime import datetime
def getSchedule():
try:
with open('/home/hacko/dev/repositories/day2day/schedule.json') as readContent:
return json.load(readContent)
    except (IOError, ValueError):
return False
def sendNotification(hour, schedule):
currentTime = datetime.now().strftime('%H:%M')
if hour in schedule.keys():
if not schedule[hour]['notified']:
title = schedule[hour]['title'] + ' ' + currentTime
body = schedule[hour]['body']
subprocess.Popen(['notify-send', title, body])
schedule[hour]['notified'] = True
def checkTime(schedule):
    seconds = str(datetime.now().second)
    print(seconds)
    sendNotification(seconds, schedule)
def scheduleNotificator(schedule):
    # loop instead of recursing; unbounded recursion would eventually
    # exceed Python's recursion limit
    while True:
        checkTime(schedule)
        time.sleep(1)
weekDay = datetime.today().weekday()
if weekDay != 5 and weekDay != 6:
# try:
schedule = getSchedule()
if schedule:
        print('Scheduler started successfully')
scheduleNotificator(schedule)
else:
        print('Could not find a schedule for today')
# except:
# print('Error during program execution')
|
[
"codamming@gmail.com"
] |
codamming@gmail.com
|
4f57bb5641b4d6779e0a30d1d6f0908bb87a6d8c
|
ff8103f0dc01fe33bc9ebdb90132242d6e34eaf6
|
/Sample/Sockets/UdpServer1.py
|
235b2c1b9acb0a01d6cd9fd9994f68127dbfbbf4
|
[] |
no_license
|
KumaKuma0421/PatchWorks
|
866aec10e1b04d2d0bda2d8ccd646a31db8e2b35
|
22bd8c0cce0b73ad7c20c2817f734c5cdf54345c
|
refs/heads/master
| 2023-01-06T21:04:25.248769
| 2020-11-03T07:14:14
| 2020-11-03T07:14:14
| 295,703,340
| 0
| 0
| null | 2020-11-03T07:14:15
| 2020-09-15T11:18:42
|
Python
|
UTF-8
|
Python
| false
| false
| 317
|
py
|
#
# UDP receiver
# see https://qiita.com/__init__/items/5c89fa5b37b8c5ed32a4
#
import socket
HOST = '127.0.0.1'
PORT = 50007
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.bind((HOST, PORT))
while True:
data, addr = s.recvfrom(1024)
print("data: {}, addr: {}".format(data, addr))
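# A matching client sketch for manual testing (ours, not part of the repo):
#   with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as c:
#       c.sendto(b"hello", (HOST, PORT))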
|
[
"noreply@github.com"
] |
KumaKuma0421.noreply@github.com
|
0b6d7046eb4eb6ddcc96d07d95abf3a3c8408c22
|
20ee5aabea06a6fbe97e91d5b7f8e55aba5715df
|
/ApiV1/urls.py
|
bd93d640ab5d0532be678cbd0907b35211ac6fb8
|
[] |
no_license
|
scriptyang/myblog_v2
|
870ea6664b2a4537bcf93e37af659d177c392b96
|
8d38e27dcb6c2f535a3fc5c7e14a447bba105b23
|
refs/heads/master
| 2020-06-20T14:15:48.448882
| 2020-05-15T08:52:33
| 2020-05-15T08:52:33
| 197,147,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from django.urls import path
from .views import *
urlpatterns = [
    # user info
    path('user_list/', UserInfo.as_view(), name='user_info'),
    # service info
    path('Service_info/', ServiceInfo.as_view(), name='service_info'),
    # login authentication
path('LoginAuth/', LoginAuth.as_view(), name='login'),
]
|
[
"scriptyang@sina.com"
] |
scriptyang@sina.com
|
0259a750464eee4b339b86e377d9abe72faa9190
|
7db18d2e7445d02aa555f028b2eeda0e9fb74340
|
/ch23/ordered_linked_list.py
|
13404e2518093effabc52e3c3d7392a5b54c3309
|
[] |
no_license
|
MrHighHandZhao/2017A2CS
|
9a99ad452055e86046ff33d68afe893f3804a7f0
|
a6040bf29b18c93dcd6fd0e4865593555eb988f1
|
refs/heads/master
| 2021-09-10T16:34:27.050399
| 2018-03-29T11:23:16
| 2018-03-29T11:23:16
| 114,727,605
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,750
|
py
|
# S3C2 Carl Zhao Computer Science Homework
NullPointer = -1
class Node():
    def __init__(self):
        self.Data = ""
        self.NPointer = NullPointer
class linkedList():
    def __init__(self):
        self.SPointer = NullPointer  # start of the ordered list
        self.FPointer = 0            # head of the free-node list
        self.record = []
        for i in range(10):
            newNode = Node()
            newNode.NPointer = i + 1  # chain the free nodes together
            self.record.append(newNode)
        self.record[9].NPointer = NullPointer  # last free node ends the chain
    def insertNode(self, NewItem):
        if self.FPointer != NullPointer:
            # take a node from the free list
            NewPointer = self.FPointer
            self.record[NewPointer].Data = NewItem
            self.FPointer = self.record[self.FPointer].NPointer
            # walk the list to find the insertion point, keeping it ordered
            PPointer = NullPointer
            CPointer = self.SPointer
            while CPointer != NullPointer and self.record[CPointer].Data < NewItem:
                PPointer = CPointer
                CPointer = self.record[CPointer].NPointer
            if PPointer == NullPointer:
                # insert at the front
                self.record[NewPointer].NPointer = self.SPointer
                self.SPointer = NewPointer
            else:
                # splice in after PPointer
                self.record[NewPointer].NPointer = self.record[PPointer].NPointer
                self.record[PPointer].NPointer = NewPointer
    def findNode(self, NewItem):
        CPointer = self.SPointer
        while CPointer != NullPointer and self.record[CPointer].Data != NewItem:
            CPointer = self.record[CPointer].NPointer
        return CPointer
    def outputNode(self):
        CPointer = self.SPointer
        while CPointer != NullPointer:
            print(self.record[CPointer].Data, end=",")
            CPointer = self.record[CPointer].NPointer
    def printList(self):
        for i in range(10):
            print(self.record[i].Data, self.record[i].NPointer)
l = linkedList()
l.insertNode(5)
l.insertNode(25)
l.insertNode(15)
l.insertNode(35)
l.printList()
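# Expected output sketch (ours): printList shows the first four slots holding
# 5, 25, 15, 35 with next-pointers chaining them in sorted order
# 5 -> 15 -> 25 -> 35; the remaining six slots stay on the free list.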
|
[
"34653690+MrHighHandZhao@users.noreply.github.com"
] |
34653690+MrHighHandZhao@users.noreply.github.com
|
ea1ab812afe159437876b0e417ff2fe3c5aa5fd4
|
3750c7dc31addfd3195dbbc5698b4c34c738ca78
|
/manage.py
|
3eaafaddb9004ae4a25640e556ccb962017826b4
|
[] |
no_license
|
deiner582/Vortal
|
3adc3a9cf68150873b2f3a05a9eac07585a87dde
|
b1079a9629457c142004332c55888282d6aef703
|
refs/heads/master
| 2020-12-25T18:20:38.692139
| 2015-06-30T19:34:04
| 2015-06-30T19:34:04
| 35,785,147
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vortal.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"metallica082011@hotmail.com"
] |
metallica082011@hotmail.com
|
3597912a9add6ca33b344e9250060a3dccbb25f9
|
656fecef4a16c07c0cebf954908331a3f5ed23fb
|
/three_sum_to_zero.py
|
d6ef0f82f9b2dde6ea513db1ef9883ae855b8471
|
[] |
no_license
|
linchuyuan/leetcode
|
d5fdd68c259ff92e2adb7908d5f17d885509f2fe
|
eb4520464a758962731dcfceafefc812f6b6f844
|
refs/heads/master
| 2021-01-20T17:40:28.572630
| 2016-07-20T05:02:14
| 2016-07-20T05:02:14
| 61,839,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# Given an array S of n integers, find three integers in S whose sum is closest to the target, e.g. for [-1,2,1,4] and target = 1, return [-1,2,1].
# Approach: sort, then for each anchor element scan the remainder with two
# pointers, tracking the triple whose sum is closest to the target.
def three_sum_to_zero(array, target=0):
    array = sorted(array)
    best = None
    for k in range(len(array) - 2):
        lo, hi = k + 1, len(array) - 1
        while lo < hi:
            total = array[k] + array[lo] + array[hi]
            if best is None or abs(total - target) < abs(best[0] - target):
                best = (total, [array[k], array[lo], array[hi]])
            if total == target:
                return best[1]
            elif total < target:
                lo += 1
            else:
                hi -= 1
    return best[1] if best is not None else "not found"
print(three_sum_to_zero([1, -2, -2, 4]))
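# Note (ours): sorting plus the two-pointer scan runs in O(n^2) time, versus
# O(n^3) for checking every triple.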
|
[
"lin.chu@yahoo.com"
] |
lin.chu@yahoo.com
|
a615255c5d7631f31f8863e1c8229310da78acfb
|
83b7b2cc1d6ca4f1a84e7229c79f5479e1cd07e1
|
/webgateway/apps/permissions/tests/test_models.py
|
5a95f6a7000f36bb47ce6a06dd3f53d3586d4bd6
|
[] |
no_license
|
Almlett/HOBBY-ApiGateway
|
6ea14ec7cfcfafcfcf8584d2ff21e3988c510c7b
|
32ad29f35b1e1164b961e429bb5472175db847da
|
refs/heads/main
| 2023-04-11T13:59:00.606385
| 2021-04-22T16:33:13
| 2021-04-22T16:33:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
"""
test for app users
"""
import pytest
from permissions.models import Permission, Profile, ProfilePermission # pylint: disable=relative-beyond-top-level
pytestmark = pytest.mark.django_db
class TestPermission:
"""
Test Permission Model
"""
pytestmark = pytest.mark.django_db
def test_creation(self, permission_factory,): # pylint: disable=no-self-use
"""
test created permission
"""
permission_created = permission_factory()
permission = Permission.objects.get(id = permission_created.id)
assert permission.name == "permission_test", 'Name should be permission_test'
assert str(permission) == "permission_test"
pytestmark = pytest.mark.django_db
def test_update(self, permission_factory,): # pylint: disable=no-self-use
"""
test updated permission
"""
permission_created = permission_factory()
permission = Permission.objects.get(id = permission_created.id)
permission.name = "permission_test2"
permission.save()
assert permission.name == "permission_test2", 'name should be permission_test2'
class TestProfile:
"""
Test Profile Model
"""
pytestmark = pytest.mark.django_db
def test_creation(self, profile_factory,): # pylint: disable=no-self-use
"""
test created profile
"""
profile_created = profile_factory()
profile = Profile.objects.get(id = profile_created.id)
assert profile.name == "profile_test", 'Name should be profile_test'
assert str(profile) == "profile_test"
pytestmark = pytest.mark.django_db
def test_update(self, profile_factory,): # pylint: disable=no-self-use
"""
test updated profile
"""
profile_created = profile_factory()
profile = Profile.objects.get(id = profile_created.id)
profile.name = "profile_test2"
profile.save()
assert profile.name == "profile_test2", 'name should be profile_test2'
class TestProfilePermission:
"""
Test ProfilePermission Model
"""
pytestmark = pytest.mark.django_db
def test_creation(self, profile_permission_factory,): # pylint: disable=no-self-use
"""
test created profile_permission
"""
profile_permission_created = profile_permission_factory()
profile_permission = ProfilePermission.objects.get(id = profile_permission_created.id)
        assert profile_permission.profile.name == "profile_test", 'Name should be profile_test'
assert str(profile_permission) == "profile_test"
pytestmark = pytest.mark.django_db
def test_update(self, profile_permission_factory,): # pylint: disable=no-self-use
"""
test updated profile_permission
"""
profile_test = Profile()
profile_test.name="profile_permission_test2"
profile_test.key="test"
profile_test.description="test"
profile_test.save()
profile_permission_created = profile_permission_factory()
profile_permission = ProfilePermission.objects.get(id = profile_permission_created.id)
profile_permission.profile = profile_test
profile_permission.save()
assert profile_permission.profile.name == "profile_permission_test2", 'name should be profile_permission_test2'
|
[
"isc.andradealan@gmail.com"
] |
isc.andradealan@gmail.com
|
032ea0d77079d896e479004c2016c2360bde3d50
|
48a741fccfcc832609bf557723e8d069ccbf87e7
|
/Classification/utilities/nn/conv/__init__.py
|
ae161251b4ab8cee4f58d39b9cc93c5421499613
|
[] |
no_license
|
kdasilva835842/tremor_classification
|
27f00fadacba7859ff47c804ae921de85b16585d
|
d4a8f7d5f773a27230bbc9b4d3fec14c22825989
|
refs/heads/master
| 2022-12-08T13:42:02.947340
| 2020-09-07T06:34:55
| 2020-09-07T06:34:55
| 293,249,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
from .shallownet import ShallowNet
from .lenet import LeNet
from .minivggnet import MiniVGGNet
from .alexnet import AlexNet
from .resnet import ResNet
from .fcheadnet import FCHeadNet
from .lenet_mnist import LeNet_MNIST
|
[
"kelvinds@discovery.co.za"
] |
kelvinds@discovery.co.za
|
f7d576b86f66f442c7a8952459e7007695d6c4e8
|
78c127bce6c51107e46c3f53fe96a542ec7ebfc1
|
/app.py
|
cd869a98dfdd174978d02861ac867667c04f58e2
|
[] |
no_license
|
lucasblazzi/statistcs-ho
|
680c73ada9d19d529fe13c9ffd2a5883ed81c39a
|
4115371af71ee157030caf2df82f815ad2cd9498
|
refs/heads/master
| 2023-05-08T19:43:02.226800
| 2021-06-02T01:07:15
| 2021-06-02T01:07:15
| 371,530,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,164
|
py
|
import streamlit as st
import pandas as pd
import plotly.figure_factory as ff
from PIL import Image
from utils.fetch import get_data
from utils.plot import plot_scatter
from utils.plot import plot_bar
from utils.plot import plot_table
from utils.plot import plot_pie
from utils.plot import plot_box
from utils.plot import plot_multi_bar
from utils.plot import plot_histogram
from utils.text import sample_text
from utils.text import performance_text
from utils.text import work_hours_text
from utils.text import academic_impact_text
from utils.text import question_mapping
from utils.text import abstract
from utils.text import sample_analysis
from utils.text import satsxwill_text
st.set_page_config(layout='wide')
pd.set_option('display.max_colwidth', None)
side_bar = st.sidebar
def _header():
st.title('MAT013 - Probabilidade e Estatistica')
st.header('Análise comparativa de perspectivas relacionadas ao Home Office')
st.markdown("____")
st.markdown(abstract, unsafe_allow_html=True)
def _amostra(visualizations, raw_data):
cols = st.beta_columns(3)
amostras = ("Tipo de trabalho", "Categoria")
for i, amostra in enumerate(amostras):
count_amostra = visualizations[amostra].value_counts()
amostra_bar = plot_pie(count_amostra, x=amostra, y=count_amostra.index,
title=amostra)
cols[i].plotly_chart(amostra_bar)
count_raw = raw_data["Curso"].value_counts()
raw_bar = plot_bar(count_raw, x=count_raw.index, y="Curso",
title="Distribuição por curso", width=800, height=500, color=count_raw.index)
cols[0].plotly_chart(raw_bar)
cols[2].markdown("<br><br>", unsafe_allow_html=True)
cols[2].markdown(sample_analysis, unsafe_allow_html=True)
def _comparison(visualizations):
compare = ("Performance", "Carga Horária", "Impacto nos estudos")
results = (performance_text , work_hours_text, academic_impact_text)
for comp, res in zip(compare, results):
cols = st.beta_columns([0.55, 0.45])
fig = plot_multi_bar(visualizations, comp)
cols[0].plotly_chart(fig)
cols[1].markdown("<br><br><br>", unsafe_allow_html=True)
cols[1].markdown(res, unsafe_allow_html=True)
def _base_scatter(home_office):
st.markdown("____")
cols = st.beta_columns([0.7, 0.3])
x = "Qual é seu nível de satisfação com o trabalho remoto?"
y = "Horas trabalhadas"
f = plot_scatter(home_office, x, y)
cols[0].plotly_chart(f)
cols[1].markdown("<br><br>", unsafe_allow_html=True)
cols[1].write(sample_text)
def _satsxreal(visualizations):
cols = st.beta_columns([0.4, 0.3, 0.3])
comp = {"Não Remoto": "Vontade de trabalhar home office (Não Remoto)",
"Remoto" :"Satisfação com o Home Office (Remoto)"}
i = 0
c = 0
for k, val in comp.items():
v = visualizations.loc[visualizations["Categoria"] == k]
hist = plot_histogram(v, title=val)
box = plot_box(v, val, k, "Vontade x Satisfação")
cols[i].plotly_chart(hist)
if c > 0:
cols[i+1].markdown("<br><br>", unsafe_allow_html=True)
cols[i+1].plotly_chart(box)
table = plot_table(v, "Vontade x Satisfação", title=val)
cols[i+2].plotly_chart(table)
c+=1
st.markdown(satsxwill_text, unsafe_allow_html=True)
def _set_title(title):
st.markdown("____")
st.header(title)
st.markdown("<br><br>", unsafe_allow_html=True)
def main():
raw_data = get_data("raw_mat013_forms")
home_office = get_data("home_office")
not_home_office = get_data("not_home_office")
on_office = get_data("on_office")
unemployed = get_data("unemployed")
visualizations = get_data("visualizations")
sets = {
"Dados Brutos": raw_data,
"Empregado - Home Office": home_office,
"Empregado - Presencial": on_office,
"Não Empregado": unemployed,
"Nunca trabalhou home office": not_home_office,
"Vizualização Comparativa": visualizations
}
selection = side_bar.selectbox("Selecione", ["Dashboard", "Dados"])
image = Image.open("infografico.png")
if selection == "Dados":
st.image(image)
st.subheader("Questões")
st.dataframe(raw_data.columns)
exclude = ("Qual é a sua vontade de trabalhar remotamente?", "Vontade x Satisfação")
for key, value in sets.items():
st.subheader(key)
st.dataframe(value)
for i, column in enumerate(value.columns):
if i == 0 or column in exclude or key == "Dados Brutos":
continue
st.dataframe(value[column].value_counts())
st.markdown("____")
elif selection == "Dashboard":
_header()
_set_title("Contextualização e Análise Amostral")
_amostra(visualizations, raw_data)
_set_title("Análise de Perspectivas")
_comparison(visualizations)
_set_title("Outras Observações")
_satsxreal(visualizations)
st.markdown("____")
st.image(image)
if __name__ == "__main__":
main()
|
[
"lucasblazzi@hotmail.com"
] |
lucasblazzi@hotmail.com
|
35244b51575c9fc5c8cd4ce310014de26236a627
|
68482946d43336d58c200717b427e584eb759d98
|
/vowel_counter.py
|
2cde2692283a0f7507966a17e68cc5787f23c86c
|
[] |
no_license
|
jointyrost/py_beginners
|
858f2d36bd4b1d0caabebf04f4cdd2dc91b4b10e
|
d8fb8cacbeb8b25938c177cfa29dc1bc70ff94ee
|
refs/heads/main
| 2023-01-09T18:01:33.034844
| 2020-10-31T12:24:20
| 2020-10-31T12:24:20
| 308,881,516
| 1
| 0
| null | 2020-10-31T12:54:32
| 2020-10-31T12:54:32
| null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
# Count Vowels in String
def count_vowels(string):
vowels = ['a', 'e', 'i', 'o', 'u']
vowel_count = 0
for letter in string:
if letter in vowels:
vowel_count += 1
return vowel_count
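# Minimal usage sketch (example string and expected count are ours):
if __name__ == '__main__':
    print(count_vowels("beautiful day"))  # -> 6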
|
[
"noreply@github.com"
] |
jointyrost.noreply@github.com
|
1c195a597666eeb0525514c52eb135492d328e93
|
55114a42d6cf63fed3d46b61b423bff14d1cab87
|
/web/controllers/api/Quant.py
|
21b8ffb5f921d9e69336df933e8d66a8cec89865
|
[] |
no_license
|
xzxedu/quant_demo
|
88856d24307b9fba2754f6c40a5e72fd77e09982
|
1d726c3972a84aea477815c8f709b91924e96012
|
refs/heads/master
| 2020-08-11T22:37:18.929949
| 2020-01-23T22:17:24
| 2020-01-23T22:17:24
| 214,640,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,751
|
py
|
# -*- coding: utf-8 -*-
from common.models.member.MemberCart import MemberCart
from common.models.quant.QuantCat import QuantCat
from common.libs.UrlManager import UrlManager
from common.models.quant.Quant import Quant
from web.controllers.api import route_api
from flask import request, jsonify, g
from application import app, db
from sqlalchemy import or_
@route_api.route("/quant/index")
def quantIndex():
resp = {'code': 200, 'msg': '操作成功', 'data': {}}
cat_list = QuantCat.query.filter_by(status=1).order_by(QuantCat.weight.desc()).all()
data_cat_list = []
data_cat_list.append({
'id': 0,
'name': '全部'
})
if cat_list:
for item in cat_list:
tmp_data = {
"id": item.id,
"name": item.name
}
data_cat_list.append(tmp_data)
resp['data']['cat_list'] = data_cat_list
quant_list = Quant.query.filter_by(status=1)\
.order_by(Quant.total_count.desc(), Quant.id.desc()).limit(3).all()
data_quant_list = []
if quant_list:
for item in quant_list:
tmp_data = {
"id": item.id,
"pic_url": UrlManager.buildImageUrl(item.main_image)
}
data_quant_list.append(tmp_data)
resp['data']['banner_list'] = data_quant_list
return jsonify(resp)
@route_api.route("/quant/search")
def quantSearch():
resp = {'code': 200, 'msg': '操作成功', 'data': {}}
req = request.values
    cat_id = int(req['cat_id']) if 'cat_id' in req else 0
mix_kw = str(req['mix_kw']) if 'mix_kw' in req else ''
p = int(req['p']) if 'p' in req else 1
if p < 1:
p = 1
query = Quant.query.filter_by(status=1)
page_size = 10
offset = (p-1) * page_size
if cat_id > 0:
query = query.filter(Quant.cat_id == cat_id)
    if mix_kw:
        rule = or_(Quant.name.ilike("%{0}%".format(mix_kw)), Quant.tags.ilike("%{0}%".format(mix_kw)))
        query = query.filter(rule)
quant_list = query.order_by(Quant.total_count.desc(), Quant.id.desc())\
.offset(offset).limit(page_size).all()
data_quant_list = []
if quant_list:
for item in quant_list:
tmp_data = {
'id': item.id,
'name': "%s" % (item.name),
'price': str(item.price),
'min_price': str(item.price),
'pic_url': UrlManager.buildImageUrl(item.main_image)
}
data_quant_list.append(tmp_data)
resp['data']['list'] = data_quant_list
resp['data']['has_more'] = 0 if len(data_quant_list) < page_size else 1
return jsonify(resp)
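# Example request (hypothetical values, ours; the URL prefix depends on how
# route_api is registered):
#   GET .../quant/search?cat_id=2&mix_kw=alpha&p=1
# -> {"code": 200, "data": {"list": [...], "has_more": 0 or 1}}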
@route_api.route("/quant/info")
def quantInfo():
resp = {'code': 200, 'msg': '操作成功', 'data': {}}
req = request.values
id = int(req['id']) if 'id' in req else 0
quant_info = Quant.query.filter_by(id=id).first()
    if not quant_info or not quant_info.status:
resp['code'] = -1
resp['msg'] = "该产品已下架 "
return jsonify(resp)
member_info = g.member_info
cart_number = 0
if member_info:
cart_number = MemberCart.query.filter_by(member_id=member_info.id).count()
resp['data']['info'] = {
"id": quant_info.id,
"name": quant_info.name,
"summary": quant_info.summary,
"total_count": quant_info.total_count,
"comment_count": quant_info.comment_count,
"main_image": UrlManager.buildImageUrl(quant_info.main_image),
"price": str(quant_info.price),
"stock": quant_info.stock,
"pics": [UrlManager.buildImageUrl(quant_info.main_image)]
}
resp['data']['cart_number'] = cart_number
return jsonify(resp)
|
[
"s1787273@sms.ed.ac.uk"
] |
s1787273@sms.ed.ac.uk
|
be16a67b88abad327d6cecbee86ece8d7a52c632
|
63e116eddc626816689fca20137140c002670d1a
|
/calculateResults/migrations/0002_department.py
|
ed44d475165167ae9359b484a2ecdf037cea8c39
|
[] |
no_license
|
sarah-salaheldeen/sarah-salaheldeen.github.io
|
227790038f3834d8b96d1b23dc210c31bf675f25
|
207b99c388c8fda6e56317bcbb1200b19796f2f9
|
refs/heads/master
| 2020-04-05T17:50:11.700269
| 2018-11-11T13:10:01
| 2018-11-11T13:10:01
| 157,077,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
# Generated by Django 2.1a1 on 2018-09-25 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calculateResults', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('department_id', models.IntegerField()),
('department_name', models.CharField(max_length=100)),
],
),
]
|
[
"sarahsalaheldeen@gmail.com"
] |
sarahsalaheldeen@gmail.com
|
5cb91bafbc062039775e7519eafeb5d6206a97d8
|
dcf2e42641237b2488078a31f704c3b45b883f03
|
/GitHub/Analysis.py
|
f50954e527895cbaccc50ab45b3d47a2576b0bed
|
[] |
no_license
|
jeffbender/LinkingEdX
|
acd353cbe5371690f16f6deefaf1b1285b467a47
|
400e9b0d4302ae8b52df94ad154ce7944d76ab48
|
refs/heads/master
| 2021-01-22T23:00:56.910498
| 2016-06-20T19:26:32
| 2016-06-20T19:26:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,806
|
py
|
'''
Created on Dec 21, 2015
@author: Angus
'''
import json
from Functions.CommonFunctions import ReadEdX
def AnalyzeMatchingResults(platform, path):
course_matcher_map = {}
non_duplicate_matcher_set = set()
# Read EdX learners
edx_path = path + "course_metadata/course_email_list"
edx_learners_set, edx_learners_map = ReadEdX(edx_path)
course_learners_map = {}
for learner in edx_learners_map.keys():
for course in edx_learners_map[learner]["courses"]:
if course not in course_learners_map.keys():
course_learners_map[course] = set()
course_learners_map[course].add(learner)
# 1. Explicit matching
explicit_learners = set()
explicit_path = path + platform + "/explicit_matching"
explicit_file = open(explicit_path, "r")
lines = explicit_file.readlines()
for line in lines:
array = line.replace("\n", "").split("\t")
learner = array[0]
courses = array[1].split(",")
non_duplicate_matcher_set.add(learner)
explicit_learners.add(learner)
for course in courses:
if course not in course_matcher_map.keys():
course_matcher_map[course] = set()
course_matcher_map[course].add(learner)
    print("# explicit learners is:\t" + str(len(explicit_learners)))
# 2. Direct matching
direct_path = path + "latest_matching_result_0"
direct_file = open(direct_path, "r")
jsonLine = direct_file.read()
direct_results_map = json.loads(jsonLine)
direct_file.close()
direct_learners = set()
for learner in direct_results_map.keys():
if platform in direct_results_map[learner]["checked_platforms"]:
if learner in edx_learners_set:
non_duplicate_matcher_set.add(learner)
if learner not in explicit_learners:
direct_learners.add(learner)
for course in edx_learners_map[learner]["courses"]:
if course not in course_matcher_map.keys():
course_matcher_map[course] = set()
course_matcher_map[course].add(learner)
    print("# direct learners is:\t" + str(len(direct_learners)))
# 3. Fuzzy matching
fuzzy_path = path + platform + "/fuzzy_matching"
fuzzy_file = open(fuzzy_path, "r")
lines = fuzzy_file.readlines()
for line in lines:
array = line.replace("\n", "").split("\t")
learner = array[0]
login = array[1]
if login != "":
if learner in edx_learners_set:
non_duplicate_matcher_set.add(learner)
for course in edx_learners_map[learner]["courses"]:
if course not in course_matcher_map.keys():
course_matcher_map[course] = set()
course_matcher_map[course].add(learner)
# Output analysis results
count_course_learner_map = {}
for course in course_learners_map.keys():
count_course_learner_map[course] = len(course_learners_map[course])
sorted_count_course_learner_map = sorted(count_course_learner_map.items(), key=lambda d:d[1], reverse=True)
for record in sorted_count_course_learner_map:
        print(str(record[0]) + "\t" + str(record[1]) + "\t" + str(len(course_matcher_map[str(record[0])])))
        # print(str(len(course_matcher_map[str(record[0])])))
    print()
    print("# non-duplicate matchers is:\t" + str(len(non_duplicate_matcher_set)))
path = "/Volumes/NETAC/LinkingEdX/"
platform = "github"
AnalyzeMatchingResults(platform, path)
print("Finished.")
|
[
"angus.glchen@gmail.com"
] |
angus.glchen@gmail.com
|
f395f4805a0ad8c28e263e034dae773ae4706e8a
|
45fc78cb3bcd75985aa607d8092a33df6e8f9ec9
|
/chain/wenchain/core/blockchain.py
|
c09a37fe7aa910427713e5c615acce57cf4eb5ca
|
[] |
no_license
|
kafana/thinkering
|
34d94d38f82c773ed104e90aede6b6c0aeae620f
|
3e86f4e96b1336c59ab7b4373203819fe35d2860
|
refs/heads/master
| 2020-04-08T17:18:38.720642
| 2018-11-29T00:29:23
| 2018-11-29T00:29:23
| 159,561,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,313
|
py
|
import logging
from copy import deepcopy
from wenchain.core.block import Block
logger = logging.getLogger(__name__)
class BlockChain(object):
def __init__(self, chain=None):
        self.chain = chain if chain is not None else [Block.genesis()]
def __repr__(self):
return "<BlockChain(chain=%r)>" % (
self.chain)
    def __eq__(self, other):
        if not isinstance(other, BlockChain):
            return False
        if len(self.chain) != len(other.chain):
            return False
        for i in range(len(self.chain)):
            if self.chain[i] != other.chain[i]:
                return False
        return True
def __hash__(self):
return hash(tuple([block.hash for block in self.chain]))
def __copy__(self):
return type(self)(self.chain)
# The ids param is a dict of id's to copies
# memoization avoids recursion
def __deepcopy__(self, ids):
self_id = id(self)
item = ids.get(self_id)
if item is None:
item = type(self)(deepcopy(self.chain, ids))
ids[self_id] = item
return item
def append_block(self, data):
previous_block = self.chain[-1]
block = Block.mine(previous_block, data)
self.chain.append(block)
return block
def replace_chain(self, block_chain):
if len(block_chain.chain) <= len(self.chain):
return False
if not self.is_valid(block_chain):
return False
self.chain = deepcopy(block_chain.chain)
return True
    def is_valid(self, block_chain):
        # Compare the overlapping prefix of the existing chain with the new chain
        for i in range(len(self.chain)):
            if (self.chain[i].hash != block_chain.chain[i].hash or
                    self.chain[i].prev_hash != block_chain.chain[i].prev_hash):
                return False
        # Check the additional blocks of the new chain
        for j in range(len(self.chain), len(block_chain.chain)):
            prev_block = block_chain.chain[j - 1]
            block = block_chain.chain[j]
            if prev_block.hash != block.prev_hash or Block.create_block_hash(block) != block.hash:
                return False
            if not block.is_reward_transaction_valid(j):
                logger.error('Invalid reward transaction for block {}'.format(block.hash))
                return False
        return True
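# Hedged usage sketch (ours; assumes Block.genesis()/Block.mine() behave as
# used above):
#   local = BlockChain()
#   local.append_block('tx1')
#   longer = deepcopy(local)
#   longer.append_block('tx2')
#   local.replace_chain(longer)  # True: longer extends a consistent prefix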
|
[
"boris@kafana.org"
] |
boris@kafana.org
|
04dedfe9530ed9fec39b8883ebf4fab7b8e969bf
|
2c681979985efbb4975392ac8007a6fbb18eb1f5
|
/school/urls.py
|
fcab2a6c4e9e330a8860afa25acb8096d4a13d2a
|
[
"MIT"
] |
permissive
|
gulgis/school-b
|
36420e665e5b1a8a91a754fa5f58005a0b678981
|
8934e0e86b633f5d8c9f6b62cb7549e1203f9ab5
|
refs/heads/main
| 2023-04-19T19:46:10.900719
| 2021-05-11T17:49:59
| 2021-05-11T17:49:59
| 358,216,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
"""school URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework.authtoken import views
urlpatterns = [
path("admin/", admin.site.urls),
path("api/", include("lab.api.urls")),
re_path(r"^api-auth/", include("rest_framework.urls")),
re_path(r"^api-token-auth/", views.obtain_auth_token),
]
|
[
"gulgielmin.w@gmail.com"
] |
gulgielmin.w@gmail.com
|
9821ea5fb99ed26dc73ef8207eacbceddf9083a6
|
e38f164ea225dc847644943f18999a21a625e65b
|
/dechat/__init__.py
|
e68dde0bf70571f11cafce4e4ea66662b259617d
|
[
"MIT"
] |
permissive
|
robobrobro/dechat
|
1f7f011b5dc8aff8bbbdccedbc89143b4b9c8f84
|
e15f4efa24088ff71c332e6ad4638d416e778bf1
|
refs/heads/master
| 2021-01-10T05:15:37.446772
| 2016-03-30T03:21:54
| 2016-03-30T03:21:54
| 54,782,007
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
"""
Dechat - Distributed, Encrypted CHAT client
"""
from . import errors, messaging, user
|
[
"foehawk@gmail.com"
] |
foehawk@gmail.com
|
6a0cd5f14df5e5c8bb28dcf4c61e75d86dbfbeb9
|
58dc4859c3e754ff10abb50d8fe3360ec6a26f11
|
/logistic regression/LR_main.py
|
4d9ac9261c1f4bf90fd5a2e6491cd2df0482e7e3
|
[] |
no_license
|
demeiyan/ml_project
|
f31ae453de9191c8956c64b2e7cfd05b27fe0acb
|
b78fa07c97b8513fd5a8fe4fd388708f1075c755
|
refs/heads/master
| 2021-07-10T20:21:01.668138
| 2019-04-25T13:13:02
| 2019-04-25T13:13:02
| 109,544,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,199
|
py
|
# -*- coding: utf-8 -*-
"""
Created on 2018/4/14 20:39
@author: dmyan
Implement multi-class classification with binary classifiers: OvR (one-vs-rest)
"""
import numpy as np
def train_valid_split(feature, label, valid_size=0.1):
train_indices = []
valid_indices = []
for i in range(5):
index = np.where(label == (i+1))[0]
np.random.shuffle(index)
rand = np.random.choice(index.shape[0], int(index.shape[0]*valid_size), replace=False)
valid_indices.append(index[rand].tolist())
index = np.delete(index, rand)
train_indices.append(index.tolist())
train_indices = [item for sublist in train_indices for item in sublist]
valid_indices = [item for sublist in valid_indices for item in sublist]
return feature[train_indices], label[train_indices], feature[valid_indices], label[valid_indices]
def smote(x, y, cls, k):
label_counts = []
for i in range(5):
label_counts.append((y == (i+1)).sum())
all_samples = x.shape[0]
label_sample = np.array([int(i == cls) for i in y])
n_samples = label_sample.sum()
    remain = all_samples - n_samples
    # print(n_samples, remain, sum(label_counts))
positive_sample = x[np.where(label_sample == 1)[0]]
# print(positive_sample.shape)
    if n_samples > remain:
        sampling_rate = int(n_samples/remain) + 1
negative_indices = np.where(label_sample == 0)[0]
np.random.shuffle(negative_indices)
# print(negative_indices.shape[0])
distance_vector = np.zeros(negative_indices.shape[0])
negative_sample = np.zeros((negative_indices.shape[0]*sampling_rate, 11))
negative_count = 0
for i in range(negative_indices.shape[0]):
for j in range(negative_indices.shape[0]):
distance_vector[j] = np.linalg.norm(x[negative_indices[i]]-x[negative_indices[j]])
sampling_knn = distance_vector.argsort()[0:k]
sampling_nn = np.random.choice(sampling_knn, sampling_rate, replace=False)
            for j in range(sampling_nn.shape[0]):
                base = negative_indices[i]
                neighbor = negative_indices[sampling_nn[j]]
                # Interpolate each feature between the base negative sample and
                # one of its sampled nearest negative neighbours
                for d in range(x.shape[1]):
                    negative_sample[negative_count][d] = x[base][d] + np.random.rand()*(x[neighbor][d] - x[base][d])
                negative_count += 1
# print(negative_sample[1000:3000])
synthetic_feature = np.concatenate((positive_sample, negative_sample), axis=0)
synthetic_label = np.concatenate((label_sample, np.zeros(negative_sample.shape[0])), axis=0)
return synthetic_feature, synthetic_label
else:
negative_sample = np.zeros(0)
label_sample = np.ones(positive_sample.shape[0])
for i in range(5):
rand = np.random.choice(np.where(y == (i+1))[0], int(label_counts[i]*n_samples/all_samples), replace=False)
if i == 0:
negative_sample = x[rand]
else:
negative_sample = np.append(negative_sample, x[rand], axis=0)
negative_sample = np.array(negative_sample)
synthetic_feature = np.concatenate((positive_sample, negative_sample), axis=0)
synthetic_label = np.concatenate((label_sample, np.zeros(negative_sample.shape[0])), axis=0)
# print(synthetic_feature, synthetic_label)
return synthetic_feature, synthetic_label
# return synthetic_feature, synthetic_label
def sigmoid(belta, x):
    # 1/(1 + exp(-z)) equals exp(z)/(1 + exp(z)) algebraically, but avoids the
    # inf/inf = nan that the latter produces when exp(z) overflows
    return 1/(1 + np.exp(-np.dot(belta, x)))
def convertdata(feature, label):
x = []
y = []
with open('./assign2_dataset/'+feature, 'r') as f:
for line in f.readlines():
if len(line.strip()) > 0:
x.append([float(x) for x in line.strip().split(' ')])
x = np.array(x)
with open('./assign2_dataset/'+label, 'r') as f:
for line in f.readlines():
y.append([float(x) for x in line.strip().split(' ')])
y = np.array(y)
return x, y
def binary_classification(x, y, one, belta, learn_rate):
y = np.array([int(i == one) for i in y])
    der = sigmoid(belta, x.T) - y  # gradient of the logistic loss w.r.t. belta
der = np.reshape(der, (y.shape[0], 1))
der = np.sum(x*der, axis=0)
belta = belta - learn_rate*der
return belta
if __name__ == '__main__':
min_batch = 64
learn_rate = 0.01
validation_size = 0.1
x_train, label_train = convertdata('page_blocks_train_feature.txt', 'page_blocks_train_label.txt')
x_test, label_test = convertdata('page_blocks_test_feature.txt', 'page_blocks_test_label.txt')
    # Normalize each feature dimension: x = (x - mean)/std (zero mean, unit variance)
std = x_train.std(axis=0)
mean = x_train.mean(axis=0)
x_train = (x_train - mean)/std
x_test = (x_test - mean)/std
x_train = np.concatenate((x_train, np.ones((1, x_train.shape[0])).T), axis=1)
x_test = np.concatenate((x_test, np.ones((1, x_test.shape[0])).T), axis=1)
label_train = np.array([int(x) for x in label_train])
label_test = np.array([int(x) for x in label_test])
# split data to train_data and valid_data
x_train, label_train, x_valid, label_valid = train_valid_split(x_train, label_train, 0.1)
datasize = label_train.shape[0]
belta = np.zeros((5, 11))
#belta[0] = binary_classification(x_train[0 * min_batch:(0 + 1) * min_batch], label_train[0 * min_batch:(0 + 1) * min_batch], 1, belta[0], learn_rate)
# for epoch in range(100):
# print(belta[0])
# for i in range(int(datasize/min_batch)):
    #         belta[0] = binary_classification(x_train[i*min_batch:(i+1)*min_batch], label_train[i*min_batch:(i+1)*min_batch], 5, belta[0], learn_rate)
for i in range(5):
# x_train, label_train =
# x, y = smote(x_train, label_train, i+1, 15)
# print(x,y)
for epoch in range(1000):
for j in range(int(datasize/min_batch)):
belta[i] = binary_classification(x_train[j*min_batch:(j+1)*min_batch], label_train[j*min_batch:(j+1)*min_batch], i+1, belta[i], learn_rate)
result = np.where(sigmoid(belta[i], x_train.T) >= 0.5)[0]
label = np.where(label_train == (i+1))[0]
count = 0
for index in result:
if index in label:
count += 1
loss = label.shape[0] - count
print(str(i)+' loss :'+str(loss/label.shape[0]))
# loss = (result != label_train).sum()/label_train.shape[0]*100
    # Count the number of samples in each class
# label_counts = []
# for i in range(5):
# label_counts.append((label_train == (i+1)).sum())
#
# print(sum(label_counts), label_counts)
accuracy = sigmoid(belta, x_valid.T)
    # # Threshold moving, then rescaling
# for i in range(accuracy.shape[0]):
# accuracy[i] = accuracy[i] * ((sum(label_counts) - label_counts[i])/label_counts[i])
accuracy = np.argmax(accuracy, axis=0) + 1
    print('validation accuracy : %.2f%%' % ((accuracy == label_valid).sum()/label_valid.shape[0]*100))
# test
label_predict = np.argmax(sigmoid(belta, x_test.T), axis=0) + 1
precision = []
recall = []
print('\t\tpredict\ttest\tcorrect')
for i in range(5):
predict = label_predict == (i+1)
test = label_test == (i+1)
predict_indices = [j for j, x in enumerate(label_predict) if x == (i+1)]
test_indices = [j for j, x in enumerate(label_test) if x == (i+1)]
correct = 0
for index in predict_indices:
if index in test_indices:
correct += 1
print(str(i+1)+' sample ' + str(predict.sum())+'\t'+str(test.sum())+'\t'+str(correct))
if predict.sum() != 0:
precision.append(correct/predict.sum()*100)
else:
precision.append(0.0)
recall.append(correct/test.sum()*100)
accuracy = (label_predict == label_test).sum()/label_test.shape[0]*100
print('precision\t: %.2f%% %.2f%% %.2f%% %.2f%% %.2f%%' % (precision[0], precision[1], precision[2], precision[3], precision[4]))
print('recall\t: %.2f%% %.2f%% %.2f%% %.2f%% %.2f%%' % (recall[0], recall[1], recall[2], recall[3], recall[4]))
print('test result : %.2f%%' % accuracy)
|
[
"1312480794@qq.com"
] |
1312480794@qq.com
|
844c27fc00fb9fd926028f0c00b3c89d861a3585
|
f78300d307d4067d5f0f920da5090749126a02d4
|
/tests/test_install_services.py
|
83e0a0eb8b1a0a905cb02ccf6d6212e085a9cce0
|
[
"MIT"
] |
permissive
|
thomasjpfan/server_setup
|
910b022522c2da833875b31bd384308a8a0a532f
|
f8ade284a3039f05f6acd22aa4730aa69ffbf428
|
refs/heads/master
| 2021-09-23T20:56:12.684930
| 2018-09-27T15:19:52
| 2018-09-27T15:19:52
| 119,753,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
import pytest
testinfra_hosts = ['tests_api2_1', 'tests_api2_1']
services = [
'fail2ban',
'ntp',
'unattended-upgrades',
'ufw'
]
@pytest.mark.parametrize('service', services)
def test_service_active(host, service):
h_service = host.service(service)
assert h_service.is_running
assert h_service.is_enabled
def test_swarm_is_active(host):
cmd = "docker info -f '{{ .Swarm.LocalNodeState }}'"
output = host.check_output(cmd)
assert output == "active"
|
[
"thomasjpfan@gmail.com"
] |
thomasjpfan@gmail.com
|
31ecfdd585ab0fa9cf8ad0e5a761816a7e63aefd
|
4d963892c71028bd793d4ad765d442627827e8f8
|
/afiniczne.py
|
8b50a0296bdac28ff99aec6b4295a62b5e244144
|
[] |
no_license
|
pablitochmiel/przeksztalcenia_obrazow
|
a093a03447f5addb19d5de03eb860f47d0b22183
|
e408db94112d5620b89514d0f1455d2122102dc0
|
refs/heads/master
| 2022-12-03T21:01:34.570721
| 2020-09-01T09:42:17
| 2020-09-01T09:42:17
| 291,953,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
from PIL import Image
import numpy as np
def afiniczne(imageName):
im = Image.open(imageName)
imarray=np.array(im)
#af=[[1.2, 0],[0,0.6]]
#af=[[0.707,-0.707],[0.707,0.707]]
#af=[[1,8],[3,5]]
af=[[1.2,-0.7],[1.3,0.4]]
#outshape=(round(args[0]*imarray.shape[0]),round(args[1]*imarray.shape[1]))
#outshape=imarray.shape
wys=[0,round(imarray.shape[1]*af[0][1]),round(imarray.shape[0]*af[0][0]),round(imarray.shape[0]*af[0][0]+imarray.shape[1]*af[0][1])]
szer=[0,round(imarray.shape[1]*af[1][1]),round(imarray.shape[0]*af[1][0]),round(imarray.shape[0]*af[1][0]+imarray.shape[1]*af[1][1])]
outshape=(max(wys)-min(wys),max(szer)-min(szer))
    det = float(af[0][0]*af[1][1] - af[0][1]*af[1][0])
    if det == 0:
        print("operation cannot be performed: the transformation matrix is singular")
        return
    afin = [[af[1][1]/det, -af[0][1]/det], [-af[1][0]/det, af[0][0]/det]]  # inverse matrix
outarray=[]
for i in range(outshape[0]):
temp=[]
for j in range(outshape[1]):
x=round(imarray.shape[0]/2+(i-outshape[0]/2)*afin[0][0]+(j-outshape[1]/2)*afin[0][1])
y=round(imarray.shape[1]/2+(i-outshape[0]/2)*afin[1][0]+(j-outshape[1]/2)*afin[1][1])
if(x>=imarray.shape[0] or x<0 or y>=imarray.shape[1] or y<0):
temp.append(np.zeros_like(imarray[0,0]))
else:
temp.append(imarray[x,y])
outarray.append(temp)
outarray=np.array(outarray)
wynik=Image.fromarray(outarray)
wynik.save("wynik.tif")
|
[
"pawcio27610@wp.pl"
] |
pawcio27610@wp.pl
|
bde004baf17ae9f877abce01ec4a6dc7a4aaef01
|
5056daf2cfdcc305099605d14856fcdecd9ac3f9
|
/apps/login/urls.py
|
008d11eb28c06720b63f2ddf2cfba50e76723ecb
|
[] |
no_license
|
Choapinus/django-web
|
1bfdbdc19fa2e29c71437f7f1845879f4c9225d8
|
afcdd49a0c29b36cb75601b2e880db78d8fb7971
|
refs/heads/master
| 2020-03-11T11:33:38.237866
| 2018-06-01T06:02:56
| 2018-06-01T06:02:56
| 129,972,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.urls import path
from apps.login import views
from django.conf import settings
urlpatterns = [
path('', views.login_auth, name='login'),
path('logout', views.logout_auth, name='logout'),
path('index', views.index, name='index'),
]
|
[
"wxtem@hotmail.com"
] |
wxtem@hotmail.com
|
a1762b7d73a2f97e2da4a50a41c73f759987c0fb
|
05a793cc2933a16199d0e82f1569c5518c5786a4
|
/class2/class3.py
|
52db27930fd8bc013f9a904f00b6ef50be7024cc
|
[] |
no_license
|
hsnylmzz/Python-Learning
|
6cd69d793617df1877a110f2bef74fdb71483b9f
|
88db218478b633878c8a46da95097d87b6051e31
|
refs/heads/master
| 2023-03-21T17:47:10.549945
| 2023-03-13T11:04:37
| 2023-03-13T11:04:37
| 198,965,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
class Sınıf():                      # "Class"
    sınıf_niteliği = 0              # class attribute
    def __init__(self, veri):       # veri = "data"
        self.veri = veri
    def örnek_metodu(self):         # instance method
        return self.veri
    @classmethod
    def sınıf_metodu(cls):          # class method
        return cls.sınıf_niteliği
    @staticmethod
    def statik_metot():             # static method
        print('hello static method!')
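# A minimal usage sketch (an illustrative addition; the variable name `nesne`,
# Turkish for "object", is an assumption) showing the three method kinds above:
nesne = Sınıf(5)
print(nesne.örnek_metodu())   # instance method -> 5
print(Sınıf.sınıf_metodu())   # class method -> 0
Sınıf.statik_metot()          # static method -> prints the greeting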
|
[
"noreply@github.com"
] |
hsnylmzz.noreply@github.com
|
24cc6db0fbe2cd2487c78f9d4ce7236b60d01f12
|
b656c527a0a6bcb8df45512e202d1607fe33638e
|
/migrations/versions/0079_update_rates.py
|
4699e99b696d8b578ae43695903e1c80bc50b35a
|
[
"MIT"
] |
permissive
|
alphagov/notifications-api
|
fb6d0b6b952f0e2e8c98776be9adf836cce85c54
|
b4c5be42583ef6d6cd004e8c24acaf810d86f65c
|
refs/heads/main
| 2023-09-01T08:58:52.353509
| 2023-08-31T15:00:31
| 2023-08-31T15:00:31
| 46,422,101
| 61
| 33
|
MIT
| 2023-09-14T14:18:19
| 2015-11-18T13:57:17
|
Python
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
"""empty message
Revision ID: 0079_update_rates
Revises: 0078_sent_notification_status
Create Date: 2017-05-03 12:31:20.731069
"""
# revision identifiers, used by Alembic.
revision = "0079_update_rates"
down_revision = "0078_sent_notification_status"
from alembic import op
def upgrade():
op.get_bind()
op.execute("UPDATE RATES SET rate = 0.0158 WHERE valid_from = '2017-04-01 00:00:00'")
op.execute("UPDATE RATES SET rate = 0.0165 WHERE valid_from = '2016-05-18 00:00:00'")
def downgrade():
op.get_bind()
op.execute("UPDATE RATES SET rate = 1.58 WHERE valid_from = '2017-04-01 00:00:00'")
op.execute("UPDATE RATES SET rate = 1.65 WHERE valid_from = '2016-05-18 00:00:00'")
|
[
"ken.tsang@digital.cabinet-office.gov.uk"
] |
ken.tsang@digital.cabinet-office.gov.uk
|
dfd51bc9c4f3faafaf0d818311a06a8b7c1cf0a2
|
2dd9b049e4a2d933fba4114fad70411ba8e25935
|
/test/test_event_ranking.py
|
5b1cec56a42bca393509950a63f6f48eeab99bd5
|
[] |
no_license
|
MHuguenard/tba-api-client-python
|
c58ab4f90f3820dd33e5231bd9a5b51fcb28f6f7
|
7bb6e254fad793ba6204279b5d45fb94266b9860
|
refs/heads/master
| 2020-04-30T16:39:07.317247
| 2019-03-05T20:27:18
| 2019-03-05T20:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
# coding: utf-8
"""
The Blue Alliance API v3
# Overview Information and statistics about FIRST Robotics Competition teams and events. If you are looking for the old version (v2) of the API, documentation can be found [here](/apidocs/v2). # Authentication All endpoints require an Auth Key to be passed in the header `X-TBA-Auth-Key`. If you do not have an auth key yet, you can obtain one from your [Account Page](/account). A `User-Agent` header may need to be set to prevent a 403 Unauthorized error.
OpenAPI spec version: 3.0.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import net.thefletcher.tbaapi.v3client
from net.thefletcher.tbaapi.v3client.rest import ApiException
from net.thefletcher.tbaapi.v3client.models.event_ranking import EventRanking
class TestEventRanking(unittest.TestCase):
""" EventRanking unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testEventRanking(self):
"""
Test EventRanking
"""
# FIXME: construct object with mandatory attributes with example values
#model = net.thefletcher.tbaapi.v3client.models.event_ranking.EventRanking()
pass
if __name__ == '__main__':
unittest.main()
|
[
"travis@example.org"
] |
travis@example.org
|
7e307696364fca18ac96a5230d2d0461f29568c3
|
ee79e734486c0ca550bb8238ef54c78c7727384a
|
/Tools/codetesters/pytesters/extenddict.py
|
bc0142de5cb40a36fc350c885c900cf1a0cfc625
|
[] |
no_license
|
neilrobertson/BICRCode
|
212636e5395f0c0e4dfb3ac3c133f01eb07273ee
|
7b3f4da9cdefd7680f07b707339aee59faece1d2
|
refs/heads/master
| 2020-03-30T16:07:22.637571
| 2018-10-03T10:12:57
| 2018-10-03T10:12:57
| 151,394,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
#!/usr/bin/env python
class Wrapper(dict):
def __init__(self):
pass
w = Wrapper()
w["hello"] = 3
print(w["hello"])
|
[
"neil.alistair.robertson@hotmail.co.uk"
] |
neil.alistair.robertson@hotmail.co.uk
|
14e56fd0a985b0a85eabcc994807d20c19d0b5d0
|
e97b602d47b736305ec0912b442d232d94e5dc57
|
/hw1/1_2_1/pic_layer2_weight.py
|
bb2e02cebe70fbfbee3181b0ee9977e84b1ab6ef
|
[] |
no_license
|
piepie01/MLDS2018SPRING
|
787cb3a49b8df7a93d1ea06fe5bf50d2cecec436
|
894877cdee77f8480c4b4f21da8cda2faf59c184
|
refs/heads/master
| 2022-01-26T05:36:44.379213
| 2018-07-06T08:57:59
| 2018-07-06T08:57:59
| 125,801,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
import matplotlib.pyplot as plt
import numpy as np
def get(filename):
one = []
two = []
with open(filename,'r') as f:
li = f.read().split('\n')[:-1]
for i in range(len(li)):
li[i] = li[i].split()
for i in li[::87]:
one.append(i[0])
two.append(i[1])
return [np.array(one).astype(float),np.array(two).astype(float)]
def get_acc(filename):
one = []
with open(filename,'r') as f:
li = f.read().split('\n')[:-1]
for i in li[::87]:
i = format(float(i), '.2f')
one.append(i)
return np.array(one).astype(float)
data = []
for i in range(8):
tmp = get('weight/layer_'+str(i))
data.append(tmp)
acc = []
for i in range(8):
tmp = get_acc('loss/'+str(i))
acc.append(tmp)
plt.figure()
plt.title('Weight')
#plt.xlabel('Epoch_num')
#plt.ylabel('loss')
size = 2
color_list = ['black','red','green','yellow','orange','pink','blue','purple']
for i in range(8):
    plt.plot(data[i][0], data[i][1], color=color_list[i])
for i in range(8):
for j,txt in enumerate(acc[i]):
plt.annotate(txt,(data[i][0][j],data[i][1][j]),color = color_list[i],size = 7)
#plt.scatter(data[6][0], data[6][1],color = 'blue',s = size)
#plt.scatter(data[7][0], data[7][1],color = 'purple',s = size)
#plt.plot(index, deep, color='red',label = 'deep')
#plt.plot(index, medium, color='yellow',label = 'medium')
plt.legend()
plt.show()
|
[
"b05902031@ntu.edu.tw"
] |
b05902031@ntu.edu.tw
|
fca403463fb91a34133991efbe312ab97a153f30
|
9c3bb98eb9d0a587a302bdfa811f7b5c6a5a0a37
|
/Week 08/id_510/dp/Leetcode_198_510.py
|
991d26ca177b0445b9c7160727d2b58aca96467d
|
[] |
permissive
|
chenlei65368/algorithm004-05
|
842db9d9017556656aef0eeb6611eec3991f6c90
|
60e9ef1051a1d0441ab1c5484a51ab77a306bf5b
|
refs/heads/master
| 2020-08-07T23:09:30.548805
| 2019-12-17T10:48:22
| 2019-12-17T10:48:22
| 213,617,423
| 1
| 0
|
Apache-2.0
| 2019-12-17T10:48:24
| 2019-10-08T10:50:41
|
Java
|
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
"""
你是一个专业的小偷,计划偷窃沿街的房屋。每间房内都藏有一定的现金,影响你偷窃的唯一制约因素就是相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警。
给定一个代表每个房屋存放金额的非负整数数组,计算你在不触动警报装置的情况下,能够偷窃到的最高金额。
示例 1:
输入: [1,2,3,1]
输出: 4
解释: 偷窃 1 号房屋 (金额 = 1) ,然后偷窃 3 号房屋 (金额 = 3)。
偷窃到的最高金额 = 1 + 3 = 4 。
示例 2:
输入: [2,7,9,3,1]
输出: 12
解释: 偷窃 1 号房屋 (金额 = 2), 偷窃 3 号房屋 (金额 = 9),接着偷窃 5 号房屋 (金额 = 1)。
偷窃到的最高金额 = 2 + 9 + 1 = 12 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/house-robber
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
    def rob(self, nums: List[int]) -> int:
        # Standard linear DP: prev/curr hold the best haul up to the previous
        # and the current house; each house is either skipped or robbed on top
        # of prev.
        prev, curr = 0, 0
        for n in nums:
            prev, curr = curr, max(curr, prev + n)
        return curr
|
[
"changyg@shpso.com"
] |
changyg@shpso.com
|
95516f957bdc1e3fb2432098dcbeb87da1ada279
|
654ad266ea5a25c6dac9b5ca925049c2c821993d
|
/test/imputation/cs/test_fast_knn.py
|
eb6530d3bf1363c5734dd6d295f43695b5782efb
|
[
"MIT"
] |
permissive
|
tahmidmehdi/impyute
|
71acdb450371599e788022db75f459e4c4b9abc4
|
232497d53c68f47c3ed600b3de4a386cb6d4f2f3
|
refs/heads/master
| 2020-06-21T09:19:03.910183
| 2019-07-17T14:34:37
| 2019-07-17T14:34:37
| 197,405,979
| 0
| 0
|
MIT
| 2019-07-17T14:31:08
| 2019-07-17T14:31:07
| null |
UTF-8
|
Python
| false
| false
| 1,977
|
py
|
"""test_fast_knn.py"""
import unittest
import numpy as np
import impyute as impy
import functools
# pylint:disable=invalid-name
class TestFastKNN(unittest.TestCase):
""" Tests for Fast KNN """
def setUp(self):
"""
self.data_c: Complete dataset/No missing values
        self.data_m: Incomplete dataset/Has missing values
"""
n = 100
self.data_c = np.random.normal(size=n*n).reshape((n, n))
self.data_m = self.data_c.copy()
for _ in range(int(n*0.3*n)):
self.data_m[np.random.randint(n)][np.random.randint(n)] = np.nan
def test_return_type(self):
""" Check return type, should return an np.ndarray"""
imputed = impy.fast_knn(self.data_m)
self.assertTrue(isinstance(imputed, np.ndarray))
def test_impute_missing_values(self):
""" After imputation, no NaN's should exist"""
imputed = impy.fast_knn(self.data_m)
self.assertFalse(np.isnan(imputed).any())
def test_impute_value(self):
data = np.array([[ 0. , 1. , np.nan, 3. , 4. ],
[ 5. , 6. , 7. , 8. , 9. ],
[10. , 11. , 12. , 13. , 14. ],
[15. , 16. , 17. , 18. , 19. ],
[20. , 21. , 22. , 23. , 24. ]])
imputed = impy.fast_knn(data, k=2)
assert np.isclose(imputed[0][2], 8.38888888888889)
def test_impute_value_custom_idw(self):
data = np.array([[ 0. , 1. , np.nan, 3. , 4. ],
[ 5. , 6. , 7. , 8. , 9. ],
[10. , 11. , 12. , 13. , 14. ],
[15. , 16. , 17. , 18. , 19. ],
[20. , 21. , 22. , 23. , 24. ]])
idw = functools.partial(impy.util.inverse_distance_weighting.shepards, power=1)
imputed = impy.fast_knn(data, k=2, idw=idw)
assert np.isclose(imputed[0][2], 8.913911092686593)
if __name__ == "__main__":
unittest.main()
|
[
"eltonlaw296@gmail.com"
] |
eltonlaw296@gmail.com
|
dbae08d5e4dfdf8ffa55d7bef5a5f46151ee7ecc
|
aaabcb0d8291f883d5ad7071defe0b491b4900f5
|
/learn-python-for-data-science-1/challenge.py
|
efdc3a69c4821c05d7db3262a4d4801bf2189544
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
jlcanela/learn-data
|
4ad26808a43c25a8479ee63b2784aee2303ed910
|
536b4f3b8083979e5552241545fcf2eb04bc2e3a
|
refs/heads/master
| 2021-01-23T04:19:24.311642
| 2017-06-02T23:29:59
| 2017-06-02T23:29:59
| 92,924,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
from sklearn import tree
from sklearn import svm
from sklearn import neighbors
from sklearn import discriminant_analysis
from sklearn import linear_model
dt = tree.DecisionTreeClassifier()
# CHALLENGE - create 3 more classifiers...
# 1
lsvc = svm.LinearSVC()
# 2
kn = neighbors.KNeighborsClassifier(3)
# 3
svc = svm.SVC()
classifiers = [ dt, lsvc, kn, svc ]
# [height, weight, shoe_size]
X = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40],
[190, 90, 47], [175, 64, 39],
[177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]
male = 'male'
female = 'female'
Y = [male, male, female, female, male, male, female, female,
female, male, male]
# CHALLENGE - ...and train them on our data
for clf in classifiers:
clf = clf.fit(X, Y)
prediction = clf.predict([[190, 70, 43]])
print("%s %s" % (clf, prediction))
# CHALLENGE compare their results and print the best one!
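# One possible sketch for the comparison challenge (an illustration, not the
# course's official answer): score each fitted classifier on the training data
# and report the best one.
from sklearn.metrics import accuracy_score
best = max(classifiers, key=lambda c: accuracy_score(Y, c.predict(X)))
print("best classifier: %s" % best)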
|
[
"jlcanelam@gmail.com"
] |
jlcanelam@gmail.com
|
fb76ffe9aa7296483ceb8d68d649e56a75ff2800
|
8eb8b77be9700e24375c6dfe3a80cfef271afcbb
|
/plugins/urls/config.py
|
bfccdee8188c03c16cc64bc92a70fd68feb6863a
|
[] |
no_license
|
shakatakshak/Cardinal
|
fd25cfae92c302df8385abf93e79274ab3448ab0
|
0fbfe0f958710a0f6a35d1e71cd65e1e5e079f4a
|
refs/heads/master
| 2021-01-18T19:00:46.530798
| 2013-08-03T21:08:51
| 2013-08-03T21:08:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
# Copyright (c) 2013 John Maguire <john@leftforliving.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# This is time (in seconds) to attempt to load a website to get its title
# before giving up. (default: 10)
TIMEOUT = 10
# This is the number of bytes to read before giving up on finding a title tag
# on the page. (default: 512KB (512 * 1024))
READ_BYTES = 512 * 1024
|
[
"john@leftforliving.com"
] |
john@leftforliving.com
|
4da584c743d156c0b4418955d2e1bacb77be7788
|
77a65099e331558249e07d5d33e16c54f00dd528
|
/keystrokeserver.py
|
5e6502c5b6966473e01a0f55ad80b1e42bd74094
|
[] |
no_license
|
windsurfer7563/keystroke-dynamics
|
ca69fe0d2c0689a7a78473f0f74775b432679f9b
|
8bc7587901f95766771cb5be3db363aefec8e691
|
refs/heads/master
| 2021-04-29T08:35:53.498496
| 2017-02-03T11:43:57
| 2017-02-03T11:43:57
| 77,672,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
import socketserver
import pickle
import os
import socket
import anomalydetector
class MyTCPHandler(socketserver.BaseRequestHandler):
"""
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# self.request is the TCP socket connected to the client
self.data = self.request.recv(1024).strip()
print("{} wrote:".format(self.client_address[0]))
received_obj=pickle.loads(self.data)
print(received_obj['user'])
userFilePath = (os.path.join(os.path.dirname(os.path.abspath(__file__)), "accounts", received_obj['user'] + '_' + 'NN'+'.dat'))
#print(userFilePath)
        try:
            ad = pickle.load(open(userFilePath, "rb"))
        except BaseException as e:
            # No stored model for this user (or it failed to unpickle):
            # report the error and fall back to a default "accept" reply.
            print("error threw a {}".format(type(e).__name__))
            send_obj = (True, 0, 0)
            self.request.sendall(pickle.dumps(send_obj))
            return
predict, dist, tresh = ad.predict(received_obj['data'])
self.request.sendall(pickle.dumps((predict, dist, tresh)))
if __name__ == "__main__":
HOST, PORT = '', 9999
#HOST, PORT = socket.gethostname(), 9999
# Create the server, binding to localhost on port 9999
server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)
print("Server started at {}".format(HOST))
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
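# Illustrative client sketch (an assumption, not part of the original file):
# the handler above expects a pickled dict with 'user' and 'data' keys and
# replies with a pickled (predict, dist, tresh) tuple.
#
#   import socket, pickle
#   with socket.create_connection(('localhost', 9999)) as s:
#       s.sendall(pickle.dumps({'user': 'alice', 'data': keystroke_timings}))
#       predict, dist, tresh = pickle.loads(s.recv(1024))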
|
[
"C:\\Users\\igor.GAZINTEK2\\AppData\\Roaming\\The Bat!"
] |
C:\Users\igor.GAZINTEK2\AppData\Roaming\The Bat!
|
682505d45eedaaef73966e002fd5a321e091d57e
|
ec390da9ff15cb2114e16742b6cad780732913b5
|
/iter2/47.py
|
f9a2f61857dd6c414bb53e2b7ef2d30a08821e9b
|
[] |
no_license
|
xiaomi388/LeetCode
|
6107487537eeb1ac6128955cd30868985d1509ea
|
877d0fbccc684e4fd7fee2aca63d2656f71a0241
|
refs/heads/master
| 2021-09-26T19:52:34.207507
| 2021-09-17T20:32:53
| 2021-09-17T20:32:53
| 241,884,416
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
ans = set()
def dfs(picked, left):
if not left:
ans.add(tuple(picked))
return
for i in range(len(left)):
                # Optimization: with nums sorted, skip duplicate values at the
                # same depth so each distinct value is branched on only once
                if i > 0 and left[i-1] == left[i]: continue
                dfs(picked + [left[i]], left[:i]+left[i+1:])
        # Sorting enables the duplicate check above
        nums.sort()
dfs(list(), nums)
return list(ans)
|
[
"xiaomi388@gmail.com"
] |
xiaomi388@gmail.com
|
bb308552da3533c0e8de09b41a365499058f8a9e
|
acbf6fcea93fdf22766485d00bebec98bc7d6e5a
|
/tensorflow-test/3/breakpoint/mnist_test.py
|
4069a5181035972052eff9b38436196d05ed62c6
|
[] |
no_license
|
lei977/data-analysis-python
|
27c25c81c21979a4b08acca03f66e3c7ca70122b
|
3b791ec4208c5d07164186a8245f7f4f9a2c031b
|
refs/heads/master
| 2020-05-19T06:09:01.800677
| 2019-05-31T10:33:12
| 2019-05-31T10:33:12
| 184,867,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
# coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5
def test(mnist):
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
y = mnist_forward.forward(x, None)
ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
ema_restore = ema.variables_to_restore()
saver = tf.train.Saver(ema_restore)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
while True:
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
else:
print('No checkpoint file found')
return
time.sleep(TEST_INTERVAL_SECS)
def main():
mnist = input_data.read_data_sets("./data/", one_hot=True)
test(mnist)
if __name__ == '__main__':
main()
|
[
"lei977@126.com"
] |
lei977@126.com
|
8ffc26e790e5436df5df70093011d373aa671134
|
52b5fa23f79d76883728d8de0bfd202c741e9c43
|
/kubernetes/test/test_v1_api_resource_list.py
|
909eb7aeccdbff10b21dbb0c80e6a7a69b7fc2b8
|
[] |
no_license
|
kippandrew/client-python-tornado
|
5d00810f57035825a84e37ff8fc89a7e79aed8da
|
d479dfeb348c5dd2e929327d800fe033b5b3b010
|
refs/heads/master
| 2021-09-04T13:01:28.275677
| 2018-01-18T23:27:34
| 2018-01-18T23:27:34
| 114,912,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_api_resource_list import V1APIResourceList # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1APIResourceList(unittest.TestCase):
"""V1APIResourceList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1APIResourceList(self):
"""Test V1APIResourceList"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_api_resource_list.V1APIResourceList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"andy@rstudio.com"
] |
andy@rstudio.com
|
b3cc281de2255b939278dd9a1f211d27dc305aa8
|
53b57b50c2bc250b6c27c5c3e761602d5442d3f8
|
/sLines.py
|
972f62de706a561f6623807946838e4c1b8f01eb
|
[] |
no_license
|
kinglei/p2013
|
472b4eeaa4c86b133b05eefccf0ed9c4931d7ffb
|
2c9a0d18c2e57fcc9e34dddc4aa7dfb0a8f1904c
|
refs/heads/master
| 2021-01-10T19:14:27.297989
| 2013-04-08T07:24:26
| 2013-04-08T07:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
#!/usr/bin/python
fname = raw_input("Please input the file name: ")
fobj = open(fname)
num = 0
for i in fobj.readlines():
num += 1
print num
fobj.close()
|
[
"jinxl560@gmail.com"
] |
jinxl560@gmail.com
|
65dc7ec333538a8de51915b9e91952e5cce61970
|
c5f58af61e3577ded52acda210f4f664651b598c
|
/template/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
|
0629f83a9a90edf8fc14db487241333766d4e02a
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hojihun5516/object-detection-level2-cv-02
|
0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac
|
bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109
|
refs/heads/master
| 2023-08-31T09:50:59.150971
| 2021-10-16T15:00:19
| 2021-10-16T15:00:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
_base_ = ["./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py"]
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style="caffe",
init_cfg=dict(type="Pretrained", checkpoint="open-mmlab://detectron2/resnet50_caffe"),
)
)
# use caffe img_norm
img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations", with_bbox=True, with_mask=True),
dict(type="Resize", img_scale=[(1333, 640), (1333, 800)], multiscale_mode="range", keep_ratio=True),
dict(type="RandomFlip", flip_ratio=0.5),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline),
)
|
[
"hojihun5516@daum.net"
] |
hojihun5516@daum.net
|
1adc1f9d82a9be7148a086a1ab937fc2f4098c34
|
2acfde78f24576c8420d8cfd7eebc1681c287b34
|
/tfx/experimental/templates/penguin/e2e_tests/local_e2e_test.py
|
8b4725c055f4d502534d0ca9ba579c09292baf07
|
[
"Apache-2.0"
] |
permissive
|
hsahovic/tfx
|
7f29f16a7cc59bfb41744291887ccc69ef668521
|
5d4448c03d408c24bf77b84bd4da3973f2389d9b
|
refs/heads/master
| 2023-03-11T12:34:07.185893
| 2021-03-03T06:39:30
| 2021-03-03T06:40:24
| 344,190,366
| 0
| 0
|
Apache-2.0
| 2021-03-03T16:33:20
| 2021-03-03T16:27:11
| null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E test using local orchestrator for penguin template."""
import os
import subprocess
import sys
import unittest
from absl import logging
import tensorflow as tf
from tfx.experimental.templates import test_utils
@unittest.skipIf(tf.__version__ < '2',
'Uses keras Model only compatible with TF 2.x')
class PenguinTemplateLocalEndToEndTest(test_utils.BaseEndToEndTest):
"""This test runs all components in the template."""
def setUp(self):
super().setUp()
self._pipeline_name = 'PENGUIN_TEMPLATE_E2E_TEST'
def _getAllUnitTests(self):
for root, _, files in os.walk(self._project_dir):
base_dir = os.path.relpath(root, self._project_dir)
if base_dir == '.': # project_dir == root
base_module = ''
else:
base_module = base_dir.replace(os.path.sep, '.') + '.'
for filename in files:
if filename.endswith('_test.py'):
yield base_module + filename[:-3]
def testGeneratedUnitTests(self):
self._copyTemplate('penguin')
for m in self._getAllUnitTests():
logging.info('Running unit test "%s"', m)
# A failed googletest will raise a CalledProcessError.
_ = subprocess.check_output([sys.executable, '-m', m])
def testLocalPipeline(self):
self._copyTemplate('penguin')
os.environ['LOCAL_HOME'] = os.path.join(self._temp_dir, 'local')
# Create a pipeline with only initial components.
result = self._runCli([
'pipeline',
'create',
'--engine',
'local',
'--pipeline_path',
'local_runner.py',
])
self.assertEqual(0, result.exit_code)
self.assertIn(
'Pipeline "{}" created successfully.'.format(self._pipeline_name),
result.output)
# Run the pipeline.
result = self._runCli([
'run',
'create',
'--engine',
'local',
'--pipeline_name',
self._pipeline_name,
])
self.assertEqual(0, result.exit_code)
# Update the pipeline to include all components.
updated_pipeline_file = self._addAllComponents()
logging.info('Updated %s to add all components to the pipeline.',
updated_pipeline_file)
# Lowers required threshold to make tests stable.
self._replaceFileContent(
os.path.join('pipeline', 'configs.py'), [
('EVAL_ACCURACY_THRESHOLD = 0.6',
'EVAL_ACCURACY_THRESHOLD = 0.1'),
])
result = self._runCli([
'pipeline',
'update',
'--engine',
'local',
'--pipeline_path',
'local_runner.py',
])
self.assertEqual(0, result.exit_code)
self.assertIn(
'Pipeline "{}" updated successfully.'.format(self._pipeline_name),
result.output)
# Run the updated pipeline.
result = self._runCli([
'run',
'create',
'--engine',
'local',
'--pipeline_name',
self._pipeline_name,
])
self.assertEqual(0, result.exit_code)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
340498d21e1f3af49bbeebb7a5b6f871a8f9459f
|
1210704ac426c8f7f74b3a901f2fea2d22b59ad3
|
/agents/epsilon_greedy_decay.py
|
fa034c8ce7e7895a8c20024113d1e2735c90f015
|
[
"MIT"
] |
permissive
|
a-pedram/kaggle-mab
|
3babfd6338b722ee0e0495289d5746cafb5eba7f
|
5d9d6d47541f6b71a5a886146928aa57a5c77591
|
refs/heads/main
| 2023-03-07T15:01:31.246270
| 2021-02-25T12:54:43
| 2021-02-25T12:54:43
| 342,224,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
import math
import random
epsilon = 0.1
last_bandit = -1
total_reward = 0
sums_of_reward = None
numbers_of_selections = None
random.seed(42)
def agent(observation, configuration):
global sums_of_reward, numbers_of_selections, last_bandit, total_reward
if observation.step == 0:
numbers_of_selections = [0] * configuration.banditCount
sums_of_reward = [0] * configuration.banditCount
if last_bandit > -1:
reward = observation.reward - total_reward
sums_of_reward[last_bandit] += reward
total_reward += reward
if random.random() < epsilon:
bandit = random.randint(0, configuration.banditCount-1)
last_bandit = bandit
else:
bandit = 0
max_upper_bound = 0
for i in range(0, configuration.banditCount):
if numbers_of_selections[i] > 0:
decay = 0.97 ** numbers_of_selections[i]
upper_bound = decay * sums_of_reward[i] / numbers_of_selections[i]
            else:
                upper_bound = math.inf  # force unexplored bandits to be tried first
if upper_bound > max_upper_bound and last_bandit != i:
max_upper_bound = upper_bound
bandit = i
last_bandit = bandit
numbers_of_selections[bandit] += 1
    return bandit
|
[
"mehdi.pedram@gmail.com"
] |
mehdi.pedram@gmail.com
|
8f6173cbd4819d5e59480ff795d01c48baf38f4a
|
21b88cced289293123f987a4887c2eaaa1202248
|
/simplesocial/groups/views.py
|
d84637035e7a3a873784bf9ed0217311104b50d3
|
[] |
no_license
|
Mehdierli/M78
|
0ae73f7aeced1f0d15dbfc7b18ad8f2053ec986b
|
76bb85c508aeb0dbe6af2f15806ae04060a41520
|
refs/heads/master
| 2020-04-03T23:16:33.581573
| 2018-10-31T21:35:14
| 2018-10-31T21:35:14
| 155,625,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
from django.contrib import messages
from django.contrib.auth.mixins import(
LoginRequiredMixin,
PermissionRequiredMixin
)
from django.urls import reverse
from django.db import IntegrityError
from django.shortcuts import get_object_or_404
from django.views import generic
from groups.models import Group,GroupMember
from . import models
class CreateGroup(LoginRequiredMixin, generic.CreateView):
fields = ("name", "description")
model = Group
template_name='groups/group_form.html'
class SingleGroup(generic.DetailView):
model = Group
template_name='groups/group_detail.html'
class ListGroups(generic.ListView):
model = Group
template_name='groups/group_list.html'
context_object_name='my_group_list'
class JoinGroup(LoginRequiredMixin, generic.RedirectView):
def get_redirect_url(self, *args, **kwargs):
return reverse("groups:single",kwargs={"slug": self.kwargs.get("slug")})
def get(self, request, *args, **kwargs):
group = get_object_or_404(Group,slug=self.kwargs.get("slug"))
try:
GroupMember.objects.create(user=self.request.user,group=group)
except IntegrityError:
messages.warning(self.request,("Warning, already a member of {}".format(group.name)))
else:
messages.success(self.request,"You are now a member of the {} group.".format(group.name))
return super().get(request, *args, **kwargs)
class LeaveGroup(LoginRequiredMixin, generic.RedirectView):
def get_redirect_url(self, *args, **kwargs):
return reverse("groups:single",kwargs={"slug": self.kwargs.get("slug")})
def get(self, request, *args, **kwargs):
try:
membership = models.GroupMember.objects.filter(
user=self.request.user,
group__slug=self.kwargs.get("slug")
).get()
except models.GroupMember.DoesNotExist:
messages.warning(
self.request,
"You can't leave this group because you aren't in it."
)
else:
membership.delete()
messages.success(
self.request,
"You have successfully left this group."
)
return super().get(request, *args, **kwargs)
|
[
"miaoml0604@gmail.com"
] |
miaoml0604@gmail.com
|
c94807dd2c5b920935a9be896f8e96eeabd9d7fd
|
c1af1454e3fbb839b5a760efb1d8eca66cee6281
|
/accounts/forms/__init__.py
|
630b020e59687209bd27dcd1dd370c0b192745cd
|
[
"MIT"
] |
permissive
|
tavoxr/django-crm
|
d197b9bae2cc71fc515881f6d673515e68c12cae
|
d6ce34b8e8e93c3ae9853df34641868d4c891125
|
refs/heads/main
| 2023-06-20T02:07:17.883200
| 2021-07-16T19:16:10
| 2021-07-16T19:16:10
| 385,318,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
from .orderForm import *
from .CreateUserForm import *
from .customerForm import *
|
[
"tavoxr23@gmail.com"
] |
tavoxr23@gmail.com
|
80cc1d7e56e131d41164abf18a9060f646c4b962
|
a8d1245601d5cb0b2e08ef54263486d1f9f6d64f
|
/exercises/12_useful_modules/task_12_3.py
|
57c00328043011f1ea6bed8e22541bacc381dcbc
|
[] |
no_license
|
darkfait/first_repo
|
be3182babeb5945bce5c08c6dfa4480032f8106c
|
82655016d751da8110572d3359594a55ac6199cc
|
refs/heads/master
| 2023-02-07T21:49:02.398279
| 2020-11-06T16:43:15
| 2020-11-06T16:43:15
| 254,878,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
# -*- coding: utf-8 -*-
"""
Задание 12.3
Создать функцию print_ip_table, которая отображает таблицу доступных и недоступных IP-адресов.
Функция ожидает как аргументы два списка:
* список доступных IP-адресов
* список недоступных IP-адресов
Результат работы функции - вывод на стандартный поток вывода таблицы вида:
Reachable Unreachable
----------- -------------
10.1.1.1 10.1.1.7
10.1.1.2 10.1.1.8
10.1.1.9
Функция не должна изменять списки, которые переданы ей как аргументы.
То есть, до выполнения функции и после списки должны выглядеть одинаково.
Для этого задания нет тестов
"""
from itertools import zip_longest
from tabulate import tabulate
def print_ip_table(reach_list, unreach_list):
    columns = ['Reachable', 'Unreachable']
    # zip_longest pairs the addresses column-wise (tabulate treats each inner
    # sequence as a row) without modifying the argument lists
    print(tabulate(list(zip_longest(reach_list, unreach_list)), headers=columns))
    return reach_list, unreach_list
if __name__ == "__main__":
rea = ['1.1.1.1','2.2.2.2']
unrea = ['3.3.3.3']
print_ip_table(rea,unrea)
|
[
"fait-forever@mail.ru"
] |
fait-forever@mail.ru
|
1af8417d6df914404094dc3dab175fc14aa8b7c4
|
13f5958c1dd2d1e356d09f6bed26cd221cd62443
|
/后端源码/bbb/crawler_script/dangdang/selling_24h_script.py
|
6becafe6c4bfae1def2be871668ef85423e4eccc
|
[] |
no_license
|
13900/xiaodangdang
|
ffcd8bc9e534759635fb4187136c16a20356c558
|
dc03c636f8ba54c306d1ae93622075eab2efcbb0
|
refs/heads/main
| 2023-06-11T15:47:27.121600
| 2021-06-24T07:41:11
| 2021-06-24T07:41:11
| 379,776,973
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
import requests
from lxml import etree
from fake_useragent import UserAgent
import random
class selling_24h():
STATUS = ''
ITEMS = dict()
@classmethod
def get_24h(cls, ip, url):
ua = UserAgent()
        entry = random.choice(ip)  # pick a random proxy entry
        proxy = {'https': 'https://' + entry.ip_address + ':' + entry.ip_port}
header = {"user-agent": ua.random}
req = requests.get(url, headers=header, proxies=proxy)
if req.status_code != 200:
            return 'Dangdang crawl failed'
txt = etree.HTML(req.text)
id_1 = txt.xpath("//li/div[1]/text()")
book_name = txt.xpath("//div[2]/ul/li/div[3]/a/text()")
img = txt.xpath("//div[@class='pic']/a/img/@src")
comments_1 = txt.xpath("//div[@class='star']/a/text()")
recommended_1 = txt.xpath("//div[@class='star']/span/text()")
author = txt.xpath("//ul/li/div[5]/a[1]/text()")
date = txt.xpath("//div[@class='publisher_info']/span/text()")
publishing = txt.xpath("//div[@class='publisher_info'][2]/a/text()")
price_1 = txt.xpath("//div[@class='price']/p/span[1]/text()")
url = txt.xpath("//li/div[@class='name']/a/@href")
id = []
price = []
comments = []
recommended = []
for i in range(len(id_1)):
id.append(id_1[i].strip('.'))
price.append((price_1[i].strip('¥')))
comments.append(comments_1[i].strip('条评论'))
recommended.append(recommended_1[i].strip('推荐'))
cls.ITEMS = {
'id': id, 'book_name': book_name, 'img': img, 'comments': comments, 'recommended': recommended,
'author': author, 'date': date, 'publishing': publishing, 'price': price, 'url': url
}
|
[
"2877929996@qq.com"
] |
2877929996@qq.com
|
1f47b4ee4cc0d1594aa057a3dc9d87a984854bf5
|
af38a64ba0908f6fff36760e8e2c6807991fb2db
|
/lesson_02/personal/chapter_6/04_item_count.py
|
e51209dc6823b35a615098c5a641c2e23be22d3a
|
[] |
no_license
|
Vinod096/learn-python
|
badbeac9a27ee6c22c70a5a057837586cede0849
|
0017174e3dbeea19ca25aaa348e75b2b8203caef
|
refs/heads/master
| 2022-05-26T13:22:07.490484
| 2022-05-01T10:19:32
| 2022-05-01T10:19:32
| 244,166,014
| 1
| 0
| null | 2020-03-04T08:16:14
| 2020-03-01T14:41:23
|
Python
|
UTF-8
|
Python
| false
| false
| 478
|
py
|
#Assume that a file containing a series of names(as strings) is named names.txt and exists
#on the computer’s disk. Write a program that displays the number of names that are stored
#in the file. (Hint: Open the file and read every string stored in it. Use a variable to keep a count of the number of items that are read from the file.)
names = open("names.txt", "r")
lines = names.readlines()
names.close()
number_of_items = len(lines)
print("number of items :", number_of_items)
|
[
"vinod.raipati@hotmail.com"
] |
vinod.raipati@hotmail.com
|
a5143f9123a8801653f08638eea943ed82a91241
|
5330918e825f8d373d3907962ba28215182389c3
|
/SimCalorimetry/Configuration/python/SimCalorimetry_setPreshowerHighGain_cff.py
|
de04a1da84ceffd653cc68498795deba536dc861
|
[] |
no_license
|
perrozzi/cmg-cmssw
|
31103a7179222c7aa94f65e83d090a5cf2748e27
|
1f4cfd936da3a6ca78f25959a41620925c4907ca
|
refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22
| 2021-01-16T23:15:58.556441
| 2017-05-11T22:43:15
| 2017-05-11T22:43:15
| 13,272,641
| 1
| 0
| null | 2017-05-11T22:43:16
| 2013-10-02T14:05:21
|
C++
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
import FWCore.ParameterSet.Config as cms
def customise(process):
process.simEcalPreshowerDigis.ESNoiseSigma = cms.untracked.double(6)
process.simEcalPreshowerDigis.ESGain = cms.untracked.int32(2)
process.simEcalPreshowerDigis.ESMIPADC = cms.untracked.double(55)
process.simEcalUnsuppressedDigis.ESGain = cms.int32(2)
process.simEcalUnsuppressedDigis.ESNoiseSigma = cms.double(6)
process.simEcalUnsuppressedDigis.ESMIPADC = cms.double(55)
return(process)
|
[
"sha1-3dce45789e317cc29a75783a879c7e618b062b82@cern.ch"
] |
sha1-3dce45789e317cc29a75783a879c7e618b062b82@cern.ch
|
3d304f607994f08ea42670d2847673aa643f4557
|
97f65c398ee2330c754db86eab5c8ba734347ecc
|
/DeepGM/inception.py
|
d48e46e6dd4319871f8e3ea865f2d9540702b3ba
|
[
"MIT"
] |
permissive
|
izzatum/BoMb-OT
|
025b2125577c2f81560f0b7b2be453577462904d
|
d5337a13c3396a2e521bf8797f360e94981babdb
|
refs/heads/master
| 2023-08-28T17:34:46.399114
| 2021-10-30T08:46:22
| 2021-10-30T08:46:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,678
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
        192: 1,   # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3 # Final average pooling features
}
def __init__(self,
output_blocks=[DEFAULT_BLOCK_INDEX],
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, \
'Last possible output block index is 3'
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = models.inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1))
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
List of torch.autograd.Variable, corresponding to the selected output
block, sorted ascending by index
"""
outp = []
x = inp
if self.resize_input:
x = F.interpolate(x,
size=(299, 299),
mode='bilinear',
align_corners=False)
if self.normalize_input:
x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
for idx, block in enumerate(self.blocks):
x = block(x)
if idx in self.output_blocks:
outp.append(x)
if idx == self.last_needed_block:
break
return outp
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = models.inception_v3(num_classes=1008,
aux_logits=False,
pretrained=False,
init_weights=True)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
inception.load_state_dict(state_dict)
return inception
class FIDInceptionA(models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
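# Minimal usage sketch (an illustrative addition, not part of the original
# file): extract the 2048-d final-average-pool features commonly used for FID.
# The random tensor stands in for a batch of images scaled to (0, 1); note
# that building the model downloads the FID Inception weights on first use.
if __name__ == '__main__':
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    model = InceptionV3(output_blocks=[block_idx]).eval()
    with torch.no_grad():
        feats = model(torch.rand(4, 3, 299, 299))[0]
    print(feats.shape)  # expected: torch.Size([4, 2048, 1, 1])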
|
[
"khai199733@gmail.com"
] |
khai199733@gmail.com
|
95fe70f13e2813bdeafcd0958be55afdc67e8ebb
|
32458c4e0e0157314a5ab8f224715ea84a8c43a9
|
/models/attention.py
|
a011b77d7bb7235d12eede6a994b37eeb4386cfb
|
[
"MIT"
] |
permissive
|
colinsongf/Global-Encoding
|
aaa75e2c37ccbba1ff6909cb84cfadf31be27b83
|
f717f6a922bd7a129e1026511dc90617a1ff8a23
|
refs/heads/master
| 2020-03-17T17:27:19.959451
| 2018-05-15T05:22:55
| 2018-05-15T05:22:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,526
|
py
|
'''
@Date : 2017/12/18
@Author: Shuming Ma
@mail : shumingma@pku.edu.cn
@homepage: shumingma.com
'''
import math
import torch
import torch.nn as nn
class luong_attention(nn.Module):
def __init__(self, hidden_size, emb_size, pool_size=0):
super(luong_attention, self).__init__()
self.hidden_size, self.emb_size, self.pool_size = hidden_size, emb_size, pool_size
self.linear_in = nn.Linear(hidden_size, hidden_size)
if pool_size > 0:
self.linear_out = maxout(2*hidden_size + emb_size, hidden_size, pool_size)
else:
            self.linear_out = nn.Sequential(nn.Linear(2*hidden_size + emb_size, hidden_size), nn.SELU(),
                                            nn.Linear(hidden_size, hidden_size), nn.Tanh())
self.softmax = nn.Softmax(dim=1)
def init_context(self, context):
self.context = context.transpose(0, 1)
def forward(self, h, x):
gamma_h = self.linear_in(h).unsqueeze(2) # batch * size * 1
weights = torch.bmm(self.context, gamma_h).squeeze(2) # batch * time
weights = self.softmax(weights) # batch * time
c_t = torch.bmm(weights.unsqueeze(1), self.context).squeeze(1) # batch * size
output = self.linear_out(torch.cat([c_t, h, x], 1))
return output, weights
class luong_gate_attention(nn.Module):
def __init__(self, hidden_size, emb_size, prob=0.1):
super(luong_gate_attention, self).__init__()
self.hidden_size, self.emb_size = hidden_size, emb_size
self.linear_in = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.Dropout(p=prob))
        self.feed = nn.Sequential(nn.Linear(2*hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                  nn.Linear(hidden_size, hidden_size), nn.Sigmoid(), nn.Dropout(p=prob))
        self.remove = nn.Sequential(nn.Linear(2*hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                    nn.Linear(hidden_size, hidden_size), nn.Sigmoid(), nn.Dropout(p=prob))
        self.linear_out = nn.Sequential(nn.Linear(2*hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                        nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.mem_gate = nn.Sequential(nn.Linear(2*hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                      nn.Linear(hidden_size, hidden_size), nn.Sigmoid(), nn.Dropout(p=prob))
self.softmax = nn.Softmax(dim=1)
self.selu = nn.SELU()
self.simple = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Linear(hidden_size, hidden_size), nn.Sigmoid())
def init_context(self, context):
self.context = context.transpose(0, 1)
def forward(self, h, embs, m, hops=1, selfatt=False):
if hops == 1:
gamma_h = self.linear_in(h).unsqueeze(2)
#gamma_h = self.selu(gamma_h)
weights = torch.bmm(self.context, gamma_h).squeeze(2)
if selfatt:
                weights = weights / math.sqrt(512)   # scaled dot-product scaling; 512 assumed to equal hidden_size
weights = self.softmax(weights)
c_t = torch.bmm(weights.unsqueeze(1), self.context).squeeze(1)
memory = m
output = self.linear_out(torch.cat([h, c_t], 1))
return output, weights, memory
x = h
for i in range(hops):
gamma_h = self.linear_in(x).unsqueeze(2)
weights = torch.bmm(self.context, gamma_h).squeeze(2)
weights = self.softmax(weights)
c_t = torch.bmm(weights.unsqueeze(1), self.context).squeeze(1)
x = c_t + x
feed_gate = self.feed(torch.cat([x, h], 1))
remove_gate = self.remove(torch.cat([x, h], 1))
memory = (remove_gate * m) + (feed_gate * (x+h))
mem_gate = self.mem_gate(torch.cat([memory, h], 1))
m_x = mem_gate * x
output = self.linear_out(torch.cat([m_x, h], 1))
return output, weights, memory
class bahdanau_attention(nn.Module):
def __init__(self, hidden_size, emb_size, pool_size=0):
super(bahdanau_attention, self).__init__()
self.linear_encoder = nn.Linear(hidden_size, hidden_size)
self.linear_decoder = nn.Linear(hidden_size, hidden_size)
self.linear_v = nn.Linear(hidden_size, 1)
self.linear_r = nn.Linear(hidden_size*2+emb_size, hidden_size*2)
self.hidden_size = hidden_size
self.emb_size = emb_size
self.softmax = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
def init_context(self, context):
self.context = context.transpose(0, 1)
def forward(self, h, x):
gamma_encoder = self.linear_encoder(self.context) # batch * time * size
gamma_decoder = self.linear_decoder(h).unsqueeze(1) # batch * 1 * size
weights = self.linear_v(self.tanh(gamma_encoder+gamma_decoder)).squeeze(2) # batch * time
weights = self.softmax(weights) # batch * time
c_t = torch.bmm(weights.unsqueeze(1), self.context).squeeze(1) # batch * size
r_t = self.linear_r(torch.cat([c_t, h, x], dim=1))
output = r_t.view(-1, self.hidden_size, 2).max(2)[0]
return output, weights
class maxout(nn.Module):
def __init__(self, in_feature, out_feature, pool_size):
super(maxout, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.pool_size = pool_size
self.linear = nn.Linear(in_feature, out_feature*pool_size)
def forward(self, x):
output = self.linear(x)
output = output.view(-1, self.out_feature, self.pool_size)
output = output.max(2)[0]
return output
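# A minimal usage sketch (not part of the original module; shapes follow the
# inline comments above -- context: time x batch x hidden_size,
# h: batch x hidden_size, x: batch x emb_size):
#
#   attn = luong_attention(hidden_size=512, emb_size=256)
#   attn.init_context(context)
#   output, weights = attn(h, x)   # output: batch x hidden_size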
|
[
"justinlin930319@hotmail.com"
] |
justinlin930319@hotmail.com
|
8dddee5d45a2b617dde233e1b0837f788ad5af74
|
8ce5eaba8a2848c17f9a47eb083afdd90887e17e
|
/scripts/dumping/sequence_update/dump_restmap_dataset.py
|
38df07a892b224f6d2f363e6298ce4bc85decf71
|
[
"MIT"
] |
permissive
|
yeastgenome/SGDBackend-Nex2
|
cad926729f9da738b3e82fc85d29b936ed6b8f1a
|
017e7248fbc3f1d1e8062d67f26937ec00c3cb8a
|
refs/heads/master
| 2023-08-23T12:24:15.220178
| 2023-06-22T01:22:11
| 2023-06-22T01:22:11
| 46,740,355
| 5
| 7
|
MIT
| 2023-09-06T20:28:05
| 2015-11-23T18:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
from scripts.loading.database_session import get_session
from src.models import Locusdbentity, Taxonomy, Contig, So
from scripts.dumping.sequence_update import generate_dna_seq_file
__author__ = 'sweng66'
genomicFile = "scripts/dumping/sequence_update/data/restrictionMapper/orf_genomic.seq"
TAXON = "TAX:559292"
SEQ_FORMAT = 'plain'
FILE_TYPE = 'ALL'
def dump_data():
nex_session = get_session()
taxonomy = nex_session.query(Taxonomy).filter_by(taxid=TAXON).one_or_none()
taxonomy_id = taxonomy.taxonomy_id
dbentity_id_to_data = dict([(x.dbentity_id, (x.systematic_name, x.gene_name, x.sgdid, x.qualifier, x.description)) for x in nex_session.query(Locusdbentity).filter_by(dbentity_status = 'Active').all()])
so_id_to_display_name = dict([(x.so_id, x.term_name) for x in nex_session.query(So).all()])
contig_id_to_chr = dict([(x.contig_id, x.display_name) for x in nex_session.query(Contig).filter(Contig.display_name.like('Chromosome %')).all()])
generate_dna_seq_file(nex_session, taxonomy_id, dbentity_id_to_data, contig_id_to_chr,
so_id_to_display_name, genomicFile, 'GENOMIC', SEQ_FORMAT, FILE_TYPE)
nex_session.close()
if __name__ == '__main__':
dump_data()
|
[
"noreply@github.com"
] |
yeastgenome.noreply@github.com
|
df834d024d1583ec890986cbe4b75060df1791aa
|
301baff8d17536c7a909a5c02457d7e041a78682
|
/blog/migrations/0003_auto_20210405_0729.py
|
0a279d9249c6f67e1d83c580cf603cb7001d630b
|
[] |
no_license
|
resbrotherx/resbrotherx
|
6442260d3b5e397a46cff7f1702e34ac07832c61
|
a430de9bcb4affca6815af64248ec31cc88a7a5a
|
refs/heads/main
| 2023-05-02T22:20:46.527426
| 2021-05-21T23:26:26
| 2021-05-21T23:26:26
| 277,882,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
# Generated by Django 3.1.3 on 2021-04-05 18:29
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0002_auto_20210405_0729'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='likes',
field=models.ManyToManyField(blank=True, null=True, related_name='loved', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='dislikes',
field=models.ManyToManyField(blank=True, null=True, related_name='post_disliked', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='likes',
field=models.ManyToManyField(blank=True, null=True, related_name='post_loved', to=settings.AUTH_USER_MODEL),
),
]
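# Note: null=True has no effect on a ManyToManyField -- Django ignores it
# (newer versions report check fields.W340); blank=True alone controls whether
# the relation may be left empty in forms.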
|
[
"francisdaniel140@gmail.com"
] |
francisdaniel140@gmail.com
|
b592cb7cc91694ddd3f470b85990042cd4b5adc2
|
b08f5367ffd3bdd1463de2ddc05d34cbfba6796e
|
/recursion/powersets.py
|
fa71f8d354af43b94b4d0f5bcb00a170e05c74ad
|
[] |
no_license
|
uohzxela/fundamentals
|
cb611fa6c820dc8643a43fd045efe96bc43ba4ed
|
6bbbd489c3854fa4bf2fe73e1a2dfb2efe4aeb94
|
refs/heads/master
| 2020-04-04T03:56:44.145222
| 2018-04-05T01:08:14
| 2018-04-05T01:08:14
| 54,199,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
# O(2^n)
class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
self.subsets_(nums, 0, [], res)
return res
def subsets_(self, nums, i, partial, res):
if i > len(nums)-1:
res.append(list(partial))
return
partial.append(nums[i])
self.subsets_(nums, i+1, partial, res)
partial.pop()
self.subsets_(nums, i+1, partial, res)
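# A minimal usage sketch: each call branches on including/excluding nums[i], so
#   Solution().subsets([1, 2])  # -> [[1, 2], [1], [2], []]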
|
[
"uohzxela@gmail.com"
] |
uohzxela@gmail.com
|
02646842ebbaa4cb3831316bebc75e6db92decdf
|
eba3c6eb8d5ae18f3bcbb39df9b391a3d4a40ebd
|
/src/storage.py
|
15a4676ee9c6be24f22407b09d9bae4f203cd195
|
[] |
no_license
|
aleefrank/VPP-Simulator
|
294381d5dafe6316dc21180683110081bfbb91ca
|
44f2663c905d6c6ecdd098d05a74a9d66fb31650
|
refs/heads/main
| 2023-03-23T15:50:31.444004
| 2021-03-13T11:11:19
| 2021-03-13T11:11:19
| 347,332,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
from src.enums import ProfileType
from src.profile import Profile
# All parameters are percentages (besides the efficiencies)
class SimpleStorage(Profile):
def __init__(self, max_charge, initial_charge=25, max_delta_charge=60, max_delta_discharge=60,
charge_efficiency=0.95, discharge_efficiency=0.95, timestamps=96):
super().__init__([0] * timestamps, ProfileType.STORAGE, 0, 0, timestamps)
self.initial_charge = max_charge * initial_charge / 100
self.max_charge = max_charge
self.max_delta_charge = self.setup_array_for_property(max_charge * max_delta_charge / 100)
self.max_delta_discharge = self.setup_array_for_property(max_charge * max_delta_discharge / 100)
self.charge_efficiency = charge_efficiency
self.discharge_efficiency = discharge_efficiency
def get_flexibility(self, type='minimized'):
if type == 'minimized':
return self.max_delta_discharge
if type == 'maximized':
return self.max_delta_charge
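# A minimal usage sketch (assuming Profile/ProfileType behave as imported
# above; units are whatever max_charge is expressed in):
#   storage = SimpleStorage(max_charge=100)
#   headroom = storage.get_flexibility('maximized')   # per-timestep charge limits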
|
[
"alessandro.f95@live.it"
] |
alessandro.f95@live.it
|
3385d7bccfd7533f8e424eca23c2e897f3a99e2e
|
4f1b6a8001280c76f6fa40064251ccf684f2b28b
|
/apps/client/api.py
|
12819fbba4937849a90f2324145e49f4ee385dd2
|
[] |
no_license
|
asorici/envived
|
f51c40f49b8dbee13ebde7709437e4efa12cd335
|
2b818240d6cef7d64f9c7a4ea4ecb52695c82878
|
refs/heads/master
| 2016-09-05T23:37:40.798340
| 2014-07-04T13:35:25
| 2014-07-04T13:35:25
| 11,955,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,117
|
py
|
from client.authorization import AnnotationAuthorization, UserAuthorization, \
FeatureAuthorization
from client.validation import AnnotationValidation
from coresql.models import Environment, Area, Announcement, History, UserProfile, \
ResearchProfile, UserContext, UserSubProfile
from coresql.utils import str2bool
from datetime import datetime
from django.conf.urls import patterns
from django.core.exceptions import MultipleObjectsReturned
from tastypie import fields, http
from tastypie.api import Api
from tastypie.authentication import Authentication
from tastypie.exceptions import ImmediateHttpResponse, NotFound
from tastypie.resources import ModelResource
class UserResource(ModelResource):
first_name = fields.CharField(readonly = True)
last_name = fields.CharField(readonly = True)
class Meta:
queryset = UserProfile.objects.all()
resource_name = 'user'
detail_allowed_methods = ["get", "put"]
list_allowed_methods = ["get"]
#fields = ['first_name']
excludes = ["id", "timestamp", "is_anonymous"]
authentication = Authentication()
authorization = UserAuthorization()
def build_filters(self, filters = None):
"""
enable filtering by environment and area (which do not have their own fields in this resource)
"""
if filters is None:
filters = {}
orm_filters = super(UserResource, self).build_filters(filters)
if "area" in filters:
area_id = filters['area']
area = Area.objects.get(id = area_id)
#checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]
orm_filters["pk__in"] = [user_ctx.user.pk
for user_ctx in UserContext.objects.filter(currentArea = area)]
elif "environment" in filters:
environment_id = filters['environment']
environment = Environment.objects.get(id = environment_id)
#checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]
orm_filters["pk__in"] = [user_ctx.user.pk
for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]
return orm_filters
def dehydrate_first_name(self, bundle):
return bundle.obj.user.first_name
def dehydrate_last_name(self, bundle):
return bundle.obj.user.last_name
def dehydrate_research_profile(self, bundle):
import inspect, sys
research_dict = {}
if bundle.obj.research_profile:
for f in ResearchProfile._meta.fields:
if not f.primary_key and not hasattr(f, 'foreign_key'):
field_class = f.__class__
extension_classes = inspect.getmembers(sys.modules["coresql.db.fields"],
lambda c: inspect.isclass(c) and c.__module__ == "coresql.db.fields")
if (field_class.__name__, field_class) in extension_classes:
research_dict[f.name] = getattr(bundle.obj.research_profile, f.name).to_serializable()
else:
research_dict[f.name] = getattr(bundle.obj.research_profile, f.name)
return research_dict
def dehydrate(self, bundle):
#if 'research_profile' in bundle.data and not bundle.obj.research_profile:
# del bundle.data['research_profile']
""" dehydrate UserSubProfiles if requested """
if 'showprofile' in bundle.request.GET and \
bundle.request.GET['showprofile'] in UserSubProfile.get_subclass_list() + ['all']:
## get downcasted versions directly of all the subprofiles associated with this userprofile
profile_type = bundle.request.GET['showprofile']
subprofiles = []
if profile_type == 'all':
subprofiles = bundle.obj.subprofiles.all().select_subclasses()
else:
subprofiles = bundle.obj.subprofiles.all().select_subclasses(profile_type)
subprofiles_dict = {}
for profile in subprofiles:
data = profile.to_serializable()
if data:
subprofiles_dict.update(data)
if subprofiles_dict:
bundle.data['subprofiles'] = subprofiles_dict
""" if the user is requesting his own data then return his email too as it
is an identifying element """
if hasattr(bundle.request, "user") and not bundle.request.user.is_anonymous():
user_profile = bundle.request.user.get_profile()
if user_profile.pk == bundle.obj.pk:
bundle.data['email'] = bundle.obj.user.email
""" remove c2dm data from bundle """
if 'c2dm_id' in bundle.data:
del bundle.data['c2dm_id']
return bundle
def get_list(self, request, **kwargs):
## override the list retrieval part to verify additionally that an ``environment`` or ``area`` filter exists
## otherwise reject the call with a HttpMethodNotAllowed
if 'environment' in request.GET or 'area' in request.GET:
return super(UserResource, self).get_list(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
def apply_sorting(self, obj_list, options=None):
## apply a default sorting of user by their last_name
return obj_list.order_by("user__last_name")
def obj_update(self, bundle, skip_errors=False, **kwargs):
"""
        It may be intentional that the default obj_update treats DoesNotExist and MultipleObjectsReturned
        as acceptable exceptions which get transformed into a CREATE operation.
        We don't want such behavior, so we catch those exceptions and raise a BadRequest instead.
"""
from tastypie.serializers import Serializer
try:
serdes = Serializer()
deserialized = None
try:
deserialized = serdes.deserialize(bundle.request.raw_post_data,
format=bundle.request.META.get('CONTENT_TYPE', 'application/json'))
except Exception:
deserialized = None
del serdes
if deserialized is None:
return ImmediateHttpResponse(response = http.HttpBadRequest())
if 'unregister_c2dm' in deserialized and deserialized['unregister_c2dm'] == True:
bundle.data['c2dm_id'] = None
updated_bundle = super(UserResource, self).obj_update(bundle, skip_errors=skip_errors, **kwargs)
return updated_bundle
except (NotFound, MultipleObjectsReturned):
raise ImmediateHttpResponse(response = http.HttpBadRequest())
class EnvironmentResource(ModelResource):
features = fields.ListField()
parent = fields.ForeignKey('self', 'parent', null = True)
owner = fields.ForeignKey(UserResource, 'owner', full = True)
class Meta:
queryset = Environment.objects.all()
resource_name = 'environment'
#api_name = 'v1/resources'
#fields = ['name', 'data', 'tags', 'parentID', 'category', 'latitude', 'longitude', 'timestamp']
excludes = ['width', 'height']
detail_allowed_methods = ['get']
list_allowed_methods = ['get']
authentication = Authentication()
default_format = "application/json"
def dehydrate_tags(self, bundle):
return bundle.obj.tags.to_serializable()
def dehydrate_parent(self, bundle):
if not bundle.data['parent'] is None:
parent_data = bundle.data['parent']
parent_name = bundle.obj.parent.name
return {'uri' : parent_data, 'name': parent_name}
return None
def dehydrate_features(self, bundle):
## retrieve the value of the virtual flag
virtual = get_virtual_flag_from_url(bundle.request)
## return a list of dictionary values from the features of this environment
feature_list = []
for feature in bundle.obj.features.select_subclasses():
feature_resource_class = feature.__class__.get_resource_class()
if feature_resource_class:
feat_dict = feature.to_serializable(virtual = virtual)
if feat_dict:
## attach resource_uri and environment_uri
#feat_dict['resource_uri'] = FeatureResource().get_resource_uri(feature)
feat_dict['resource_uri'] = feature_resource_class().get_resource_uri(feature)
feat_dict['environment'] = self.get_resource_uri(bundle)
feature_list.append(feat_dict)
return feature_list
def dehydrate(self, bundle):
""" Delete the img_thumbnail_url if it is null """
if bundle.obj.img_thumbnail_url is None:
del bundle.data['img_thumbnail_url']
""" append layout url if a level filter exists in the request """
if "level" in bundle.request.GET:
level = int(bundle.request.GET["level"])
bundle.data["layout_url"] = bundle.obj.layouts.get(level=level).mapURL
"""
make bundle consistent for location parsing on mobile client:
add a location_type entry in the bundle.data
put all the rest of the data under location_data
"""
location_data = bundle.data.copy()
bundle.data.clear()
bundle.data['location_type'] = self._meta.resource_name
bundle.data['location_data'] = location_data
return bundle
class AreaResource(ModelResource):
parent = fields.ForeignKey(EnvironmentResource, 'environment')
features = fields.ListField()
owner = fields.DictField()
admin = fields.ForeignKey(UserResource, 'admin', null = True, full = True)
class Meta:
queryset = Area.objects.all()
resource_name = 'area'
allowed_methods = ['get']
excludes = ['shape', 'layout']
filtering = {
'parent': ['exact'],
}
authentication = Authentication()
def get_list(self, request, **kwargs):
## override the list retrieval part to verify additionally that an ``environment`` filter exists
## otherwise reject the call with a HttpMethodNotAllowed
if 'parent' in request.GET or 'q' in request.GET:
return super(AreaResource, self).get_list(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
def build_filters(self, filters = None):
"""
enable filtering by level (which does not have its own field)
"""
if filters is None:
filters = {}
orm_filters = super(AreaResource, self).build_filters(filters)
if "level" in filters:
orm_filters["layout__level"] = int(filters["level"])
return orm_filters
def dehydrate_tags(self, bundle):
return bundle.obj.tags.to_serializable()
def dehydrate_parent(self, bundle):
parent_data = bundle.data['parent']
parent_name = bundle.obj.environment.name
return {'uri' : parent_data, 'name': parent_name}
def dehydrate_owner(self, bundle):
user_res = UserResource()
user_bundle = user_res.build_bundle(bundle.obj.environment.owner, request=bundle.request)
user_bundle = user_res.full_dehydrate(user_bundle)
return user_bundle.data
def dehydrate_features(self, bundle):
## retrieve the value of the virtual flag
virtual = get_virtual_flag_from_url(bundle.request)
## return a list of dictionary values from the features of this area
feature_list = []
for feature in bundle.obj.features.select_subclasses():
feature_resource_class = feature.__class__.get_resource_class()
if feature_resource_class:
feat_dict = feature.to_serializable(request = bundle.request, virtual = virtual)
if feat_dict:
## attach resource_uri and area_uri
# feat_dict['resource_uri'] = FeatureResource().get_resource_uri(feature)
feat_dict['resource_uri'] = feature_resource_class().get_resource_uri(feature)
feat_dict['area'] = self.get_resource_uri(bundle)
feature_list.append(feat_dict)
## then see if environment features which also apply to the area are available - e.g. program, order
environment = bundle.obj.environment
environment_features = environment.features.select_subclasses().filter(is_general = True)
for env_feat in environment_features:
env_feat_resource_class = env_feat.__class__.get_resource_class()
if env_feat_resource_class:
feat_dict = env_feat.to_serializable(request = bundle.request, virtual = virtual)
if feat_dict:
## attach resource_uri and area_uri
#feat_dict['resource_uri'] = FeatureResource().get_resource_uri(env_feat)
feat_dict['resource_uri'] = env_feat_resource_class().get_resource_uri(env_feat)
feat_dict['environment'] = EnvironmentResource().get_resource_uri(environment)
feature_list.append(feat_dict)
return feature_list
def dehydrate(self, bundle):
""" delete admin field from bundle.data if the model field is null """
if bundle.obj.admin is None:
del bundle.data['admin']
""" Delete the img_thumbnail_url if it is null """
if bundle.obj.img_thumbnail_url is None:
del bundle.data['img_thumbnail_url']
""" append level data from the layout reference of the Area obj """
bundle.data['level'] = bundle.obj.layout.level
"""
make bundle consistent for location parsing on mobile client:
add a location_type entry in the bundle.data
put all the rest of the data under location_data
"""
location_data = bundle.data.copy()
bundle.data.clear()
bundle.data['location_type'] = self._meta.resource_name
bundle.data['location_data'] = location_data
return bundle
class FeatureResource(ModelResource):
environment = fields.ForeignKey(EnvironmentResource, 'environment', null = True)
area = fields.ForeignKey(AreaResource, 'area', null = True)
category = fields.CharField(attribute = 'category')
data = fields.DictField()
class Meta:
# queryset = Feature.objects.select_subclasses()
# resource_name = 'feature'
allowed_methods = ['get']
excludes = ['id', 'is_general']
filtering = {
'area' : ['exact'],
'environment' : ['exact'],
'category' : ['exact']
}
authentication = Authentication()
authorization = FeatureAuthorization()
def base_urls(self):
from django.conf.urls.defaults import url
from tastypie.utils.urls import trailing_slash
"""
The standard URLs this ``Resource`` should respond to.
"""
return [
url(r"^features/(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^features/(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^features/(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^features/(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def get_list(self, request, **kwargs):
"""
override the list retrieval part to verify additionally that an ``area`` or ``environment``
and a ``category`` filter exist otherwise reject the call with a HttpMethodNotAllowed
"""
# if ('area' in request.GET or 'environment' in request.GET) and 'category' in request.GET:
if 'area' in request.GET or 'environment' in request.GET:
return super(FeatureResource, self).get_list(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
def get_object_list(self, request):
from django.db.models import Q
feature_obj_list = super(FeatureResource, self).get_object_list(request)
if 'area' in request.GET:
area_id = request.GET['area']
try:
area = Area.objects.get(id = area_id)
q1 = Q(area = area)
q2 = Q(environment = area.environment) & Q(is_general = True)
return feature_obj_list.filter(q1 | q2)
except Area.DoesNotExist, ex:
                raise ImmediateHttpResponse(response=http.HttpBadRequest(content=str(ex)))
return feature_obj_list
def build_filters(self, filters = None):
if filters is None:
filters = {}
if 'area' in filters:
## remove the filter since it has been handled in get_obj_list
del filters['area']
orm_filters = super(FeatureResource, self).build_filters(filters)
return orm_filters
def dehydrate_data(self, bundle):
## retrieve the value of the virtual flag
virtual = get_virtual_flag_from_url(bundle.request)
filters = bundle.request.GET.copy()
return bundle.obj.get_feature_data(bundle, virtual, filters)
def dehydrate(self, bundle):
if bundle.obj.environment is None:
del bundle.data['environment']
elif bundle.obj.area is None:
del bundle.data['area']
return bundle
class AnnouncementResource(ModelResource):
environment = fields.ForeignKey(EnvironmentResource, 'environment')
area = fields.ForeignKey(AreaResource, 'area', null = True)
class Meta:
queryset = Announcement.objects.all()
resource_name = 'announcement'
allowed_methods = ['get']
fields = ['data', 'timestamp']
excludes = ['id']
filtering = {
'area': ['exact'],
'environment': ['exact'],
'timestamp': ['gt', 'gte'],
}
authentication = Authentication()
def get_list(self, request, **kwargs):
## override the list retrieval part to verify additionally that an ``environment`` or ``area`` filter exists
## otherwise reject the call with a HttpMethodNotAllowed
if 'environment' in request.GET or 'area' in request.GET:
return super(AnnouncementResource, self).get_list(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
def get_object_list(self, request):
## override the usual obj_list retrieval by filtering out only the yet to be given announcements
## for the current environment (which we **know** must exist)
timestamp = datetime.now()
## get default object list
announcement_obj_list = super(AnnouncementResource, self).get_object_list(request)
if 'environment' in request.GET:
try:
env_id = request.GET['environment']
environ = Environment.objects.get(id=env_id)
announcement_obj_list = announcement_obj_list.filter(environment=environ)
except Exception:
pass
if 'area' in request.GET:
try:
area_id = request.GET['area']
area = Area.objects.get(id=area_id)
announcement_obj_list = announcement_obj_list.filter(area=area)
except Exception:
pass
try:
id_list = []
## loop through each announcement and see if any of its
## triggers are greater than the current timestamp
            for obj in announcement_obj_list:
                triggers = obj.triggers.getList()
                ## maybe make the following a little less hardcoded
                if obj.repeatEvery == "day":
                    ## datetime.replace returns a new object, so collect the results
                    triggers = [trig.replace(year=timestamp.year, month=timestamp.month, day=timestamp.day)
                                for trig in triggers]
                elif obj.repeatEvery == "week":
                    ## keep only the triggers that are a whole number of weeks away from
                    ## the timestamp (filter with a comprehension instead of removing
                    ## items from the list while iterating over it)
                    triggers = [trig for trig in triggers
                                if (timestamp.date() - trig.date()).days % 7 == 0]
                    ## and then update the day only for those
                    triggers = [trig.replace(year=timestamp.year, month=timestamp.month, day=timestamp.day)
                                for trig in triggers]
                ## and now we can do easy comparisons on the normalized triggers
                should_be_included = False
                for dt in triggers:
                    if dt >= timestamp:
                        should_be_included = True
                        break
                if should_be_included:
                    id_list.append(obj.id)
return announcement_obj_list.filter(id__in = id_list)
except Exception:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
class AnnotationResource(ModelResource):
environment = fields.ForeignKey(EnvironmentResource, 'environment', null = True)
area = fields.ForeignKey(AreaResource, 'area', null = True)
user = fields.ForeignKey(UserResource, 'user', null = True)
data = fields.DictField()
class Meta:
# queryset = Annotation.objects.select_subclasses()
# resource_name = 'annotation'
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post']
## need to put complete list of fields because otherwise the related ones will not get inherited in
## subclasses of AnnotationResource.
## Not sure yet if this is desired functionality or a bug in Tastypie.
fields = ['environment', 'area', 'user', 'data', 'category', 'timestamp']
filtering = {
'area': ['exact'],
'environment': ['exact'],
'timestamp': ['gt', 'gte', 'lt', 'lte'],
'category': ['exact'],
}
ordering = ['timestamp']
authentication = Authentication()
authorization = AnnotationAuthorization()
#validation = FormValidation(form_class = AnnotationForm)
validation = AnnotationValidation()
always_return_data = True
def base_urls(self):
from django.conf.urls.defaults import url
from tastypie.utils.urls import trailing_slash
"""
The standard URLs this ``Resource`` should respond to.
"""
return [
url(r"^annotations/(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^annotations/(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^annotations/(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^annotations/(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def get_list(self, request, **kwargs):
## override the list retrieval part to verify additionally that an ``area`` or ``environment`` filter exists
## otherwise reject the call with a HttpMethodNotAllowed
# if ('area' in request.GET or 'environment' in request.GET) and 'category' in request.GET:
if 'area' in request.GET or 'environment' in request.GET:
return super(AnnotationResource, self).get_list(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
"""
The following methods combined ensure that the environment=1&all=true query is handled successfully
"""
def build_filters(self, filters = None):
if filters is None:
filters = {}
if 'environment' in filters and 'all' in filters and filters['all'] == 'true':
"""
if environment and all are in the filters, don't apply them any more because it will have
already been handled in get_object_list
"""
del filters['environment']
del filters['all']
orm_filters = super(AnnotationResource, self).build_filters(filters)
return orm_filters
def get_object_list(self, request):
from django.db.models import Q
if 'environment' in request.GET and 'all' in request.GET and request.GET['all'] == 'true':
try:
environment_pk = request.GET['environment']
environment = Environment.objects.get(pk=environment_pk)
q1 = Q(environment=environment)
q2 = Q(area__in=list(environment.areas.all()))
return super(AnnotationResource, self).get_object_list(request).filter(q1 | q2)
except Exception, ex:
#print ex
                raise ImmediateHttpResponse(response=http.HttpBadRequest(content=str(ex)))
return super(AnnotationResource, self).get_object_list(request)
def dehydrate_data(self, bundle):
## return the data representation of this annotation according to its type
# user_profile = bundle.request.user.get_profile()
return bundle.obj.get_annotation_data()
def dehydrate_timestamp(self, bundle):
from pytz import timezone
local_tz = timezone("Europe/Bucharest")
return local_tz.localize(bundle.obj.timestamp)
def dehydrate(self, bundle):
"""
return additionally for each annotation
bundle the name of the environment/area for which the annotation was made
"""
if not bundle.obj.environment is None:
## make the environment response a dictionary, containing resource_uri and name
bundle.data['environment'] = {'resource_uri': bundle.data['environment'], 'name': bundle.obj.environment.name}
if not bundle.obj.area is None:
## make the area response a dictionary, containing resource_uri and name
bundle.data['area'] = {'resource_uri': bundle.data['area'], 'name': bundle.obj.area.name}
"""
bundle in the user's first and last name under the ['data']['user'] entry
"""
first_name = "Anonymous"
last_name = "Guest"
user_profile = bundle.obj.user
if not user_profile is None and not user_profile.is_anonymous:
first_name = user_profile.user.first_name
last_name = user_profile.user.last_name
bundle.data['data']['user'] = { 'first_name' : first_name,
'last_name' : last_name
}
"""
now remove also null area/environment data
"""
if not bundle.data['environment']:
del bundle.data['environment']
if not bundle.data['area']:
del bundle.data['area']
"""
if no data is found remove the 'data' attribute from the bundle to avoid useless processing on
the mobile side
"""
if not bundle.data['data']:
del bundle.data['data']
return bundle
def obj_create(self, bundle, **kwargs):
## because of the AnnotationAuthorization class, request.user will have a profile
user_profile = bundle.request.user.get_profile()
updated_bundle = super(AnnotationResource, self).obj_create(bundle, user=user_profile)
return updated_bundle
def obj_update(self, bundle, skip_errors=False, **kwargs):
"""
        It may be intentional that the default obj_update treats DoesNotExist and MultipleObjectsReturned
        as acceptable exceptions which get transformed into a CREATE operation.
        We don't want such behavior, so we catch those exceptions and raise a BadRequest instead.
"""
try:
updated_bundle = super(AnnotationResource, self).obj_update(bundle, skip_errors=skip_errors, **kwargs)
return updated_bundle
except NotFound, enf:
            raise ImmediateHttpResponse(response = http.HttpBadRequest(content=str(enf)))
except MultipleObjectsReturned, emult:
            raise ImmediateHttpResponse(response = http.HttpBadRequest(content=str(emult)))
def _make_c2dm_notification(self, registration_id, collapse_key, bundle, params = None):
import socket, pickle, c2dm, sys
if params is None:
params = {}
if not registration_id is None:
#collapse_key = "annotation_" + bundle.obj.category
resource_uri = self.get_resource_uri(bundle)
environment = bundle.obj.environment
if not bundle.obj.area is None:
environment = bundle.obj.area.environment
location_uri = EnvironmentResource().get_resource_uri(environment)
feature = bundle.obj.category
# prepare notification data
registration_ids = [registration_id]
notification_data = {'location_uri' : location_uri,
'resource_uri' : resource_uri,
'feature' : feature,
}
notification_data['params'] = params
delay_while_idle = False
ttl = None
if not collapse_key is None:
ttl = 600
# pickle notification data and send it
data = pickle.dumps((registration_ids, collapse_key, delay_while_idle, ttl, notification_data))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Connect to server and send data
sock.connect((c2dm.GCMServer.HOST, c2dm.GCMServer.PORT))
sock.sendall(data + "\n")
# Receive data from the server and shut down
received = sock.recv(1024)
if received == "OK":
print >> sys.stderr, "[Annotation GCM] Notification enqueued"
else:
print >> sys.stderr, "[Annotation GCM] Notification NOT enqueued"
except Exception, ex:
print >>sys.stderr, "[Annotation GCM] failure enqueueing annotation: ", ex
finally:
sock.close()
class HistoryResource(ModelResource):
environment = fields.ForeignKey(EnvironmentResource, 'environment')
area = fields.ForeignKey(AreaResource, 'area')
user = fields.ForeignKey(UserResource, 'user')
class Meta:
resource_name = 'history'
queryset = History.objects.all()
excludes = ['user']
allowed_methods = ['get']
filtering = {
'user': ['exact'],
}
ordering = ['-timestamp']
def get_list(self, request, **kwargs):
        ## override the list retrieval part to verify additionally that a ``user`` filter exists
        ## otherwise reject the call with a HttpMethodNotAllowed
        if 'user' in request.GET:
            return super(HistoryResource, self).get_list(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
class EnvrionmentContextResource(ModelResource):
PEOPLE_COUNT = "peoplecount"
response = fields.DictField()
class Meta:
queryset = Environment.objects.all()
resource_name = 'environmentcontext'
detail_allowed_methods = ['get']
list_allowed_methods = []
fields = ['']
def get_detail(self, request, **kwargs):
        ## override the detail retrieval to verify additionally that a ``request`` parameter exists
        ## otherwise reject the call with a HttpBadRequest
if 'request' in request.GET:
return super(EnvrionmentContextResource, self).get_detail(request, **kwargs)
else:
raise ImmediateHttpResponse(response=http.HttpBadRequest())
def dehydrate_response(self, bundle):
## see what the context request is
context_request = bundle.request.GET['request']
if context_request == EnvrionmentContextResource.PEOPLE_COUNT:
environment = bundle.obj
environment_people_count = UserContext.objects.filter(currentEnvironment = environment).count()
return environment_people_count
else:
raise ImmediateHttpResponse(response=http.HttpNotImplemented())
#############################################################################################################
#############################################################################################################
class ClientApi(Api):
def __init__(self, *args, **kwargs):
super(ClientApi, self).__init__(*args, **kwargs)
'''
def prepend_urls(self):
from django.conf.urls.defaults import url, include
from client.views import checkin, checkout, login, logout, register, create_anonymous, delete_anonymous
prepended_urls = Api.prepend_urls(self)
## add all general actions
prepended_urls.extend([
url(r"^%s/actions/create_anonymous/$" % self.api_name, create_anonymous, name="create_anonymous"),
url(r"^%s/actions/delete_anonymous/$" % self.api_name, delete_anonymous, name="delete_anonymous"),
url(r"^%s/actions/register/$" % self.api_name, register, name="register"),
url(r"^%s/actions/login/$" % self.api_name, login, name="login"),
url(r"^%s/actions/logout/$" % self.api_name, logout, name="logout"),
url(r"^%s/actions/checkin/$" % self.api_name, checkin, name="checkin"),
url(r"^%s/actions/checkout/$" % self.api_name, checkout, name="checkout")
])
## add all per feature resource urls
"""
for feat_res_cls in FeatureResource.__subclasses__():
prepended_urls.append(url(r"^(?P<api_name>%s)/resources/features/" % self.api_name, include(feat_res_cls().urls)))
## add all per feature annotation urls
for ann_res_cls in AnnotationResource.__subclasses__():
prepended_urls.append(url(r"^(?P<api_name>%s)/resources/annotations/" % self.api_name, include(ann_res_cls().urls)))
"""
## add all client api urls under the `resources' url-path
for name in sorted(self._registry.keys()):
self._registry[name].api_name = self.api_name
prepended_urls.append(url(r"^(?P<api_name>%s)/resources/" % self.api_name, include(self._registry[name].urls)))
return prepended_urls
'''
@property
def urls(self):
"""
Provides URLconf details for the ``Api`` and all registered
``Resources`` beneath it.
"""
from django.conf.urls.defaults import url, include
from tastypie.utils.urls import trailing_slash
from client.views import checkin, checkout, login, logout, register, create_anonymous, delete_anonymous
pattern_list = [
url(r"^(?P<api_name>%s)%s$" % (self.api_name, trailing_slash()), self.wrap_view('top_level'), name="api_%s_top_level" % self.api_name),
]
for name in sorted(self._registry.keys()):
self._registry[name].api_name = self.api_name
pattern_list.append((r"^(?P<api_name>%s)/resources/" % self.api_name, include(self._registry[name].urls)))
## then add the actions
pattern_list.extend([
url(r"^%s/actions/create_anonymous/$" % self.api_name, create_anonymous, name="create_anonymous"),
url(r"^%s/actions/delete_anonymous/$" % self.api_name, delete_anonymous, name="delete_anonymous"),
url(r"^%s/actions/register/$" % self.api_name, register, name="register"),
url(r"^%s/actions/login/$" % self.api_name, login, name="login"),
url(r"^%s/actions/logout/$" % self.api_name, logout, name="logout"),
url(r"^%s/actions/checkin/$" % self.api_name, checkin, name="checkin"),
url(r"^%s/actions/checkout/$" % self.api_name, checkout, name="checkout")
])
urlpatterns = self.prepend_urls()
urlpatterns += patterns('',
*pattern_list
)
return urlpatterns
#############################################################################################################
#############################################################################################################
def get_virtual_flag_from_url(request):
## retrieve the value of the virtual flag
    virtual = request.GET.get('virtual')
    if virtual is None:
raise ImmediateHttpResponse(response = http.HttpBadRequest(content='No "virtual" flag in request url'))
try:
virtual = str2bool(virtual)
except ValueError:
raise ImmediateHttpResponse(response = http.HttpBadRequest(content='"virtual" flag could not be parsed to a boolean'))
return virtual
def get_timestamp_from_url(date_string):
timestamp = None
try:
## first try the format %Y-%m-%dT%H:%M:%S
time_format = "%Y-%m-%dT%H:%M:%S"
timestamp = datetime.strptime(date_string, time_format)
except ValueError:
pass
try:
## then try the format %Y-%m-%d %H:%M:%S
time_format = "%Y-%m-%d %H:%M:%S"
timestamp = datetime.strptime(date_string, time_format)
except ValueError:
pass
return timestamp
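# Note: get_timestamp_from_url returns None when neither format matches, e.g.
#   get_timestamp_from_url("2014-07-04T13:35:25") -> datetime(2014, 7, 4, 13, 35, 25)
#   get_timestamp_from_url("04/07/2014")          -> None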
|
[
"alex.sorici@gmail.com"
] |
alex.sorici@gmail.com
|
1034cac0f27eee232621c40f56a8602ebbce4fbc
|
dcaf395142d8365a008bd162a2eb5e14f659f712
|
/Homework 1/Submission/HW1_puma.py
|
5a7170120f60491f559d0ededc202c91303d1b01
|
[] |
no_license
|
Peaches491/DM-MP
|
6d63d979c370265b41a7baf830f87607b030bae8
|
6cc1e54b32f2375c1205ca7de56cbcb312587be5
|
refs/heads/master
| 2021-01-19T18:56:01.960300
| 2015-03-25T17:39:32
| 2015-03-25T17:39:32
| 31,974,722
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,697
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HW1 for RBE 595/CS 525 Motion Planning
# code based on the simplemanipulation.py example
import time
import sys
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
_arm_joint_names = \
['_shoulder_pan_joint',
'_shoulder_lift_joint',
'_upper_arm_roll_joint',
'_elbow_flex_joint',
'_forearm_roll_joint',
'_wrist_flex_joint',
'_wrist_roll_joint']
def waitrobot(robot):
"""busy wait for robot completion"""
while not robot.GetController().IsDone():
time.sleep(0.01)
def tuckarms(env, robot):
with env:
jointnames = ['l_shoulder_lift_joint', 'l_elbow_flex_joint', 'l_wrist_flex_joint',
'r_shoulder_lift_joint', 'r_elbow_flex_joint', 'r_wrist_flex_joint']
robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
robot.SetActiveDOFValues([1.29023451, -2.32099996, -0.69800004,
1.27843491, -2.32100002, -0.69799996])
robot.GetController().SetDesired(robot.GetDOFValues())
waitrobot(robot)
def set_arm_pose(robot, joint_values, is_left = True):
joint_set = []
if is_left:
joint_set = ['l' + j for j in _arm_joint_names]
else:
joint_set = ['r' + j for j in _arm_joint_names]
with env:
robot.SetActiveDOFs([robot.GetJoint(j_name).GetDOFIndex() for j_name in joint_set])
robot.SetActiveDOFValues(joint_values)
robot.GetController().SetDesired(robot.GetDOFValues())
waitrobot(robot)
if __name__ == "__main__":
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
# load a scene from ProjectRoom environment XML file
env.Load('data/pr2test2.env.xml')
time.sleep(0.1)
# 1) get the 1st robot that is inside the loaded scene
# 2) assign it to the variable named 'robot'
robot = env.GetRobots()[0]
# tuck in the PR2's arms for driving
tuckarms(env, robot)
# #### YOUR CODE HERE ####
PumaTransform = array([[1, 0, 0, -3.3232],
[0, 1, 0, -0.4878],
[0, 0, 1, 0.0000],
[0, 0, 0, 1]])
robot2 = env.ReadRobotXMLFile('robots/pumaarm.zae')
with env:
robot2.SetTransform(PumaTransform)
env.Add(robot2, True)
CameraTransform = array([[-0.23753303, -0.35654009, 0.90358023, -4.93138313],
[-0.97133858, 0.07864789, -0.22431198, -0.44495657],
[ 0.00891154, -0.93096384, -0.36500262, 1.32067215],
[ 0.00000000, 0.00000000, 0.00000000, 1.00000000]])
view = env.GetViewer()
view.SetCamera(CameraTransform)
set_arm_pose(robot, [0, 0, 0, 0, 0, 0, 0], is_left=True)
print "Collision Results:", env.CheckCollision(robot, robot2)
raw_input("Press enter to move to collision...")
set_arm_pose(robot, [1.374, 1.200, 0, -2.2, 3.14, -1, 0], is_left=True)
print "Collision Results:", env.CheckCollision(robot, robot2)
# Code for running input loop, prompting for comma-separated joint values
if False:
import StringIO
previous = "0, 0, 0, 0, 0, 0, 0"
inp = "1234"
while 'quit' not in inp:
inp = raw_input("Testing: ")
values = [float(x) for x in inp.split(',')]
            if len(values) == 7:
set_arm_pose(robot, values, is_left=True)
else:
print "Need 7 joint values"
time.sleep(0.01)
else:
raw_input("Press enter to exit...")
# #### END OF YOUR CODE ###
|
[
"Peaches491@gmail.com"
] |
Peaches491@gmail.com
|
71b0afa35f1904fb77f644e80655f1e39e405a29
|
ff24a235a1643ff7db6cbefb2be6b4e7f477635a
|
/yosim/alerts/templatetags/custom_pagination.py
|
7f9a712743583f87187b3781fc851a1c3e1dbbc8
|
[
"MIT"
] |
permissive
|
thoongnv/yosim
|
15108f91e0a8cae83b88b52fefbe643211a411f5
|
22bcaceb2c40735363496d9404970a73c4b944bc
|
refs/heads/master
| 2023-01-11T00:42:18.841630
| 2020-04-02T07:32:16
| 2020-04-02T07:32:16
| 252,385,913
| 5
| 0
|
MIT
| 2022-12-26T20:39:55
| 2020-04-02T07:31:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
# -*- coding: utf-8 -*-
from django import template
register = template.Library()
@register.filter(name="get_paging_list")
def get_paging_list(value, curr_page_num):
"""
Return a list of pages which skip pages in the middle
Use: {% for a in value_list|get_paging_list:curr_page_num %}
"""
    num_of_pages_per_side = 7  # number of pages shown on each side
    num_of_pages = len(value)  # total number of pages
new_value = []
if num_of_pages <= 20:
return value
closest_curr_page = [x for x in range(
curr_page_num - 2, curr_page_num + 3) if x >= 0]
default_left_value = list(range(1, num_of_pages_per_side + 1))
default_right_value = list(range(
num_of_pages - num_of_pages_per_side, num_of_pages + 1))
if curr_page_num <= num_of_pages // 2:
left_value = []
if closest_curr_page[-1] > num_of_pages_per_side:
left_value = list(range(1, len(closest_curr_page) + 1))
left_value += closest_curr_page
else:
left_value = default_left_value
new_value = left_value + ['skip'] + default_right_value
else:
right_value = []
if closest_curr_page[0] >= (num_of_pages - num_of_pages_per_side):
right_value = default_right_value
else:
right_value = closest_curr_page
right_value += list(range(
num_of_pages - len(closest_curr_page), num_of_pages + 1))
new_value = default_left_value + ['skip'] + right_value
return new_value
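# A worked sketch: with value = list(range(1, 31)) and curr_page_num = 15, the
# filter yields
#   [1, 2, 3, 4, 5, 13, 14, 15, 16, 17, 'skip', 23, 24, 25, 26, 27, 28, 29, 30]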
|
[
"thoongnv@gmail.com"
] |
thoongnv@gmail.com
|
e5673244d7a65ed433c8ef18ff944d9dda5be7f3
|
f3e1814436faac544cf9d56182b6658af257300a
|
/CODECHEF/Positive AND.py
|
8a3f3ebe7fd2c6670f25a7185216f4963b423d3e
|
[] |
no_license
|
preetmodh/COMPETETIVE_CODING_QUESTIONS
|
36961b8b75c9f34e127731eb4ffb5742e577e8a2
|
c73afb87d197e30d801d628a9db261adfd701be9
|
refs/heads/master
| 2023-07-15T03:19:15.330633
| 2021-05-16T10:17:20
| 2021-05-16T10:17:20
| 279,030,727
| 2
| 1
| null | 2021-01-12T03:55:26
| 2020-07-12T09:18:57
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 3 09:36:48 2020
@author: PREET MODH
"""
def isTwo(n):
    # returns True when n is a power of two
if (n == 0):
return False
while (n != 1):
if (n % 2 != 0):
return False
n = n // 2
return True
for _ in range(int(input())):
n=int(input())
if n==1:
print(n)
elif isTwo(n):
print(-1)
else:
a=[]
i=n
while(i>3):
if isTwo(i-1):
a.append(i-1)
a.append(i)
i=i-2
else:
a.append(i)
i=i-1
a.append(1)
a.append(3)
a.append(2)
a.reverse()
print(' '.join(map(str,a)))
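# Note (assumed from the CodeChef problem "Positive AND"): the task is to build
# a permutation of 1..n in which every adjacent pair has a positive bitwise
# AND; no such permutation exists when n is a power of two greater than 1
# (hence the -1 branch; n == 1 is handled separately).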
|
[
"noreply@github.com"
] |
preetmodh.noreply@github.com
|
ddad622ade716b1be95c399186dc7519091686cb
|
2d34b3a61a5fc9a00c01a9177ecb0f0f94599bd6
|
/pickle84.py
|
f51178d198ba37b1081a0a350419e1f002b7807f
|
[] |
no_license
|
Trilokpandey/pythonprogrammings
|
9e3077961a99c749843ce0dd5e13436cfed92cb8
|
2641f4bb62ba09914344a4fc16745006fc793a09
|
refs/heads/oops_concept
| 2020-05-03T06:16:54.966904
| 2019-10-03T06:32:32
| 2019-10-03T06:32:32
| 178,469,460
| 0
| 0
| null | 2019-10-03T06:34:15
| 2019-03-29T20:09:45
|
Python
|
UTF-8
|
Python
| false
| false
| 289
|
py
|
import pickle
#pickling a python obj
#cars=["suzuki","audi","ferrari","hundai","inova"]
#file="mycar.pkl"
#fileobj=open(file,"wb")
#pickle.dump(cars,fileobj)
#fileobj.close()
file="mycar.pkl"
fileobj=open(file,"rb")
mycar=pickle.load(fileobj)
print(mycar)
print(type(mycar))
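# Note: with the commented-out dump above executed once, this load prints the
# five-element car list followed by <class 'list'>.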
|
[
"noreply@github.com"
] |
Trilokpandey.noreply@github.com
|
7c8010869b179425dcbad925ffae1938f0d89fb0
|
075f70880eb2496a97cf792fa34fdff281b73bbc
|
/hat.py
|
ddae45848e8e5c0f50473d218970221de68b4130
|
[
"MIT"
] |
permissive
|
MiodragDronjakDev/DES-Python
|
d877dda312918beeb53ecaa32c7f87c77142f625
|
20e605c77eee5e6019ca3427ca27f5b41a13a8b8
|
refs/heads/master
| 2021-05-27T05:50:32.959075
| 2014-04-16T17:39:29
| 2014-04-16T17:39:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
# --------------------------------------------------------------------------
# A Monte Carlo simulation of the hat check girl problem.
#
# Name            : hat.c
# Authors         : Steve Park & Dave Geyer
# Language        : ANSI C
# Latest Revision : 09-16-95
#
# Translated by   : Philip Steele
# Language        : Python 3.3
# Latest Revision : 3/26/14
# --------------------------------------------------------------------------
from rng import putSeed, random
# global variables:
#   i   - replication index
#   arr - array
#   p   - probability estimate
count = 0        # number of times a match occurs
SIZE = 10        # array size
N = 10000        # number of replications

def Equilikely(a, b):
    # ------------------------------------------------
    # generate an Equilikely random variate, use a < b
    # ------------------------------------------------
    return (a + int((b - a + 1) * random()))

# ==============================
def Initialize(a):
# ==============================
    for j in range(0, SIZE):
        a[j] = j

# ===========================
def Shuffle(a):
# ===========================
    # shuffle the array in such a way that all
    # permutations are equally likely to occur
    for j in range(0, SIZE - 1):
        t = Equilikely(j, (SIZE - 1))
        hold = a[j]
        a[j] = a[t]
        a[t] = hold

# ============================
def Check(a):
# ============================
    # test to see if at least one element is in its
    # 'natural' position - return 1 if so, 0 otherwise
    j = 0
    test = 0
    condition = True
    while (condition == True):
        test = (a[j] == j)
        j += 1
        condition = (j != SIZE) and (test == 0)
    if (test == 1):
        return (1)
    else:
        return (0)
###############################Main Program##############################
putSeed(0)
arr = [None for i in range(0,SIZE)]
Initialize(arr)
for i in range(0, N):            # do N Monte Carlo replications
    Shuffle(arr)
    count += Check(arr)
p = float(N - count) / N         # estimate the probability
print("\nfor {0:1d} replications and an array of size {1:d}".format(N, SIZE))
print("the estimated probability is {0:5.3f}".format(p))
# c output:
# Enter a positive integer seed (9 digits or less) >> 123456789
# for 10000 replications and an array of size 10
# the estimated probability is 0.369
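# Note: the estimate agrees with theory -- the probability that a random
# permutation of n items has no fixed point tends to 1/e ~= 0.368 as n grows.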
|
[
"notphilip@gmail.com"
] |
notphilip@gmail.com
|
5b2fdecddf64b6b884a9b0c2fd18f1775d9a7c74
|
224723b94160827e1a4ba700d74503b05b40a283
|
/basics_data_structure/map.py
|
df57f431b4964be9d12fd4b1129e357750bc10a5
|
[
"MIT"
] |
permissive
|
corenel/algorithm-exercises
|
471dff88c9a67845f47d1def2a56b9efd1cfe41f
|
f3f31f709e289e590c98247c019d36fc9cc44faf
|
refs/heads/master
| 2020-04-21T15:45:03.283015
| 2019-03-01T14:20:05
| 2019-03-01T14:20:05
| 169,678,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
"""
Map
https://algorithm.yuanbin.me/zh-hans/basics_data_structure/map.html
"""
|
[
"xxdsox@gmail.com"
] |
xxdsox@gmail.com
|
17f1b975b7a237e1d20bbe04ad9c21ace1aabc72
|
3bb4f4c802cdbdb16ce869dfcbadcfa6e393dd34
|
/src/est_dir/PI_XY.py
|
ef3b86733a0c0219645d30f1fb2070d300d367bf
|
[
"MIT"
] |
permissive
|
Megscammell/Estimate-of-direction-in-RSM
|
b5ed1b4d00ebf08e0f2d127552274d307e24c5af
|
3ab2678617203a7046f61b9c38b0f4878ed336fe
|
refs/heads/main
| 2023-04-18T03:04:50.782508
| 2022-04-20T12:31:42
| 2022-04-20T12:31:42
| 339,388,391
| 0
| 0
|
MIT
| 2022-07-05T17:13:04
| 2021-02-16T12:15:48
|
Python
|
UTF-8
|
Python
| false
| false
| 14,618
|
py
|
import numpy as np
import time
import est_dir
def compute_direction_XY(n, m, centre_point, f, func_args, no_vars, region):
"""
Compute estimate of the search direction by multiplying the transpose of
the design matrix with the response vector.
Parameters
----------
n : integer
Number of observations of the design matrix (rows).
m : integer
Number of variables of the design matrix (columns).
centre_point : 1-D array with shape (m,)
Centre point of design.
f : function
response function.
`f(point, *func_args) -> float`
        where `point` is a 1-D array with shape (d,) and func_args is
a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
no_vars : integer
If no_vars < m, the size of the resulting
design matrix is (n, no_vars). Since the centre_point is of size
(m,), a random subset of variables will need to be chosen
to evaluate the design matrix centred at centre_point. The
parameter no_vars will be used to generate a random subset of
positions, which correspond to the variable indices of
centre_point in which to centre the design matrix.
region : float
Region of exploration around the centre point.
Returns
-------
direction : 1-D array
Estimated search direction.
func_evals : integer
Number of times the response function has been evaluated
to compute the search direction.
"""
act_design, y, positions, func_evals = (est_dir.compute_random_design
(n, m, centre_point, no_vars,
f, func_args, region))
direction = np.zeros((m,))
direction[positions] = est_dir.divide_abs_max_value(act_design.T @ y)
    assert max(abs(direction)) == 1
return direction, func_evals
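# Note: the estimate above is the matrix-vector product X^T y (a regression-free
# analogue of the least-squares slope), rescaled by est_dir.divide_abs_max_value
# so that its largest absolute component equals 1 (checked by the assert).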
def calc_first_phase_RSM_XY(centre_point, init_func_val, f, func_args,
n, m, const_back, back_tol, const_forward,
forward_tol, step, no_vars, region):
"""
Compute iteration of local search, where search direction is estimated
by compute_direction_XY(), and the step size is computed using either
forward or backward tracking.
Parameters
----------
centre_point : 1-D array with shape (m,)
Centre point of design.
init_func_val: float
Initial function value at centre_point. That is,
f(centre_point, *func_args).
f : function
response function.
`f(point, *func_args) -> float`
        where `point` is a 1-D array with shape (d,) and func_args is
a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
n : integer
Number of observations of the design matrix.
m : integer
Number of variables.
const_back : float
If backward tracking is required, the initial guess of the
step size will be multiplied by const_back at each iteration
of backward tracking. That is,
t <- t * const_back
It should be noted that const_back < 1.
back_tol : float
It must be ensured that the step size computed by backward
tracking is not smaller than back_tol. If this is the case,
iterations of backward tracking are terminated. Typically,
back_tol is a very small number.
const_forward : float
The initial guess of the
step size will be multiplied by const_forward at each
iteration of forward tracking. That is,
                    t <- t * const_forward
It should be noted that const_forward > 1.
forward_tol : float
It must be ensured that the step size computed by forward
tracking is not greater than forward_tol. If this is the
case, iterations of forward tracking are terminated.
step : float
Initial guess of step size.
no_vars : integer
If no_vars < m, the size of the resulting
design matrix is (n, no_vars). Since the centre_point is of size
(m,), a random subset of variables will need to be chosen
to evaluate the design matrix centred at centre_point. The
parameter no_vars will be used to generate a random subset of
positions, which correspond to the variable indices of
centre_point in which to centre the design matrix.
region : float
Region of exploration around the centre point.
Returns
-------
upd_point : 1-D array
Updated centre_point after applying local search with
estimated direction and step length.
f_new : float
Response function value at upd_point. That is,
f(upd_point, *func_args).
total_func_evals_step : integer
Total number of response function evaluations
to compute step length.
total_func_evals_dir : integer
Total number of response function evaluations
to compute direction.
"""
direction, total_func_evals_dir = (compute_direction_XY
(n, m, centre_point, f,
func_args, no_vars,
region))
(upd_point,
f_new, total_func_evals_step) = (est_dir.combine_tracking
(centre_point, init_func_val,
direction, step, const_back,
back_tol, const_forward,
forward_tol, f, func_args))
return (upd_point, f_new, total_func_evals_step,
total_func_evals_dir)
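# A minimal sketch (an assumption; the actual rule lives in
# est_dir.combine_tracking) of the backward-tracking step-size update
# described in the docstring above: shrink the step by const_back until the
# move improves f or the step drops below back_tol. The sign convention for
# moving along `direction` is illustrative.
def backward_tracking_sketch(f, point, direction, func_args, step=1.0,
                             const_back=0.5, back_tol=1e-6):
    best = f(point, *func_args)
    while step >= back_tol:
        if f(point - step * direction, *func_args) < best:
            return step
        step *= const_back
    return 0.0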
def calc_its_until_sc_XY(centre_point, f, func_args, n, m,
f_no_noise, func_args_no_noise,
no_vars, region, max_func_evals,
const_back=0.5, back_tol=0.000001,
const_forward=2, forward_tol=100000000):
"""
Compute iterations of Phase I of response surface methodology
until some stopping criterion is met.
The direction is estimated by compute_direction_XY() at each iteration.
The step length is computed by forward or backward tracking.
Parameters
----------
centre_point : 1-D array with shape (m,)
Centre point of design.
f : function
    Response function.
    `f(point, *func_args) -> float`
    where `point` is a 1-D array with shape (m,) and func_args is
    a tuple of arguments needed to compute the response function value.
func_args : tuple
Arguments passed to the function f.
n : integer
Number of observations of the design matrix.
m : integer
Number of variables.
f_no_noise : function
    Response function without noise.
    `f_no_noise(point, *func_args_no_noise) -> float`
    where `point` is a 1-D array with shape (m,) and
    func_args_no_noise is a tuple of arguments needed to compute
    the response function value.
func_args_no_noise : tuple
Arguments passed to the function f_no_noise.
no_vars : integer
If no_vars < m, the size of the resulting
design matrix is (n, no_vars). Since the centre_point is of size
(m,), a random subset of variables will need to be chosen
to evaluate the design matrix centred at centre_point. The
parameter no_vars will be used to generate a random subset of
positions, which correspond to the variable indices of
centre_point in which to centre the design matrix.
region : float
Region of exploration around the centre point.
max_func_evals : int
Maximum number of function evaluations before stopping.
const_back : float
If backward tracking is required, the initial guess of the
step size will be multiplied by const_back at each iteration
of backward tracking. That is,
t <- t * const_back
It should be noted that const_back < 1.
back_tol : float
It must be ensured that the step size computed by backward
tracking is not smaller than back_tol. If this is the case,
iterations of backward tracking are terminated. Typically,
back_tol is a very small number.
const_forward : float
    The initial guess of the step size will be multiplied by
    const_forward at each iteration of forward tracking. That is,
    t <- t * const_forward
    It should be noted that const_forward > 1.
forward_tol : float
It must be ensured that the step size computed by forward
tracking is not greater than forward_tol. If this is the
case, iterations of forward tracking are terminated.
Returns
-------
upd_point : 1-D array
Updated centre_point after applying local search with
estimated direction and step length.
init_func_val : float
Initial function value at initial centre_point.
f_val : float
Final response function value after stopping criterion
has been met for phase I of RSM.
full_time : float
Total time taken.
total_func_evals_step : integer
Total number of response function evaluations
to compute step length for all iterations.
total_func_evals_dir : integer
Total number of response function evaluations
to compute direction for all iterations.
no_iterations : integer
Total number of iterations of Phase I of RSM.
store_good_dir : integer
Number of 'good' search directions. That is, the number
of times moving along the estimated search direction
improves the response function value with no noise.
store_good_dir_norm : list
If a 'good' direction is determined,
distance of point and minimizer at the k-th
iteration subtracted by the distance of point
and minimizer at the (k+1)-th iteration is
stored.
store_good_dir_func : list
If a 'good' direction is determined,
store the response function value with point
at the k-th iteration, subtracted by response
function value with point at the
(k+1)-th iteration.
"""
t0 = time.time()
if (no_vars > m):
raise ValueError('Incorrect no_vars choice')
store_good_dir = 0
store_good_dir_norm = []
store_good_dir_func = []
total_func_evals_step = 0
total_func_evals_dir = 0
step = 1
init_func_val = f(centre_point, *func_args)
(upd_point, f_val,
func_evals_step,
func_evals_dir) = (calc_first_phase_RSM_XY
(centre_point, np.copy(init_func_val), f, func_args,
n, m, const_back, back_tol, const_forward,
forward_tol, step, no_vars, region))
total_func_evals_step += func_evals_step
total_func_evals_dir += func_evals_dir
no_iterations = 1
if (f_no_noise(centre_point, *func_args_no_noise) >
f_no_noise(upd_point, *func_args_no_noise)):
store_good_dir += 1
store_good_dir_norm.append(np.linalg.norm(centre_point - func_args[0])
- np.linalg.norm(upd_point - func_args[0]))
store_good_dir_func.append(f_no_noise(centre_point,
*func_args_no_noise)
- f_no_noise(upd_point,
*func_args_no_noise))
while (total_func_evals_step + total_func_evals_dir + n) < max_func_evals:
centre_point = upd_point
new_func_val = f_val
step = 1
(upd_point, f_val,
func_evals_step,
func_evals_dir) = (calc_first_phase_RSM_XY
(centre_point, np.copy(new_func_val), f, func_args,
n, m, const_back, back_tol, const_forward,
forward_tol, step, no_vars, region))
total_func_evals_step += func_evals_step
total_func_evals_dir += func_evals_dir
no_iterations += 1
if (f_no_noise(centre_point, *func_args_no_noise) >
f_no_noise(upd_point, *func_args_no_noise)):
store_good_dir += 1
store_good_dir_norm.append(np.linalg.norm(centre_point -
func_args[0])
- np.linalg.norm(upd_point -
func_args[0]))
store_good_dir_func.append(f_no_noise(centre_point,
*func_args_no_noise)
- f_no_noise(upd_point,
*func_args_no_noise))
t1 = time.time()
full_time = t1-t0
return (upd_point, init_func_val, f_val, full_time,
total_func_evals_step, total_func_evals_dir,
no_iterations, store_good_dir,
store_good_dir_norm, store_good_dir_func)
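# Example usage (a sketch under assumptions): minimise a noisy quadratic whose
# minimiser is func_args[0], matching the convention used by
# store_good_dir_norm above; n, no_vars, region and max_func_evals are
# illustrative values, not recommendations.
if __name__ == '__main__':
    m = 10
    minimiser = np.zeros(m)

    def f_noisy(x, minimiser, sd):
        return np.sum((x - minimiser) ** 2) + np.random.normal(0, sd)

    def f_exact(x, minimiser):
        return np.sum((x - minimiser) ** 2)

    results = calc_its_until_sc_XY(np.random.uniform(-1, 1, m), f_noisy,
                                   (minimiser, 0.01), n=16, m=m,
                                   f_no_noise=f_exact,
                                   func_args_no_noise=(minimiser,),
                                   no_vars=m, region=0.1,
                                   max_func_evals=1000)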
# ---- file: /Python/Apps/Blog/code-snippets/Logging-Basics/log-sample.py (repo: NateWeiler/Resources) ----
version https://git-lfs.github.com/spec/v1
oid sha256:5e05432d84deb8f3ab66bcb1fb3e421f96a12ed93a6a429cea802ed2cee299b4
size 1363
# ---- file: /Season 10 - Advanced Python Development/Episode 09 - TIming your code with python.py (repo: barawalojas/Python-tutorial) ----
# Timing your code
import time
def powers(limit):
return [x**2 for x in range(limit)]
# start = time.time()  # time.time() returns the current time in seconds elapsed since the Unix epoch (1970)
# powers(5000000)
# end = time.time()
#
# print(end - start)
def measure_runtime(func):
start = time.time()
func()
end = time.time()
print(end - start)
measure_runtime(lambda: powers(500000))
import timeit  # timeit runs a statement many times and reports the total elapsed time
print(timeit.timeit('[x**2 for x in range(10)]'))
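# timeit also accepts a setup string and an explicit repetition count, which
# makes comparisons fairer (the values below are illustrative):
print(timeit.timeit('powers(1000)',
                    setup='from __main__ import powers',
                    number=100))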
# ---- file: /cimg/io/get_web_images.py (repo: piandpower/cio-images) ----
import os
import cv2
# urlretrieve moved in Python 3; import it so the script runs on either version
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
def get_web_images(url_list_file=None,credentials_file=None,save_dir=None):
with open(credentials_file,'r') as c:
creds = c.readline().rstrip() # rstrip strips the newline character
with open(url_list_file,'r') as f:
for i, line in enumerate(f):
            line = line.rsplit('\n', 1)[0]  # strip newline character
            # splice the credentials in after the scheme (creds is assumed to end with '@')
            url = line.split('//', 1)[0] + '//' + creds + line.split('//', 1)[1]
            image = url.rsplit('/', 1)[1]
            img_path = os.path.join(save_dir, image)
            urlretrieve(url, img_path)
            file, ext = os.path.splitext(os.path.basename(img_path))
            if ext == "":
                img = cv2.imread(img_path)
                cv2.imwrite(img_path + '.jpg', img)
                os.remove(img_path)
# ---- file: /emailing/migrations/0002_emailcampaign_is_announcement.py (repo: establishment/django-establishment) ----
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-10 16:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('emailing', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='emailcampaign',
name='is_announcement',
field=models.BooleanField(default=True),
),
]
# ---- file: /bin/easy_install (repo: daleighan/MLsandbox-service) ----
#!/Users/alexleigh/Documents/handwriting/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
# ---- file: /tstl/tocorpus.py (repo: agroce/tstl) ----
from __future__ import print_function
import sys
import os
# Appending current working directory to sys.path
# So that user can run randomtester from the directory where sut.py is located
current_working_dir = os.getcwd()
sys.path.append(current_working_dir)
if "--help" not in sys.argv:
import sut as SUT
def main():
if "--help" in sys.argv:
print("Usage: tstl_toafl <outputdir> <files>")
sys.exit(0)
sut = SUT.sut()
outputDir = sys.argv[1]
files = sys.argv[2:]
i = 0
for f in files:
t = sut.loadTest(f)
sut.saveTest(
t,
outputDir +
"/" +
os.path.basename(f) +
".afl",
afl=True)
i += 1
# ---- file: /config_example.py (repo: joshuasellers/SpotifyScript) ----
username = 'YOUR USERNAME HERE'
token = 'YOUR TOKEN HERE'
# ---- file: /src/warmup_project/src/PID_controller.py (repo: dianavermilya/comprobo2014) ----
import math
class PIDcontroller(object):
def __init__(self, name="rbt"):
self.name = name
self._distanceFromWall = 0
self._angleOfWallNormal = 0
self.rotation = 0
def setDistanceFromWall(self, distance):
self._distanceFromWall = distance
def setAngleOfWallNormal(self, smallest_idx):
self._angleOfWallNormal = smallest_idx
def calculateRotationToFollowWall(self):
beNormalToWall = (self._angleOfWallNormal-90)*(0.01)
beNearWall = (self._distanceFromWall - 0.3)*(0.8)
        print(self._angleOfWallNormal, self._distanceFromWall)
        print(beNormalToWall, beNearWall)
self.rotation = beNormalToWall + beNearWall
def calculateRotationToFollowObject(self, error):
self.rotation = error*0.01
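# Minimal usage sketch (values are illustrative):
if __name__ == '__main__':
    pid = PIDcontroller()
    pid.setDistanceFromWall(0.5)
    pid.setAngleOfWallNormal(100)
    pid.calculateRotationToFollowWall()
    print(pid.rotation)  # proportional blend of the angle and distance errors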
# ---- file: /python/7. Reverse Integer.py (repo: qingmm/leetcode) ----
class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
isPosit = 1
        if -10 < x < 10:
return x
if x < 0:
isPosit = -1
x = -x
x = str(x)
r = x[::-1]
if r[0] == '0':
r = r[1:]
r = int(r) * isPosit
        if r < -2 ** 31 or r > 2 ** 31 - 1:
return 0
else:
return r
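# Quick sanity checks (illustrative):
if __name__ == '__main__':
    s = Solution()
    print(s.reverse(-123))        # -321
    print(s.reverse(120))         # 21
    print(s.reverse(1534236469))  # 0, since 9646324351 overflows 32 bits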
# ---- file: /vframe_cli/commands/pipe/blur.py (repo: julescarbon/vframe) ----
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.utils.click_utils import processor
@click.command('')
@click.option( '-n', '--name', 'opt_data_keys',
multiple=True,
help='Data key for ROIs')
@click.option('--fac', 'opt_factor',
default=0.5,
show_default=True,
help='Strength to apply redaction filter')
@click.option('--iters', 'opt_iters',
default=2,
show_default=True,
help='Blur iterations')
@click.option('--expand', 'opt_expand',
default=0.25,
show_default=True,
help='Percentage to expand')
@processor
@click.pass_context
def cli(ctx, pipe, opt_data_keys, opt_factor, opt_iters, opt_expand):
"""Blurs BBoxes"""
from vframe.settings import app_cfg
from vframe.models import types
from vframe.utils import im_utils
# ---------------------------------------------------------------------------
# TODO
# - add oval shape blurring
# ---------------------------------------------------------------------------
# initialize
log = app_cfg.LOG
# ---------------------------------------------------------------------------
# Example: process images as they move through pipe
while True:
pipe_item = yield
header = ctx.obj['header']
im = pipe_item.get_image(types.FrameImage.DRAW)
dim = im.shape[:2][::-1]
# get data keys
if not opt_data_keys:
data_keys = header.get_data_keys()
else:
data_keys = opt_data_keys
# iterate data keys
for data_key in data_keys:
if data_key not in header.get_data_keys():
log.warn(f'data_key: {data_key} not found')
# get data
item_data = header.get_data(data_key)
# blur data
if item_data:
for obj_idx, detection in enumerate(item_data.detections):
bbox = detection.bbox.expand_per(opt_expand).redim(dim)
# TODO: handle segmentation mask
for i in range(opt_iters):
im = im_utils.blur_roi(im, bbox)
# resume pipe stream
pipe_item.set_image(types.FrameImage.DRAW, im)
pipe.send(pipe_item)
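# ---------------------------------------------------------------------------
# Standalone sketch (an assumption, not vframe's API): the generator-based
# "pipe processor" pattern used above. A stage is primed with next(), then
# items are pushed in with send() and forwarded downstream.
# ---------------------------------------------------------------------------
def _toy_stage(sink):
  while True:
    item = yield           # receive an item from upstream
    sink.append(item * 2)  # transform it and forward downstream

def _toy_demo():
  sink = []
  stage = _toy_stage(sink)
  next(stage)              # prime the generator up to its first yield
  for n in (1, 2, 3):
    stage.send(n)
  return sink              # -> [2, 4, 6]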
# ---- file: /Integration/Bluetooth/example_advertisement.py (repo: Vidur-CUBoulder/GroceryRecommendataion) ----
#!/usr/bin/python
import dbus
import dbus.exceptions
import dbus.mainloop.glib
import dbus.service
import array
import gobject
from random import randint
mainloop = None
BLUEZ_SERVICE_NAME = 'org.bluez'
LE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'
DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
LE_ADVERTISEMENT_IFACE = 'org.bluez.LEAdvertisement1'
class InvalidArgsException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.freedesktop.DBus.Error.InvalidArgs'
class NotSupportedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.NotSupported'
class NotPermittedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.NotPermitted'
class InvalidValueLengthException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.InvalidValueLength'
class FailedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.Failed'
class Advertisement(dbus.service.Object):
PATH_BASE = '/org/bluez/example/advertisement'
def __init__(self, bus, index, advertising_type):
self.path = self.PATH_BASE + str(index)
self.bus = bus
self.ad_type = advertising_type
self.service_uuids = None
self.manufacturer_data = None
self.solicit_uuids = None
self.service_data = None
self.local_name = None
self.include_tx_power = None
dbus.service.Object.__init__(self, bus, self.path)
def get_properties(self):
properties = dict()
properties['Type'] = self.ad_type
if self.service_uuids is not None:
properties['ServiceUUIDs'] = dbus.Array(self.service_uuids,
signature='s')
if self.solicit_uuids is not None:
properties['SolicitUUIDs'] = dbus.Array(self.solicit_uuids,
signature='s')
if self.manufacturer_data is not None:
properties['ManufacturerData'] = dbus.Dictionary(
self.manufacturer_data, signature='qv')
if self.service_data is not None:
properties['ServiceData'] = dbus.Dictionary(self.service_data,
signature='sv')
if self.local_name is not None:
properties['LocalName'] = dbus.String(self.local_name)
if self.include_tx_power is not None:
properties['IncludeTxPower'] = dbus.Boolean(self.include_tx_power)
return {LE_ADVERTISEMENT_IFACE: properties}
def get_path(self):
return dbus.ObjectPath(self.path)
def add_service_uuid(self, uuid):
if not self.service_uuids:
self.service_uuids = []
self.service_uuids.append(uuid)
def add_solicit_uuid(self, uuid):
if not self.solicit_uuids:
self.solicit_uuids = []
self.solicit_uuids.append(uuid)
def add_manufacturer_data(self, manuf_code, data):
if not self.manufacturer_data:
self.manufacturer_data = dbus.Dictionary({}, signature='qv')
self.manufacturer_data[manuf_code] = dbus.Array(data, signature='y')
def add_service_data(self, uuid, data):
if not self.service_data:
self.service_data = dbus.Dictionary({}, signature='sv')
self.service_data[uuid] = dbus.Array(data, signature='y')
def add_local_name(self, name):
if not self.local_name:
self.local_name = ""
self.local_name = dbus.String(name)
@dbus.service.method(DBUS_PROP_IFACE,
in_signature='s',
out_signature='a{sv}')
def GetAll(self, interface):
print 'GetAll'
if interface != LE_ADVERTISEMENT_IFACE:
raise InvalidArgsException()
print 'returning props'
return self.get_properties()[LE_ADVERTISEMENT_IFACE]
@dbus.service.method(LE_ADVERTISEMENT_IFACE,
in_signature='',
out_signature='')
def Release(self):
print '%s: Released!' % self.path
class TestAdvertisement(Advertisement):
def __init__(self, bus, index):
Advertisement.__init__(self, bus, index, 'peripheral')
self.add_service_uuid('180D')
self.add_service_uuid('180F')
self.add_manufacturer_data(0xffff, [0x00, 0x01, 0x02, 0x03, 0x04])
self.add_service_data('9999', [0x00, 0x01, 0x02, 0x03, 0x04])
self.add_local_name('TestAdvertisement')
self.include_tx_power = True
def register_ad_cb():
print 'Advertisement registered'
def register_ad_error_cb(error):
print 'Failed to register advertisement: ' + str(error)
mainloop.quit()
def find_adapter(bus):
remote_om = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),
DBUS_OM_IFACE)
objects = remote_om.GetManagedObjects()
for o, props in objects.iteritems():
if LE_ADVERTISING_MANAGER_IFACE in props:
return o
return None
def advertisement_main():
global mainloop
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
adapter = find_adapter(bus)
if not adapter:
print 'LEAdvertisingManager1 interface not found'
return
adapter_props = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
"org.freedesktop.DBus.Properties");
adapter_props.Set("org.bluez.Adapter1", "Powered", dbus.Boolean(1))
ad_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
LE_ADVERTISING_MANAGER_IFACE)
test_advertisement = TestAdvertisement(bus, 0)
mainloop = gobject.MainLoop()
ad_manager.RegisterAdvertisement(test_advertisement.get_path(), {},
reply_handler=register_ad_cb,
error_handler=register_ad_error_cb)
mainloop.run()
#if __name__ == '__main__':
#    advertisement_main()
# ---- file: /review_service/review_service/oauth/google/endpoints.py (repo: vladlubenskiy/review-service) ----
import os
# Google's OAuth 2.0 endpoints
AUTH_URL = "https://accounts.google.com/o/oauth2/auth"
CODE_ENDPOINT = "https://accounts.google.com/o/oauth2/token"
TOKENINFO_URL = "https://accounts.google.com/o/oauth2/tokeninfo"
USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
SCOPE = "https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile"
LOGOUT_URI = 'https://accounts.google.com/logout'
# client ID / secret & cookie key
CLIENT_ID = '219211176840.apps.googleusercontent.com'
CLIENT_SECRET = 'PitmLoQjvrjnuofnfj8iN1qq'
COOKIE_KEY = os.urandom(64)
is_secure = os.environ.get('HTTPS') == 'on'
protocol = {False: 'http', True: 'https'}[is_secure]
SECURE_ROOT_URL = protocol + '://localhost:8002'
UNSAFE_ROOT_URL = "http://localhost:8003"
RESPONSE_TYPE = 'code'
REDIRECT_URL = 'https://localhost:8002/auth/oauth2callback/'
CATCHTOKEN_URL = SECURE_ROOT_URL + '/auth/catchtoken'
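# Sketch (an assumption about how these constants are combined): build the
# user-facing authorization URL for the OAuth 2.0 authorization-code flow.
def build_auth_url():
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2
    params = {
        'client_id': CLIENT_ID,
        'redirect_uri': REDIRECT_URL,
        'response_type': RESPONSE_TYPE,
        'scope': SCOPE,
    }
    return AUTH_URL + '?' + urlencode(params)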
# ---- file: /sale_management/models/__init__.py (repo: tamam001/ALWAFI_P1) ----
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from . import digest
from . import res_config_settings
from . import sale_order
from . import sale_order_template
# ---- file: /ex6/ex6.py (repo: jo1jun/Machine-Learning-Python) ----
import os
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import scipy.io
from plotData import plotData
import sklearn.svm as svm
from visualizeBoundaryLinear import visualizeBoundaryLinear
import matplotlib.pyplot as plt
from gaussianKernel import gaussianKernel
from visualizeBoundary import visualizeBoundary
from dataset3Params import dataset3Params
## Machine Learning Online Class
# Exercise 6 | Support Vector Machines
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# gaussianKernel.m
# dataset3Params.m
# processEmail.m
# emailFeatures.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
## =============== Part 1: Loading and Visualizing Data ================
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment and plot
# the data.
#
print('=============== Part 1: Loading and Visualizing Data ================')
print('Loading and Visualizing Data ...\n')
# Load from ex6data1:
# You will have X, y in your environment
mat = scipy.io.loadmat('ex6data1.mat')
X, y = mat['X'], mat['y'].flatten()
#print(X.shape) #(51, 2)
#print(y.shape) #(51,)
# Plot training data
plotData(X, y)
## ==================== Part 2: Training Linear SVM ====================
# The following code will train a linear SVM on the dataset and plot the
# decision boundary learned.
#
print('==================== Part 2: Training Linear SVM ====================')
# Load from ex6data1:
# You will have X, y in your environment
mat = scipy.io.loadmat('ex6data1.mat')
X, y = mat['X'], mat['y'].flatten()
print('Training Linear SVM ...')
# You should try to change the C value below and see how the decision
# boundary varies (e.g., try C = 1000)
# implemented with sklearn
# reference : https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC
C = 1
classifier = svm.SVC(C=C, kernel='linear', tol=1e-3)
model = classifier.fit(X, y)
#.fit(X, y[, sample_weight]) Fit the SVM model according to the given training data.
plt.figure()
plt.title('C = 1')
visualizeBoundaryLinear(X, y, model)
# When C is not large, the model does not try as hard to separate every point, so it pays little attention to the outlier.
C = 100
classifier = svm.SVC(C=C, kernel='linear', tol=1e-3)
model = classifier.fit(X, y)
plt.figure()
plt.title('C = 100')
visualizeBoundaryLinear(X, y, model)
# When C is very large, the cost can only be kept small by classifying every point correctly, so the model pays close attention to the outlier.
## =============== Part 3: Implementing Gaussian Kernel ===============
# You will now implement the Gaussian kernel to use
# with the SVM. You should complete the code in gaussianKernel.m
#
print('=============== Part 3: Implementing Gaussian Kernel ===============')
print('\nEvaluating the Gaussian Kernel ...\n')
x1 = [1, 2, 1]
x2 = [0, 4, -1]
sigma = 2
sim = gaussianKernel(x1, x2, sigma)
print('Gaussian Kernel between x1 = [1 2 1], x2 = [0 4 -1], sigma = {} :' \
'\n\t{}\n(for sigma = 2, this value should be about 0.324652)\n'.format(sigma, sim))
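# For reference, a minimal sketch (an assumption; the graded implementation
# lives in gaussianKernel.py) of what the Gaussian kernel computes:
#   K(x1, x2) = exp(-||x1 - x2||^2 / (2 * sigma^2))
import numpy as np

def gaussian_kernel_sketch(a, b, sigma):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return np.exp(-np.sum((a - b) ** 2) / (2 * sigma ** 2))
# gaussian_kernel_sketch([1, 2, 1], [0, 4, -1], 2) -> about 0.324652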
## =============== Part 4: Visualizing Dataset 2 ================
# The following code will load the next dataset into your environment and
# plot the data.
#
print('=============== Part 4: Visualizing Dataset 2 ================')
print('Loading and Visualizing Data ...\n')
# Load from ex6data2:
# You will have X, y in your environment
mat = scipy.io.loadmat('ex6data2.mat')
X, y = mat['X'], mat['y'].flatten() #X.shape = (863,2) , y.shape = (863,)
# Plot training data
plt.figure()
plotData(X, y)
## ========== Part 5: Training SVM with RBF Kernel (Dataset 2) ==========
# After you have implemented the kernel, we can now use it to train the
# SVM classifier.
#
print('========== Part 5: Training SVM with RBF Kernel (Dataset 2) ==========')
print('\nTraining SVM with RBF Kernel (this may take 1 to 2 minutes) ...\n')
# Load from ex6data2:
# You will have X, y in your environment
mat = scipy.io.loadmat('ex6data2.mat')
X, y = mat['X'], mat['y'].flatten() #X.shape = (863,2) , y.shape = (863,)
# SVM Parameters
C = 1
sigma = 0.1
# We set the tolerance and max_passes lower here so that the code will run
# faster. However, in practice, you will want to run the training to
# convergence.
# kernel='rbf' follows exp(-gamma*||x-x'||^2), so we only need to set gamma to match the Gaussian kernel formula.
# reference : the "kernel functions" section of https://scikit-learn.org/stable/modules/svm.html
g = 1 / (2 * sigma ** 2)
classifier = svm.SVC(C=C, kernel='rbf', tol=1e-3, gamma = g)
model = classifier.fit(X, y)
visualizeBoundary(X, y, model)
## =============== Part 6: Visualizing Dataset 3 ================
# The following code will load the next dataset into your environment and
# plot the data.
#
print('=============== Part 6: Visualizing Dataset 3 ================')
print('Loading and Visualizing Data ...\n')
# Load from ex6data3:
# You will have X, y in your environment
mat = scipy.io.loadmat('ex6data3.mat')
X, y = mat['X'], mat['y'].flatten() #X.shape = (211,2) , y.shape = (211,)
# Plot training data
plt.figure()
plotData(X, y)
## ========== Part 7: Training SVM with RBF Kernel (Dataset 3) ==========
# This is a different dataset that you can use to experiment with. Try
# different values of C and sigma here.
#
print('========== Part 7: Training SVM with RBF Kernel (Dataset 3) ==========')
# Load from ex6data3:
# You will have X, y in your environment
mat = scipy.io.loadmat('ex6data3.mat')
X, y = mat['X'], mat['y'].flatten() #X.shape = (211,2) , y.shape = (211,)
Xval, yval = mat['Xval'], mat['yval'].flatten() #Xval.shape = (200,2) , yval.shape = (200,)
# Try different SVM Parameters here
C, sigma = dataset3Params(X, y, Xval, yval)
# Train the SVM (gamma must be recomputed from the newly selected sigma)
g = 1 / (2 * sigma ** 2)
classifier = svm.SVC(C=C, kernel='rbf', tol=1e-3, gamma=g)
model = classifier.fit(X, y)
visualizeBoundary(X, y, model)
# ---- file: /code/.ipynb_checkpoints/baseline2.0-checkpoint.py (repo: ness001/KDD2020-Debiasing-Team666) ----
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
import math
def get_sim_item(df_, user_col, item_col, use_iif=False):
df = df_.copy()
user_item_ = df.groupby(user_col)[item_col].agg(list).reset_index()
user_item_dict = dict(zip(user_item_[user_col], user_item_[item_col]))
    user_time_ = df.groupby(user_col)['time'].agg(list).reset_index()  # bring the time factor in
user_time_dict = dict(zip(user_time_[user_col], user_time_['time']))
sim_item = {}
    item_cnt = defaultdict(int)  # number of times each item has been clicked
for user, items in tqdm(user_item_dict.items()):
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
                t1 = user_time_dict[user][loc1]  # click timestamps
                t2 = user_time_dict[user][loc2]
sim_item[item].setdefault(relate_item, 0)
                if not use_iif:
                    if loc1 - loc2 > 0:
                        sim_item[item][relate_item] += 1 * 0.7 * (0.8 ** (loc1 - loc2 - 1)) * (
                                1 - (t1 - t2) * 10000) / math.log(1 + len(items))  # backward in time ???
                    else:
                        sim_item[item][relate_item] += 1 * 1.0 * (0.8 ** (loc2 - loc1 - 1)) * (
                                1 - (t2 - t1) * 10000) / math.log(1 + len(items))  # forward in time
                else:
                    sim_item[item][relate_item] += 1 / math.log(1 + len(items))  # ???
    sim_item_corr = sim_item.copy()  # normalise by how often each of the two items was clicked
for i, related_items in tqdm(sim_item.items()):
for j, cij in related_items.items():
sim_item_corr[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
return sim_item_corr, user_item_dict
def recommend(sim_item_corr, user_item_dict, user_id, top_k, item_num):
'''
    input: item_sim_list, user_item, uid, 500, 50
    # every item in the user's history has related items; merge those related items and rank them by similarity
'''
rank = {}
    interacted_items = user_item_dict[user_id]
    interacted_items = interacted_items[::-1]  # items this user has interacted with, most recent first
    for loc, i in enumerate(interacted_items):
        # items related to item i, ranked by similarity weight
        for j, wij in sorted(sim_item_corr[i].items(), key=lambda d: d[1], reverse=True)[0:top_k]:
            if j not in interacted_items:
                rank.setdefault(j, 0)
                rank[j] += wij * (0.7 ** loc)  # ???
return sorted(rank.items(), key=lambda d: d[1], reverse=True)[:item_num]
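# Toy walk-through (illustrative): with sim_item_corr = {'a': {'c': 0.2},
# 'b': {'c': 0.5}} and user history ['a', 'b'] (reversed to ['b', 'a']),
# item 'c' scores 0.5 * 0.7**0 + 0.2 * 0.7**1 = 0.64, while 'a' and 'b'
# are skipped because the user already interacted with them.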
# fill user to 50 items
def get_predict(df, pred_col, top_fill):
top_fill = [int(t) for t in top_fill.split(',')]
scores = [-1 * i for i in range(1, len(top_fill) + 1)]
ids = list(df['user_id'].unique())
fill_df = pd.DataFrame(ids * len(top_fill), columns=['user_id'])
fill_df.sort_values('user_id', inplace=True)
fill_df['item_id'] = top_fill * len(ids)
fill_df[pred_col] = scores * len(ids)
df = df.append(fill_df)
df.sort_values(pred_col, ascending=False, inplace=True)
df = df.drop_duplicates(subset=['user_id', 'item_id'], keep='first')
df['rank'] = df.groupby('user_id')[pred_col].rank(method='first', ascending=False)
df = df[df['rank'] <= 50]
df = df.groupby('user_id')['item_id'].apply(lambda x: ','.join([str(i) for i in x])).str.split(',',
expand=True).reset_index()
return df
now_phase = 0
train_path = '../data/underexpose_train'
test_path = '../data/underexpose_test'
recom_item = []
whole_click = pd.DataFrame()
for c in range(now_phase + 1):
print('phase:', c)
click_train = pd.read_csv(train_path + '/underexpose_train_click-{}.csv'.format(c), header=None,
names=['user_id', 'item_id', 'time'])
click_test = pd.read_csv(test_path + '/underexpose_test_click-{}/underexpose_test_click-{}.csv'.format(c, c),
header=None, names=['user_id', 'item_id', 'time'])
all_click = click_train.append(click_test)
whole_click = whole_click.append(all_click)
whole_click = whole_click.drop_duplicates(subset=['user_id', 'item_id', 'time'], keep='last')
whole_click = whole_click.sort_values('time')
item_sim_list, user_item = get_sim_item(whole_click, 'user_id', 'item_id', use_iif=False)
for i in tqdm(click_test['user_id'].unique()):
rank_item = recommend(item_sim_list, user_item, i, 500, 500)
for j in rank_item:
recom_item.append([i, j[0], j[1]])
# find most popular items
top50_click = whole_click['item_id'].value_counts().index[:50].values
top50_click = ','.join([str(i) for i in top50_click])
recom_df = pd.DataFrame(recom_item, columns=['user_id', 'item_id', 'sim'])
result = get_predict(recom_df, 'sim', top50_click)
result.to_csv('baseline.csv', index=False, header=None)
# ---- file: /plots/avgs.py (repo: MCC04/GPUfarm) ----
#!/usr/bin/env python3
# coding=utf-8
#import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import math
import csv
import sys
testNum=5
##########
###MAIN###
##########
#import pdb; pdb.set_trace()
def main():
res_path = "../results/"+sys.argv[1]+"/"
for file in os.listdir(res_path):
if file.endswith(".txt"):
print(os.path.join(res_path, file))
if file[0:4]=="dev_":
##########################
# get all measures in .txt
##########################
f=os.path.join(res_path, file)
resType=file[4:7]
#common
lbls=[]
evTimes=[]
chronoTimes=[]
nStreams=[]
blocks=[]
grids=[]
#cos only
chunkSizes=[]
N=[]
M=[]
#mat only
matSize=[]
numMat=[]
#blur only
imgNum=[]
sizeImg=[]
if resType=="cos":
lbls,evTimes,chronoTimes,chunkSizes,N,M,nStreams,blocks,grids = getCosDatas(f)
elif resType=="mat":
lbls,evTimes,chronoTimes,matSize,numMat,nStreams,blocks,grids = getMatDatas(f)
elif resType=="blu":
lbls,evTimes,chronoTimes,sizeImg,imgNum,nStreams,blocks,grids = getBlurDatas(f)
csvPath="./output/"+sys.argv[1]+"/"+resType+"_avgs.csv"
print("Ev len: ",len(evTimes))
print("Chrono len: ",len(chronoTimes))
print("Lbl len: ",len(lbls))
#######################
# get measures averages
#######################
evtmp=[]
chtmp=[]
evAvgs=[]
chAvgs=[]
i=0
for i in range(len(evTimes)):
if i%testNum!=0 or i==0:
evtmp.append(evTimes[i])
chtmp.append(chronoTimes[i])
else:
print("Ev TMP: ",evtmp)
print("Ch TMP: ",chtmp)
e, c = getChunkAvg(evtmp,chtmp)
evAvgs.append(e)
chAvgs.append(c)
evtmp=[]
chtmp=[]
evtmp.append(evTimes[i])
chtmp.append(chronoTimes[i])
if len(evtmp)==testNum:
e, c = getChunkAvg(evtmp,chtmp)
evAvgs.append(e)
chAvgs.append(c)
#####################
# shrink input arrays
#####################
if resType=="cos":
chunkSizes=chunkSizes[0:len(chunkSizes):testNum]
N=N[0:len(N):testNum]
M=M[0:len(M):testNum]
#nStreams=nStreams[0:len(nStreams):testNum]
elif resType=="mat":
matSize=matSize[0:len(matSize):testNum]
numMat=numMat[0:len(numMat):testNum]
elif resType=="blu":
imgNum=imgNum[0:len(imgNum):testNum]
sizeImg=sizeImg[0:len(sizeImg):testNum]
lbls=lbls[0:len(lbls):testNum]
nStreams=nStreams[0:len(nStreams):testNum]
blocks=blocks[0:len(blocks):testNum]
grids=grids[0:len(grids):testNum]
print("Event Averages: ",evAvgs)
print("Chrono Averages: ",chAvgs)
print("LEN Event Avg: ",len(evAvgs))
print("LEN Chrono Avg: ",len(chAvgs))
print("LBLs: ",lbls)
#########################
# write avgs in csv files
#########################
lbl=lbls[0]
                with open(csvPath, "w", newline="") as fcsv:
                    writer = csv.writer(fcsv)
writer.writerow([lbl])
writeCaption(resType, writer)
for i in range(len(evAvgs)):
if(lbl!=lbls[i]):
lbl=lbls[i]
writer.writerow([lbl])
writeCaption(resType, writer)
if resType=="cos":
#import pdb; pdb.set_trace()
writer.writerow([evAvgs[i],chAvgs[i],N[i],M[i],chunkSizes[i],nStreams[i],blocks[i],grids[i]])
elif resType=="mat":
writer.writerow([evAvgs[i],chAvgs[i],numMat[i],matSize[i],nStreams[i],blocks[i],grids[i]])
elif resType=="blu":
writer.writerow([evAvgs[i],chAvgs[i],imgNum[i],sizeImg[i],nStreams[i],blocks[i],grids[i]])
####################
### WRITE TO CSV ###
####################
def writeCaption(resType, writer):
if resType=="cos":
writer.writerow(['EVENTS','CHRONO','N SIZE','M ITERS','CHUNK','N STREAMS','BLOCK','GRID'])
elif resType=="mat":
writer.writerow(['EVENTS','CHRONO','NUM MAT','MAT SIZE','N STREAMS','BLOCK','GRID'])
elif resType=="blu":
writer.writerow(['EVENTS','CHRONO','NUM IMG','IMG SIZE','N STREAMS','BLOCK','GRID'])
#########################
### GET DATA FROM TXT ###
#########################
def getCosDatas(path):
    linesSeq = open(path, 'r')
charsSeq = [line.rstrip('\n') for line in linesSeq]
tokens=[]
lbls=[]
evTimes=[]
chronoTimes=[]
chunkSizes=[]
N=[]
M=[]
nStreams=[]
blocks=[]
grids=[]
#import pdb; pdb.set_trace()
for line in charsSeq:
tokens= line.split(',',11)
lbls.append(tokens[0])
evTimes.append(float(tokens[1]))
chronoTimes.append(float(tokens[2]))
chunkSizes.append(int(tokens[3]))
N.append(int(tokens[4]))
M.append(int(tokens[5]))
nStreams.append(int(tokens[6]))
blocks.append(int(tokens[8]))
grids.append(int(tokens[9]))
return lbls, evTimes, chronoTimes, chunkSizes, N, M, nStreams, blocks, grids
def getMatDatas(path):
    linesSeq = open(path, 'r')
charsSeq = [line.rstrip('\n') for line in linesSeq]
tokens=[]
lbls=[]
evTimes=[]
chronoTimes=[]
matSize=[]
numMat=[]
nStreams=[]
blocks=[]
grids=[]
for line in charsSeq:
tokens= line.split(',',11)
lbls.append(tokens[0])
evTimes.append(float(tokens[1]))
chronoTimes.append(float(tokens[2]))
matSize.append(int(tokens[4]))
numMat.append(int(tokens[5]))
nStreams.append(int(tokens[6]))
blocks.append(int(tokens[7]))
grids.append(int(tokens[8]))
return lbls,evTimes,chronoTimes,matSize,numMat,nStreams,blocks,grids
def getBlurDatas(path):
    linesSeq = open(path, 'r')
charsSeq = [line.rstrip('\n') for line in linesSeq]
tokens=[]
lbls=[]
evTimes=[]
chronoTimes=[]
imgNum=[]
sizeImg=[]
nStreams=[]
blocks=[]
grids=[]
for line in charsSeq:
tokens= line.split(',',11)
lbls.append(tokens[0])
evTimes.append(float(tokens[1]))
chronoTimes.append(float(tokens[2]))
sizeImg.append(int(tokens[3]))
imgNum.append(int(tokens[4]))
nStreams.append(int(tokens[5]))
blocks.append(int(tokens[7]))
grids.append(int(tokens[8]))
return lbls,evTimes,chronoTimes,sizeImg,imgNum,nStreams,blocks,grids
###############
### GET AVG ###
###############
def getChunkAvg(evT,chT):
evT.remove(min(evT))
chT.remove(min(chT))
evT.remove(max(evT))
chT.remove(max(chT))
ch = sum(chT[:])/len(chT)
ev = sum(evT[:])/len(evT)
return ev,ch
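# Worked example for getChunkAvg (note that it mutates its input lists):
# getChunkAvg([5, 1, 3, 2, 4], [50, 10, 30, 20, 40]) drops one min and one
# max from each list and averages what is left -> (3.0, 30.0).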
if __name__ == "__main__":
main()
# ---- file: /RNN-tensorflow/RNN-GRU.py (repo: Xavier-Pan/Hello-Github) ----
'''
A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
# Import MNIST data
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
'''
This script adapts the MNIST LSTM example to IMDB sentiment classification:
each review is treated as a sequence of 80 timesteps, where every timestep
is a 128-dimensional word-embedding vector.
'''
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 100
display_step = 10
imdb = np.load('imdb_word_emb.npz')
X_train = imdb['X_train']
y_train_ = imdb['y_train']
X_test = imdb['X_test']
y_test_ = imdb['y_test']
test_size=np.shape(y_test_)[0]
train_size=np.shape(y_train_)[0]
y_train=np.zeros([train_size,2])
y_test=np.zeros([test_size,2])
# one-hot encode the labels (assumes y_train_ / y_test_ hold 0/1 class indices)
y_train[np.arange(train_size), y_train_] = 1
y_test[np.arange(test_size), y_test_] = 1
# Network Parameters
n_input = 128 # dimension of the word embedding at each timestep
n_steps = 80 # timesteps (sequence length per review)
n_hidden = 64 # hidden layer num of features
n_classes = 2 # binary sentiment classes (positive / negative)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.unstack(x, n_steps, 1)
# Define a lstm cell with tensorflow
# tf.get_variable_scope().reuse_variables()
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
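# Shape walkthrough for the unstack above (illustrative): an input batch of
# shape (batch_size, n_steps, n_input) = (100, 80, 128) becomes a Python list
# of n_steps = 80 tensors, each of shape (batch_size, n_input) = (100, 128),
# which is the format rnn.static_rnn expects.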
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
total_batch=int(25000/batch_size)
# Launch the graph
with tf.Session() as sess:
sess.run(init)
print ('Accuracy:' ,accuracy.eval({x:X_test, y:y_test}))
# Keep training until reach max iterations
epoch_loss = 0
for i in range(total_batch):
epoch_x = X_train[batch_size*i:batch_size*(i+1),:,:]
epoch_y = y_train[batch_size*i:batch_size*(i+1),:]
epoch_x = epoch_x.reshape((batch_size, 80, 128))
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
_, c = sess.run([optimizer,cost], feed_dict = {x: epoch_x, y: epoch_y})
epoch_loss += c
# Run optimization op (backprop)
if i % display_step == 0:
# Calculate batch accuracy
acc = sess.run(accuracy, feed_dict={x: epoch_x, y: epoch_y})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: epoch_x, y: epoch_y})
print("Iter " + str(i) + ", Minibatch Loss= " + \
"{:}".format(loss) + ", Training Accuracy= " + \
"{:}".format(acc))
print("Optimization Finished!")
print ('Accuracy:' ,accuracy.eval({x:X_test, y:y_test}))
# Calculate accuracy for 128 mnist test images
# ---- file: /playground/find_domain_occurences.py (repo: surchs/dataset_search_demo) ----
import json
from pathlib import Path
import pandas as pd
import requests
from requests.auth import HTTPBasicAuth
NIDM_CONTEXT = '''
PREFIX afni: <http://purl.org/nidash/afni#>
PREFIX ants: <http://stnava.github.io/ANTs/>
PREFIX bids: <http://bids.neuroimaging.io/>
PREFIX birnlex: <http://bioontology.org/projects/ontologies/birnlex/>
PREFIX crypto: <http://id.loc.gov/vocabulary/preservation/cryptographicHashFunctions#>
PREFIX datalad: <http://datasets.datalad.org/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dcat: <http://www.w3.org/ns/dcat#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dctypes: <http://purl.org/dc/dcmitype/>
PREFIX dicom: <http://neurolex.org/wiki/Category:DICOM_term/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX freesurfer: <https://surfer.nmr.mgh.harvard.edu/>
PREFIX fsl: <http://purl.org/nidash/fsl#>
PREFIX ilx: <http://uri.interlex.org/base/>
PREFIX ncicb: <http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#>
PREFIX ncit: <http://ncitt.ncit.nih.gov/>
PREFIX ndar: <https://ndar.nih.gov/api/datadictionary/v2/dataelement/>
PREFIX nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>
PREFIX nidm: <http://purl.org/nidash/nidm#>
PREFIX niiri: <http://iri.nidash.org/>
PREFIX nlx: <http://uri.neuinfo.org/nif/nifstd/>
PREFIX obo: <http://purl.obolibrary.org/obo/>
PREFIX onli: <http://neurolog.unice.fr/ontoneurolog/v3.0/instrument.owl#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX pato: <http://purl.obolibrary.org/obo/pato#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX scr: <http://scicrunch.org/resolver/>
PREFIX sio: <http://semanticscience.org/ontology/sio.owl#>
PREFIX spm: <http://purl.org/nidash/spm#>
PREFIX vc: <http://www.w3.org/2006/vcard/ns#>
PREFIX xml: <http://www.w3.org/XML/1998/namespace>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
'''
DOG_ROOT = 'http://star.braindog.net'
DOG_DB = 'nidm-openneuro'
DOG_PORT = 5820
query_url = f'{DOG_ROOT}:{DOG_PORT}/{DOG_DB}/query'
headers = {'Content-Type': 'application/sparql-query', 'Accept': 'application/sparql-results+json'}
OUT_PATH = Path(__file__).parent / '../data'
def parse_response(resp):
_results = json.loads(resp.decode('utf-8'))
return pd.DataFrame([{k: v['value'] for k, v in res.items()} for res in _results['results']['bindings']])
# Find all ages
data_element_query = '''
SELECT DISTINCT ?label ?description ?source ?concept ?levels
WHERE {
?de a/rdfs:subClassOf* nidm:DataElement.
OPTIONAL {?de rdfs:label ?label . } .
OPTIONAL {?de dct:description ?description . } .
OPTIONAL {?de nidm:sourceVariable ?source . } .
OPTIONAL {?de nidm:isAbout ?concept . } .
OPTIONAL {?de nidm:levels ?levels . } .
}
'''
response = requests.post(url=query_url, data=NIDM_CONTEXT + data_element_query, headers=headers,
auth=HTTPBasicAuth('admin', 'admin'))
de = parse_response(response.content)
# Match a number of things
def match(df, cols, keywords):
"""
Create an index where any string in the cols matches any of the keywords
"""
return [any([str(word).lower() in str(row[col]).lower()
for col in cols
for word in keywords])
for rid, row in df.iterrows()]
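# Illustrative use of match() on a toy frame (hypothetical values):
#   toy = pd.DataFrame({'label': ['Age at scan', 'Handedness'],
#                       'source': ['age', 'hand']})
#   match(toy, ['label', 'source'], ['age']) -> [True, False]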
# Diagnosis
columns = ['concept', 'description', 'label', 'source']
diag_keys = ['diagnosis', 'disorder', 'condition', 'clinical', 'medical', 'disease', 'syndrome', 'impairment', 'health',
'control', 'typical', 'group']
diagnosis_index = match(de, columns, diag_keys)
de_diagnosis = de.loc[diagnosis_index]
# Age
age_keys = ['age', 'years', 'birth']
age_index = match(de, columns, age_keys)
de_age = de.loc[age_index]
# Sex
sex_keys = ['sex', 'gender', 'male', 'female']
sex_index = match(de, columns, sex_keys)
de_sex = de.loc[sex_index]
# Assessment
instrument_keys = ['assessment', 'response', 'test', 'instrument', 'symptom', 'observation']
instrument_index = match(de, columns, instrument_keys)
de_instrument = de.loc[instrument_index]
# No concepts
de_no_concepts = de.query('concept.isna()', engine='python')
# Unclassified
any_index = [not any(i) for i in zip(*[diagnosis_index, age_index, sex_index, instrument_index])]
de_unclassified = de.loc[any_index]
# Save the dataframes
de_diagnosis.to_csv(OUT_PATH / 'de_diagnosis.tsv', sep='\t')
de_age.to_csv(OUT_PATH / 'de_age.tsv', sep='\t')
de_sex.to_csv(OUT_PATH / 'de_sex.tsv', sep='\t')
de_instrument.to_csv(OUT_PATH / 'de_instrument.tsv', sep='\t')
de_no_concepts.to_csv(OUT_PATH / 'de_no_concepts.tsv', sep='\t')
de_unclassified.to_csv(OUT_PATH / 'de_unclassified.tsv', sep='\t')
print('Done')
# ---- file: /ChupiFlum/loginusers/apps.py (repo: IvanVilla1585/RefrescosChupiFlum) ----
from __future__ import unicode_literals
from django.apps import AppConfig
class LoginusersConfig(AppConfig):
name = 'loginusers'
# ---- file: /python/chapter06/6.4_main.py (repo: seratch/learning-ci-scala) ----
#!/usr/bin/env python
import docclass
classifier = docclass.classifier()
classifier.train_sample_documents()
print(classifier.get_word_prb('quick', 'good'))
# ---- file: /finalproject/collegesearch/migrations/0002_remove_region_neighbors.py (repo: robirahman/csci29-final-project) ----
# Generated by Django 3.2 on 2021-05-02 20:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("collegesearch", "0001_initial")]
operations = [migrations.RemoveField(model_name="region", name="neighbors")]
# ---- file: /tests/test_deployment.py (repo: QARancher/k8s_client) ----
import pytest
from helpers.k8s_deployment import K8sDeployment
from helpers.k8s_image import K8sImage
from tests.asserts_wrapper import assert_not_none, assert_equal, assert_in_list
from tests.basetest import BaseTest
mysql_image_obj = K8sImage(image_name="mysql", version="8.0.0")
alpine_image_obj = K8sImage(image_name="alpine", version="latest")
@pytest.mark.parametrize("image", [alpine_image_obj, mysql_image_obj],
ids=[alpine_image_obj.image_name, mysql_image_obj.image_name])
class TestK8sDeployment(BaseTest):
"""
    Test class for functionality tests of deployments.
    Multiple images can be run for each test; use @pytest.mark.parametrize to
    pass a list of image objects (K8sImage).
    Steps:
    0. Create a namespace named 'test'; uses the fixture 'create_namespace'.
1. Create deployment and verify that the deployment is running by checking
the status of the deployment, returned in the k8s object.
2. Get list of all deployments running in name space, verify that the
created images are in the list.
3. Get list of names of deployments, verify that all created images are in
the list name
4. Get pods of deployments
"""
@pytest.mark.dependency(name="create_deployment")
def test_create_deployment(self, orc, create_namespace, image):
deployment_obj = K8sDeployment(name=image.image_name)
deployment_obj.namespace = create_namespace
deployment_obj.labels = {"app": image.image_name}
deployment_obj.selector.update(
{"matchLabels": {"app": image.image_name}})
deployment_obj.add_container_to_deployment(image_obj=image,
command="sleep 99")
res = orc.deployment.create(body=deployment_obj, max_threads=5)
assert_not_none(actual_result=res)
res = orc.deployment.get(name=image.image_name,
namespace=create_namespace)
assert_equal(actual_result=res.metadata.name,
expected_result=image.image_name)
@pytest.mark.dependency(name="deployment_list",
depends=["create_deployment"])
def test_list_deployments(self, orc, image, create_namespace):
dep_list = orc.deployment.list(all_namespaces=True)
assert_not_none(actual_result=dep_list)
filtered_dep_list = [dep.status.available_replicas for dep in dep_list
if image.image_name in dep.metadata.name]
assert_not_none(actual_result=filtered_dep_list)
assert_in_list(searched_list=filtered_dep_list, wanted_element=1)
@pytest.mark.dependency(name="deployments_names_list",
depends=["create_deployment"])
def test_list_names_deployments(self, orc, image, create_namespace):
dep_list = orc.deployment.list_names(namespace=create_namespace)
assert_not_none(actual_result=dep_list)
assert_in_list(searched_list=dep_list, wanted_element=image.image_name)
@pytest.mark.dependency(name="get_deployment",
depends=["create_deployment"])
def test_get_deployment(self, orc, image, create_namespace):
dep = orc.deployment.get(name=image.image_name,
namespace=create_namespace)
assert_not_none(actual_result=dep)
assert_equal(actual_result=dep.metadata.name,
expected_result=image.image_name)
@pytest.mark.dependency(name="get_pods_of_deployment",
depends=["create_deployment"])
def test_get_pods_of_deployment(self, orc, image, create_namespace):
pod_list = orc.deployment.get_pods(name=image.image_name,
namespace=create_namespace)
assert_not_none(actual_result=pod_list)
for pod in pod_list:
assert_equal(actual_result=pod.status.phase,
expected_result="Running",
message=f"Pod {pod.metadata.name} is not running "
f"for deployment: {image.image_name} "
f"in namespace: {create_namespace}")
|
[
"yakovpids@gmail.com"
] |
yakovpids@gmail.com
|
d3061ef35686197d532d71f2d42b12c5f5c92bfd
|
ef373530387ddd38930aa06d0b7c63451463fc74
|
/QC/PIRCH_average_TSS.py
|
fd3457ff25bb37803254365bc9a7439be0a62545
|
[] |
no_license
|
zhouyu/PIRCh
|
641f11c722caac64cbe287c5a2017f35172ae0bf
|
e207d2073a7260717f256bfdeefbd951090c2708
|
refs/heads/master
| 2020-11-29T09:14:26.350336
| 2019-04-11T02:37:17
| 2019-04-11T02:37:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
#!/usr/bin/python
from __future__ import division
import sys
import re
import math
reffile = sys.argv[1]  # reference annotation file
TSSfile = sys.argv[2]  # file listing the TSS identifiers to profile
chrfile = sys.argv[3]  # chromosome list
outfile = sys.argv[-1]
bedfile = []  # the bedGraph files are read in below
chrs=[]
count=[]
chip={}
total_reads={}
TSScount=0
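# chip[bed][i] accumulates log2(normalized coverage) at offset i within the
# +/-2 kb window around each selected TSS; total_reads[bed] is the total
# signal per bedGraph file, used below to scale coverage to reads-per-billion.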
for i in range(4,len(sys.argv)-1):
bedfile.append(sys.argv[i])
chipfile = open(outfile,'w')
chipfile.write("Chip_file")
for i in range(4000):
chipfile.write("\t%d"%(i))
chipfile.write("\n")
for line in open (chrfile):
line=line.rstrip()
data=line.split('\t')
chrs.append(data[0])
for line in open (TSSfile):
line=line.rstrip()
data=line.split('\t')
count.append(data[0])
TSScount+=1
for bed in bedfile:
total_reads[bed]=0
chip[bed]={}
for i in range(4000):
chip[bed][i]=0
for line in open(bed):
line=line.rstrip()
data=line.split('\t')
total_reads[bed]+=float(data[3])*(int(data[2])-int(data[1]))
for chr in chrs:
flag={}
for line in open (reffile):
if re.match('#', line): print(chr)
else:
line=line.rstrip()
data=line.split('\t')
if data[2] == chr:
if data[1] in count:
tss=int(data[4])
for i in range(tss-2000,tss+2000):
flag[i]=1
exp={}
for bed in bedfile:
exp[bed]={}
for line in open(bed):
line=line.rstrip()
data=line.split('\t')
if data[0] == chr:
for i in range(int(data[1]),int(data[2])+1):
if i in flag and float(data[3]) > 0:
exp[bed][i]=float(data[3])*10**9/total_reads[bed]
for line in open (reffile):
line=line.rstrip()
data=line.split('\t')
if data[2] == chr:
if data[1] in count:
tss=int(data[4])
for bed in bedfile:
for i in range(tss-2000,tss+2000):
if i in exp[bed] and exp[bed][i] != 0:
chip[bed][i-tss+2000]+=math.log(exp[bed][i],2)
for bed in bedfile:
chipfile.write("%s"%(bed))
for i in range(4000):
chipfile.write("\t%f"%(chip[bed][i]/TSScount))
chipfile.write("\n")
chipfile.close()
|
[
"noreply@github.com"
] |
zhouyu.noreply@github.com
|
c2563228793b610809dd5b77e7f387e38db5016b
|
9bb1042feea90a82101aa7df4f4befefd6f2d3d3
|
/tests/unit/states/lvs_server_test.py
|
e71442b602b31d8c18cabd81da2bed4ca05e5519
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
lyft/salt
|
a5243b02553968eb94d5b8c381eb6dd433654f8f
|
2715908423a412f736253d0e5d3cfe185a0179a2
|
refs/heads/stable-2015.8
| 2023-08-20T11:13:33.249352
| 2019-10-14T17:38:56
| 2019-10-14T17:38:56
| 18,227,966
| 3
| 1
|
NOASSERTION
| 2019-10-14T17:38:58
| 2014-03-28T22:09:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,476
|
py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import lvs_server
lvs_server.__salt__ = {}
lvs_server.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LvsServerTestCase(TestCase):
'''
Test cases for salt.states.lvs_server
'''
# 'present' function tests: 1
def test_present(self):
'''
Test to ensure that the named service is present.
'''
name = 'lvsrs'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock_check = MagicMock(side_effect=[True, True, True, False, True,
False, True, False, False, False,
False])
mock_edit = MagicMock(side_effect=[True, False])
mock_add = MagicMock(side_effect=[True, False])
with patch.dict(lvs_server.__salt__, {'lvs.check_server': mock_check,
'lvs.edit_server': mock_edit,
'lvs.add_server': mock_add}):
with patch.dict(lvs_server.__opts__, {'test': True}):
comt = ('LVS Server lvsrs in service None(None) is present')
ret.update({'comment': comt})
self.assertDictEqual(lvs_server.present(name), ret)
comt = ('LVS Server lvsrs in service None(None) is present '
'but some options should update')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(lvs_server.present(name), ret)
with patch.dict(lvs_server.__opts__, {'test': False}):
comt = ('LVS Server lvsrs in service None(None) '
'has been updated')
ret.update({'comment': comt, 'result': True,
'changes': {'lvsrs': 'Update'}})
self.assertDictEqual(lvs_server.present(name), ret)
comt = ('LVS Server lvsrs in service None(None) '
'update failed(False)')
ret.update({'comment': comt, 'result': False, 'changes': {}})
self.assertDictEqual(lvs_server.present(name), ret)
with patch.dict(lvs_server.__opts__, {'test': True}):
comt = ('LVS Server lvsrs in service None(None) is not present '
'and needs to be created')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(lvs_server.present(name), ret)
with patch.dict(lvs_server.__opts__, {'test': False}):
comt = ('LVS Server lvsrs in service None(None) '
'has been created')
ret.update({'comment': comt, 'result': True,
'changes': {'lvsrs': 'Present'}})
self.assertDictEqual(lvs_server.present(name), ret)
comt = ('LVS Service lvsrs in service None(None) '
'create failed(False)')
ret.update({'comment': comt, 'result': False, 'changes': {}})
self.assertDictEqual(lvs_server.present(name), ret)
# 'absent' function tests: 1
def test_absent(self):
'''
Test to ensure the LVS Real Server in specified service is absent.
'''
name = 'lvsrs'
ret = {'name': name,
'result': None,
'comment': '',
'changes': {}}
mock_check = MagicMock(side_effect=[True, True, True, False])
mock_delete = MagicMock(side_effect=[True, False])
with patch.dict(lvs_server.__salt__, {'lvs.check_server': mock_check,
'lvs.delete_server': mock_delete}):
with patch.dict(lvs_server.__opts__, {'test': True}):
comt = ('LVS Server lvsrs in service None(None) is present'
' and needs to be removed')
ret.update({'comment': comt})
self.assertDictEqual(lvs_server.absent(name), ret)
with patch.dict(lvs_server.__opts__, {'test': False}):
comt = ('LVS Server lvsrs in service None(None) '
'has been removed')
ret.update({'comment': comt, 'result': True,
'changes': {'lvsrs': 'Absent'}})
self.assertDictEqual(lvs_server.absent(name), ret)
comt = ('LVS Server lvsrs in service None(None) removed '
'failed(False)')
ret.update({'comment': comt, 'result': False, 'changes': {}})
self.assertDictEqual(lvs_server.absent(name), ret)
comt = ('LVS Server lvsrs in service None(None) is not present,'
' so it cannot be removed')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(lvs_server.absent(name), ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(LvsServerTestCase, needs_daemon=False)
|
[
"jayeshk@saltstack.com"
] |
jayeshk@saltstack.com
|
77b63cee5d360600ff658625946036a082545d0f
|
0d44e67224e774f7a53190eb8c89d5d3ea9c205c
|
/excercises/fundamentals/letter_swap.py
|
c42d18d301f08080175ff1d7c3b669afc7032d86
|
[] |
no_license
|
katarinarosiak/python_fundamentals
|
c7d84db53623ad25ba0847220add1b5a3e613e2a
|
d78197a9a69c697ff83d10d1ed7b59df52dc1a8a
|
refs/heads/master
| 2023-08-17T19:03:24.084150
| 2021-09-13T16:33:10
| 2021-09-13T16:33:10
| 363,259,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
# Letter Swap
# Given a string of words separated by spaces, write a method that takes this string of words and returns a string in which the first and last letters of every word are swapped.
# You may assume that every word contains at least one letter, and that the string will always contain at least one word. You may also assume that each string contains nothing but words and spaces
# - separate the string into a list of words
# - iterate through each word and split it into a list of characters
# - assign the first letter to a var and the last letter to another var
# - swap the first and the last letters
# - join the characters back into a word
# - join the words with spaces
# - return
def swap(sentence):
    words = sentence.split(" ")
final = []
for word in words:
word = list(word)
first = word[0]
last = word[-1]
word[0] = last
word[-1] = first
final.append("".join(word))
return " ".join(final)
# Examples:
# Copy Code
print(swap("Oh what a wonderful day it is") == "hO thaw a londerfuw yad ti si")
print(swap("Abcde") == "ebcdA")
print(swap("a") == "a")
|
[
"katarinarosiak@gmail.com"
] |
katarinarosiak@gmail.com
|
04f475eb45a82c1c921c8bc5e70513d38abb3ebb
|
a59a26dbd28bc8c7fa6bb1083478a4b279e3a1de
|
/evaluating/baselines/french/parse_larousse.py
|
7b64b00dd290f64f57e0107fea97ffbbd74fb47b
|
[] |
no_license
|
ksipos/polysemy-assessment
|
4696a2ec8f2f8d5a1d5c5685cd34274a36bcb245
|
26543640ed6cd826f2e86abb45571c2fa281464a
|
refs/heads/master
| 2021-04-17T13:02:37.876218
| 2020-05-13T08:15:56
| 2020-05-13T08:15:56
| 251,556,520
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
from os import listdir
from bs4 import BeautifulSoup
filenames = ['larousse/' + f for f in listdir('larousse')]
output = open('larousse_list.txt', 'w')
total = len(filenames)
for name in filenames:
with open(name, 'r') as f:
html = f.read()
soup = BeautifulSoup(html, 'lxml')
total_senses = 0
try:
content = soup.find(id="definition")
for pos in content.find_all("li", {"class": "DivisionDefinition"}):
total_senses += 1
output.write(name.split("/")[-1][:-5] +
"," + str(total_senses) + "\n")
except Exception:
continue
output.close()
|
[
"ksipos@windowslive.com"
] |
ksipos@windowslive.com
|
d9d098ee0007add2e32fdbac268dc362a2cfbd0c
|
13f900b9dc0c3e838ff788febaa59514b97d1128
|
/Proyecto/api/serializers.py
|
66036ceb8053f9bfd04791ba6c87d07ca4547de9
|
[] |
no_license
|
JorgitoR/App-Proyectos-Slabcode
|
68439c5fe0dbe58a004b9f04be807f6756d84a7f
|
173ea655bf00f8b5ae7fb0eb4ee0cf0ed5e6f3a7
|
refs/heads/main
| 2023-04-12T21:52:16.339073
| 2021-04-10T21:02:57
| 2021-04-10T21:02:57
| 356,660,392
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from django.utils.timesince import timesince
from Proyecto.models import Proyecto, Tarea
from rest_framework.validators import UniqueValidator
from django.contrib.auth.password_validation import validate_password
from datetime import datetime, timedelta
import logging
logger = logging.getLogger(__name__)
Usuario = get_user_model()
from Prueba.db.mail import send_mail_async as send_mail
from django.conf import settings
class UsarioDisplaySerializer(serializers.ModelSerializer):
class Meta:
model = Usuario
fields = [
'username',
'first_name',
'last_name',
]
class ProyectoSerializer(serializers.ModelSerializer):
usuario = UsarioDisplaySerializer(read_only=True)
creado_por = UsarioDisplaySerializer(read_only=True)
class Meta:
model = Proyecto
fields = [
'id',
'numero',
'creado_por',
'titulo',
'socio',
'usuario',
'descripcion',
'estado',
'creado_tiempo'
]
def get_fecha_display(self, obj):
return obj.creado_tiempo.strftime("%b %d %I:%M %p")
def get_timesince(self, obj):
return "Hace " + timesince(obj.creado_tiempo)
class RegistrarSerializer(serializers.ModelSerializer):
email = serializers.EmailField(
required=True,
validators = [UniqueValidator(queryset=Usuario.objects.all())]
)
password = serializers.CharField(write_only=True, required=True, validators=[validate_password])
password2 = serializers.CharField(write_only=True, required=True)
class Meta:
model = Usuario
fields = [
"username",
"password",
"password2",
"email",
"first_name",
"last_name"
]
extra_kwargs = {
'first_name':{"required":True},
'last_name':{"required":True}
}
def validate(self, attrs):
if attrs['password'] != attrs['password2']:
raise serializers.ValidationError({'password': 'Las contraseñas no concuerdan'})
return attrs
def create(self, validated_data):
usuario = Usuario.objects.create(
username = validated_data['username'],
email = validated_data['email'],
first_name = validated_data['first_name'],
last_name = validated_data['last_name']
)
usuario.set_password(validated_data['password'])
usuario.save()
if usuario:
self.enviar_email(usuario)
return usuario
def enviar_email(self, obj):
email = []
if obj.email:
print(obj.email)
email.append(obj.email)
if len(email):
logger.info("[Usuario %s] Enviando credenciales al correo %s", obj.username, obj.email)
values = {
'nombre':obj.first_name,
'apellido':obj.last_name,
'titulo':'Credenciales De inicio de sesion',
'username': obj.username,
'password': 'clave1234',
'sign': settings.SITIO_HEADER,
}
email_template = settings.CREDENCIALES_USUARIO
try:
send_mail(
'[{app}][{usuario}] Credenciales de inicio de sesion'.format(app=settings.APP_NAME, usuario=obj.username),
email_template.format(**values),
settings.APP_EMAIL,
email
)
except Exception as e:
logger.warning("[Tarea #%S] Error tratando de enviar un Email a la tarea creada - %s: %s",
obj.username, e.__class__.__name__, str(e)
)
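        # Note (added): send_mail here is the project's async wrapper
        # (imported as send_mail_async above), so a mail failure is logged
        # as a warning and does not abort user creation.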
class UsuarioSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
def create(self, validated_data):
user = Usuario.objects.create(
username = validated_data['username']
)
user.set_password(validated_data['password'])
user.save()
return user
class Meta:
model = Usuario
fields = ("id", "username", "password")
class TareaSerializer(serializers.ModelSerializer):
usuario = UsarioDisplaySerializer(read_only=True)
proyecto = ProyectoSerializer(read_only=True)
class Meta:
model = Tarea
fields = (
'proyecto',
'usuario',
'descripcion',
'terminado',
'fecha'
)
    def validate_fecha(self, value):
        # DRF field-level validation (replaces the Django-forms style
        # 'clean' hook, which DRF never calls): reject dates in the past
        if value < datetime.today().date():
            raise serializers.ValidationError("La fecha no puede ser del pasado")
        return value
|
[
"jorgitouribe133@gmail.com"
] |
jorgitouribe133@gmail.com
|
5c12ac510f60e933cac12be692128077b9bfb0d3
|
3d0cdb529217a84ca33cfaf1f614ccfc96050fe9
|
/Evaluation.py
|
abb150e66fe6feceac5234281a4f6fbdb901b20e
|
[] |
no_license
|
linharesh/now-neo4j
|
5ce9cacac132ee4a1011659216a5460c9600d0ae
|
483fba901551c11efaf8effd803516c827efb750
|
refs/heads/master
| 2020-09-11T17:37:58.459323
| 2019-11-16T18:20:02
| 2019-11-16T18:20:02
| 222,140,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
class Evaluation:
    def __init__(self, trial_id, ev_id, checkpoint, cc_id, activation_id,
                 representation, member_container_activation_id,
                 member_container_id, name, typeof):
self.trial_id = trial_id
self.ev_id = ev_id
self.checkpoint = checkpoint
self.cc_id = cc_id
self.activation_id = activation_id
self.representation = representation
self.member_container_activation_id = member_container_activation_id
self.member_container_id = member_container_id
self.name = name
self.typeof = typeof
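# Hedged alternative (added sketch; the field types are guesses, not from the
# original file): on Python 3.7+ the same record could be a dataclass, which
# generates __init__ and __repr__ automatically:
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class Evaluation:
#       trial_id: int
#       ev_id: int
#       checkpoint: float
#       cc_id: int
#       activation_id: int
#       representation: str
#       member_container_activation_id: int
#       member_container_id: int
#       name: str
#       typeof: str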
|
[
"linhares.h@gmail.com"
] |
linhares.h@gmail.com
|
11fc195b7e715240f72f316eca12e705de9be2d9
|
23afbfe3f8dbe8d1e89e746edfb7f2b920109a7f
|
/test.py
|
1f52b08929879dd76afbfc03364921d5a304fe96
|
[] |
no_license
|
samensah/Neuroscience-Lab
|
3f2695a928e27ba3b3be0934596b9f709e75321f
|
a54e57b087c902fdece990aa94c2f6ec3bb06b46
|
refs/heads/master
| 2021-01-10T03:56:18.229772
| 2020-06-28T14:06:57
| 2020-06-28T14:06:57
| 55,796,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
#__author__ = 'samuel'
from __future__ import division
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
"""
Created on Wed Feb 25 13:57:47 2015
Example of use of odeint for system of equations.
Here we solve the HH model
dx/dt = ax-bxy
dy/dt = cxy - dy
or written as
dY/dt = F(Y,t)
where Y = [x,y]
and F(Y,t) = [ax-bxy,cxy-dy]
"""
"""define ODEs"""
def am(v):
return 0.1*(v+40)/(1-np.exp(-(v+40)/10))
def an(v):
return 0.01*(v+55)/(1-np.exp(-(v+55)/10))
def ah(v):
return 0.07*np.exp(-(v+65)/20)
def bm(v):
return 4*np.exp(-(v+65)/18)
def bn(v):
return 0.125*np.exp(-(v+65)/80)
def bh(v):
return 1/(1+np.exp(-(v+35)/10))
def Ie(t):
return 2
def f(Y, t, p):
vdot = p[0]*Y[1]**3*Y[3]*(p[3]-Y[0])+p[1]*Y[2]**4*(p[4]-Y[0])+p[2]*(p[5]-Y[0])+Ie(t)
mdot = am(Y[0])*(1-Y[1])-bm(Y[0])*Y[1]
ndot = an(Y[0])*(1-Y[2])-bn(Y[0])*Y[2]
hdot = ah(Y[0])*(1-Y[3])-bh(Y[0])*Y[3]
return [vdot, mdot, ndot, hdot]
"""initial conditions"""
Y0 = [-65, am(-65)/(am(-65)+bm(-65)), an(-65)/(an(-65)+bn(-65)), ah(-65)/(ah(-65)+bh(-65))]
print(Y0)
"""Values of the parameters"""
gn = 120
gk = 36
gl = 0.3
ENa = 50
EK = -77
El = -54.4
"""Lump parameters together to pass to ODE"""
p = [gn, gk, gl, ENa, EK, El]
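# Note (added): p[0:3] are the maximal conductances (g_Na, g_K, g_leak) and
# p[3:6] the corresponding reversal potentials (E_Na, E_K, E_leak), matching
# their use in f() above.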
"""Time to simulate system over"""
t = np.linspace(0, 100, 5000)
"""Solve system"""
Y = odeint(f, Y0, t, args=(p, ))
"""plot system"""
plt.plot(Y[:,2], Y[:,3])
plt.show()
plt.plot(t, Y[:,0],'r',label='Voltage')
plt.legend()
plt.xlabel('Time')
plt.ylabel('V')
plt.show()
plt.plot(t, Y[:,1], 'b', label='m')
plt.plot(t, Y[:,2], 'r', label='n')
plt.plot(t, Y[:,3], 'y', label='h')
plt.legend()
plt.xlabel('Time')
plt.ylabel('gating variables')
plt.show()
|
[
"samuel@aims.edu.gh"
] |
samuel@aims.edu.gh
|
d0280f52f5bb8c08f51b8de7dbd97c8001954c21
|
cf790f8169918606770683da323c436a20f4fb04
|
/torchaudio/datasets/libritts.py
|
2c978c426ee15546c012b1ad09d19b3e93f47190
|
[
"BSD-2-Clause"
] |
permissive
|
krishnakalyan3/audio
|
f6d3dabf456cd5ce9a8f033b83301ce7bff47f19
|
0cd25093626d067e008e1f81ad76e072bd4a1edd
|
refs/heads/master
| 2023-07-05T08:20:37.366199
| 2021-11-15T14:23:52
| 2021-11-15T14:23:52
| 176,415,764
| 1
| 0
|
BSD-2-Clause
| 2019-03-19T03:27:23
| 2019-03-19T03:27:22
| null |
UTF-8
|
Python
| false
| false
| 4,967
|
py
|
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
"http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
"http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
"http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
"http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
"http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
"http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}
def load_libritts_item(
fileid: str,
path: str,
ext_audio: str,
ext_original_txt: str,
ext_normalized_txt: str,
) -> Tuple[Tensor, int, str, str, int, int, str]:
speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
utterance_id = fileid
normalized_text = utterance_id + ext_normalized_txt
normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)
original_text = utterance_id + ext_original_txt
original_text = os.path.join(path, speaker_id, chapter_id, original_text)
file_audio = utterance_id + ext_audio
file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load original text
with open(original_text) as ft:
original_text = ft.readline()
# Load normalized text
with open(normalized_text, "r") as ft:
normalized_text = ft.readline()
return (
waveform,
sample_rate,
original_text,
normalized_text,
int(speaker_id),
int(chapter_id),
utterance_id,
)
class LIBRITTS(Dataset):
"""Create a Dataset for LibriTTS.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to download.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriTTS"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_original_txt = ".original.txt"
_ext_normalized_txt = ".normalized.txt"
_ext_audio = ".wav"
def __init__(
self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/60/"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
self._walker = sorted(str(p.stem) for p in Path(self._path).glob('*/*/*' + self._ext_audio))
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str, int, int, str):
``(waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id)``
"""
fileid = self._walker[n]
return load_libritts_item(
fileid,
self._path,
self._ext_audio,
self._ext_original_txt,
self._ext_normalized_txt,
)
def __len__(self) -> int:
return len(self._walker)
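# Hedged usage sketch (added; not part of the original module). Assuming the
# archive is available (or downloadable) under ./data:
#
#   dataset = LIBRITTS("./data", url="dev-clean", download=True)
#   (waveform, sample_rate, original_text, normalized_text,
#    speaker_id, chapter_id, utterance_id) = dataset[0]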
|
[
"noreply@github.com"
] |
krishnakalyan3.noreply@github.com
|
b45eae05c2a53c9835a8d4151aa5e70165e04eb3
|
343a325d23e834fc4a099ddd9941f9465e370e05
|
/apps/users/migrations/0001_initial.py
|
c21521ea6354de09aad62e8428699df477321599
|
[] |
no_license
|
maxwell912/social-app
|
2d8cff535962e79806d8d50516f8d5a51c30c937
|
4089c3f084d7460f64517158eefb54b3b93a01e8
|
refs/heads/master
| 2022-12-09T01:35:33.835915
| 2020-09-11T06:29:16
| 2020-09-11T06:29:16
| 294,612,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,949
|
py
|
# Generated by Django 3.0.8 on 2020-08-25 12:31
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(max_length=255, verbose_name='Username')),
('first_name', models.CharField(max_length=255, verbose_name='First name')),
('last_name', models.CharField(max_length=255, verbose_name='Last name')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='Email')),
('is_active', models.BooleanField(default=True, verbose_name='Is active')),
('is_staff', models.BooleanField(default=False, help_text='The user will have access to admin interface.', verbose_name='Is staff')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date joined')),
('phone_number', models.CharField(blank=True, help_text='Users phone number.', max_length=12, null=True, verbose_name='Phone number')),
('avatar', models.ImageField(blank=True, help_text='Users profile photo.', null=True, upload_to='static/imagination', verbose_name='Avatar')),
('birthday', models.DateField(blank=True, help_text="User's date of birth.", null=True, verbose_name='Birthday')),
('gender', models.CharField(blank=True, choices=[('MALE', 'Male'), ('FEMALE', 'Female')], help_text='User gender.', max_length=255, null=True, verbose_name='Gender')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Users',
'db_table': 'users',
'ordering': ('email',),
},
),
]
|
[
"ivan912ivan@gmail.com"
] |
ivan912ivan@gmail.com
|
5cd87059b73544cc78993d00b2fb611eab7bf72a
|
a57def2cdde6249e658afc3022902dc83ab9761d
|
/Conditional/Lista 2/Everton/04.py
|
4f9d0d49627a2c5f6546dd583be9f4a0f59f94bc
|
[
"MIT"
] |
permissive
|
LourdesOshiroIgarashi/algorithms-and-programming-1-ufms
|
09ee5ddf72d11680b1bda7616e63d7d4a7c45153
|
7f0f0cb57223ec41f09c61f6109bc7c5f85f3742
|
refs/heads/main
| 2023-06-07T06:15:47.180937
| 2021-06-28T21:24:36
| 2021-06-28T21:24:36
| 370,764,915
| 7
| 8
|
MIT
| 2021-06-28T21:24:36
| 2021-05-25T16:48:43
|
Python
|
UTF-8
|
Python
| false
| false
| 340
|
py
|
a = int(input('NÚMERO DE ALUNOS: '))
m = int(input('NÚMERO DE MONITORES: '))
total = a + m
qtd_viagens = total // 50
if total % 50 != 0:  # a partial last busload still needs one more trip
    print('Serão necessárias {} viagens para que todos cheguem ao topo'.format(qtd_viagens + 1))
else:
    print('Serão necessárias {} viagens para que todos cheguem ao topo'.format(qtd_viagens))
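# Equivalent ceiling-division form (added sketch; same result without the if):
#   qtd_viagens = -(-(a + m) // 50)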
|
[
"noreply@github.com"
] |
LourdesOshiroIgarashi.noreply@github.com
|
df9aa3e527f8805e42b4c2af45e519a91fe9d185
|
9be08135ec57ea7a40fde31a74e0aba735249165
|
/day2/test_day2.py
|
9db4edc6751d4e01c5a8d3fdb446d884bce4ca6f
|
[] |
no_license
|
johanjeppsson/advent2019
|
4ec696f0714cbfa20f331a956c241dcbb7940a03
|
7158e8621f61e7d9e01f0ac20065613fb0a0f227
|
refs/heads/master
| 2020-09-22T13:44:18.078723
| 2019-12-17T21:19:33
| 2019-12-17T21:23:17
| 225,223,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from utils import get_data
from .__main__ import run
def test_day2():
prog = list(map(int, get_data(__file__).split(",")))
assert run(prog, 12, 2) == 4090689
assert run(prog, 77, 33) == 19690720
assert run(prog, 77, 32) != 19690720
|
[
"johjep@gmail.com"
] |
johjep@gmail.com
|
5ace0bc3c925e242df973b9d28bee8ccfde56724
|
8f01699858df67f8ba211344eaf2cafc73292740
|
/ambuild2/frontend/paths_test.py
|
c9cf26b212da5caf9d9ceb4f36943a4e83b6128f
|
[
"BSD-3-Clause"
] |
permissive
|
alliedmodders/ambuild
|
990132f583cf1a39be768d96b5b4085a8d8c39d9
|
9392da7c7e0a148bf8e0089b88d372a6eb62c65c
|
refs/heads/master
| 2023-01-05T07:20:31.480403
| 2022-12-21T21:30:18
| 2022-12-21T21:30:18
| 19,415,744
| 47
| 37
|
BSD-3-Clause
| 2023-09-12T10:00:04
| 2014-05-04T01:00:38
|
Python
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
# vim: set sts=4 ts=8 sw=4 tw=99 et:
import ntpath
import unittest
from ambuild2.frontend import paths
class IsSubPathTests(unittest.TestCase):
def runTest(self):
self.assertEqual(paths.IsSubPath("/a/b/c", "/a"), True)
self.assertEqual(paths.IsSubPath("/t/b/c", "/a"), False)
self.assertEqual(paths.IsSubPath("t", "./"), True)
self.assertEqual(paths.IsSubPath(r"C:\blah", "C:\\", ntpath), True)
self.assertEqual(paths.IsSubPath(r"C:\blah", "D:\\", ntpath), False)
|
[
"dvander@alliedmods.net"
] |
dvander@alliedmods.net
|