blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4d012607d045af72d09fba80cfd1625c1e627d90 | 3b7474148c07df7f4755106a3d0ada9b2de5efdc | /training/c26_api_requests/client/lab05_HTTPBIN_GETJSON.py | 1e47277097b0a36464989ac7362dd529aaf4bb11 | [] | no_license | juancsosap/pythontraining | 7f67466846138f32d55361d64de81e74a946b484 | 1441d6fc9544042bc404d5c7efffd119fce33aa7 | refs/heads/master | 2021-08-26T05:37:15.851025 | 2021-08-11T22:35:23 | 2021-08-11T22:35:23 | 129,974,006 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | import requests
def main():
    """Fetch httpbin's /get echo endpoint and print the caller's origin IP."""
    resp = requests.get('http://httpbin.org/get')
    if resp.status_code != 200:
        return
    payload = resp.json()  # parsed JSON body as a dict
    print(payload['origin'])


if __name__ == '__main__':
    main()
| [
"user.nuage@gmail.com"
] | user.nuage@gmail.com |
69443fd6ce76067dbc1afcbe80e1509d0d32777d | 411a2fc51902aef288a7382aa3d4950132c2ba7d | /prob_015.py | 679c21e047a18f465bc60e6850e0f504ab847378 | [] | no_license | bdjackson/ProjectEuler | c73a1f55a9e7995f8674b02dc486bbd433cb6ea7 | 3517c39ee5ae3e90499ced2ca47b94bef3f2fb8f | refs/heads/master | 2021-01-01T19:47:17.902466 | 2013-11-03T19:07:39 | 2013-11-03T19:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py | #!/usr/bin/env python
# ============================================================================
import sys
# ===========================================================================
# = http://projecteuler.net/problem=15 =
# = - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - =
# = How paths are there in a 20x20 grid (21x21 nodes) without backtracking =
# ===========================================================================
# ----------------------------------------------------------------------------
def dumpGrid(grid):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print grid
for r in xrange(len(grid)):
line = ""
for c in xrange(len(grid[r])):
line += '%4s' % grid[r][c]
print line
# ----------------------------------------------------------------------------
def getNumPaths(rows, cols):
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
grid = []*(rows+1)
for r in xrange(rows+1):
grid.append([0]*(cols+1))
for r in xrange(rows, -1, -1):
for c in xrange(cols, -1, -1):
print '%d - %d' % (r, c)
right = 0 if c+1 >= cols+1 else grid[r][c+1]
down = 0 if r+1 >= rows+1 else grid[r+1][c]
grid[r][c] = right + down
if grid[r][c] == 0:
grid[r][c] = 1
dumpGrid(grid)
print 'A %dx%d grid has %d paths without backtracking' % ( rows
, cols
, grid[0][0]
)
# ============================================================================
def main():
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
getNumPaths(1,1)
print '====================================================='
getNumPaths(2,2)
print '====================================================='
getNumPaths(3,3)
print '====================================================='
getNumPaths(4,4)
print '====================================================='
getNumPaths(20,20)
# ============================================================================
if __name__ == "__main__":
    # main() returns None, so this exits with status 0.
    sys.exit(main())
# ============================================================================
# ============
# = solution =
# ============
# 137846528820
| [
"bjack3@gmail.com"
] | bjack3@gmail.com |
45de4b48e69e78beab1276afde0493a05412c0fc | de0bf3ddc4fedc14a24e085007d4babe598d2e66 | /generate_col_num.py | a30edd1329a0d56d907ba9f3750e6b63255e1cf6 | [] | no_license | Scandium627/usseful_data_fuction | aa5531d71b4f163a3580a9b9302bf55dc5298095 | 476671e9a5117b587d3dfc6f605e483841c7db1c | refs/heads/master | 2022-08-15T08:11:34.602656 | 2020-05-24T08:09:05 | 2020-05-24T08:09:05 | 208,982,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | import os
import pandas as pd
import csv
def get_files(file_dir):
    """Recursively collect paths of source-like files under *file_dir*.

    A file qualifies when its name ends with one of the tracked suffixes.
    Note: the suffixes carry no leading dot (matching the original or-chain),
    so e.g. a file named 'happy' also ends with 'py' and is collected.
    """
    # str.endswith accepts a tuple: one call instead of six chained 'or's.
    suffixes = ('py', 'css', 'html', 'js', 'vue', 'sh')
    list_file = []
    for root, dirs, files in os.walk(file_dir):
        for name in files:
            if name.endswith(suffixes):
                list_file.append(os.path.join(root, name))
    return list_file
def open_file(file_name):
    """Return the file's content as a list of lines plus newline sentinels.

    First tries pandas' read_table; if that raises (parse/encoding errors),
    falls back to plain UTF-8 text reading; on any further failure prints a
    marker and returns [].
    """
    #if file_name.endswith('py') or file_name.endswith('css') or file_name.endswith('html'):
    try:
        # Fast path: parse with pandas and take the first column as lines.
        # NOTE(review): read_table treats the file's first line as a header,
        # so that line is dropped here -- confirm this is intended.
        data = pd.read_table(file_name)
        list_out = data.values.T.tolist()[0]
        #list_out.insert(0,file_name.replace(project_dir , ''))
        list_out.append('\n')
        return list_out
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        try:
            list_out = []
            with open(file_name,"r", encoding= 'utf-8') as file1:
                #list_out.append(file_name.replace(project_dir, '') + '\n')
                list_out.append('\n')
                for row in file1.readlines():
                    list_out.append(row)
                list_out.append('\n')
            return list_out
        except:
            # Unreadable file (binary / wrong encoding): report and skip.
            print('camnt',file_name)
            return []
def build_input_col_num(input_num):
    """Left-justify *input_num* into a fixed-width column string.

    The number is padded with the remainder of the space template so every
    returned string has the same total width (number + trailing spaces).
    """
    output = "          "
    rep = len(str(input_num))
    # Append only the spaces the digits did not consume.
    return str(input_num)+output[rep:]
if __name__ == '__main__':
    # Total line count across both outputs; used to back-compute the first
    # line number of the tail file so its numbering ends at max_num.
    max_num = 178388
    front_file = 'front_file.txt'
    last_file = 'last_file.txt'
    # Head file: number lines from 1 upward, prefixing each with its column number.
    with open(r'{a}_front_40_col_num.txt'.format(a=front_file.split('.')[0]),'w',newline= '', encoding='gb18030') as file_witre:
        num_front = 1
        list_for_write = open_file(front_file)
        for write_line in list_for_write:
            file_witre.write(build_input_col_num(num_front) +str(write_line)+ '\n')#+ '\n'
            num_front += 1
        print(front_file,num_front)
    # Tail file: numbering starts so the final line lands exactly on max_num.
    with open(r'{a}_last_40_col_num.txt'.format(a=last_file.split('.')[0] ),'w', newline= '', encoding='gb18030') as file_witre:
        list_for_write = open_file(last_file)
        len_list = len(list_for_write)
        num_last = max_num - len_list + 1
        for write_line in list_for_write:
            file_witre.write(build_input_col_num(num_last) +str(write_line)+ '\n')
            num_last += 1
        print(last_file,num_last)
| [
"360134299@qq.com"
] | 360134299@qq.com |
44c46ba93a07fded493503c407a780d5ac75ad25 | 11d8e5c2ea583b837469491b25c5f186368e0b78 | /test/test.py | d4e137565c58df5475b8473dd7a041ca2d254395 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sloria/pattern | dcd77a9e2de9d31677c28208d4c5a0b7dbae55c8 | d1d719540f195f3028e24f4d2e536d73e9fef210 | refs/heads/master | 2021-01-18T08:48:02.400336 | 2013-09-22T14:56:25 | 2013-09-22T14:56:25 | 11,158,672 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import os, sys; sys.path.insert(0, os.path.join(".."))
import unittest
import test_metrics
import test_web
import test_db
import test_de
import test_en
import test_es
import test_fr
import test_it
import test_nl
import test_text
import test_search
import test_vector
import test_graph
#---------------------------------------------------------------------------------------------------
# Run all tests.
# pattern.db tests require a valid username and password for MySQL.
# pattern.web tests require a working internet connection
# and API license keys (see pattern.web.api.py) for Google and Yahoo API's.
def suite():
    """Assemble the full pattern test suite in its canonical module order."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(test_metrics.suite())
    all_tests.addTest(test_web.suite())
    # test_db is the only suite that needs connection credentials.
    all_tests.addTest(test_db.suite(host="localhost", port=3306, username="root", password="root"))
    for module in (test_de, test_en, test_es, test_fr, test_it, test_nl,
                   test_text, test_search, test_vector, test_graph):
        all_tests.addTest(module.suite())
    return all_tests

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=1).run(suite())
"tom@organisms.be"
] | tom@organisms.be |
b415842158a3d373c9b8c0d3a087ea55ea60a8a9 | f4bf81d4e80468331a09401dbaeef12465aca853 | /lib/python/helpers/profiler/prof_util.py | b529b5ece1393da4c6c1318d21f61e7e9cfb71d6 | [] | no_license | nottyo/intellibot | 45c41d673608a0a1291c6387f9d33ef449f18837 | 0547d987deaad90260abe33db5284eae9704eb9b | refs/heads/master | 2020-12-30T23:59:29.795725 | 2017-04-10T07:53:59 | 2017-04-10T07:53:59 | 86,574,980 | 1 | 0 | null | 2017-03-29T11:37:54 | 2017-03-29T11:37:53 | null | UTF-8 | Python | false | false | 3,343 | py | __author__ = 'traff'
import threading
import os
import sys
import tempfile
from _prof_imports import Stats, FuncStat, Function
# Compatibility shim: keep the Python 2 builtin execfile() if present,
# otherwise (Python 3) define an equivalent on top of exec()/compile().
try:
    execfile=execfile #Not in Py3k
except NameError:
    #We must redefine it in Py3k if it's not already there
    def execfile(file, glob=None, loc=None):
        """Execute *file* in the given globals/locals (Py2 execfile clone)."""
        if glob is None:
            import sys
            # Default to the caller's globals, like the Py2 builtin did.
            glob = sys._getframe().f_back.f_globals
        if loc is None:
            loc = glob
        # It seems that the best way is using tokenize.open(): http://code.activestate.com/lists/python-dev/131251/
        # tokenize.open() honors the source file's declared encoding (PEP 263).
        import tokenize
        stream = tokenize.open(file) # @UndefinedVariable
        try:
            contents = stream.read()
        finally:
            stream.close()
        #execute the script (note: it's important to compile first to have the filename set in debug mode)
        exec(compile(contents+"\n", file, 'exec'), glob, loc)
def save_main_module(file, module_name):
    """Re-register the running __main__ under *module_name* and install a
    fresh, empty '__main__' module whose __file__ points at *file*.

    Returns the new module so the caller can execute the profiled script in it.
    """
    sys.modules[module_name] = sys.modules['__main__']
    sys.modules[module_name].__name__ = module_name
    from imp import new_module
    # NOTE(review): the 'imp' module was removed in Python 3.12; this would
    # need types.ModuleType / importlib there -- confirm supported versions.
    m = new_module('__main__')
    sys.modules['__main__'] = m
    # Carry the loader over so introspection on the new __main__ still works.
    if hasattr(sys.modules[module_name], '__loader__'):
        setattr(m, '__loader__', getattr(sys.modules[module_name], '__loader__'))
    m.__file__ = file
    return m
class ProfDaemonThread(threading.Thread):
    """Base class for profiler background threads.

    The thread is marked as a daemon so it never blocks interpreter exit.
    Subclasses override OnRun() with the actual thread body; killReceived
    is a flag subclasses may poll to stop their loop.
    """

    def __init__(self):
        super(ProfDaemonThread, self).__init__()
        # Assigning 'daemon' replaces setDaemon(), which was deprecated in
        # Python 3.10 and removed in Python 3.13.
        self.daemon = True
        self.killReceived = False

    def run(self):
        # Delegate to the subclass hook.
        self.OnRun()

    def OnRun(self):
        """Thread body hook for subclasses; default does nothing."""
        pass
def generate_snapshot_filepath(basepath, local_temp_dir=False, extension='.pstat'):
    """Return the first non-existing '<base><counter><extension>' path.

    The first candidate has no counter; subsequent candidates append 1, 2, ...
    """
    base = get_snapshot_basepath(basepath, local_temp_dir)
    candidate = base + extension
    counter = 0
    while os.path.exists(candidate):
        counter += 1
        candidate = base + str(counter) + extension
    return candidate
def get_snapshot_basepath(basepath, local_temp_dir):
    """Resolve the snapshot base path, defaulting to 'snapshot'.

    When *local_temp_dir* is true, only the basename (with backslashes
    normalized) is kept and the path is relocated into the temp directory.
    """
    name = basepath if basepath is not None else 'snapshot'
    if local_temp_dir:
        name = os.path.join(tempfile.gettempdir(), os.path.basename(name.replace('\\', '/')))
    return name
def stats_to_response(stats, m):
    """Copy a profiler stats dict into *m*.ystats as protocol objects.

    *stats* maps (file, line, func_name) -> (cc, nc, tt, ct, callers), the
    entry layout produced by cProfile/pstats: cc = primitive call count,
    nc = total call count, tt = own (internal) time, ct = cumulative time.
    Does nothing when *stats* is None.
    """
    if stats is None:
        return
    ystats = Stats()
    ystats.func_stats = []
    m.ystats = ystats
    for func, stat in stats.items():
        path, line, func_name = func
        cc, nc, tt, ct, callers = stat
        func = Function()
        func_stat = FuncStat()
        func.func_stat = func_stat
        ystats.func_stats.append(func)
        func_stat.file = path
        func_stat.line = line
        func_stat.func_name = func_name
        func_stat.calls_count = nc
        func_stat.total_time = ct
        func_stat.own_time = tt
        func.callers = []
        # Each function also reports per-caller aggregates (4-tuples).
        for f, s in callers.items():
            caller_stat = FuncStat()
            func.callers.append(caller_stat)
            path, line, func_name = f
            cc, nc, tt, ct = s
            caller_stat.file = path
            caller_stat.line = line
            caller_stat.func_name = func_name
            caller_stat.calls_count = cc
            caller_stat.total_time = ct
            caller_stat.own_time = tt
    # m.validate()
| [
"traitanit.hua@ascendcorp.com"
] | traitanit.hua@ascendcorp.com |
b542ddb755239f61ca7234b388116278a6242a35 | c163e238d0a0818a613fe107471e7d824febd280 | /users/migrations/0001_initial.py | bfb13135d37b1cdf7a5aa6a7e2761d46ee7e1063 | [] | no_license | Thxpatoxx/brigido | c6302bee8e353e0b68c6f98b4b96f899b8de9d98 | 58a5c3bdda15ce2f5acc6400251faadef3552d8a | refs/heads/master | 2020-09-12T22:27:40.537696 | 2019-11-19T02:54:40 | 2019-11-19T02:54:40 | 222,579,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,464 | py | # Generated by Django 2.2.7 on 2019-11-19 00:19
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
# Auto-generated initial migration: creates the store models (Cliente,
# Proveedor, Venta, Producto, Pedido, Credito) and the CustomUser table.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # Customer master data.
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rut', models.CharField(max_length=20)),
                ('nombre', models.CharField(max_length=20)),
                ('apellido', models.CharField(max_length=20)),
                ('direccion', models.CharField(max_length=50)),
                ('telefono', models.IntegerField()),
                ('correo_electronico', models.CharField(max_length=20)),
            ],
        ),
        # Supplier master data.
        migrations.CreateModel(
            name='Proveedor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=20)),
                ('rut', models.CharField(max_length=20)),
                ('persona_contacto', models.CharField(max_length=20)),
                ('telefono', models.IntegerField()),
                ('direccion', models.CharField(max_length=50)),
                ('rubro', models.CharField(max_length=30)),
            ],
        ),
        # Sale records, linked to a customer.
        migrations.CreateModel(
            name='Venta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now)),
                ('monto_pago', models.IntegerField()),
                ('detalle_venta', models.TextField()),
                ('cliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Cliente')),
            ],
        ),
        # Inventory items, linked to a supplier.
        migrations.CreateModel(
            name='Producto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('descripcion', models.CharField(max_length=20)),
                ('precio', models.IntegerField()),
                ('marca', models.CharField(max_length=20)),
                ('existencia_actual', models.IntegerField()),
                ('cod_familia', models.IntegerField()),
                ('fecha_vencimiento', models.IntegerField()),
                ('proveedor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Proveedor')),
            ],
        ),
        # Purchase orders placed with a supplier.
        migrations.CreateModel(
            name='Pedido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now)),
                ('detalle_pedido', models.TextField()),
                ('proveedor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Proveedor')),
            ],
        ),
        # Credit (store-credit sale) records, linked to the debtor customer.
        migrations.CreateModel(
            name='Credito',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now)),
                ('monto_pago', models.IntegerField()),
                ('estado', models.CharField(choices=[('CANCELADA', 'CANCELADA'), ('PENDIENTE', 'PENDIENTE')], default='DISPONIBLE', max_length=80)),
                ('detalle_venta', models.TextField()),
                ('deudor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Cliente')),
            ],
        ),
        # Custom auth user model (mirrors django.contrib.auth's AbstractUser fields).
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"tu@ejemplo.com"
] | tu@ejemplo.com |
7a542dffb43bea50b4904ad35af6bd28f8eb9309 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/read_pyexcal_20201111165203.py | 37ff68e7a0fcf5143221bdcfbab09e0d81af48b8 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from openpyxl import load_workbook
# wb = load_workbook(filename = 'empty_book.xlsx')
# sheet_ranges = wb['range names']
# print(sheet_ranges['D18'].value)
# wb = load_workbook(filename= "empty_book.xlsx")
# sheet_ranges = wb['range names']
# print(sheet_ranges["D12"].value)
wb = load_workbook(filename = "empty_book.xlsx")
sheet_ranges = wb['Test3']
print(sheet_ranges['F1'].value)
for i in range(1,9):
print(sheet_ranges.cell(column=i,row=i).value | [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
313a803ee823c95518610dc7129331ac6979b773 | f200708b8e5a67074f6c805a736311e9b1637532 | /django_base/mysite3/bookstore/urls.py | 76d264928b1d3498fb92d6293e248ff485af607f | [] | no_license | vivid-ZLL/tedu | 48b78951eae07f5f0433ba85f7cc4e07cd76011d | 319daf56d88e92f69ee467e0ccf83c01367ed137 | refs/heads/master | 2021-04-23T22:40:11.391574 | 2020-01-25T11:26:56 | 2020-01-25T11:26:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^add_book", views.add_view),
url(r"^add_auth",views.auth_view),
]
| [
"283438692@qq.com"
] | 283438692@qq.com |
ee45c47184689072aa55d84bb11c3d5e6ca2b91b | 40a1ca8ddbdcd96a58703913f98b29b435a42745 | /antipodes average.py | 99638ba6aad58bc25581fe72ccc0789c1d462ff9 | [] | no_license | GaganDureja/Algorithm-practice | 3eaca2cfc03fcee3671b87b5efda1f950fd36212 | d40e08287754594d016801a093becc3f69f4bcc1 | refs/heads/master | 2023-05-06T11:58:35.471799 | 2021-06-01T03:49:58 | 2021-06-01T03:49:58 | 292,361,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #Link: https://edabit.com/challenge/oF8T7Apf7jfagC4fD
def antipodes_average(lst):
l = len(lst)//2
left = lst[:l]
right = lst[-l:][::-1]
return [(a+b)/2 for a,b in zip(left,right)]
print(antipodes_average([1, 2, 3, 4])) | [
"gagandureja675@gmail.com"
] | gagandureja675@gmail.com |
4463a41f0e2d05814c9a3f223ac97c262569bd68 | f8dd8d046100f1223713e047074f30c7ce5a59cd | /testing/epilogue/utils/cache_utils.py | 144e4fb20c3dfeaab976cb3fcc6260d51ccc9cdd | [] | no_license | dotslash227/98fitcortex | 57aed99270799eff68fdff62db0b8c1d9aabd4a2 | bd4002151e5def00c3dea1f5a1abfb06ba3e809a | refs/heads/master | 2022-12-17T00:51:20.302948 | 2019-02-27T13:54:22 | 2019-02-27T13:54:22 | 197,362,824 | 0 | 0 | null | 2022-12-08T00:02:42 | 2019-07-17T09:55:14 | HTML | UTF-8 | Python | false | false | 3,107 | py | import functools
import datetime
import types
from django.utils import timezone
from django.core.cache import cache
KEY_TEMPLATE = "%d_%s" #Template for cache "userid_key"
# Tags naming every cacheable per-user data set, grouped by feature area;
# the bare SLEEP/ACTIVITY/DIET tags denote a whole feature's key group.
modules = types.SimpleNamespace()
modules.SLEEP_LOGS = "sleep_logs"
modules.SLEEP_AGGREGATE = "sleep_aggregate"
modules.SLEEP = "sleep"
modules.ACTIVITY_LOGS = "activity_logs"
modules.ACTIVITY_AGGREGATE = "activity_aggregate"
modules.ACTIVITY = "activity"
modules.DIET_DASHBOARD_STRING = "diet_dashboard_string"
modules.DIET_PLAN = "diet_plan"
modules.DIET = "diet"
@functools.lru_cache()
def get_cache_key(user, module):
	'''
	Get Cache key for the user for the module specified
	'''
	# Dispatch table: module tag -> key builder. Single-key tags map to a
	# string-returning builder; SLEEP and DIET map to list-returning ones.
	# NOTE(review): modules.ACTIVITY has no entry here, and an unknown tag
	# makes .get return None, raising TypeError on the call below.
	data = {
		modules.SLEEP_LOGS : get_sleep_logs_cache_key,
		modules.SLEEP_AGGREGATE : get_sleep_aggregate_cache_key,
		modules.ACTIVITY_LOGS : get_activity_logs_cache_key,
		modules.ACTIVITY_AGGREGATE : get_activity_aggregate_cache_key,
		modules.DIET_DASHBOARD_STRING : get_diet_dashboard_string_cache_key,
		modules.SLEEP : get_sleep_cache_keys,
		modules.DIET_PLAN : get_dietplan_cache_key,
		modules.DIET : get_diet_cache_keys
	}
	return data.get(module)(user)
# Single-key builders: each formats KEY_TEMPLATE as "<user_id>_<module_tag>".
@functools.lru_cache()
def get_sleep_logs_cache_key(user):
	"""Cache key for the user's sleep logs."""
	return KEY_TEMPLATE%(
		user.id, modules.SLEEP_LOGS
	)
@functools.lru_cache()
def get_sleep_aggregate_cache_key(user):
	"""Cache key for the user's sleep aggregate."""
	return KEY_TEMPLATE%(
		user.id, modules.SLEEP_AGGREGATE
	)
@functools.lru_cache()
def get_activity_logs_cache_key(user):
	"""Cache key for the user's activity logs."""
	return KEY_TEMPLATE%(
		user.id, modules.ACTIVITY_LOGS
	)
@functools.lru_cache()
def get_activity_aggregate_cache_key(user):
	"""Cache key for the user's activity aggregate."""
	return KEY_TEMPLATE%(
		user.id, modules.ACTIVITY_AGGREGATE
	)
@functools.lru_cache()
def get_diet_dashboard_string_cache_key(user):
	"""Cache key for the user's diet dashboard string."""
	return KEY_TEMPLATE %(
		user.id, modules.DIET_DASHBOARD_STRING
	)
@functools.lru_cache()
def get_dietplan_cache_key(user):
	"""Cache key for the user's diet plan."""
	return KEY_TEMPLATE%(
		user.id, modules.DIET_PLAN
	)
# Group builders: return every key belonging to a feature area, so callers
# can invalidate the whole group at once.
@functools.lru_cache()
def get_sleep_cache_keys(user):
	'''
	Return all the cache keys for a user belonging to a particular module
	'''
	return [
		KEY_TEMPLATE%(user.id, e) for e in [modules.SLEEP_LOGS, modules.SLEEP_AGGREGATE]
	]
@functools.lru_cache()
def get_diet_cache_keys(user):
	'''
	Return all the keys for a user belonging to diet module
	'''
	return [
		KEY_TEMPLATE%(user.id, e) for e in [
			modules.DIET_DASHBOARD_STRING, modules.DIET_PLAN
		]
	]
def get_time_to_midnight(time = None):
	'''
	Return the seconds to coming midnight
	'''
	if not time:
		time = datetime.datetime.now( tz = timezone.get_current_timezone())
	# Midnight of the *current* day; (time - mid) is the time elapsed today.
	mid = time.replace(hour = 0, minute = 0, second = 0)
	# NOTE(review): microseconds are not zeroed by replace() and .seconds
	# ignores days/microseconds, so the result can be off by <1s; at exactly
	# midnight this returns 86400 rather than 0 -- confirm intended.
	return 86400 - (time - mid).seconds
def invalidate_cache(user, module):
	'''
	Invalidate Caches of a module for a user
	'''
	# Group modules yield a list of keys, single modules a plain string.
	key = get_cache_key(user, module)
	if isinstance(key, str):
		return cache.delete(key)
	if isinstance(key, list):
		return cache.delete_many(key)
	return None
| [
"shikhar.chauhan@live.com"
] | shikhar.chauhan@live.com |
25c41d6ca45c55f09b718638c22bb3d7861e20cd | 66a530b297725b1a2d1c95f95883145c04614ae1 | /0x08-python-more_classes/6-rectangle.py | fa82234524d1f9de24bfbba4566ad040c31042c0 | [] | no_license | Yagomfh/holbertonschool-higher_level_programming | 4e6f28186eae18eaba60017fe49ac446a02cbdc5 | 1d15597a6040a8ee15b08447c478d0a2e79b5854 | refs/heads/main | 2023-04-23T18:23:28.096644 | 2021-05-18T08:12:27 | 2021-05-18T08:12:27 | 319,253,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | #!/usr/bin/python3
"""This module defines a rectangle"""
class Rectangle():
    """Defines a rectangle by its width and height.

    Class attribute:
        number_of_instances (int): count of Rectangle objects currently alive.
    """

    number_of_instances = 0

    def __init__(self, width=0, height=0):
        """Initialise a rectangle.

        Args:
            width (int): width of the rectangle
            height (int): height of the rectangle
        """
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    def __str__(self):
        """Return the rectangle drawn with '#' characters.

        Fix: an empty string is returned when width or height is 0 (the
        previous version emitted stray newlines for width == 0).
        """
        if self.width == 0 or self.height == 0:
            return ""
        return "\n".join("#" * self.width for _ in range(self.height))

    def __repr__(self):
        """Return a string representation able to recreate the rectangle."""
        return 'Rectangle(%d, %d)' % (self.width, self.height)

    def __del__(self):
        """Decrement the live-instance counter and print a farewell."""
        Rectangle.number_of_instances -= 1
        print("Bye rectangle...")

    def area(self):
        """Return the area of the rectangle."""
        return self.width * self.height

    def perimeter(self):
        """Return the perimeter, or 0 when either dimension is 0."""
        if self.width == 0 or self.height == 0:
            return 0
        return (self.width * 2) + (self.height * 2)

    @property
    def width(self):
        """int: horizontal size; must be an integer >= 0."""
        return self.__width

    @width.setter
    def width(self, value):
        if not isinstance(value, int):
            raise TypeError('width must be an integer')
        if value < 0:
            raise ValueError('width must be >= 0')
        self.__width = value

    @property
    def height(self):
        """int: vertical size; must be an integer >= 0."""
        return self.__height

    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError('height must be an integer')
        if value < 0:
            raise ValueError('height must be >= 0')
        self.__height = value
| [
"yagomfh@gmail.com"
] | yagomfh@gmail.com |
e26cb78ff75c0518fef95e5020ff201bdd5bfd96 | afd7207ec79198ed8b515c66a4ff951692fc5756 | /Backend/users/views.py | 005de0eee1fc2083a3fa4f7aa1f26b3fb1605619 | [] | no_license | mdarifulislamroni21/Backend-project | 469e58ee1c8395a56f45434efc238eccd2adea77 | 4a999c7cb520c811fb0a051015822944f5d8479d | refs/heads/master | 2023-06-24T19:55:57.562157 | 2021-07-23T08:39:37 | 2021-07-23T08:39:37 | 388,731,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | from django.shortcuts import render
from form.models import main_user,buyproduct
from form.form import main_users,buyproducts
# Create your views here.
def s_user(request):
    """List every main_user ordered by user_name."""
    load_main_user=main_user.objects.order_by('user_name')
    diction={'main_user_data':load_main_user}
    return render(request,'pages/main_page/view_user.html',context=diction)
def view_user(request,user_id):
    """Show one user's details plus the products that user has bought."""
    load_main_user=main_user.objects.get(pk=user_id)
    load_buyproduct=buyproduct.objects.filter(user_name=user_id)
    diction={'main_user_data':load_main_user,'buyproduct_data':load_buyproduct}
    return render(request,'pages/argument/view_user.html',context=diction)
def edid_user(request,user_ids):
    """Edit form for a user; saves on valid POST and flashes a success note."""
    load_main_user=main_user.objects.get(pk=user_ids)
    load_main_form=main_users(instance=load_main_user)
    diction={'edid_user_form':load_main_form}
    if request.method == 'POST':
        # Re-bind the form with the submitted data against the same instance.
        submit_data=main_users(request.POST,instance=load_main_user)
        if submit_data.is_valid():
            submit_data.save(commit=True)
            diction.update({'success':'Thank You! Your User Data Changed Successfully'})
    return render(request,'pages/argument/edid_user.html',context=diction)
#######
def edid_product(request,product_id):
    """Edit form for a purchased product; mirrors edid_user's flow."""
    load_product=buyproduct.objects.get(pk=product_id)
    load_product_form=buyproducts(instance=load_product)
    diction={'edid_product':load_product_form}
    if request.method == 'POST':
        load_product_data=buyproducts(request.POST,instance=load_product)
        if load_product_data.is_valid():
            load_product_data.save(commit=True)
            diction.update({'success':'Thank You! Your Product Data Changed Successfully'})
    return render(request,'pages/argument/edid_product.html',context=diction)
| [
"mdarifulislamroni21@gmail.com"
] | mdarifulislamroni21@gmail.com |
e8dd6d093db04e14c283f3fd1568fca70c9c39bb | b8b26feac86b66b0b534996cf9c3fbf7ec660240 | /codejam/18/0q/b-trouble-sort.py | 1eb82f601040fa07b503be04eeaee11988becb31 | [
"MIT"
] | permissive | neizod/problems | 775fffe32166c5b124d0e4c973b8d0aba7f3900b | 180aaf7d0ecfc3d0dd5f1d4345a7a4d83b1b884a | refs/heads/master | 2021-07-08T12:30:31.100320 | 2021-05-26T09:34:19 | 2021-05-26T09:34:19 | 6,245,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python3
from itertools import count
def trouble_sort(ls):
    """'Trouble sort' *ls* in place: the even- and odd-indexed subsequences
    are each sorted independently. Returns the same list."""
    evens = sorted(ls[0::2])
    odds = sorted(ls[1::2])
    ls[0::2] = evens
    ls[1::2] = odds
    return ls


def check_trouble_sort(ls):
    """Trouble-sort *ls* and return the first index i with ls[i] > ls[i+1],
    or -1 when the result is fully sorted."""
    ls = trouble_sort(ls)
    it = iter(ls)
    next(it)
    for index, (left, right) in enumerate(zip(ls, it)):
        if left > right:
            return index
    return -1
def main():
    """Read Code Jam input from stdin and print one verdict line per case."""
    cases = int(input())
    for case_number in range(1, cases + 1):
        input()  # length line, unused: the values line is split directly
        values = [int(token) for token in input().split()]
        bad_index = check_trouble_sort(values)
        verdict = 'OK' if bad_index == -1 else bad_index
        print('Case #{}: {}'.format(case_number, verdict))


if __name__ == '__main__':
    main()
| [
"neizod@gmail.com"
] | neizod@gmail.com |
ba38f0f9fe91ce95584b1cdebbee4b4300ddff82 | 31c75012d7b1e86bc12c0b3bcc811e6b6acb5e6f | /tests/test_utils_pcap_utils.py | e1c9248d6c474a433d0df011161937ba1ffd3d10 | [
"Apache-2.0"
] | permissive | lostminty/NetML-arff | 46b3228ef233e00141894f34245acbb896a4f9fa | 11e933f8772f8502f5b45acf226eaab08abfb090 | refs/heads/master | 2023-04-07T10:42:49.202946 | 2020-01-06T21:42:49 | 2020-01-06T21:42:49 | 228,263,765 | 0 | 0 | Apache-2.0 | 2023-03-31T14:49:53 | 2019-12-15T22:45:41 | Python | UTF-8 | Python | false | false | 2,268 | py | from networkml.parsers.pcap.pcap_utils import extract_macs
from networkml.parsers.pcap.pcap_utils import extract_protocol
from networkml.parsers.pcap.pcap_utils import extract_session_size
from networkml.parsers.pcap.pcap_utils import is_external
from networkml.parsers.pcap.pcap_utils import is_private
from networkml.parsers.pcap.pcap_utils import is_protocol
from networkml.parsers.pcap.pcap_utils import packet_size
def test_extract_macs():
source, dest = extract_macs('123456789ABCDEF123456780')
assert dest == '12:34:56:78:9A:BC'
assert source == 'DE:F1:23:45:67:80'
def test_is_private():
private = is_private('10.10.10.10')
assert private == True
private = is_private('1.2.3.4')
assert private == False
private = is_private('192.168.1.1')
assert private == True
private = is_private('192.169.1.1')
assert private == False
private = is_private('172.16.4.4')
assert private == True
private = is_private('172.15.1.3')
assert private == False
private = is_private('172.32.3.1')
assert private == False
private = is_private('2014::1')
assert private == False
private = is_private('fe80::1')
assert private == True
private = is_private('fd13::13')
assert private == True
private = is_private('asdf')
assert private == False
def test_packet_size():
packet = ['0', '1234567890123456789012345678901234567890']
size = packet_size(packet)
assert size == 13398
def test_extract_session_size():
session = [['0', '1234567890123456789012345678901234567890']]
session_size = extract_session_size(session)
assert session_size == 13398
def test_extract_protocol():
session = [['0', '12345678901234567890123456789012345678901234567890']]
protocol = extract_protocol(session)
assert protocol == '78'
def test_is_external():
external = is_external('10.10.10.10', '192.168.0.1')
assert external == False
external = is_external('10.10.10.10', '1.2.3.4')
assert external == True
def test_is_protocol():
    """Protocol matching is string-typed: '78' matches, the integer 78 does not."""
    session = [['0', '12345678901234567890123456789012345678901234567890']]
    assert is_protocol(session, '78') == True
    assert is_protocol(session, 78) == False
| [
"clewis@iqt.org"
] | clewis@iqt.org |
23c31ae42243e1603913f6239aa8310c2ae362a8 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Twitter/Tweets/StatusesDestroy.py | 07e9c00de2b583db36cbd869f08bd139b4004dfe | [
"MIT",
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 4,450 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# StatusesDestroy
# Deletes a specified status.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class StatusesDestroy(Choreography):
    # Temboo-generated binding for the Twitter "statuses/destroy" Choreo;
    # the overrides below only wire in the concrete input/result/execution types.

    def __init__(self, temboo_session):
        """
        Create a new instance of the StatusesDestroy Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(StatusesDestroy, self).__init__(temboo_session, '/Library/Twitter/Tweets/StatusesDestroy')

    def new_input_set(self):
        # Factory for this Choreo's input container.
        return StatusesDestroyInputSet()

    def _make_result_set(self, result, path):
        # Internal factory used by the Temboo framework.
        return StatusesDestroyResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Internal factory used by the Temboo framework for async execution.
        return StatusesDestroyChoreographyExecution(session, exec_id, path)
class StatusesDestroyInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the StatusesDestroy
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter simply forwards a named input to the framework; required
    # inputs are the OAuth credential quadruple plus the tweet ID.

    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
        """
        super(StatusesDestroyInputSet, self)._set_input('AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
        """
        super(StatusesDestroyInputSet, self)._set_input('AccessToken', value)

    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
        """
        super(StatusesDestroyInputSet, self)._set_input('ConsumerKey', value)

    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
        """
        super(StatusesDestroyInputSet, self)._set_input('ConsumerSecret', value)

    def set_ID(self, value):
        """
        Set the value of the ID input for this Choreo. ((required, string) The numerical ID of the status to delete.)
        """
        super(StatusesDestroyInputSet, self)._set_input('ID', value)

    def set_TrimUser(self, value):
        """
        Set the value of the TrimUser input for this Choreo. ((optional, boolean) When set to true, each tweet returned in a timeline will include a user object including only the status authors numerical ID.)
        """
        super(StatusesDestroyInputSet, self)._set_input('TrimUser', value)
class StatusesDestroyResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the StatusesDestroy Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parse a JSON document into Python objects. NOTE: the parameter name
        # shadows the builtin `str`; kept as-is for the generated Temboo API.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
        """
        return self._output.get('Response', None)
class StatusesDestroyChoreographyExecution(ChoreographyExecution):
    # Execution handle returned when the StatusesDestroy Choreo runs asynchronously.

    def _make_result_set(self, response, path):
        # Internal factory used by the Temboo framework.
        return StatusesDestroyResultSet(response, path)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
import time

from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from TestSuite.TestSuiteHelper import ElementFinder

# Appium server endpoint and the device/app capabilities for this test case.
SERVER_URL = 'http://localhost:4723/wd/hub'
CAPABILITIES = {
    'automationName': 'UiAutomator2',
    'deviceName': 'Moto G (5)',
    'platformName': 'Android',
    'app': 'C:/Users/ze0396/Desktop/AGTGA/APKS/LifeHack.apk',
    'autoGrantPermissions': 'true',
    'appWaitActivity': '*.*',
    'fullreset': 'false',
    'noReset': 'true',
}


def _tap(driver, x, y, settle=2):
    """Tap the widget at screen coordinates (x, y), then give the UI time to settle."""
    ElementFinder(driver, x, y).click()
    if settle:
        time.sleep(settle)


driver = webdriver.Remote(command_executor=SERVER_URL, desired_capabilities=CAPABILITIES)
time.sleep(4)  # the original slept 2s twice here while the app launched

_tap(driver, 66, 1260)
_tap(driver, 198, 370)
_tap(driver, 918, 87, settle=0)  # the original performed no wait after the last tap

driver.press_keycode(3)  # Android KEYCODE_HOME
driver.close_app()
driver.quit()
print('TestCase finished successfully')
"32592901+Georgesarkis@users.noreply.github.com"
] | 32592901+Georgesarkis@users.noreply.github.com |
28c55f04edb78696643ed582fe5f5af8380b09c7 | c68b99bf1671d1fb5a1a5a0d6df7bb164dd1d20d | /Medium/394-DecodeString.py | 4af7e17cc5a884ed7770314baf35d36aaface6e3 | [] | no_license | r50206v/Leetcode-Practice | 8db9333e2e3d2a335f439d7e9e57e8c36b69ae6d | f9302e93c441f06cc14949605da20978c4289202 | refs/heads/master | 2022-05-17T18:09:48.857263 | 2022-04-27T01:02:12 | 2022-04-27T01:02:12 | 192,258,017 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | '''
Iteration + 2 Stack
Time: O(N)
Space: O(m + n)
m: the number of letters in s
n: the number of numbers in s
'''
class Solution:
def decodeString(self, s: str) -> str:
result = ""
strStack = []
countStack = []
base = ""
for index in range(len(s)):
if s[index].isdigit():
base += s[index]
elif s[index] == '[':
strStack.append(result)
result = ""
countStack.append(int(base))
base = ""
elif s[index] == ']':
print(strStack, countStack, result)
# very important !!
result = strStack.pop() + (result * countStack.pop())
else:
result += s[index]
return result
| [
"r50206v@gmail.com"
] | r50206v@gmail.com |
863f4da890f4e1a30367db93383f73c09e951f1c | 9ac99a99dc8f79f52fbbe3e8a5b311b518fe45d9 | /apps/lms/api/services/LMS_SurveyTemplateService.py | 6c4bd27a38f6d95a6418cab80b7915b8834b249a | [] | no_license | nttlong/quicky-01 | eb61620e01f04909d564244c46a03ca2b69dfecc | 0f5610aa7027429bdd9ca9b45899a472c372c6cc | refs/heads/master | 2020-03-25T17:45:31.633347 | 2018-11-27T15:02:30 | 2018-11-27T15:02:30 | 143,994,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | from bson import ObjectId
from .. import models
import logging
import threading
from .. import common
from ..views.views import SYS_VW_ValueList
import qmongo
def get_list(args):
    """Return one page of survey templates joined with author and type info.

    args['data'] may contain: pageIndex, pageSize, sort, and a `where`
    dict with an optional `survey_type` filter.
    """
    params = args['data']

    # Normalize paging: missing or None values fall back to page 0 / size 20.
    # (The old code used swapped literal defaults - pageSize 0, pageIndex 20 -
    # so an absent key produced an empty page starting at index 20.)
    page_index = params.get('pageIndex')
    page_index = 0 if page_index is None else page_index
    page_size = params.get('pageSize')
    page_size = 20 if page_size is None else page_size

    sort = params.get('sort')           # old default of 20 was not a valid sort spec
    where = params.get('where') or {}   # guard: `where` may be missing entirely

    # Bug fix: `aggregate` was referenced without being called (compare
    # get_list_value_and_caption, which uses `.aggregate()`).
    ret = qmongo.models.LMS_SurveyTemplate.aggregate()

    # `has_key` is Python-2 only; `.get(...) is not None` works everywhere.
    if where.get("survey_type") is not None:
        ret.match("survey_type == {0}", where["survey_type"])

    ret.left_join(qmongo.models.auth_user_info, "created_by", "username", "uc")
    ret.left_join(qmongo.models.auth_user_info, "modified_by", "username", "um")
    ret.left_join(qmongo.views.SYS_VW_ValueList, "survey_type", "value", "val")
    ret.match("val.list_name == {0}", "LMS_LSurveyType")

    if sort is not None:
        ret.sort(sort)

    ret.project(
        survey_id=1,
        survey_type=1,
        temp_survey_1=1,
        temp_survey_2=1,
        order=1,
        created_by="uc.login_account",
        created_on="created_on",
        modified_on="switch(case(modified_on!='',modified_on),'')",
        modified_by="switch(case(modified_by!='',um.login_account),'')",
        active=1,
        question_list=1,
        survey_type_name="val.caption"
    )
    return ret.get_page(page_index, page_size)
def insert_survey_template(data):
    """Persist a new survey template document and return the driver's result."""
    return qmongo.models.LMS_SurveyTemplate.insert(data)
def update_survey_template(data):
    """Update the template matching data['survey_id'] with the remaining fields.

    Note: removes 'survey_id' from the caller's dict (the argument is mutated),
    matching the original behaviour.
    """
    survey_id = data.pop('survey_id')
    return qmongo.models.LMS_SurveyTemplate.update(
        data,
        "survey_id == {0}",
        survey_id
    )
def delete_survey_template(data):
    """Delete every template whose survey_id appears in the given rows."""
    ids = [row['survey_id'] for row in data]
    return qmongo.models.LMS_SurveyTemplate.delete("survey_id in @id", id=ids)
def get_list_value_and_caption(data):
    """Return every template's survey_id and first template text (no paging)."""
    query = qmongo.models.LMS_SurveyTemplate.aggregate().project(
        _id=0,
        survey_id=1,
        temp_survey_1=1,
    )
    return query.get_list()
"zugeliang2000@gmail.com"
] | zugeliang2000@gmail.com |
#!/usr/bin/env python
"""Rewrite each sub-repository's git origin, replacing the old host with the new one."""

import os

OLD_HOST = '172.31.103.221'
NEW_HOST = 'www.qxxlgogs.com'

# Bug fix: snapshot the directory list eagerly. filter() is lazy in Python 3,
# so the original evaluated os.path.isdir() *after* os.chdir() had already
# moved the working directory, testing entries against the wrong paths.
folders = [entry for entry in os.listdir() if os.path.isdir(entry)]

for folder in folders:
    os.chdir(folder)
    try:
        process = os.popen('git remote -v')
        try:
            # `git remote -v` prints "origin <url> (fetch)"; take the URL token.
            remote_url = process.read().split()[1]
        finally:
            process.close()  # the original leaked the popen handle
        remote_url = remote_url.replace(OLD_HOST, NEW_HOST)
        os.system('git remote remove origin')
        os.system('git remote add origin %s' % remote_url)
    except Exception:
        # Best-effort per repo: folders without a usable remote are skipped.
        pass
    os.chdir('..')
"littlecaptain@foxmail.com"
] | littlecaptain@foxmail.com |
34720c2c7fb2d14e18209377a6429e1ffb825f7b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_getting.py | 12500bcd909462b932ec388267fef20736df99a6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py |
from xai.brain.wordbase.verbs._get import _GET
#calss header
class _GETTING(_GET, ):
    """Lexical entry for the verb form "getting"; behaviour is inherited from _GET."""

    def __init__(self):
        _GET.__init__(self)
        self.name = "GETTING"   # surface form of this entry
        self.specie = 'verbs'   # word class
        self.basic = "get"      # lemma / base form
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0d5933e334cdf1f8a9aa2c79d39b3949cd95af95 | db303c68682dfd18965a04026ff14e15c1ba6120 | /ch09/ans89.py | 48bb019536f5610904e0a23682919dd0e224fdf9 | [] | no_license | Toshiyana/nlp100v2020 | 1a89f164de0c720da6d42c19b3fa60f8013d662c | 37d4d208d5d527d163356793b630f36eb7595779 | refs/heads/master | 2023-07-15T15:01:28.454515 | 2021-08-21T13:20:03 | 2021-08-21T13:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | from tqdm import tqdm
import torch
from torch import optim
from torchtext import data
from transformers import BertForSequenceClassification
def eval_net(model, data_loader, device='cpu'):
model.eval()
ys = []
ypreds = []
for x, y, _ in data_loader:
with torch.no_grad():
loss, logit = model(input_ids=x, labels=y)
_, y_pred = torch.max(logit, 1)
ys.append(y)
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
print(f'test acc: {(ys == ypreds).sum().item() / len(ys)}')
return
# --- Data pipeline -----------------------------------------------------------
# TEXT: tokenized, lower-cased input; LABELS: pre-encoded integer class ids.
TEXT = data.Field(sequential=True, lower=True, batch_first=True)
LABELS = data.Field(sequential=False, batch_first=True, use_vocab=False)
train, val, test = data.TabularDataset.splits(
    path='ch06', train='train2.txt',
    validation='valid2.txt', test='test2.txt', format='tsv',
    fields=[('TEXT', TEXT), ('LABEL', LABELS)])
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
train_iter, val_iter, test_iter = data.Iterator.splits(
    (train, val, test), batch_sizes=(64, 64, 64), device=device, repeat=False, sort=False)
TEXT.build_vocab(train, min_freq=2)  # drop tokens seen only once
LABELS.build_vocab(train)
# --- Model & optimizer -------------------------------------------------------
# 4-way classifier fine-tuned from the pretrained uncased BERT base model.
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4)
model = model.to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)
# --- Training loop: 10 epochs, evaluating on the test split after each epoch -
for epoch in tqdm(range(10)):
    losses = []
    model.train()
    for batch in train_iter:
        x, y = batch.TEXT, batch.LABEL
        loss, logit = model(input_ids=x, labels=y)
        model.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # training-batch predictions (currently unused beyond this line)
        _, y_pred_train = torch.max(logit, 1)
    eval_net(model, test_iter, device)
| [
"upura0@gmail.com"
] | upura0@gmail.com |
ef72c65accec37d5eb0fb33a531bfa3395a4a130 | 74e8b6ad9fa20b6caa61f7ced4825442f671f506 | /curator/__init__.py | a9b39d35f54f3dab18f6bc3ed85d95b68be2e251 | [
"Apache-2.0"
] | permissive | agent001/curator | 2297191ea05903cdca5f2510b6423cf333961ef5 | 86fd4af224bbb1eb7993b754e47cd67e32f10d7a | refs/heads/master | 2021-01-18T23:06:19.334462 | 2016-07-14T16:23:34 | 2016-07-14T16:23:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from .exceptions import *
from .settings import *
from .logtools import *
from .utils import *
from .indexlist import IndexList
from .snapshotlist import SnapshotList
from .actions import *
from .cli import *
from .repomgrcli import *
| [
"aaron@mildensteins.com"
] | aaron@mildensteins.com |
12e4bdeb469eebeac17b5a318bd159f07f6041d2 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_6404600001200128_1/Python/DaniTunes/p1.py | cffa3f03a45fe53db87b1c794440401ea7e6dd94 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | #!/bin/env python
# google code jam 2015 problem 1
def solve(ms):
    """Return (a, b) for one contest case.

    a is the total of all positive one-step drops between consecutive values;
    b caps every value except the last at the largest single drop and sums them.
    """
    drops = [ms[k - 1] - ms[k] for k in range(1, len(ms))]
    biggest_drop = max([0] + drops)
    total_drop = sum(d for d in drops if d > 0)
    capped = sum(min(v, biggest_drop) for v in ms[:-1])
    return total_drop, capped
# Driver (Python 2 syntax: raw_input / print statement): read T cases from
# stdin and emit one "Case #i: a b" line per case.
tests = int(raw_input())
for k in range(tests):
    n = int(raw_input())  # declared count for the next line (not re-validated)
    ms = [int(x) for x in raw_input().split()]
    #print n, len(ms), ms
    a, b = solve(ms)
    print "Case #%d: %d %d" % (k+1, a, b)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
# Build script for the `rank` Cython extension: compiles _rank.pyx together
# with the C++ implementation in rank.cpp against the NumPy headers.
from distutils.core import setup, Extension
import numpy
from Cython.Distutils import build_ext

setup(
    # use Cython's build_ext so .pyx sources are cythonized during the build
    cmdclass={'build_ext': build_ext},
    ext_modules=[Extension("rank",
                           sources=["_rank.pyx", "rank.cpp"],
                           language="c++",  # rank.cpp requires a C++ toolchain
                           include_dirs=[numpy.get_include()])],
)
| [
"zibo_zhao@berkeley.edu"
] | zibo_zhao@berkeley.edu |
def raiz_quadrada(x):
    """Return the integer square root of a perfect square x >= 0.

    Uses the odd-number identity 1 + 3 + 5 + ... + (2k-1) = k**2: the number
    of successive odd numbers that can be subtracted before reaching zero is
    the square root.  For a non-perfect square this yields ceil(sqrt(x)).

    The original version never updated x (infinite loop for x >= 0) and used
    `num==x-i` (a comparison) where an assignment was intended, so it could
    never produce a result; the value is now returned instead of printed.
    """
    impar = 1   # current odd number to subtract
    raiz = 0    # how many odd numbers were subtracted so far
    while x > 0:
        x -= impar
        impar += 2
        raiz += 1
    return raiz
| [
"you@example.com"
] | you@example.com |
c07b661a1ea80740df76e137477763e6ae83bce8 | 27c12a37b58548c1b3ace6184d452cdcf4128168 | /src/neurounits/visitors/__init__.py | 7af18da334fdbb01db0248858e6ceb7c81b0cbb6 | [] | no_license | stevemarsh/NeuroUnits | 48ca63b42ee74bf3a3d3107aa86e50d1c9aa4bc2 | 350663588f7d6f4cf85c1472debc1d2312b877e8 | refs/heads/master | 2020-12-25T16:13:56.983904 | 2013-03-29T12:19:32 | 2013-03-29T12:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
from .bases.base_visitor import ASTVisitorBase
from .bases.base_actioner_default import ASTActionerDefault
from .bases.base_actioner import SingleVisitPredicate
| [
"mikehulluk@googlemail.com"
] | mikehulluk@googlemail.com |
a43b24f70964cf35080f97edde3f21e4610f166d | 21a341a52b0305c70def05a141243cc46160cd0d | /esp/main.py | 7bd5f8cf81395f548c29257f1a9a6c5b5ce8ff85 | [
"MIT"
] | permissive | BPI-STEAM/mpy-flasher | 980c9084fec9108a316f70a965c7c8ee032da385 | 03a1be35ae2e0aaafb71ea6fb7adcfce823e7717 | refs/heads/master | 2020-05-25T03:38:08.464466 | 2019-05-28T05:14:02 | 2019-05-28T05:14:02 | 187,608,277 | 5 | 4 | MIT | 2019-11-02T21:20:18 | 2019-05-20T09:19:44 | Python | UTF-8 | Python | false | false | 4,127 | py | #!/usr/bin/env python
# coding: utf-8
'''
@File :erase.py
@Author :youxinweizhi
@Date :2019/3/28
@Github :https://github.com/youxinweizhi
'''
import esp.control
import sys
import time
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from esp.mainWindow import Ui_Form
from PyQt5.QtCore import QTimer
import threading
mutex = threading.Lock()
class MyWindow(QMainWindow, Ui_Form):
    """Main window of the ESP firmware flasher.

    Wires the Qt Designer form (Ui_Form) to esp.control: a timer keeps the
    serial-port list fresh, and the flash button launches erase / flash /
    advanced flash on a worker thread guarded by the module-level `mutex`.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.pushButton.clicked.connect(self.main)
        self.checkBox_3.stateChanged.connect(self.disable_op)
        # self.comboBox.highlighted.connect(self.get_com)
        # self.comboBox.activated.connect(self.get_com)
        self.setFixedSize(self.width(), self.height())  # fix the window size
        # self.setWindowIcon(QIcon('./image/icon.ico'))
        self.statusBar().showMessage("Put the firmware in the same directory can be burning")
        self.list_serial = []  # cached port list, compared against fresh scans
        self.get_com()
        self.get_bin()
        self.get_borad()
        self.get_config()
        # poll for plugged/unplugged serial ports every 3 seconds
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.get_com)
        self.timer.start(3000)

    def get_config(self):
        """Apply saved options; when 'auto' is set, start flashing immediately."""
        import esp.config
        config = esp.config.get_confg_dict()
        if 'erase' in config:
            self.checkBox.setCheckState(config['erase'] == 'True')
        if 'advanced' in config:
            self.checkBox_3.setCheckState(config['advanced'] == 'True')
        if 'auto' in config and config['auto'] == 'True':
            self.main()

    def disable_op(self):
        """Grey out firmware/board/erase widgets while 'advanced' is checked."""
        if self.checkBox_3.isChecked():
            self.comboBox_2.setDisabled(True)
            self.comboBox_3.setDisabled(True)
            self.checkBox.setDisabled(True)
        else:
            self.comboBox_2.setDisabled(False)
            self.comboBox_3.setDisabled(False)
            self.checkBox.setDisabled(False)

    def get_com(self):
        """Refresh the serial-port combo box when the number of ports changes."""
        tmp = esp.control.list_serial()
        # print(tmp)
        if len(tmp) != len(self.list_serial):
            self.list_serial = tmp
            self.comboBox.clear()
            self.comboBox.addItems(tmp)
            # print(self.comboBox.count())

    def check_com(self):
        """Return True when a plausible serial port is selected; warn otherwise."""
        result = len(self.com) > 1  # and open(com) test
        if result is False:
            self.statusBar().showMessage('The selected serial port is not exist')
        return result

    def get_bin(self):
        """Populate the firmware combo box with the available .bin files."""
        self.comboBox_2.addItems(esp.control.list_bin())

    def get_borad(self):
        """Populate the board combo box (misspelled name kept: __init__ calls it)."""
        self.comboBox_3.addItems(esp.control.list_board())

    def erase(self):
        """Erase the flash first, then continue with a normal flash."""
        self.statusBar().showMessage('Start to erase firmware...')
        self.statusBar().showMessage(esp.control.flash_erase(self.com))
        time.sleep(1)
        self.flash()

    def flash(self):
        """Flash the selected firmware; always re-enable the button afterwards."""
        self.statusBar().showMessage('Start to flash firmware...')
        try:
            self.statusBar().showMessage(esp.control.flash_bin(self.board, self.com, self.firmware))
        finally:
            self.pushButton.setDisabled(False)
            self.statusBar().showMessage('Ready To GO')

    def flash_adv(self):
        """Run the advanced flashing routine; always re-enable the button."""
        self.statusBar().showMessage('Start to advanced flash...')
        try:
            import esp.advanced
            self.statusBar().showMessage(esp.advanced.flash_bin(self.com))
        finally:
            self.pushButton.setDisabled(False)
            self.statusBar().showMessage('Ready To GO')

    def main(self):
        """Read the UI selections and launch the chosen task on a worker thread."""
        self.com = self.comboBox.currentText().split(" - ", 1)[0]
        self.firmware = self.comboBox_2.currentText()
        if self.check_com():
            self.board = self.comboBox_3.currentText()
            print(self.com,self.firmware,self.board)
            self.pushButton.setDisabled(True)
            with mutex:
                task = self.flash_adv if self.checkBox_3.isChecked() else self.erase if self.checkBox.isChecked() else self.flash
                threading.Thread(target=task).start()
def run():
    """Create the Qt application, show the flasher window and block until exit."""
    app = QApplication(sys.argv)
    myWin = MyWindow()
    myWin.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
run() | [
"junhuanchen@qq.com"
] | junhuanchen@qq.com |
9777359834936fdc8271ec94988ad65e6584a4ce | 031dbb2a3ea47a0483db310db9f98796cc83c500 | /658_Find K Closest Elements.py | f6a1201e4d36fb08d5c4bbdcd397c0ad6874f1d6 | [] | no_license | Shwan-Yu/Data_Structures_and_Algorithms | 429fb127983e32931f2168f44ef1484c1cc4c87f | 9126c2089e41d4d7fd3a204115eba2b5074076ad | refs/heads/master | 2020-03-27T11:46:59.947303 | 2019-08-23T15:15:21 | 2019-08-23T15:15:21 | 146,507,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
# O(lg(n-k)) keep track of the interval
l, r = 0, len(arr)-k
while l < r:
mid = (l+r)//2
if x-arr[mid] > arr[mid+k]-x:
l = mid+1
else:
r = mid
return arr[l:l+k]
# passed, write myself, O(lg(n)+k)
# def bs(arr, x):
# l, r = 0, len(arr)-1
# while l <= r:
# mid = (l+r)//2
# val = arr[mid]
# if val == x: return mid
# elif val < x: l = mid+1
# else: r = mid-1
# return l
# pos = bs(arr, x)
# l, r = pos-1, pos
# while k > 0:
# if l < 0:
# r += k
# break
# elif r > len(arr)-1:
# l -= k
# break
# if abs(arr[l]-x) <= abs(arr[r]-x): l-=1
# else: r += 1
# k -= 1
# return arr[l+1:r]
| [
"noreply@github.com"
] | Shwan-Yu.noreply@github.com |
a0cb2913c3bfaa4b5be6c7b4e94e17a951198076 | 9ef548f1e5457a18fe56f5c38d84cb835f98c7c3 | /main/seq2seq.py | 073d6fac5e16077011a0df330a49fa70b2ae6a9c | [] | no_license | sarikayamehmet/txt-summarization | 00c799535a5e1513fa35ac476eb094e8fbb16042 | 132340e2ba75497793686841b819abb35c47d76d | refs/heads/master | 2020-11-25T09:17:41.299564 | 2019-05-27T20:53:16 | 2019-05-27T20:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,767 | py | import torch.nn as nn
import torch.nn.functional as f
from main.encoder import Encoder
from main.reduce_encoder import ReduceEncoder
from main.decoder import Decoder
from main.encoder_attention import *
from main.decoder_attention import DecoderAttention
from main.common.vocab import *
from main.common.common import *
class Seq2Seq(nn.Module):
    """Pointer-generator sequence-to-sequence model with intra-temporal
    encoder attention and intra-decoder attention.

    Shape legend used below: B = batch, L = source length, T = decoded steps
    so far, EH = encoder hidden size, DH = decoder hidden size, V = vocab
    size, OOV = number of out-of-vocabulary source words in the batch.
    """

    def __init__(self, vocab: Vocab, embedding=None):
        super(Seq2Seq, self).__init__()

        self.emb_size = conf('emb-size')
        self.enc_hidden_size = conf('enc-hidden-size')
        self.dec_hidden_size = conf('dec-hidden-size')
        self.vocab_size = conf('vocab-size')
        self.max_dec_steps = conf('max-dec-steps')
        self.share_dec_weight = conf('share-dec-weight')
        self.pointer_generator = conf('pointer-generator')

        self.vocab = vocab

        # An external embedding may be injected; otherwise build one with the
        # padding token zeroed out.
        self.embedding = embedding
        if self.embedding is None:
            self.embedding = nn.Embedding(self.vocab.size(), self.emb_size, padding_idx=TK_PADDING['id'])

        self.encoder = Encoder()
        self.reduce_encoder = ReduceEncoder()
        self.decoder = Decoder()

        self.enc_att = EncoderAttention()
        self.dec_att = DecoderAttention()

        # decoder state + bidirectional encoder context (2*EH) + decoder context
        combined_hidden_size = self.dec_hidden_size + 2 * self.enc_hidden_size + self.dec_hidden_size

        if self.pointer_generator is True:
            self.ptr_gen = nn.Linear(combined_hidden_size, 1)

        # sharing decoder weight
        if self.share_dec_weight is True:
            proj_layer = nn.Linear(combined_hidden_size, self.emb_size)
            output_layer = nn.Linear(self.emb_size, self.vocab_size)
            output_layer.weight.data = self.embedding.weight.data  # sharing weight with embedding
            self.vocab_gen = nn.Sequential(
                proj_layer,
                output_layer
            )
        else:
            self.vocab_gen = nn.Linear(combined_hidden_size, self.vocab_size)

    def forward(self, x, x_len, extend_vocab_x, max_oov_len):
        """Greedy-decode a whole batch.

        :params
            x               : B, L
            x_len           : B
            extend_vocab_x  : B, L
            max_oov_len     : C
        :returns
            y               : B, L  decoded token ids
            att             : B, T, L  encoder attention per decoded step
        """
        batch_size = len(x)

        x = self.embedding(x)
        enc_outputs, (enc_hidden_n, enc_cell_n) = self.encoder(x, x_len)
        enc_hidden_n, enc_cell_n = self.reduce_encoder(enc_hidden_n, enc_cell_n)

        # initial decoder hidden
        dec_hidden = enc_hidden_n

        # initial decoder cell
        dec_cell = cuda(t.zeros(batch_size, self.dec_hidden_size))

        # initial decoder input
        dec_input = cuda(t.tensor([TK_START['id']] * batch_size))

        # encoder padding mask: ones on real tokens, zeros on padding
        enc_padding_mask = t.zeros(batch_size, max(x_len))
        for i in range(batch_size):
            enc_padding_mask[i, :x_len[i]] = t.ones(1, x_len[i])
        enc_padding_mask = cuda(enc_padding_mask)

        # stop decoding mask: set to 1 once a sequence has emitted TK_STOP
        stop_dec_mask = cuda(t.zeros(batch_size))

        # initial encoder context vector
        enc_ctx_vector = cuda(t.zeros(batch_size, 2 * self.enc_hidden_size))

        enc_attention = None
        enc_temporal_score = None
        pre_dec_hiddens = None
        y = None

        for i in range(self.max_dec_steps):
            # decoding
            vocab_dist, dec_hidden, dec_cell, enc_ctx_vector, enc_att, enc_temporal_score, _, _ = self.decode(
                dec_input,
                dec_hidden,
                dec_cell,
                pre_dec_hiddens,
                enc_outputs,
                enc_padding_mask,
                enc_temporal_score,
                enc_ctx_vector,
                extend_vocab_x,
                max_oov_len)

            enc_attention = enc_att.unsqueeze(1).detach() if enc_attention is None else t.cat([enc_attention, enc_att.unsqueeze(1).detach()], dim=1)

            ## output: greedy argmax over the (extended) vocabulary
            dec_output = t.max(vocab_dist, dim=1)[1].detach()

            y = dec_output.unsqueeze(1) if y is None else t.cat([y, dec_output.unsqueeze(1)], dim=1)

            ## stop decoding once every sequence in the batch has produced STOP
            stop_dec_mask[dec_output == TK_STOP['id']] = 1
            if len(stop_dec_mask[stop_dec_mask == 1]) == len(stop_dec_mask):
                break

            pre_dec_hiddens = dec_hidden.unsqueeze(1) if pre_dec_hiddens is None else t.cat([pre_dec_hiddens, dec_hidden.unsqueeze(1)], dim=1)
            dec_input = dec_output

        return y, enc_attention

    def decode(self, dec_input,
               dec_hidden,
               dec_cell,
               pre_dec_hiddens,
               enc_hiddens,
               enc_padding_mask,
               enc_temporal_score,
               enc_ctx_vector,
               extend_vocab_x,
               max_oov_len):
        """Run a single decoding step.

        :params
            dec_input           : B
            dec_hidden          : B, DH
            dec_cell            : B, DH
            pre_dec_hiddens     : B, T, DH
            enc_hiddens         : B, L, EH
            enc_padding_mask    : B, L
            enc_temporal_score  : B, L
            enc_ctx_vector      : B, 2EH
            extend_vocab_x      : B, L
            max_oov_len         : C
        :returns
            vocab_dist          : B, V + OOV
            dec_hidden          : B, DH
            dec_cell            : B, DH
            enc_ctx_vector      : B, 2EH
            enc_attention       : B, L
            enc_temporal_score  : B, L
            dec_ctx_vector      : B, DH
            dec_attention       : B, L
        """
        dec_input = self.embedding(dec_input)
        dec_hidden, dec_cell = self.decoder(dec_input, dec_hidden, dec_cell, enc_ctx_vector)

        # intra-temporal encoder attention
        enc_ctx_vector, enc_att, enc_temporal_score = self.enc_att(dec_hidden, enc_hiddens, enc_padding_mask, enc_temporal_score)

        # intra-decoder attention
        dec_ctx_vector, dec_att = self.dec_att(dec_hidden, pre_dec_hiddens)

        # vocab distribution
        combined_input = t.cat([dec_hidden, enc_ctx_vector, dec_ctx_vector], dim=1)
        vocab_dist = f.softmax(self.vocab_gen(combined_input), dim=1)

        # pointer-generator: mix copying from the source with generation
        if self.pointer_generator is True:
            ptr_prob = t.sigmoid(self.ptr_gen(combined_input))
            ptr_dist = ptr_prob * enc_att
            vocab_dist = (1 - ptr_prob) * vocab_dist

            final_vocab_dist = cuda(t.zeros(len(dec_input), self.vocab.size() + max_oov_len))
            final_vocab_dist[:, :self.vocab.size()] = vocab_dist
            # Bug fix: `scatter_add` is out-of-place and its result was being
            # discarded, so the copy probabilities never reached the output
            # distribution.  The in-place `scatter_add_` actually merges them.
            final_vocab_dist.scatter_add_(1, extend_vocab_x, ptr_dist)
        else:
            final_vocab_dist = vocab_dist

        return final_vocab_dist, dec_hidden, dec_cell, enc_ctx_vector, enc_att, enc_temporal_score, dec_ctx_vector, dec_att

    def evaluate(self, x):
        """Summarize a single raw article string.

        :params
            x : article text
        :returns
            (summary string, encoder attention for the single batch element)
        """
        self.eval()

        words = x.split()

        x = cuda(t.tensor(self.vocab.words2ids(words) + [TK_STOP['id']]).unsqueeze(0))
        x_len = cuda(t.tensor([len(words) + 1]))

        extend_vocab_x, oov = self.vocab.extend_words2ids(words)
        extend_vocab_x = extend_vocab_x + [TK_STOP['id']]
        extend_vocab_x = cuda(t.tensor(extend_vocab_x).unsqueeze(0))

        max_oov_len = len(oov)

        y, att = self.forward(x, x_len, extend_vocab_x, max_oov_len)

        return ' '.join(self.vocab.ids2words(y[0].tolist(), oov)), att[0]
| [
"leangsotheara@gmail.com"
] | leangsotheara@gmail.com |
a7c89e3ba731b9cccd6ce9f60d3b144001fa296b | bc02e2c69f425e03b609f466b0a2d52a455765dc | /1212/1239. Maximum Length of a Concatenated String with Unique Characters.py | 662a56e27f9c558011a42f71d223602f521d25f4 | [] | no_license | gaberani/AlgorithmStudy | d795f449fe185c3993df90173f27b7eb74e02366 | 6d9d20ac29446d22f2e0ef7037f131c4a2f48762 | refs/heads/master | 2023-02-03T03:24:45.039238 | 2020-12-22T12:53:52 | 2020-12-22T12:53:52 | 287,101,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | # Given an array of strings arr.
# String s is a concatenation of a sub-sequence of arr which have unique characters.
# Return the maximum possible length of s.
# 1 <= arr.length <= 16
# 1 <= arr[i].length <= 26
# arr[i] contains only lower case English letters.
class Solution(object):
    """LeetCode 1239 - Maximum Length of a Concatenated String with Unique Characters."""

    def maxLength2(self, arr):
        """DFS over subsets, skipping any string with repeated letters.

        The previous version only examined *pairs* of strings, so it missed
        answers built from three or more pieces (e.g. ["ab","cd","ef"] -> 6)
        and wrongly accepted a lone string containing duplicate letters.
        """
        def dfs(start, used):
            best = len(used)
            for idx in range(start, len(arr)):
                letters = set(arr[idx])
                # usable only if the string itself has unique letters and
                # shares none with the letters already taken
                if len(letters) == len(arr[idx]) and not (letters & used):
                    best = max(best, dfs(idx + 1, used | letters))
            return best
        return dfs(0, set())

    def maxLength(self, arr):
        """Iteratively grow every valid unique-letter concatenation.

        Same logic as before with the leftover debug prints removed.
        """
        unique_concats = ['']
        answer = 0
        for word in arr:
            # snapshot: only combine `word` with concatenations that existed
            # before this pass, so nothing is extended with itself twice
            for existing in tuple(unique_concats):
                candidate = word + existing
                if len(candidate) == len(set(candidate)):
                    unique_concats.append(candidate)
                    answer = max(answer, len(candidate))
        return answer
# Quick smoke test when the file is run directly; expected answers in comments.
test = Solution()
print(test.maxLength(["un","iq","ue"])) # 4
print(test.maxLength(["cha","r","act","ers"])) # 6
print(test.maxLength(["abcdefghijklmnopqrstuvwxyz"])) # 26
"khs0783@naver.com"
] | khs0783@naver.com |
bfe2190a8c6124aa6bad4f012e9a3873003340da | 83e21dcd88961e01d7b6d76c1e7d3e0c405bb7a2 | /tests/components/climate/common.py | 4ac6f553091470acdb1d8908a859bc38e18e9aee | [
"Apache-2.0"
] | permissive | skalavala/home-assistant | 0a61886a8e399d6c46bf791927a69557edfdebb3 | 66d6db7934db1af0c560ccffd92cf4a114ef5841 | refs/heads/dev | 2020-04-04T11:35:24.377362 | 2018-11-02T17:40:05 | 2018-11-02T17:40:05 | 155,896,654 | 3 | 1 | Apache-2.0 | 2018-11-02T17:00:10 | 2018-11-02T17:00:09 | null | UTF-8 | Python | false | false | 3,373 | py | """Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.climate import (
_LOGGER, ATTR_AUX_HEAT, ATTR_AWAY_MODE, ATTR_FAN_MODE, ATTR_HOLD_MODE,
ATTR_HUMIDITY, ATTR_OPERATION_MODE, ATTR_SWING_MODE, ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW, DOMAIN, SERVICE_SET_AWAY_MODE, SERVICE_SET_HOLD_MODE,
SERVICE_SET_AUX_HEAT, SERVICE_SET_TEMPERATURE, SERVICE_SET_HUMIDITY,
SERVICE_SET_FAN_MODE, SERVICE_SET_OPERATION_MODE, SERVICE_SET_SWING_MODE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE)
from homeassistant.loader import bind_hass
@bind_hass
def set_away_mode(hass, away_mode, entity_id=None):
    """Turn away mode on/off for all or one climate device.

    Legacy helper — new code should call the service directly.
    """
    data = {ATTR_AWAY_MODE: away_mode}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AWAY_MODE, data)


@bind_hass
def set_hold_mode(hass, hold_mode, entity_id=None):
    """Set a new hold mode for all or one climate device."""
    data = {ATTR_HOLD_MODE: hold_mode}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_HOLD_MODE, data)


@bind_hass
def set_aux_heat(hass, aux_heat, entity_id=None):
    """Turn the auxiliary heater on/off for all or one climate device."""
    data = {ATTR_AUX_HEAT: aux_heat}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AUX_HEAT, data)


@bind_hass
def set_temperature(hass, temperature=None, entity_id=None,
                    target_temp_high=None, target_temp_low=None,
                    operation_mode=None):
    """Set a new target temperature (single value or high/low range)."""
    kwargs = {
        key: value
        for key, value in [
            (ATTR_TEMPERATURE, temperature),
            (ATTR_TARGET_TEMP_HIGH, target_temp_high),
            (ATTR_TARGET_TEMP_LOW, target_temp_low),
            (ATTR_ENTITY_ID, entity_id),
            (ATTR_OPERATION_MODE, operation_mode),
        ]
        if value is not None
    }
    _LOGGER.debug("set_temperature start data=%s", kwargs)
    hass.services.call(DOMAIN, SERVICE_SET_TEMPERATURE, kwargs)


@bind_hass
def set_humidity(hass, humidity, entity_id=None):
    """Set a new target humidity."""
    data = {ATTR_HUMIDITY: humidity}
    # NOTE(review): this helper (and the two below) tests `is not None`
    # while the helpers above use plain truthiness — kept as-is to
    # preserve behaviour exactly.
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_HUMIDITY, data)


@bind_hass
def set_fan_mode(hass, fan, entity_id=None):
    """Set the fan mode for all or one climate device."""
    data = {ATTR_FAN_MODE: fan}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_FAN_MODE, data)


@bind_hass
def set_operation_mode(hass, operation_mode, entity_id=None):
    """Set a new target operation mode."""
    data = {ATTR_OPERATION_MODE: operation_mode}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_OPERATION_MODE, data)


@bind_hass
def set_swing_mode(hass, swing_mode, entity_id=None):
    """Set a new target swing mode."""
    data = {ATTR_SWING_MODE: swing_mode}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_SWING_MODE, data)
| [
"paulus@home-assistant.io"
] | paulus@home-assistant.io |
91aecef65480009d7dbbe337141cf68bbe514f8a | 6f56da8db171d4a6c006b5d944437bf061069faf | /XCat.v.0.0.1/source/healpy/query_disc_func.py | 860b9d7fb4bfb7b36c397658f7cf006776b6d060 | [] | no_license | afarahi/XCat | 16819bef7087e994907c413dd6331cdebde72ffb | 498602eb7f61696d169f071185115345c68bcf86 | refs/heads/master | 2021-01-21T01:59:36.907059 | 2013-05-03T05:12:07 | 2013-05-03T05:12:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,163 | py | #
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""This module provides the function query_disc and its helper functions.
Written by B. Crill.
"""
def ring_num(nside, z):
    """Return the HEALPix ring index (1 .. 4*nside-1) for height ``z``.

    Input:
      - nside: a power of 2
      - z: a float within [-1, 1]

    Fix: in the south-polar-cap branch the original clamped ``iring`` to 1
    and then immediately overwrote it with ``4*nside - my_iring``, so for
    z ~ -1 (where ``my_iring`` rounds to 0) it returned the out-of-range
    ring ``4*nside``.  The clamp must be applied to ``my_iring`` *before*
    mirroring, matching the north-cap branch.
    """
    from numpy import sqrt
    twothird = 2.0 / 3.0
    shift = 0.5
    # equatorial regime
    iring = int(nside * (2.0 - 1.5 * z) + shift)
    if z > twothird:
        # north polar cap
        iring = int(nside * sqrt(3.0 * (1.0 - z)) + shift)
        if iring < 1:
            iring = 1
    if z < -1 * twothird:
        # south polar cap: clamp before mirroring (bug fix, see docstring)
        my_iring = int(nside * sqrt(3.0 * (1.0 + z)) + shift)
        if my_iring < 1:
            my_iring = 1
        iring = 4 * nside - my_iring
    return iring
def ring2z(nside, iz):
    """Return the z (cos(theta)) of the center of HEALPix ring ``iz``.

    Input:
      - nside: a power of 2
      - iz: a ring number (1 .. 4*nside-1)

    Fix: the south-cap expression used the Python-2-only long literal
    ``4L``, a SyntaxError on Python 3; plain ``4`` is equivalent.
    """
    dth1 = 1.0 / (3.0 * float(nside) ** 2)
    dth2 = 2.0 / (3.0 * float(nside))
    if iz <= (nside - 1):
        # north polar cap
        zring = 1.0 - float(iz) ** 2 * dth1
    elif iz <= 3 * nside:
        # equatorial belt
        zring = float(2 * nside - iz) * dth2
    else:
        # south polar cap (was: 4L*nside)
        zring = -1.0 + float(4 * nside - iz) ** 2 * dth1
    return zring
def in_ring(nside, iz, phi0, dphi, nest=False):
    """List the pixels of ring ``iz`` whose longitude lies in
    [phi0 - dphi, phi0 + dphi].

    Input:
      - nside: a power of 2
      - iz: ring number
      - phi0: central longitude (radians)
      - dphi: half-width of the longitude interval (radians)
    Keyword:
      - nest: if True, return pixel numbers in the NESTED scheme
        (default RING).
    Return:
      - pixel numbers (numpy array), or -1 when the ring is untouched.
    """
    from numpy import pi, arange, concatenate, fabs, round
    from pixelfunc import nside2npix, ring2nest

    npix = nside2npix(nside)
    twopi = 2.0 * pi
    ncap = 2 * nside * (nside - 1)

    take_all = 0
    to_top = 0
    listir = -1
    nir = 0

    phi_low = (phi0 - dphi) % twopi
    if phi_low < 0:
        phi_low = phi_low + twopi
    phi_hi = (phi0 + dphi) % twopi
    if phi_hi < 0:
        phi_hi = phi_hi + twopi
    if fabs(dphi - pi) < 1e-6:
        take_all = 1  # interval spans the full ring

    if (iz >= nside) & (iz <= 3 * nside):
        # equatorial belt: rings of constant length 4*nside
        ir = iz - nside + 1
        ipix1 = ncap + 4 * nside * (ir - 1)
        ipix2 = ipix1 + 4 * nside - 1
        kshift = ir % 2
        nr = nside * 4
    else:
        # polar caps: ring length grows/shrinks with the cap index
        if iz < nside:
            ir = iz
            ipix1 = 2 * ir * (ir - 1)
        else:
            ir = 4 * nside - iz
            ipix1 = npix - 2 * ir * (ir + 1)
        ipix2 = ipix1 + 4 * ir - 1
        nr = ir * 4
        kshift = 1

    if take_all == 1:
        nir = ipix2 - ipix1 + 1
        listir = arange(ipix1, ipix2 + 1, 1)

    if take_all == 0:
        shift = kshift * .5
        ip_low = int(round(nr * phi_low / twopi - shift)) % nr
        ip_hi = int(round(nr * phi_hi / twopi - shift)) % nr
        if ip_low > ip_hi:
            to_top = 1  # interval wraps past phi = 0
        ip_low = ip_low + ipix1
        ip_hi = ip_hi + ipix1
        if to_top == 1:
            nir1 = ipix2 - ip_low + 1
            nir2 = ip_hi - ipix1 + 1
            nir = nir1 + nir2
            if (nir1 > 0) & (nir2 > 0):
                list1 = arange(0, nir1, 1) + ip_low
                list2 = arange(0, nir2, 1) + ipix1
                listir = concatenate((list1, list2))
            else:
                if nir1 == 0:
                    listir = arange(0, nir2, 1) + ipix1
                if nir2 == 0:
                    listir = arange(0, nir1, 1) + ip_low
        else:
            nir = ip_hi - ip_low + 1
            listir = arange(0, nir, 1) + ip_low

    if nest:
        listir = ring2nest(nside, listir)
    return listir
def query_disc(nside, v0, radius, nest=False, deg=True):
    """Return the pixels within angle ``radius`` of direction ``v0``.

    Input:
      - nside: a power of 2
      - v0: vector giving the disc center (normalized internally)
      - radius: opening angle of the disc
    Keywords:
      - nest: if True, pixels are returned in the NESTED scheme
        (default RING)
      - deg: if True (default) ``radius`` is in degrees, else radians
    Return:
      - pixel numbers as a numpy array
    """
    from numpy import (sqrt, sin, cos, pi, fabs, arccos, arcsin, size,
                       concatenate, arctan2, asarray)
    from pixelfunc import nside2npix

    nside2npix(nside)  # validates nside / keeps parity with original npix computation
    npix = nside2npix(nside)

    ang_conv = pi / 180.0 if deg else 1.0
    cosang = cos(radius * ang_conv)
    radius_eff = radius * ang_conv  # opening angle in radians

    v0 = asarray(v0)
    v0 /= sqrt((v0 ** 2).sum())
    x0, y0, z0 = v0
    a = x0 * x0 + y0 * y0

    phi0 = 0.0
    if (x0 != 0.0) | (y0 != 0.0):
        phi0 = arctan2(y0, x0)
    cosphi0 = cos(phi0)

    # latitude band of rings possibly touched by the disc
    rlat0 = arcsin(z0)
    rlat1 = rlat0 + radius_eff
    rlat2 = rlat0 - radius_eff
    zmax = 1.0 if rlat1 >= pi / 2.0 else sin(rlat1)
    irmin = max(ring_num(nside, zmax) - 1, 1)
    zmin = -1.0 if rlat2 <= -pi / 2.0 else sin(rlat2)
    irmax = min(ring_num(nside, zmin) + 1, 4 * nside - 1)

    work = [[]]
    for iz in xrange(irmin, irmax + 1):
        skip = 0
        z = ring2z(nside, iz)
        # half-width in longitude of the disc at this ring's latitude
        b = cosang - z * z0
        c = 1.0 - z * z
        cosdphi = b / sqrt(a * c)
        if (x0 == 0) & (y0 == 0):
            cosdphi = -1.0
            dphi = pi
        if fabs(cosdphi) <= 1:
            dphi = arccos(cosdphi)
        else:
            if cosphi0 < cosdphi:
                skip = 1  # ring entirely outside the disc
            dphi = pi
        if skip == 0:
            listir = in_ring(nside, iz, phi0, dphi, nest=nest)
            if size(listir) > 0:
                work.append(listir)

    if len(work) > 1:
        work = concatenate(work[1:])
    else:
        work = asarray([], dtype=int)
    return work
| [
"aryaf66@gmail.com"
] | aryaf66@gmail.com |
b3c8d616e489ff6dc141e07a0074a2878e1459bc | 48e9d0e84238daf0de290551e3588e9ff3f49549 | /api/apiget.py | e4a87a5f3719acb9b9a59313730984215342957e | [] | no_license | celord/PythonGreencore | 9606af569738703b66d80bce6e423c9a313fa539 | 259aadcc346203f8092f6c6d286e3fca2e9fc550 | refs/heads/master | 2020-05-30T23:18:15.542876 | 2019-06-19T14:39:59 | 2019-06-19T14:39:59 | 190,014,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import requests
import json
token = "55d87efda345913f136102ce791c622ffe7341c74dcb4790c9d0e463edc831d1"
headers = {"Authorization": token}
r =requests.get('https://api.ciscopark.com/v1/people/me', headers = headers)
r.status_code | [
"celord@gmail.com"
] | celord@gmail.com |
1a8cd084b7d6fe1ca312a37929a9ebd93b0edb00 | 2337351b228818e41be3002bd38f68f77c2aa074 | /services/datastream/models/prefix.py | 0d10a5ac433def71fd723d7eb22131d738db29e8 | [
"BSD-3-Clause"
] | permissive | nocproject/noc | 57d40c680a1499374463e472434f9595ed6d1374 | 6e6d71574e9b9d822bec572cc629a0ea73604a59 | refs/heads/master | 2023-08-31T01:11:33.544573 | 2023-08-30T17:31:11 | 2023-08-30T17:31:11 | 107,815,776 | 105 | 33 | BSD-3-Clause | 2023-07-31T07:57:45 | 2017-10-21T21:04:33 | Python | UTF-8 | Python | false | false | 1,077 | py | # ----------------------------------------------------------------------
# prefix datastream model
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
# Third-party modules
from pydantic import BaseModel, Field
# NOC modules
from .utils import StateItem, ProjectItem
class PrefixProfileItem(BaseModel):
id: str
name: str
class VRFItem(BaseModel):
id: str
name: str
class ASItem(BaseModel):
id: str
name: str
asf: str = Field(alias="as")
class PrefixDataStreamItem(BaseModel):
id: str
name: str
change_id: str
prefix: str
afi: str
source: str
state: StateItem
profile: PrefixProfileItem
description: Optional[str]
labels: Optional[List[str]]
tags: Optional[List[str]]
project: Optional[ProjectItem]
vrf: Optional[VRFItem]
asf: Optional[ASItem] = Field(None, alias="as")
| [
"dvolodin7@gmail.com"
] | dvolodin7@gmail.com |
6c7421dce123b13a65b46ab1af4a596e22aae589 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/required_field_1.py | 277122f083ebb95072706186a5e5838369bac68f | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 650 | py | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.required_field_name_1 import RequiredFieldName1
__NAMESPACE__ = "http://www.travelport.com/schema/common_v52_0"
@dataclass
class RequiredField1:
    """xsdata-generated binding for the travelport ``RequiredField`` element.

    Parameters
    ----------
    name
        The name of the required field
    """

    class Meta:
        name = "RequiredField"
        namespace = "http://www.travelport.com/schema/common_v52_0"

    name: None | RequiredFieldName1 = field(
        default=None,
        metadata={"name": "Name", "type": "Attribute", "required": True},
    )
| [
"chris@komposta.net"
] | chris@komposta.net |
6f436c757d9095dc9ac7eb488a5fbd3eba77d8c0 | 990e7410c3debec7332ba09aa4c6504127ba2638 | /examples/opengl/opengl_core.py | 7dd09a10fd535cba1b54058f2e5fed71c6e3bdd8 | [
"LicenseRef-scancode-free-unknown",
"BSD-3-Clause"
] | permissive | nachogon1/pyglet | 2b2699703b16118cb33dd009857e3cccccccf8ab | d6a85579f24a48e5f8e3e9f59f7bd9d5ebf2b049 | refs/heads/master | 2023-08-23T16:43:13.609153 | 2021-10-28T00:46:52 | 2021-10-28T00:46:52 | 422,730,754 | 0 | 0 | BSD-3-Clause | 2021-10-29T22:45:17 | 2021-10-29T22:45:17 | null | UTF-8 | Python | false | false | 3,702 | py | import pyglet
from pyglet.gl import *
# pyglet.options['debug_gl_shaders'] = True
window = pyglet.window.Window(width=540, height=540, resizable=True)
batch = pyglet.graphics.Batch()
print("OpenGL Context: {}".format(window.context.get_info().version))
##########################################################
# TESTS !
##########################################################
label = pyglet.text.Label("This is a test", x=0, y=180, dpi=200, color=(255, 25, 255, 150), batch=batch)
vertex_list = pyglet.graphics.vertex_list(3, ('position3f', (100, 300, 0, 200, 250, 0, 200, 350, 0)),
('colors4f', (1, 0, 0, 1, 0, 1, 0, 1, 0.3, 0.3, 1, 1)))
def create_quad_vertex_list(x, y, z, width, height):
    """Return the four corners of an axis-aligned quad at depth ``z`` as a
    flat 12-tuple, counter-clockwise from the bottom-left corner."""
    right = x + width
    top = y + height
    return (x, y, z,
            right, y, z,
            right, top, z,
            x, top, z)
batch.add_indexed(4, GL_TRIANGLES, None, [0, 1, 2, 0, 2, 3],
('position3f', create_quad_vertex_list(480, 270, -11, 50, 50)),
('colors4f', (1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1)))
batch.add_indexed(4, GL_TRIANGLES, None, [0, 1, 2, 0, 2, 3],
('position3f', (400, 400, 0, 400+50, 400, 0, 400+50, 400+50, 0, 400, 400+50, 0)),
('colors4f', (1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1, 1, 0.5, 0.2, 1)))
img = pyglet.image.load("pyglet.png")
img.anchor_x = img.width // 2
img.anchor_y = img.height // 2
red = pyglet.image.SolidColorImagePattern((255, 0, 0, 255)).create_image(50, 50)
green = pyglet.image.SolidColorImagePattern((0, 255, 0, 255)).create_image(50, 50)
blue = pyglet.image.SolidColorImagePattern((0, 0, 255, 255)).create_image(50, 50)
white = pyglet.image.SolidColorImagePattern((255, 255, 255, 255)).create_image(50, 50)
sprites = [pyglet.sprite.Sprite(img=img, x=60, y=80, batch=batch),
pyglet.sprite.Sprite(img=img, x=110, y=90, batch=batch),
pyglet.sprite.Sprite(img=img, x=160, y=100, batch=batch),
pyglet.sprite.Sprite(img=img, x=210, y=110, batch=batch)]
for sprite in sprites:
sprite.opacity = 220
sprite2 = pyglet.sprite.Sprite(img=red, x=200, y=400, batch=batch)
sprite3 = pyglet.sprite.Sprite(img=green, x=300, y=300, batch=batch)
sprite4 = pyglet.sprite.Sprite(img=blue, x=400, y=200, batch=batch)
sprite5 = pyglet.sprite.Sprite(img=white, x=500, y=100, batch=batch)
standalone_sprite = pyglet.sprite.Sprite(img=white, x=600, y=0)
##########################################################
# Modify the sprite scale value by scrolling the mouse
##########################################################
@window.event
def on_mouse_scroll(x, y, mouse, direction):
    """Grow/shrink every logo sprite by 0.1 per scroll notch."""
    for logo in sprites:
        logo.scale += direction / 10


@window.event
def on_draw():
    """Redraw the scene: the loose vertex list, the batch, then the
    sprite deliberately kept outside the batch."""
    window.clear()
    vertex_list.draw(GL_TRIANGLES)
    batch.draw()
    standalone_sprite.draw()


def update(dt):
    """Spin every logo sprite; scheduled at 60 Hz in the main guard."""
    for logo in sprites:
        logo.rotation += 100 * dt % 360


if __name__ == "__main__":
    pyglet.gl.glClearColor(0.2, 0.3, 0.3, 1)
    pyglet.clock.schedule_interval(update, 1 / 60)
    pyglet.app.run()
| [
"benmoran@protonmail.com"
] | benmoran@protonmail.com |
f3832061ad2374d819506a18348aa03d27a95d26 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_slipped.py | b9ab1c3a9d8d932d06c99f0ba3cd3a162dee6b49 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.nouns._slip import _SLIP
#calss header
class _SLIPPED(_SLIP):
    """Word-base entry for the noun 'slipped', an inflected form of 'slip'."""

    def __init__(self):
        _SLIP.__init__(self)
        # identity of this entry within the noun word base
        self.name = "SLIPPED"
        self.specie = 'nouns'
        self.basic = "slip"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d41b809d954f96b6e5eed80c4f4f724e850412aa | d7bc683cc14198ba4b1ae9193f7feaec1808e9f3 | /python练习题/day01/Check variable type.py | 2ad3cf67da0abeab24045e029c5e21b4fb137f79 | [] | no_license | yuanshaohui/python-learn | 8bd4e6a0a5cc0ed622e32bdf0c9e833460ee3b90 | 8c9c6ecccb4219e29fec7593e9bc4b45e218da3e | refs/heads/master | 2021-03-14T09:44:50.142244 | 2020-03-21T13:48:18 | 2020-03-21T13:48:18 | 246,756,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | """
检查变量类型
Author:袁亮亮
Date:2019-11-19
"""
# One sample value per built-in scalar type.
a = 100        # int
b = 100.1      # float
c = "字符串"    # str
d = 10 + 5j    # complex
e = True       # bool (its type is printed by the line that follows this block)
for value in (a, b, c, d):
    print(type(value))
print(type(e)) | [
"123456@qq.com"
] | 123456@qq.com |
7f418b7bee3176104ad9a89194ca1f4a3fa27ee1 | 3a9b154aa9d5e379683476f80f30630bf44d2102 | /Server_v1/api/urls.py | d9480d7b24288746c33ed7570e2a86d0a4d9360c | [] | no_license | KevinDon/py_amazon_analysis | 81995e360d2b536e1df6e515aae9457054edae29 | 13b5fbb046ca6516ac3a47e8f7867baf358011f4 | refs/heads/master | 2022-12-13T00:27:27.511783 | 2019-08-14T11:45:53 | 2019-08-14T11:45:53 | 185,160,162 | 0 | 1 | null | 2022-12-10T05:38:15 | 2019-05-06T08:56:40 | TSQL | UTF-8 | Python | false | false | 8,094 | py | # coding:utf-8
from django.conf.urls import url
from django.urls import include, path
# from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from api.views import *
# router = routers.DefaultRouter() # 创建路由对象
# router.register(r'statvisitqrcodeskus', StatVisitQrcodeSkusSet, basename='sku')
# router.register(r'statvisitqrcodeskuday', StatVisitQrcodeSkuDaySet, basename='sku')
# schema_view = get_schema_view(title='API DOCS', renderer_classes=[OpenAPIRenderer, SwaggerUIRenderer])
urlpatterns = [
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
# url(r'^docs$',schema_view),
# url(r'^(?P<version>[v1|v2]+)/',include(router.urls)),
# url(r'^test$',TestSet.as_view()),
url(r'^docs$', get_swagger_view(title='Docs API')),
url(r'^(?P<version>[v1|v2]+)/apitokenauth', LoginSet.as_view(), name='apitokenauth'),
url(r'^(?P<version>[v1|v2]+)/userprofile', UserProfileSet.as_view(), name='userprofile'),
# url(r'^api-token-auth', obtain_jwt_token),
url(r'^(?P<version>[v1|v2]+)/statamazonsku/', StatAmazonSkuSet.as_view(), name='statamazonsku'),
url(r'^(?P<version>[v1|v2]+)/statamazonskulist/', StatAmazonSkuListGet.as_view(), name='statamazonskulist'),
url(r'^(?P<version>[v1|v2]+)/statamazonskuuvday/', StatAmazonSkuUvDaySet.as_view(), name='statamazonskuuvday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskuuvmonth/', StatAmazonSkuUvMonthSet.as_view(), name='statamazonskuuvmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskuuvweek/', StatAmazonSkuUvWeekSet.as_view(), name='statamazonskuuvweek'),
url(r'^(?P<version>[v1|v2]+)/statamazonskupvday/', StatAmazonSkuPvDaySet.as_view(), name='statamazonskupvday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskupvmonth/', StatAmazonSkuPvMonthSet.as_view(), name='statamazonskupvmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskupvweek/', StatAmazonSkuPvWeekSet.as_view(), name='statamazonskupvweek'),
url(r'^(?P<version>[v1|v2]+)/statamazonskutotalitemsday/', StatAmazonSkuTotalItemsDaySet.as_view(), name='statamazonskutotalitemsday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskutotalitemsmonth/', StatAmazonSkuTotalItemsMonthSet.as_view(), name='statamazonskutotalitemsmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskutotalitemsweek/', StatAmazonSkuTotalItemsWeekSet.as_view(), name='statamazonskutotalitemsweek'),
url(r'^(?P<version>[v1|v2]+)/statamazonskubuyboxday/', StatAmazonSkuBuyBoxDaySet.as_view(), name='statamazonskubuyboxday'),
# amazon category
url(r'^(?P<version>[v1|v2]+)/statmazoncategorylist/', StatAmazonCategoryListSet.as_view(), name='statamazoncategorylistset'),
url(r'^(?P<version>[v1|v2]+)/statmazoncategorys/', StatAmazonCategorySet.as_view(), name='statamazoncategorysset'),
# keyword
url(r'^(?P<version>[v1|v2]+)/statamazonkeywordlistset/', StatAmazonKeywordListSet.as_view(), name='statamazonkeywordlistset'),
url(r'^(?P<version>[v1|v2]+)/statamazonkeywordsset/', StatAmazonKeywordsSet.as_view(), name='statamazonkeywordsset'),
# template variant
url(r'^(?P<version>[v1|v2]+)/statamazonvariantlistset/', StatAmazonVariantListSet.as_view(), name='statamazonvariantlistset'),
# proxy ip
url(r'^(?P<version>[v1|v2]+)/statamazonproxyiplistset/', StatAmazonProxyIpListSet.as_view(), name='statamazonproxyiplistset'),
# line
url(r'^(?P<version>[v1|v2]+)/statamazonline/', StatAmazonLineSet.as_view(), name='statamazonline'),
url(r'^(?P<version>[v1|v2]+)/statamazonlineuvday/', StatAmazonLineUvDaySet.as_view(), name='statamazonlineuvday'),
url(r'^(?P<version>[v1|v2]+)/statamazonlineuvmonth/', StatAmazonLineUvMonthSet.as_view(), name='statamazonlineuvmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonlineuvweek/', StatAmazonLineUvWeekSet.as_view(), name='statamazonlineuvweek'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinepvday/', StatAmazonLinePvDaySet.as_view(), name='statamazonlinepvday'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinepvmonth/', StatAmazonLinePvMonthSet.as_view(), name='statamazonlinepvmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinepvweek/', StatAmazonLinePvWeekSet.as_view(), name='statamazonlinepvweek'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinetotalitemsday/', StatAmazonLineTotalItemsDaySet.as_view(), name='statamazonlinetotalitemsday'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinetotalitemsmonth/', StatAmazonLineTotalItemsMonthSet.as_view(), name='statamazonlinetotalitemsmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinetotalitemsweek/', StatAmazonLineTotalItemsWeekSet.as_view(), name='statamazonlinetotalitemsweek'),
url(r'^(?P<version>[v1|v2]+)/statamazonlinebuyboxday/', StatAmazonLineBuyBoxDaySet.as_view(), name='statamazonlinebuyboxday'),
url(r'^(?P<version>[v1|v2]+)/statamazonlineuvitemsconversionrateday/', StatAmazonLineUvItemsConversionRateDaySet.as_view(), name='statamazonlineuvitemsconversionrateday'),
# Category Rank
url(r'^(?P<version>[v1|v2]+)/statamazonskucategoryrankday/', StatAmazonSkuCategoryRankDaySet.as_view(), name='statamazonskucategoryrankday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskucategoryrankmonth/', StatAmazonSkuCategoryRankMonthSet.as_view(), name='statamazonskucategoryrankmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskucategoryrankweek/', StatAmazonSkuCategoryRankWeekSet.as_view(), name='statamazonskucategoryrankweek'),
# Keyword Rank
url(r'^(?P<version>[v1|v2]+)/statamazonskukeywordrankday/', StatAmazonSkuKeywordRankDaySet.as_view(), name='statamazonskukeywordrankday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskukeywordrankmonth/', StatAmazonSkuKeywordRankMonthSet.as_view(), name='statamazonskukeywordrankmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskukeywordrankweek/', StatAmazonSkuKeywordRankWeekSet.as_view(), name='statamazonskukeywordrankweek'),
# Review Rank
url(r'^(?P<version>[v1|v2]+)/statamazonskureviewrankday/', StatAmazonSkuReviewRankDaySet.as_view(), name='statamazonskureviewrankday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskureviewrankmonth/', StatAmazonSkuReviewRankMonthSet.as_view(), name='statamazonskureviewrankmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskureviewrankweek/', StatAmazonSkuReviewRankWeekSet.as_view(), name='statamazonskureviewrankweek'),
# Composite Report
url(r'^(?P<version>[v1|v2]+)/statamazonskucompositereportday/', StatAmazonSkuCompositeReportDaySet.as_view(), name='statamazonskucompositereportday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskucompositereportmonth/', StatAmazonSkuCompositeReportMonthSet.as_view(), name='statamazonskucompositereportmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskucompositereportweek/', StatAmazonSkuCompositeReportWeekSet.as_view(), name='statamazonskucompositereportweek'),
# Price Log
url(r'^(?P<version>[v1|v2]+)/statamazonskupricelogday/', StatAmazonSkuPriceLogDaySet.as_view(), name='statamazonskupricelogday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskupricelogmonth/', StatAmazonSkuPriceLogMonthSet.as_view(), name='statamazonskupricelogmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskupricelogweek/', StatAmazonSkuPriceLogWeekSet.as_view(), name='statamazonskupricelogweek'),
# Bestseller Rank
url(r'^(?P<version>[v1|v2]+)/statamazonskubestsellerrankday/', StatAmazonSkuBestsellerRankDaySet.as_view(), name='statamazonskubestsellerrankday'),
url(r'^(?P<version>[v1|v2]+)/statamazonskubestsellerrankmonth/', StatAmazonSkuBestsellerRankMonthSet.as_view(), name='statamazonskubestsellerrankmonth'),
url(r'^(?P<version>[v1|v2]+)/statamazonskubestsellerrankweek/', StatAmazonSkuBestsellerRankWeekSet.as_view(), name='statamazonskubestsellerrankweek'),
# Auth Department
url(r'^(?P<version>[v1|v2]+)/statauthdepartment/', StatAuthDepartmentView.as_view(), name='statauthdepartment'),
]
'''
request params:
{
"pager":{"size":5, "page":1}
,"order":["dy", "-sku"]
,"filter": [[{"sku-eq":"WBLANKET-PLUSH-5KG"},{"sku-eq":"HM-BED-TASSEL-COT-CR"}],[{"dy-lk-and":"2019-03-19"}]]
}
'''
| [
"kevintang002@gmail.com"
] | kevintang002@gmail.com |
8996a492d2fae89a7cdfe698186f932481087617 | 1ca466de0ffc59b48ab63afdda369ccc13fe4fd3 | /python_import/test_audio_sum_use_01.py | 9b81b7f39cbb116710eaff58efe5025bac9c108d | [] | no_license | forwiat/youngri | 380df95b6eb5c6eaa070099530b5ff9ba39cc8d0 | 9ed93838db56f202153413095b661273c1e33ddb | refs/heads/main | 2023-05-06T06:36:50.651872 | 2021-06-04T02:30:18 | 2021-06-04T02:30:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | # 국립 국어원 발화 말뭉치의 여성 화자를 화자별, 토픽9만 합쳐보자!!
import librosa
from pydub import AudioSegment
import soundfile as sf
import os
from voice_handling import import_test, voice_sum
import_test()
# ==== it will be great ====
# ---------------------------------------------------------------
# voice_sum: concatenate many audio files into one wav file
#   form:      'wav' or 'flac'
#   audio_dir: directory containing the source audio files
#   save_dir:  where flac inputs are converted to wav first (flac only)
#   out_dir:   full output path (directory + filename) of the merged wav
# 1) wav case
# FIX(review): the file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> markers), a SyntaxError that made the
# script unrunnable.  Resolved keeping the HEAD side; the incoming branch
# (27a2e97) instead used:
#   path_wav = 'C:/nmb/gan_0504/audio/b100_e5000_n100_male'
#   path_out = 'C:/nmb/gan_0504/audio/b100_e5000_n100_male_total05000_sum.wav'
path_wav = 'C:/nmb/gan_0504/audio'
path_out = 'C:/nmb/gan_0504/audio/b96_e10000_n100_total10000_sum.wav'
voice_sum(form='wav', audio_dir=path_wav, save_dir=None, out_dir=path_out)
# confirmed working (original note: "잘 되는 것 확인!")
'''
# 2) flac일 때
path_flac = 'C:/nmb/nmb_data/channel_split/pansori_fandm/'
path_save = 'C:/nmb/nmb_data/channel_split/pansori_fandm_wav/'
path_out = 'C:/nmb/nmb_data/channel_split/pansori_fandm.wav'
voice_sum(form='flac', audio_dir=path_flac, save_dir=path_save, out_dir=path_out)
# 잘 되는 것 확인!
''' | [
"lemontleo0311@gmail.com"
] | lemontleo0311@gmail.com |
5cbdb08ef6c1b94df2eec04e2133cd087b486f96 | 0d86bb399a13152cd05e3ba5684e4cb22daeb247 | /python-exercise/6-regex/py151_match_address.py | 8d42e31cc168df7f51b8b1e726739eb9d57c1862 | [] | no_license | tazbingor/learning-python2.7 | abf73f59165e09fb19b5dc270b77324ea00b047e | f08c3bce60799df4f573169fcdb1a908dcb8810f | refs/heads/master | 2021-09-06T05:03:59.206563 | 2018-02-02T15:22:45 | 2018-02-02T15:22:45 | 108,609,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18/1/12 下午5:47
# @Author : Aries
# @Site :
# @File : py151_match_address.py
# @Software: PyCharm
'''
15-5 匹配住址.
1180 Bordeaux Drive
3120 De la Cruz Boulevard
'''
from re import match
def match_address(address=''):
    """Return ``address`` unchanged if it consists only of CJK ideographs,
    ASCII letters/digits, underscores and spaces; otherwise return None.

    Fix: the module was Python-2-only (``ur''`` literal, ``print i,``)
    and under Python 2 the ``\\u`` escapes in a byte-string pattern are
    rejected by ``re`` anyway.  Rewritten for Python 3, where ``re``
    understands ``\\uXXXX`` escapes natively.
    """
    try:
        return match(r'^[\u4E00-\u9FA5A-Za-z0-9_ ]+$', address).group()
    except AttributeError:
        # match() returned None -> no .group()
        return None


def match_chinese_address(address=''):
    """Same check as :func:`match_address`; kept as a separate name for
    backward compatibility (in Python 3 str is already Unicode, so the
    two are identical)."""
    try:
        return match(r'^[\u4E00-\u9FA5A-Za-z0-9_ ]+$', address).group()
    except AttributeError:
        return None


if __name__ == '__main__':
    print(match_address('1180 Bordeaux Drive'))
    print(match_address('3120 De la Cruz Boulevard'))
    for ch in match_chinese_address('新街口南大街 百花深处胡同'):
        print(ch, end=' ')
| [
"852353298@qq.com"
] | 852353298@qq.com |
41e85e05c129f9c11ad1d862da42bb45eac84f4b | 9a2ea68439d24632cdf1321db0137f412ad2b1ed | /analyzePcapWithScapy.py | 61ddb90dc464a22310a33722337e2244798418df | [] | no_license | parahaoer/AnalyzePcap | 91ec1656fd65e0aa51ce1fbc14d2cb7aac18fd22 | c67e6f0d72a9351c86c8ae05e55426e21ad2ec02 | refs/heads/master | 2022-12-13T05:19:19.832656 | 2020-08-28T07:28:16 | 2020-08-28T07:28:16 | 279,524,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,500 | py | from scapy.all import *
import re
def analyzePcap(filepath):
s1 = PcapReader(filepath)
ftxt = open('scapy_analyze_result/result_feature_great_than_30.txt','a', encoding="utf-8")
write_line = []
vnc_feature_count = 0
global vnc_file_count
No = 1
try:
# data 是以太网 数据包
data = s1.read_packet()
while data is not None:
if(is_ipv4_tcp(data)):
if(hasFeature01(data)):
write_line.append("No." + str(No) + " maybe RFB 协议")
vnc_feature_count +=1
elif(hasFeature03(data)):
write_line.append("No." + str(No) + " maybe pointerEvent")
vnc_feature_count +=1
elif(hasFeature02(data)):
write_line.append("No." + str(No) + " maybe security types supported package")
vnc_feature_count +=1
elif(hasFeature04(data)):
write_line.append("No." + str(No) + " maybe KeyEvent")
vnc_feature_count +=1
data = s1.read_packet()
No += 1
s1.close()
except:
pass
if(vnc_feature_count >= 30):
vnc_file_count += 1
ftxt.write(filepath + "\n")
ftxt.write("vnc_feature_count=" + str(vnc_feature_count) + "\n")
for line in write_line:
ftxt.write("\t" + line + "\n")
ftxt.close()
#print(type(data.payload)) #==><class 'scapy.layers.inet.IP'> 可以使用 help(scapy.layers.inet.IP) 查看帮助文档
def is_ipv4_tcp(data):
    """True iff ``data`` is an Ethernet frame carrying IPv4
    (ethertype 0x0800 == 2048) whose payload is TCP (IP protocol 6)."""
    ip_packet = data.payload
    return (data.fields['type'] == 2048
            and ip_packet.fields['version'] == 4
            and ip_packet.fields['proto'] == 6)
def getTcpPayloadLen(data):
ip_packet = data.payload
tcp_packet = ip_packet.payload
ip_header_len = ip_packet.fields['ihl'] * 4
ip_len = ip_packet.fields['len']
tcp_len = ip_len - ip_header_len
tcp_header_len = tcp_packet.fields['dataofs'] * 4
tcp_payload_len = tcp_len - tcp_header_len
# print(tcp_payload_len)
return tcp_payload_len
def getTcpPayload(data):
ip_packet = data.payload
tcp_packet = ip_packet.payload
tcp_payload = tcp_packet.payload
'''
tcp_payload.original 与 tcp_payload.fields['load'] 返回的都是 bytes对象
通过下标获取bytes对象的某一个字节内容,是十进制的,而不是十六进制数据。
'''
# print(tcp_payload.original[0]) # 82 , 转换成16进制是0x52, 与wireshark 中显示的相同。
# print(tcp_payload.original) # b'RFB 003.008\n', 结果是以字节的值为ASCII值转换成相应的字符串(字符串前边的b表示是bytes对象)。
# print(tcp_payload.original.hex())
# print(type(tcp_payload.original))
# print(type(tcp_payload.fields['load']))
return tcp_payload.original
# tcp_payload 的长度为12字节, 且包含字符串“RFB”
def hasFeature01(data):
tcp_payload = getTcpPayload(data)
tcp_payload_len = getTcpPayloadLen(data)
return tcp_payload_len == 12 and re.search("RFB", str(tcp_payload))
# Security-types heuristic: the first payload byte equals (payload length - 1),
# i.e. the server's "supported security types" message: <count><type>*.
def hasFeature02(data):
    """True when the packet looks like the server's security-types list."""
    payload = getTcpPayload(data)
    length = getTcpPayloadLen(data)
    return payload[0] != 0 and payload[0] == length - 1
# PointerEvent heuristic: a 6-byte payload whose message-type byte is 5.
def hasFeature03(data):
    """True when the packet looks like an RFB PointerEvent message."""
    payload = getTcpPayload(data)
    return getTcpPayloadLen(data) == 6 and payload[0] == 5
# KeyEvent heuristic: an 8-byte payload whose message-type byte is 4.
def hasFeature04(data):
    """True when the packet looks like an RFB KeyEvent message."""
    payload = getTcpPayload(data)
    return getTcpPayloadLen(data) == 8 and payload[0] == 4
def get_filelist(dir):
    """Recursively walk *dir*, running analyzePcap on every regular file.

    Captures that scapy cannot parse (Scapy_Exception) are silently
    skipped; sub-directories are descended into depth-first.
    """
    if os.path.isdir(dir):
        for entry in os.listdir(dir):
            get_filelist(os.path.join(dir, entry))
    elif os.path.isfile(dir):
        try:
            analyzePcap(dir)
        except Scapy_Exception:
            pass
# Counter of captures classified as VNC; incremented by the analysis
# function above when enough RFB features are seen.
vnc_file_count = 0
# NOTE(review): hard-coded Windows capture directory — adjust per machine.
get_filelist('C:\\Users\\dong\\Desktop\\workAtHome\\dridex\\dridexPcap')
print(vnc_file_count)
| [
"884101054@qq.com"
] | 884101054@qq.com |
0890a6bf27d61c2f9589587a2bffc15d5faec9cc | 06e10ace821eb75f88299b8721f7e42ad497ca4c | /libby/main.py | 25cb7a143748aef15cab1aebc3c877138cff324b | [] | no_license | kwarwp/henrietta | a36f4e7fecf652e6fb00600aeed92fe18acc481b | 76963014bb7d47c0713cc7b43d61fe1292794f72 | refs/heads/master | 2022-11-23T22:37:35.872643 | 2022-11-08T17:39:41 | 2022-11-08T17:39:41 | 128,998,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # henrietta.libby.main.py
class Templo:
    """Base room of the temple: remembers whether it is occupied."""

    def __init__(self):
        # Occupancy flag, toggled by entra()/sai().
        self._inside = False

    def entra(self):
        """Enter the room."""
        self._inside = True

    def sai(self):
        """Leave the room."""
        self._inside = False

    def entrou(self):
        """Return True while the room is occupied."""
        return self._inside
class EntradaDoTemplo(Templo):
    """Temple entrance; leaving it moves the visitor into the corridor."""
    def __init__(self):
        super().__init__()
        # The entrance owns the corridor it leads into.
        self.corredor = CorredorDoTemplo()
    def sai(self):
        # Leaving the entrance implicitly enters the corridor.
        Templo.sai(self)
        self.corredor.entra()
class CorredorDoTemplo(Templo):
    """Corridor connecting the entrance to the three themed rooms."""
    def __init__(self):
        super().__init__()
        # Leaf rooms reachable from the corridor.
        self.musica = SalaDoTemplo()
        self.oceano = SalaDoTemplo()
        self.floresta = SalaDoTemplo()
class SalaDoTemplo(Templo):
    """A plain leaf room; inherits all behaviour from Templo unchanged.

    The original defined an ``__init__`` that only called ``super().__init__()``,
    a no-op override; it is removed here.
    """
def mostra_templo():
    # Print the occupancy of every room.  Reads the module-level instances
    # created below: musica, oceano, floresta, entrada and its corredor.
    print("musica:{}, oceano:{}, floresta:{}, entrada: {}, cd:{}".format(
        musica.entrou(), oceano.entrou(),
        floresta.entrou(), entrada.entrou(), corredor.entrou()
        )
    )
# Build the temple layout: three stand-alone rooms plus the entrance,
# which creates its own corridor internally.
musica = SalaDoTemplo()
oceano = SalaDoTemplo()
floresta = SalaDoTemplo()
#floresta.entra()
#oceano.entra()
entrada = EntradaDoTemplo()
corredor = entrada.corredor
# Walk in, show the state, then walk out (leaving the entrance moves the
# visitor into the corridor) and show the state again.
entrada.entra()
mostra_templo()
entrada.sai()
mostra_templo()
| [
"38007182+kwarwp@users.noreply.github.com"
] | 38007182+kwarwp@users.noreply.github.com |
d01ab306dfeee67e9bda6895a5e86a518044d490 | d14032ed6f3ec3b4f149a02df9e5bf6fbd8fda44 | /app/modules/auth/active.py | d58070692fb442d44cd4994d430d41a8cfbe55ad | [] | no_license | tomszhou/pony | 6bae77c6188c710eaf82898b6e792234ec773161 | 1fa6ab22a04f3cd2c1a130803833c5c22460a382 | refs/heads/master | 2021-05-17T23:21:42.023804 | 2018-07-09T05:54:45 | 2018-07-09T05:54:45 | 250,999,515 | 1 | 0 | null | 2020-03-29T09:50:46 | 2020-03-29T09:50:45 | null | UTF-8 | Python | false | false | 1,375 | py | from django.shortcuts import redirect
from app.models.account.account import UserAccount
from app.models.account.token import AccessToken
from app.models.account.info import UserInfo
from app.modules.common.util_struct import *
from app.modules.common.secret import verify_password
from app.modules.common.easemob import register_ease_mob
def active_account_handler(request):
    """Activate a user account from a signed activation link.

    Expects ``access_token`` and ``pass_port`` query parameters, validates
    the token, marks the account active, registers it with the Easemob IM
    service and redirects to the login page.  Every failure path returns a
    JSON error response (messages are in Chinese and must stay as-is).
    """
    token = request.GET.get("access_token")
    pass_port = request.GET.get("pass_port")
    try:
        access_token = AccessToken.objects.get(access_token=token)
        # pass_port must verify against token+salt — presumably proving the
        # link was generated server-side (see secret.verify_password).
        if not verify_password(access_token.access_token+access_token.salt, pass_port):
            return json_fail_response("无效的用户请求")
        if access_token.status == 0:
            # status 0 marks an expired/revoked token per the message below.
            return json_fail_response("token失效")
    except AccessToken.DoesNotExist:
        return json_fail_response("请求无效!")
    try:
        account = UserAccount.objects.get(id=access_token.user_id)
        if account.status == 1:
            # Already active; refuse to re-activate.
            return json_fail_response("当前用户已经激活")
    except UserAccount.DoesNotExist:
        return json_fail_response("激活用户不存在")
    account.status = 1
    account.save()
    # Register the account with the Easemob IM service.
    user_info = UserInfo.query_format_info_by_user_id(account.id, use_cache=False)
    register_ease_mob(user_info['ease_mob'])
    return redirect("/auth/login")
| [
"wudong@eastwu.cn"
] | wudong@eastwu.cn |
8965f08a72396840cde95e71a464254a0bf45145 | 3bb57eb1f7c1c0aced487e7ce88f3cb84d979054 | /paetzold_nns/scripts/rankers/Run_Glavas.py | 081d7414d035f7ae7b6995b54c84c075c76a41a6 | [] | no_license | ghpaetzold/phd-backup | e100cd0bbef82644dacc73a8d1c6b757b2203f71 | 6f5eee43e34baa796efb16db0bc8562243a049b6 | refs/heads/master | 2020-12-24T16:41:21.490426 | 2016-04-23T14:50:07 | 2016-04-23T14:50:07 | 37,981,094 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | from lexenstein.rankers import *
from lexenstein.features import *
import sys
# Command-line arguments: training corpus (NOTE(review): parsed but never
# used below), test corpus, and the output file path.
victor_corpus = sys.argv[1]
test_victor_corpus = sys.argv[2].strip()
output_path = sys.argv[3].strip()
# Stanford POS tagger resources used by the context-similarity feature.
model = '/export/data/ghpaetzold/benchmarking/lexmturk/scripts/evaluators/stanford-postagger-full-2015-04-20/models/english-bidirectional-distsim.tagger'
tagger = '/export/data/ghpaetzold/benchmarking/lexmturk/scripts/evaluators/stanford-postagger-full-2015-04-20/stanford-postagger.jar'
java = '/usr/bin/java'
# Simplicity features: n-gram probabilities with varying left/right context
# sizes, all drawn from the SubIMDB 5-gram language model.
fe = FeatureEstimator()
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 1, 0, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 0, 1, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 1, 1, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 2, 0, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 0, 2, 'Simplicity')
#fe.addCollocationalFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 1, 1, 'Simplicity')
# Word-embedding similarity features (GloVe vectors, 200 dimensions).
w2vmodel = '/export/data/ghpaetzold/word2vecvectors/models/word_vectors_all_200_glove.bin'
fe.addWordVectorSimilarityFeature(w2vmodel, 'Simplicity')
fe.addWordVectorContextSimilarityFeature(w2vmodel, model, tagger, java, 'Simplicity')
# Rank the substitution candidates of every test instance.
br = GlavasRanker(fe)
ranks = br.getRankings(test_victor_corpus)
# NOTE(review): `kenlm` is not imported in this file; presumably it is
# re-exported by the wildcard lexenstein imports above — verify.
lm = kenlm.LanguageModel('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt')
o = open(output_path, 'w')
f = open(test_victor_corpus)
# For each instance: if the original target word is at least as probable as
# the best-ranked candidate, prepend it to the candidate list; otherwise
# output the ranked candidates alone.  Empty rankings fall back to target.
for rank in ranks:
    target = f.readline().strip().split('\t')[1].strip()
    targetp = lm.score(target)
    newline = ''
    if len(rank)>0:
        candp = lm.score(rank[0])
        if targetp>=candp:
            newline = target + '\t'
        else:
            newline = ''
        for r in rank:
            newline += r + '\t'
    else:
        newline = target
    o.write(newline.strip() + '\n')
o.close()
| [
"ghpaetzold@outlook.com"
] | ghpaetzold@outlook.com |
790f2c451355c35536dceb9e440223556ded9d71 | 1bad7fc3fdd9e38b7ff50a7825565b7b190fa5b7 | /qrback/migrations/0026_company_slogan.py | c8bcd1c71abe99686a4ad68070fc1d0f902b136d | [] | no_license | furkankykc/QRforAll | d4be43e403d75c86436ed9d9e2b222619ecf92b1 | 6cc0555fdc27797586628f2012523dce5212b321 | refs/heads/master | 2023-07-10T13:02:27.618792 | 2021-08-05T07:22:29 | 2021-08-05T07:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 3.0.8 on 2020-08-22 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the optional ``slogan`` char field
    (max 50 chars, nullable/blank) to the ``qrback.Company`` model."""
    dependencies = [
        ('qrback', '0025_company_counter'),
    ]
    operations = [
        migrations.AddField(
            model_name='company',
            name='slogan',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| [
"furkanfbr@gmail.com"
] | furkanfbr@gmail.com |
63389afedfb107a2984a334fcbf2d0ddd4c0af9e | d1d9b21a81a354baf1c5bc1b3db4ee38825f794b | /_eh.py | 52189ad74a92a321aac330cbb9af43576d83b004 | [] | no_license | pytsite/plugin-seo | 5c235630490fea8d0067d8c03c76a9b1678d6c51 | 486d4a8e8ab42938ca73b7bd757b7f8bee51ed78 | refs/heads/master | 2021-01-11T18:49:19.960731 | 2018-08-02T11:13:42 | 2018-08-02T11:13:42 | 79,632,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | """PytSite SEO Plugin Event Handlers.
"""
__author__ = 'Alexander Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
from pytsite import metatag as _metatag, reg as _reg
def router_dispatch():
for tag in _reg.get('seo.global_metatags', []):
_metatag.t_set(tag['name'], tag['content'])
| [
"a@shepetko.com"
] | a@shepetko.com |
9a8df1df3b7aaeea7f01727f104107208d1bf7fd | 02b1eccce01f515089ecb40862fc01c8b214fc50 | /auth.py | cb6da04d15be14443a0013fbfededf9ac506b531 | [
"MIT"
] | permissive | Nekmo/nekutils | 1f8a1f4e309733d31d16ca34c266367369f2cb45 | 1de28bb810625db1d4c575f61426ab67e7d1f1e0 | refs/heads/master | 2016-08-12T16:10:13.838188 | 2016-04-01T14:45:20 | 2016-04-01T14:45:20 | 43,179,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py |
class AuthAddress(object):
    """Lazily parsed ``[user@]host[:port]`` address.

    Each component is computed on first access and cached; components
    missing from the address evaluate to False.
    """

    def __init__(self, address):
        # None means "not parsed yet"; an absent component caches as False.
        self._user = None
        self._host = None
        self._port = None
        self._endpoint = None
        self.address = address

    @property
    def user(self):
        """User part before '@', or False when the address has none."""
        if self._user is None:
            parts = self.address.split('@')
            self._user = parts[0] if len(parts) > 1 else False
        return self._user

    @property
    def endpoint(self):
        """The ``host[:port]`` portion after the optional user part."""
        if self._endpoint is None:
            parts = self.address.split('@')
            self._endpoint = parts[1] if len(parts) > 1 else parts[0]
        return self._endpoint

    @property
    def host(self):
        """Host name: the endpoint text before the optional ':port'."""
        if self._host is None:
            self._host = self.endpoint.split(':')[0]
        return self._host

    @property
    def port(self):
        """Port string after ':', or False when no port was given."""
        if self._port is None:
            pieces = self.endpoint.split(':')
            self._port = pieces[1] if len(pieces) > 1 else False
        return self._port

    def __str__(self):
        return self.address
class UserPassword(object):
    """Lazily parsed ``user[:password]`` credential string.

    Components are cached on first access; a missing password is False.
    """

    def __init__(self, user_password):
        self._user = None
        self._password = None
        self.user_password = user_password

    @property
    def user(self):
        """User part before the first ':'.

        Bug fix: the original cached the whole ``split(':')`` list instead
        of its first element, so ``user`` returned a list.
        """
        if self._user is None:
            self._user = self.user_password.split(':')[0]
        return self._user

    @property
    def password(self):
        """Password after the first ':', or False when none was supplied."""
        if self._password is None:
            parts = self.user_password.split(':')
            self._password = parts[1] if len(parts) > 1 else False
        return self._password
| [
"contacto@nekmo.com"
] | contacto@nekmo.com |
0db417c20a5d963481fb0f4b056258b3c8389ac1 | 7d5e694aba546c166004cab8e592a000fb7283ef | /PyQt5_Udemy/01_Basic_Widgets/07_comboBox_2.py | 72affbce9bf5aa4a6a113f381cf8eab7b81f8c4c | [] | no_license | OnurKaraguler/PyQt5 | 45ffe320911f25f2ad0e318de2c7e3851db7be0c | 909546b53c0f80c1eae27c660f47cd5ded3ff1a6 | refs/heads/master | 2022-12-21T09:06:24.063816 | 2020-09-24T14:54:11 | 2020-09-24T14:54:11 | 298,299,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | import sys, os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
class Main(QWidget):
    """Demo window showing QComboBox item manipulation and its signals."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle('combo box demo')
        self.setGeometry(500,150,300,300)
        self.setFixedSize(self.size())  # Lock the window to its current size.
        self.UI()
        self.show()
    def UI(self):
        # Single entry point for building the widget tree.
        self.window()
    def window(self):
        """Build the combo box, wire its signals, and lay everything out."""
        self.cb = QComboBox()
        self.cb.addItem("C")
        self.cb.addItem("C++")
        self.cb.addItems(["Java", "C#", "Python"])
        # Fires with the new index whenever the selection changes.
        self.cb.currentIndexChanged.connect(self.selectionchange)
        print(self.cb.count())
        print(self.cb.itemText(2))
        self.cb.setItemText(2,'Onur')
        # self.cb.activated[str].connect(self.activated)
        # 'highlighted' fires while a popup entry is hovered, before selection.
        self.cb.highlighted[str].connect(self.activated)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.cb)
        self.mainLayout = QHBoxLayout()
        self.mainLayout.addLayout(self.layout)
        self.setLayout(self.mainLayout)
    def selectionchange(self, i):
        # Intentionally a no-op; the commented code shows how the combo box
        # could be inspected when the selection changes.
        pass
        # print("Items in the list are :")
        # for count in range(self.cb.count()):
        #     print(self.cb.itemText(count))
        # print("Current index", i, "selection changed ", self.cb.currentText())
        # self.cb.clear()
    def activated(self,text):
        # Echo the hovered/activated item's text.
        print(text)
if __name__=='__main__':
    # Qt bootstrap: create the application and the demo window; the event
    # loop is started by the sys.exit(app.exec_()) call that follows.
    app = QApplication(sys.argv)
    window = Main()
sys.exit(app.exec_()) | [
"onurkaraguler@hotmail.com"
] | onurkaraguler@hotmail.com |
c3765a33bb6228a494b01e9c2042906c4ff81caf | 8412b576f09202e8b07a241749d31fd6ef5380c3 | /rpc_interface.py | fe2bc978670679ced000f0f3ccc914e095611aff | [
"MIT"
] | permissive | meeh420/ngcccbase | 2d7f64e16972904a4c4a97d300f3e301632b98d0 | 1c15e9f813076151b9c758e2b8c7de086fccedc0 | refs/heads/master | 2020-12-31T01:11:23.659834 | 2013-11-18T01:10:09 | 2013-11-18T01:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,652 | py | """
rpc_interface.py
This file connects ngccc-server.py to wallet_controller.py
The main functions that this file has are to take the
JSON-RPC commands from the server and pass them through to
the wallet controller.
Note console_interface.py does a similar thing for ngccc.py
to wallet_controller.py
"""
from wallet_controller import WalletController
from pwallet import PersistentWallet
import pyjsonrpc
import json
# Module-level singletons shared by every RPC handler below: the persistent
# wallet, its data model, and the controller that performs wallet operations.
wallet = PersistentWallet()
wallet.init_model()
model = wallet.get_model()
controller = WalletController(model)
def get_asset_definition(moniker):
    """Look up the asset/color registered under *moniker*.

    Raises Exception when the moniker is unknown.
    """
    found = model.get_asset_definition_manager().get_asset_by_moniker(moniker)
    if not found:
        raise Exception("asset %s not found" % moniker)
    return found
def balance(moniker):
    """Return the balance in Satoshi for the asset/color *moniker*.

    "bitcoin" is the generic uncolored coin.
    """
    return controller.get_balance(get_asset_definition(moniker))
def newaddr(moniker):
    """Create and return a new bitcoin address for the asset/color *moniker*."""
    new_record = controller.get_new_address(get_asset_definition(moniker))
    return new_record.get_address()
def alladdresses(moniker):
    """List every address known for the asset/color *moniker*."""
    asset = get_asset_definition(moniker)
    records = controller.get_all_addresses(asset)
    return [record.get_address() for record in records]
def addasset(moniker, color_description):
    """Import a color definition.

    Useful when someone else issued a color and you want to be able to
    receive coins of that color under *moniker*.
    """
    definition = {
        "monikers": [moniker],
        "color_set": [color_description],
    }
    controller.add_asset_definition(definition)
def dump_config():
    """Return the current wallet configuration serialised as a JSON string."""
    return json.dumps(dict(wallet.wallet_config.iteritems()), indent=4)
def setval(self, key, value):
"""Sets a value in the configuration.
Key is expressed like so: key.subkey.subsubkey
"""
if not (key and value):
print "setval command expects: key value"
return
kpath = key.split('.')
try:
value = json.loads(value)
except ValueError:
print "didn't understand the value: %s" % value
return
try:
# traverse the path until we get to the value we
# need to set
if len(kpath) > 1:
branch = self.wallet.wallet_config[kpath[0]]
cdict = branch
for k in kpath[1:-1]:
cdict = cdict[k]
cdict[kpath[-1]] = value
value = branch
self.wallet.wallet_config[kpath[0]] = value
except TypeError:
print "could not set the key: %s" % key
def getval(self, key):
"""Returns the value for a given key in the config.
Key is expressed like so: key.subkey.subsubkey
"""
if not key:
print "getval command expects: key"
return
kpath = key.split('.')
cv = self.wallet.wallet_config
try:
# traverse the path until we get the value
for k in kpath:
cv = cv[k]
print json.dumps(cv)
except (KeyError, TypeError):
print "could not find the key: %s" % key
def send(moniker, address, amount):
    """Send *amount* of the asset/color *moniker* to *address*."""
    controller.send_coins(address, get_asset_definition(moniker), amount)
def issue(moniker, pck, units, atoms_in_unit):
    """Starts a new color based on <coloring_scheme> with
    a name of <moniker> with <units> per share and <atoms>
    total shares.
    """
    # Delegates directly to the shared wallet controller.
    controller.issue_coins(moniker, pck, units, atoms_in_unit)
def scan():
    """Update the database of transactions (amount in each address).
    """
    # Delegates directly to the shared wallet controller.
    controller.scan_utxos()
def history(**kwargs):
    """Return the transaction history for the color named by the
    'moniker' keyword argument.

    Bug fix: this was copied from the console interface with a bound-method
    signature (``self`` parameter, ``self.get_asset_definition`` /
    ``self.controller`` references); it now uses the module-level helpers
    like the other JSON-RPC handlers.
    """
    asset = get_asset_definition(moniker=kwargs['moniker'])
    return controller.get_history(asset)
class RPCRequestHandler(pyjsonrpc.HttpRequestHandler):
    """JSON-RPC handler for ngccc's commands.
    The command-set is identical to the console interface.
    """
    # Maps JSON-RPC method names onto the module-level handler functions.
    methods = {
        "balance": balance,
        "newaddr": newaddr,
        "alladdresses": alladdresses,
        "addasset": addasset,
        "dump_config": dump_config,
        "setval": setval,
        "getval": getval,
        "send": send,
        "issue": issue,
        "scan": scan,
        "history": history,
    }
| [
"jaejoon@gmail.com"
] | jaejoon@gmail.com |
052d73f6b96d29283777078b074e925cc5d8b8f4 | ac1fdf53359b53e183fb9b2602328595b07cf427 | /ParlAI/parlai/scripts/convert_data_to_fasttext_format.py | 70ebc7703659461c6ac56e9bc58a7c97fc00ca52 | [] | no_license | Ufukdogann/MasterThesis | 780410c5df85b789136b525bce86ba0831409233 | b09ede1e3c88c4ac3047800f5187c671eeda18be | refs/heads/main | 2023-01-24T18:09:52.285718 | 2020-11-27T16:14:29 | 2020-11-27T16:14:29 | 312,416,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:efecd6dfb74a652f16bcda15d3f0cf10eb85b19973aaaee4dabc722b6798caf9
size 3094
| [
"134679852Ufuk*"
] | 134679852Ufuk* |
95a028c6657a6a3a6252707015f2e449e578cd0c | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-servermanager/azure/mgmt/servermanager/models/__init__.py | e6e542614005fecb3af1a7d6d4f16fb74b7017be | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 2,795 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
from .encryption_jwk_resource import EncryptionJwkResource
from .gateway_status import GatewayStatus
from .gateway_resource import GatewayResource
from .gateway_profile import GatewayProfile
from .gateway_parameters import GatewayParameters
from .node_resource import NodeResource
from .node_parameters import NodeParameters
from .session_resource import SessionResource
from .session_parameters import SessionParameters
from .version import Version
from .power_shell_session_resource import PowerShellSessionResource
from .prompt_field_description import PromptFieldDescription
from .power_shell_command_result import PowerShellCommandResult
from .power_shell_command_results import PowerShellCommandResults
from .power_shell_command_status import PowerShellCommandStatus
from .power_shell_session_resources import PowerShellSessionResources
from .power_shell_command_parameters import PowerShellCommandParameters
from .prompt_message_response import PromptMessageResponse
from .power_shell_tab_completion_parameters import PowerShellTabCompletionParameters
from .power_shell_tab_completion_results import PowerShellTabCompletionResults
from .error import Error, ErrorException
from .gateway_resource_paged import GatewayResourcePaged
from .node_resource_paged import NodeResourcePaged
from .server_management_enums import (
UpgradeMode,
RetentionPeriod,
CredentialDataFormat,
PromptFieldType,
GatewayExpandOption,
PowerShellExpandOption,
)
__all__ = [
'Resource',
'EncryptionJwkResource',
'GatewayStatus',
'GatewayResource',
'GatewayProfile',
'GatewayParameters',
'NodeResource',
'NodeParameters',
'SessionResource',
'SessionParameters',
'Version',
'PowerShellSessionResource',
'PromptFieldDescription',
'PowerShellCommandResult',
'PowerShellCommandResults',
'PowerShellCommandStatus',
'PowerShellSessionResources',
'PowerShellCommandParameters',
'PromptMessageResponse',
'PowerShellTabCompletionParameters',
'PowerShellTabCompletionResults',
'Error', 'ErrorException',
'GatewayResourcePaged',
'NodeResourcePaged',
'UpgradeMode',
'RetentionPeriod',
'CredentialDataFormat',
'PromptFieldType',
'GatewayExpandOption',
'PowerShellExpandOption',
]
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
f39c368702daa9eef179818bc2e09dc0458cd47a | 863b664aa0849c9c90124e0c644490feae42b9e9 | /python3-demo/app/log.py | 70e7f2a8c22ad938b31a26f384a052bea88aa920 | [] | no_license | mingz2013/study.python | 75d856a77c752a6b6c58b8fcdbd4c2c2bb9189fe | d65017912aa8f8b2ec932518a95990d1ff0c8c6e | refs/heads/master | 2021-12-28T04:57:11.266866 | 2021-08-03T02:59:10 | 2021-08-03T02:59:10 | 78,043,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | # -*- coding: utf-8 -*-
"""
@FileName: log
@Time: 2020/5/19 15:41
@Author: zhaojm
Module Description
"""
from datetime import datetime
# from app.config import config
def register_logging():
    """Configure and return the application logger.

    Sets up console logging via basicConfig plus two daily rotating file
    handlers: logs/info-<date>.log receives everything from DEBUG up, and
    logs/error-<date>.log only ERROR and above.  The ``logs/`` directory
    must already exist (handler creation fails otherwise — TODO confirm
    deployment guarantees this).

    Fix: the original set the logger level to INFO and then immediately
    overrode it with DEBUG; the dead INFO call is removed.  Note this is
    intended to run once at import — calling it again would attach
    duplicate handlers.
    """
    import logging
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    from logging.handlers import RotatingFileHandler
    # Internal application log: one info/error file pair per day; each file
    # rotates at 1 GiB keeping up to 100 backups.
    f = datetime.now().strftime('%Y-%m-%d')
    rotating_handler1 = RotatingFileHandler('logs/info-' + f + '.log', maxBytes=1 * 1024 * 1024 * 1024, backupCount=100)
    rotating_handler2 = RotatingFileHandler('logs/error-' + f + '.log', maxBytes=1 * 1024 * 1024 * 1024,
                                            backupCount=100)
    formatter1 = logging.Formatter(
        '%(asctime)s %(levelname)s - ''in %(funcName)s [%(filename)s:%(lineno)d]: %(message)s')
    rotating_handler1.setFormatter(formatter1)
    rotating_handler2.setFormatter(formatter1)
    logger = logging.getLogger('name')
    logger.addHandler(rotating_handler1)
    logger.addHandler(rotating_handler2)
    # The error handler filters at its own level; the logger itself passes
    # everything from DEBUG upward.
    rotating_handler2.setLevel(logging.ERROR)
    logger.setLevel(logging.DEBUG)
    return logger
# Module-wide logger instance, shared by the helper functions below.
logger = register_logging()
# def _logFunc(*argl, **argd):
# # ftlog.xxx(... caller=self) for instance method
# # ftlog.xxx(... caller=cls) for @classmethod
# callerClsName = ""
# try:
# _caller = argd.get("caller", None)
# if _caller:
# if not hasattr(_caller, "__name__"):
# _caller = _caller.__class__
# callerClsName = _caller.__name__
# del argd["caller"]
# except:
# pass
# if log_level > LOG_LEVEL_DEBUG:
# print "[ ]",
# else:
# print "[" + callerClsName + "." + sys._getframe().f_back.f_back.f_code.co_name + "]",
# return argd
def _log(*argl, **argd):
_log_msg = ""
for l in argl:
if type(l) == tuple:
ps = str(l)
else:
try:
ps = "%r" % l
except:
try:
ps = str(l)
except:
ps = 'ERROR LOG OBJECT'
if type(l) == str:
_log_msg += ps[1:-1] + ' '
# elif type(l) == unicode:
# _log_msg += ps[2:-1] + ' '
else:
_log_msg += ps + ' '
if len(argd) > 0:
_log_msg += str(argd)
# ct = datetime.now().strftime('%m-%d %H:%M:%S.%f')
# _log_msg = ct + " " + _log_msg
return _log_msg
def debug(*args, **kwargs):
    """Log at DEBUG via the module logger, using _log formatting."""
    msg = _log(*args, **kwargs)
    logger.debug(msg)


def info(*args, **kwargs):
    """Log at INFO."""
    msg = _log(*args, **kwargs)
    logger.info(msg)


def error(*args, **kwargs):
    """Log at ERROR."""
    msg = _log(*args, **kwargs)
    logger.error(msg)


def exception(*args, **kwargs):
    """Log at ERROR including the active exception's traceback."""
    msg = _log(*args, **kwargs)
    logger.exception(msg)


def warn(*args, **kwargs):
    """Log at WARNING (kept for backward compatibility with callers).

    Fix: routed through logger.warning() — Logger.warn() is deprecated.
    """
    msg = _log(*args, **kwargs)
    logger.warning(msg)


def warning(*args, **kwargs):
    """Log at WARNING."""
    msg = _log(*args, **kwargs)
    logger.warning(msg)


def critical(*args, **kwargs):
    """Log at CRITICAL."""
    msg = _log(*args, **kwargs)
    logger.critical(msg)
| [
"305603665@qq.com"
] | 305603665@qq.com |
2d401730bc0c78d7c4c300b3aec2845406bb0f39 | b885eaf4df374d41c5a790e7635726a4a45413ca | /LeetCode/Session3/MinimumDepth.py | f6f31c751b0207b9ab055f367ff94a5a73cd8970 | [
"MIT"
] | permissive | shobhitmishra/CodingProblems | 2a5de0850478c3c2889ddac40c4ed73e652cf65f | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | refs/heads/master | 2021-01-17T23:22:42.442018 | 2020-04-17T18:25:24 | 2020-04-17T18:25:24 | 84,218,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | class TreeNode:
    def __init__(self, x):
        # Node value plus child links; children start empty.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """LeetCode 111 — minimum depth of a binary tree.

    The depth of the shortest root-to-leaf path, counted in nodes.  A node
    with a single child must recurse into that child: a missing child is
    not a leaf.
    """

    def minDepth(self, root: TreeNode) -> int:
        """Return 0 for an empty tree, else delegate to the helper."""
        if not root:
            return 0
        return self.minDepthHelper(root)

    def minDepthHelper(self, root: TreeNode) -> int:
        """Depth of the shortest path from *root* (non-None) to a leaf."""
        left, right = root.left, root.right
        if not left and not right:
            # Leaf: a path terminates here.
            return 1
        if not left:
            return 1 + self.minDepthHelper(right)
        if not right:
            return 1 + self.minDepthHelper(left)
        return 1 + min(self.minDepthHelper(left), self.minDepthHelper(right))
# Demo: a root with only a left child — expected minimum depth 2.
ob = Solution()
root = TreeNode(1)
root.left = TreeNode(2)
# root.right = TreeNode(20)
# root.right.left = TreeNode(15)
# root.right.right = TreeNode(7)
print(ob.minDepth(root)) | [
"shmishra@microsoft.com"
] | shmishra@microsoft.com |
b0697372f6464df3cdb5fcb923c349a26573ab08 | 02e2e17aeebe1e9e69a955f88686edab7efbe5a8 | /kiyoshi_ni_shokuhatsu/update_objects.py | 7f636d04e83eeebce0822a2e9369f71f0f8acdc7 | [
"MIT"
] | permissive | grokit/grokit.github.io | 948d893010ed3203f43a54af2d75259b69e2a895 | 4150b013eacb9bbdbc1a5046bbc8355d8306a9bc | refs/heads/master | 2021-07-17T21:06:08.951517 | 2020-04-26T18:58:22 | 2020-04-26T18:58:22 | 136,870,651 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/python3
import dcore.env_setup as env_setup
import glob
import os
def gen_generic_objects():
    """Regenerate the factory list in ObjectFactory.js.

    Collects the basename (without extension) of every OB*.js file under
    the current directory and rewrites the text between the reflection
    START/END markers with one objs.push(...) line per class.
    """
    names = set()
    for path in glob.iglob('.' + '/**/*.**', recursive=True):
        base, ext = os.path.splitext(os.path.split(path)[1])
        if ext == '.js' and base.startswith('OB'):
            names.add(base)

    lines = ['objs.push( function(){return new __file__();});'.replace('__file__', name)
             for name in names]
    env_setup.updateFileContentBetweenMarks(
        './src/objects/ObjectFactory.js',
        '// Reflect objects START.',
        '// Reflect objects END.',
        "\n".join(lines),
        False)
def gen_surfaces():
    """Regenerate the surface-texture list in OBSurface.js.

    Gathers the basenames of every .png inside a /surface/ directory and
    rewrites the text between the surface reflection markers with one
    _filesMappingToThis.add(...) line per file (glob order, duplicates
    preserved).
    """
    basenames = [os.path.split(path)[1]
                 for path in glob.iglob('.' + '/**/*.**', recursive=True)
                 if '/surface/' in path]
    pngs = [name for name in basenames if os.path.splitext(name)[1] == '.png']

    lines = ['this._filesMappingToThis.add("__file__");'.replace('__file__', name)
             for name in pngs]
    env_setup.updateFileContentBetweenMarks(
        './src/objects/OBSurface.js',
        '// Reflect objects category: surface START.',
        '// Reflect objects category: surface END.',
        "\n".join(lines),
        False)
if __name__ == '__main__':
    # Regenerate both reflection blocks in one run.
    gen_generic_objects()
    gen_surfaces()
| [
"you@example.com"
] | you@example.com |
7dfa6fce30442805c5ee7317697fc349a849a656 | 5094868ffc84f6591ee4ec6feb25b10b549aef2b | /inwin/fund/orderform.py | 2435061a767cd8d8161548e88387a2065c4af9ab | [] | no_license | 137996047/finance_trading | c8d9606cfb67525d79a9e60d5cb36b1c293fcc3c | d97edfbfbafc9eea7c47f30064b7aeb3f6e4bf55 | refs/heads/master | 2020-12-10T08:49:02.272634 | 2013-11-19T08:12:11 | 2013-11-19T08:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | '''
Created on 2013/2/13
@author: yhuang
'''
from django import forms
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, MultiField, Div, Field,Button
from crispy_forms.bootstrap import AppendedText,FormActions
# Trading-type choices for the F_TSType form field; the stored value is the
# numeric code string, the label is translated for display.
TSTYPE_CHOICES = (
    ('1', _('purchase')),
    ('2', _('withdraw')),
    ('3', _('dividend')),
    ('4', _('interest')),
)
class orderform(forms.Form):
    """Fund trading-order form with a crispy-forms bootstrap layout.

    Monetary fields use high-precision DecimalFields; the layout groups the
    core fields in a MultiField, renders the amount with a '$' suffix, and
    adds Submit/Cancel actions.
    """
    F_Date= forms.DateTimeField(label=_('Trading Date'),)
    F_SKID= forms.CharField(label=_('FundID'),max_length=8)
    F_TSType= forms.ChoiceField(label=_('Trading Type'),choices=TSTYPE_CHOICES)
    F_CurID=forms.CharField(label=_('Currency'),max_length=8)
    F_Amt=forms.DecimalField(label=_('Amount'),max_digits=28, decimal_places=4)
    F_Qty=forms.DecimalField(label=_('Quantity'),max_digits=28, decimal_places=4)
    F_Rate=forms.DecimalField(label=_('Rate'),max_digits=28, decimal_places=4)
    F_Nav=forms.DecimalField(label=_('Nav'),max_digits=28, decimal_places=4)
    F_Fee=forms.DecimalField(label=_('Fee'),max_digits=10, decimal_places=4)
    F_Exp=forms.DecimalField(label=_('Expense'),max_digits=10, decimal_places=4)
    F_Payable=forms.DecimalField(label=_('Pay Amount'),max_digits=28, decimal_places=4)
    F_Receivable=forms.DecimalField(label=_('Receive Amount'),max_digits=28, decimal_places=4)
    F_Note=forms.CharField(label=_('Note'),max_length=128)
    def __init__(self, *args, **kwargs):
        # Crispy-forms rendering configuration; built before delegating to
        # the base Form initialiser (which follows this layout block).
        self.helper = FormHelper()
        self.helper.form_id = 'orderform'
        self.helper.form_class = 'blueForms'
        self.helper.form_method = 'post'
        self.helper.form_action = 'submit_survey'
        self.helper.layout = Layout(
            MultiField(
                'first arg is the legend of the fieldset',
                Div('F_Date',
                    style="background: white;", title="Explication title", css_class="bigdivs"
                ),
                'F_SKID',
                'F_TSType',
                'F_CurID',
                'F_Qty',
                'F_Rate',
                'F_Nav',
                'F_Fee',
                'F_Exp',
                'F_Payable',
                'F_Receivable',
            ),
            # Render the amount with a '$' suffix appended to the input.
            AppendedText('F_Amt', '$', active=True),
            Field('F_Note', id="password-field", css_class="passwordfields", title="Explanation"),
            #Field('slider', template="custom-slider.html"),
            ButtonHolder(
                Submit('submit', 'Submit', css_class='button white')
            ),
            FormActions(
                Submit('save', 'Save changes'),
                Button('cancel', 'Cancel')
            )
        )
super(orderform, self).__init__(*args, **kwargs) | [
"yingchauhuang@gmail.com"
] | yingchauhuang@gmail.com |
41fd56496294aa28b4df70baf4467a20cfc53bc6 | ea5b4fdf353e76c44a8de71fa16aa8bae88c726a | /heap/613.highFive.py | 9f8641a749e1afe5a6f792b433db444a691bcab7 | [] | no_license | umnstao/lintcode-practice | dd61c66950ae89abec000063fe0d1a33f13ce6ec | e73b495e23c4dcb0421ab09133e573aaba23c431 | refs/heads/master | 2021-01-23T02:48:26.294160 | 2018-03-27T21:54:26 | 2018-03-27T21:54:26 | 86,024,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | '''
Definition for a Record
class Record:
def __init__(self, id, score):
self.id = id
self.score = score
'''
class Solution:
    # @param {Record[]} results a list of <student_id, score>
    # @return {dict(id, average)} find the average of 5 highest scores for each person
    # <key, value> (student_id, average_score)
    def highFive(self, results):
        """Bucket scores per student id, keep only each student's five
        largest values, then average each bucket over five."""
        # NOTE: 'hash' shadows the builtin of the same name.
        hash = dict()
        for r in results:
            if r.id not in hash:
                hash[r.id] = []
            hash[r.id].append(r.score)
            if len(hash[r.id]) > 5:
                # Locate the minimum of the six scores (start at the newest
                # entry, index 5, then scan the first five) and evict it.
                index = 5
                for i in range(5):
                    if hash[r.id][i] < hash[r.id][index]:
                        index = i
                hash[r.id].pop(index)
        #print hash
        # Average the surviving (top five) scores per student; assumes every
        # student has at least five records — TODO confirm with the caller.
        answer = {}
        for id,score in hash.items():
            answer[id] = sum(score)/5.
return answer | [
"umnstao@gmail.com"
] | umnstao@gmail.com |
1ef60f5fc25c6b4427ff0a3202d65fbdb4d2172c | f039b3665b5ca29a5e197ed05a9860f9180a16aa | /maxProfit.py | 946506e98ea2316952ca5664c4aa99c22eb4f464 | [] | no_license | NeilWangziyu/HighPerformancwAlgorithm | 895a0e9d78aee9a0eacc6f81352f8fde10b9310b | 7e3fba6879bbe25b738989ef550fd71c7a49dab0 | refs/heads/master | 2020-04-17T04:36:35.178522 | 2019-08-09T16:11:34 | 2019-08-09T16:11:34 | 166,237,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
profit = 0
for i in range(1, len(prices)):
if prices[i] - prices[i - 1] > 0:
profit += prices[i] - prices[i - 1]
return profit
| [
"noreply@github.com"
] | NeilWangziyu.noreply@github.com |
45978b08a29506f6bd384b7c4cc8c361fc40d77b | 62442c2547b22aae27f3bb3a0d3f84a9e8e535a0 | /python/djangopro/mysite/polls/admin.py | 55708fd52ed1fb701b7c1cd1b86a4096caca8aef | [] | no_license | jsdelivrbot/demos | 935729fe9afde33709c4e4e74863b64c16c33b33 | 01a97eda371c2d832c9f2c907a945310662e0710 | refs/heads/master | 2020-04-10T10:35:44.039560 | 2018-12-08T10:53:29 | 2018-12-08T10:53:29 | 160,970,588 | 0 | 0 | null | 2018-12-08T19:13:00 | 2018-12-08T19:13:00 | null | UTF-8 | Python | false | false | 872 | py | from mysite.polls.models import Poll
from mysite.polls.models import Choice
from django.contrib import admin
# Alternative layout: StackedInline renders one full fieldset per choice.
#class ChoiceInline(admin.StackedInline):
class ChoiceInline(admin.TabularInline):
    """Inline editor so Choices can be created/edited on the Poll admin page."""
    model = Choice
    # Number of empty extra choice rows shown by default.
    extra = 3
class PollAdmin(admin.ModelAdmin):
    """Admin configuration for Poll: grouped fields, inline choices,
    list columns and a pub_date filter."""
    # Show the fields in the following order
    #fields = ['pub_date', 'question']
    # Group fields into labelled sections; the date section is collapsible.
    fieldsets = [
        (None, {'fields': ['question']}),
        ('Date information', {'fields':['pub_date'], 'classes':['collapse']}),
    ]
    # Quickly add new choices while adding the poll
    inlines = [ChoiceInline]
    # Columns shown in the 'Select a Poll to change' list view.
    list_display = ('question', 'pub_date', 'was_published_today')
    # Sidebar filter/facet for the fields below.
    list_filter = ['pub_date']
# Register both models so they appear in the admin site.
admin.site.register(Poll, PollAdmin)
admin.site.register(Choice)
| [
"amjedonline@gmail.com"
] | amjedonline@gmail.com |
2374b67ce1f63682539314996c2c82b71ee4b6df | cc8f8030d143f21e885995f97fd146d3dcc5fa40 | /sbb/tools/instances.py | b6775a7f93f1bdd48286bca7253a32349a5af83c | [
"MIT"
] | permissive | DanielLSM/train-sbb-challenge | 6719cb197df4eb16ef56e0ee3dbe267400cc5fcf | 9779b178c1e31f445d136d567e9f62390b0d2c5e | refs/heads/master | 2020-03-28T20:54:50.023088 | 2018-10-17T16:25:46 | 2018-10-17T16:25:46 | 149,113,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,913 | py | import logging
import sbb.tools.logger
import pprint
import networkx
from collections import defaultdict
from itertools import chain, product, starmap
from functools import partial
from sbb.tools.parsers import parse_input_paths, parse_json_file
from sbb.tools import input_dir, input_samples
from sbb.tools.route_graph import generate_route_graphs
class Instances:
    """API wrapper around one SBB problem instance.

    Loads the instance JSON, builds its route graphs, and offers helpers
    to enumerate nodes, edges (by sequence number) and simple paths.
    """

    def __init__(self, ninstance: int = 0, input_dir: str = input_samples):
        self._input_dir = input_dir
        self._ipaths = parse_input_paths(input_dir)
        self.logger = logging.getLogger('APIlogger')
        self.logger.setLevel(logging.INFO)
        self.data = self._load_data(ninstance)
        self.route_graphs = generate_route_graphs(self.data)
        self._fname = self.data['label']
        self._hash = self.data['hash']
        self._generate_route2markers2sections()
        self._generate_route2sections2nodes()
        self._generate_service_intentions()
        self.logger.info('api for the instances initialized')

    def __str__(self):
        return 'API to interface instance {}'.format(self._fname)

    def __getitem__(self, key):
        # Subscript access is delegated to the raw instance data dict.
        return self.data[key]

    def keys(self):
        return self.data.keys()

    def _load_data(self, ninstance: int) -> dict:
        """Parse and return the JSON data of instance index *ninstance*."""
        try:
            self.logger.info('loaded {}'.format(
                self._ipaths[ninstance].parts[-1]))
            return parse_json_file(self._ipaths[ninstance])
        except (IndexError, ValueError) as e:
            # Fixes: Logger has no ERROR() method (was self.logger.ERROR),
            # an out-of-range index raises IndexError (ValueError alone
            # never caught it), and the last valid index is len - 1.
            self.logger.error("select an instance from 0 to {}".format(
                len(self._ipaths) - 1))
            raise e

    def _generate_service_intentions(self) -> None:
        """Create a dict mapping train id -> its service intention."""
        self.service_intentions = {
            train['id']: train for train in self.data['service_intentions']
        }

    def _generate_route2markers2sections(self) -> None:
        """Create a dict route_id -> section_marker -> list of sequence
        numbers of the route sections carrying that marker.
        """
        # TODO: add route_alternative_marker_at_exit to compute paths
        self.route2marker2sections = {}
        for route in self.data['routes']:
            self.route2marker2sections[route['id']] = defaultdict(list)
            for route_path in route['route_paths']:
                for route_section in route_path['route_sections']:
                    # Only the first marker of a section is indexed here.
                    if 'section_marker' in route_section.keys():
                        self.route2marker2sections[route['id']][route_section[
                            'section_marker'][0]].append(
                                route_section['sequence_number'])
        # TODO: put more things such as time restrictions in this dict

    def _generate_route2sections2nodes(self) -> None:
        """Create a dict route_id -> sequence_number -> {'in': node,
        'out': node} for every edge of the route graph.
        """
        self.route2sections2nodes = {}
        for key in self.route_graphs.keys():
            self.route2sections2nodes[key] = {}
            edges_info = self.route_graphs[key].edges()
            for edges in edges_info:
                self.route2sections2nodes[key][edges_info[edges[0], edges[1]][
                    'sequence_number']] = {
                        'in': edges[0],
                        'out': edges[1]
                    }

    def paths_from_nodes(self, route_id, nodes) -> list:
        """Given a list of nodes IN ORDER, return every path visiting
        them, as lists of edge sequence numbers (cartesian product of the
        per-segment alternatives).
        """
        ppaths = []
        for i in range(len(nodes) - 1):
            paths = self.generate_edge_paths(route_id, nodes[i], nodes[i + 1])
            # Fix: was "i is not 0" -- identity comparison on ints is
            # implementation-defined; use a value comparison.
            if i != 0:
                pathsi = []
                for path in list(product(ppaths, paths)):
                    pathsi.append(list(chain(*path)))
                ppaths = pathsi
            else:
                ppaths = paths
        return ppaths

    def paths_from_arcs(self, route_id, arcs) -> list:
        """Same as paths_from_nodes, but the waypoints are given as arcs."""
        nodes = self.transform_arcs2nodes(route_id, arcs)
        return self.paths_from_nodes(route_id, nodes)

    def transform_arcs2nodes(self, route_id, arcs) -> list:
        """Expand each arc into its 'in' and 'out' node, preserving order."""
        nodes = []
        for arc in arcs:
            nodes.append(self.route2sections2nodes[route_id][arc]['in'])
            nodes.append(self.route2sections2nodes[route_id][arc]['out'])
        return nodes

    def nodes(self, route_id) -> list:
        return list(self.route_graphs[route_id].nodes())

    def edges(self, route_id) -> list:
        return list(self.route_graphs[route_id].edges())

    def edges_sn(self, route_id) -> list:
        # Returns the graph's edge view (indexable by (u, v) for edge data).
        return self.route_graphs[route_id].edges()

    def generate_edge_paths(self, route_id: int, start: str, end: str) -> list:
        """All simple paths from *start* to *end*, as lists of edge
        sequence numbers."""
        all_paths = partial(networkx.algorithms.simple_paths.all_simple_paths,
                            self.route_graphs[route_id])
        path_iter = all_paths(start, end)
        paths = []
        edges_info = self.edges_sn(route_id)
        for path in path_iter:
            edges_path = []
            for i in range(len(path) - 1):
                edges_path.append(
                    edges_info[path[i], path[i + 1]]['sequence_number'])
            paths.append(edges_path)
        return paths

    def generate_paths(self, route_id: int, start: str, end: str) -> list:
        """All simple paths from *start* to *end*, as lists of nodes."""
        all_paths = partial(networkx.algorithms.simple_paths.all_simple_paths,
                            self.route_graphs[route_id])
        path_iter = all_paths(start, end)
        return list(path_iter)

    def generate_all_paths(self, route_id: int) -> list:
        """All simple paths from every source (in-degree 0) to every sink
        (out-degree 0) of the route graph, as lists of nodes."""
        roots = (
            v for v, d in self.route_graphs[route_id].in_degree() if d == 0)
        leaves = (
            v for v, d in self.route_graphs[route_id].out_degree() if d == 0)
        all_paths = partial(networkx.algorithms.simple_paths.all_simple_paths,
                            self.route_graphs[route_id])
        return list(
            chain.from_iterable(starmap(all_paths, product(roots, leaves))))

    def from_paths_to_arcs(self, route_id: int, path: list) -> list:
        """Translate a node path into its list of edge sequence numbers."""
        edges_info = self.edges_sn(route_id)
        edges_path = []
        for i in range(len(path) - 1):
            edges_path.append(
                edges_info[path[i], path[i + 1]]['sequence_number'])
        return edges_path
if __name__ == "__main__":
i = Instances() | [
"daniellsmarta@gmail.com"
] | daniellsmarta@gmail.com |
c8fad1f100e4968fe5c63524938cdcb4c7395128 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_pattern05.py | 4ffee075709f4ba1541f72c152fcaf13ae9b4934 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 3,121 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('chart_pattern05.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        chart.axis_ids = [110902272, 110756608]

        # Eight identical data columns (A..H), each holding three 2s.
        for column in 'ABCDEFGH':
            worksheet.write_column(column + '1', [2, 2, 2])

        # One series per column, each with its own fill pattern; only the
        # first series specifies an explicit background colour.
        series_patterns = [
            ('A', 'percent_25', '#C00000', '#FFFFFF'),
            ('B', 'percent_75', '#FF0000', None),
            ('C', 'dark_upward_diagonal', '#FFC000', None),
            ('D', 'narrow_horizontal', '#FFFF00', None),
            ('E', 'dashed_vertical', '#92D050', None),
            ('F', 'horizontal_brick', '#00B050', None),
            ('G', 'shingle', '#00B0F0', None),
            ('H', 'large_check', '#0070C0', None),
        ]
        for column, pattern, fg_color, bg_color in series_patterns:
            pattern_opts = {'pattern': pattern, 'fg_color': fg_color}
            if bg_color is not None:
                pattern_opts['bg_color'] = bg_color
            chart.add_series({
                'values': '=Sheet1!${0}$1:${0}$3'.format(column),
                'pattern': pattern_opts,
            })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
b75df8010682fe9df788a973a54b7c29ad65d8bb | 11aac96a622eadf3992d12659eaf0a450b9398bf | /Assignment/settings.py | 95126fea50afc7271ec767a5834dbca2a8746130 | [] | no_license | naveenkumar2505/Assignment | 36604d0545c10a4bcce5606ea26dbbf1c7596159 | 635c64c8116ad17a2893aa86f498cf8ecdc1f944 | refs/heads/master | 2020-06-01T03:54:23.802759 | 2019-06-07T05:57:41 | 2019-06-07T05:57:41 | 190,622,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | """
Django settings for Assignment project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'upo7q5y25)mlvl#%@q7r%*37h$iq2am71j)nm21qnecon49kj_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'User.apps.UserConfig',
'rest_framework'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
#'DEFAULT_PAGINATION_CLASS': 'apps.core.pagination.StandardResultsSetPagination',
'PAGE_SIZE': 5
}
ROOT_URLCONF = 'Assignment.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Assignment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE':'django.db.backends.mysql',
'NAME':'assigndb',
'USER':'root',
'PASSWORD':'root'
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
#
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"naveentechhie@gmail.com"
] | naveentechhie@gmail.com |
dbd7245e581ae91c182ba9ce192bb227f29d3af5 | e4266d7995c6952a374037e6809678a28e2972f4 | /abs/abs_project_task_template/models/task_template.py | 5fc1f01895efab778ea89c021e1665e3f7aa96f9 | [] | no_license | h3llopy/addons_12 | cdd3957faa46be9beb20239b713bcde7d3fb24bf | 7440086ae976754b0d268986519705cbc9ea0a8a | refs/heads/master | 2023-08-31T12:33:54.645648 | 2021-11-01T02:21:06 | 2021-11-01T02:21:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,772 | py | # -*- coding: utf-8 -*-
#################################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2018-Today Ascetic Business Solution <www.asceticbs.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from odoo import api,fields,models,_
#New Class Is Created For Task Template.
class TaskTemplate(models.Model):
    """Reusable task template; active templates are copied into every new
    project that enables 'Use Active Task Templates'."""
    _name='project.task.template'

    # Task title copied into the generated project.task.
    name = fields.Char(string='Task Title', track_visibility='always', required=True, help=" The Title Of Task")
    # Default assignee for tasks created from this template.
    user_id = fields.Many2one('res.users', string='Assigned to', index=True, track_visibility='always', help="Many2one Field Related To res user")
    date_deadline = fields.Date(string='Deadline', copy=False, help="Date Field For Deadline")
    description = fields.Html(string='Description', help="Html Field For Description")
    # Inactive templates are ignored when a project is created.
    active = fields.Boolean(default=True, help="Boolean Field For Task Status")
#Class Is Extended For Add New Feature Of Task Template.
class Project(models.Model):
    """Extends project.project: optionally seed new projects with tasks
    built from the active task templates."""
    _inherit = 'project.project'

    use_task_template = fields.Boolean(string="Use Active Task Templates", help="Use Task Templates for creating Tasks of the Project")

    @api.model
    def create(self, vals):
        """On project creation, copy every active template into a task."""
        project = super(Project, self).create(vals)
        if vals.get('use_task_template'):
            templates = self.env['project.task.template'].search(
                [('active', '=', True)])
            for template in templates:
                self.env['project.task'].create({
                    'name': template.name,
                    'user_id': template.user_id.id,
                    'date_deadline': template.date_deadline,
                    'description': template.description,
                    'project_id': project.id,
                })
        return project
| [
"diegobgajardo@gmail.com"
] | diegobgajardo@gmail.com |
66b94cd88087c441f60c732183e04658634fc47f | 3dff4bef08954fadb7cc83c4f212fffa81b7d27e | /pub_site/src/pub_site/transfer/forms.py | e39f18c35f5843e52340d97b53ee14bb5ffa4b0e | [] | no_license | webee/pay | 3ec91cb415d9e3addabe961448533d861c0bd67a | b48c6892686bf3f9014bb67ed119506e41050d45 | refs/heads/master | 2020-04-29T14:31:09.643993 | 2016-02-02T07:14:14 | 2016-02-02T07:14:14 | 176,198,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from flask.ext.wtf import Form
from pub_site.commons import amount_less_than_balance, MyRegexp
from wtforms import StringField, SubmitField, ValidationError, DecimalField
from wtforms.validators import DataRequired, NumberRange, Length
from pub_site import dba
def username_should_exists(form, field):
    """WTForms validator: reject a username unknown to the database."""
    if not dba.is_username_exists(field.data):
        raise ValidationError(u"用户不存在")
class TransferForm(Form):
    """Transfer form: target username, amount (yuan) and an optional note."""
    # Target account must already exist (validated against the database).
    username = StringField(u"用户名", validators=[DataRequired(u"用户名不能为空"), username_should_exists])
    # Amount: digits with at most two decimal places, positive, and not
    # exceeding the current balance.
    # Fixes: the regex dot was unescaped (r'(.\d{1,2})' matched any
    # character, e.g. "8x88"), and Decimal(0.01) inherited binary-float
    # error -- construct it from the string instead.
    amount = DecimalField(u"转账金额(元)",
                          validators=[DataRequired(u'请输入数字,小数点后最多2位, 例如"8.88"'), MyRegexp(r'^\d+(\.\d{1,2})?$', message=u'请输入数字,小数点后最多2位, 例如"8.88"'),
                                      amount_less_than_balance,
                                      NumberRange(min=Decimal('0.01'), message=u"提现金额必须大于0")])
    # Optional remark, at most 50 characters.
    info = StringField(u"备注", validators=[Length(max=50, message=u"备注不能超过50个字")])
    submit = SubmitField(u"提交")
| [
"yiwang@lvye.com"
] | yiwang@lvye.com |
19ef3e931eaaa31f4ee7726864baf8d4c408bd89 | a5d22c99e781270317078f8980c934bcc71e6e8b | /samples/misc/opencv_samples/mqtt_cam/config.py | c1ba8083f6f40c04138fb58ff10003c9d0deedcf | [
"Apache-2.0"
] | permissive | aivclab/vision | dda3b30648b01c2639d64a016b8dbcfccb87b27f | 06839b08d8e8f274c02a6bcd31bf1b32d3dc04e4 | refs/heads/master | 2023-08-21T22:35:10.114394 | 2022-11-02T10:14:08 | 2022-11-02T10:14:08 | 172,566,233 | 1 | 3 | Apache-2.0 | 2023-08-16T05:11:30 | 2019-02-25T19:00:57 | Python | UTF-8 | Python | false | false | 979 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
"""
from pathlib import Path
from warg import NOD
# Nested configuration for the MQTT camera pipeline: broker connection,
# capture source, processing topics and capture storage.
MQTT_CAM_CONFIG = NOD(
    mqtt=NOD(
        # broker may also be an IP address like 192.168.1.74
        broker="localhost", port=1883, QOS=1
    ),
    camera=NOD(
        video_source=0,
        fps=30,  # capture/publish rate in frames per second
        mqtt_topic="video/video0/capture",
        # If your desired camera is listed as source 0 you will configure video_source: 0. Alternatively
        # you can configure the video source as an MJPEG or RTSP stream. For example in config.yml you may
        # configure something like video_source: "rtsp://admin:password@192.168.1.94:554/11" for a RTSP
        # camera.
    ),
    processing=NOD(
        # consume raw frames, republish the processed (rotated) ones
        subscribe_topic="video/video0/capture",
        publish_topic="video/video0/capture/rotated",
    ),
    save_captures=NOD(
        mqtt_topic="video/video0/capture", captures_directory=Path("captures")
    ),
)
| [
"christian.heider@alexandra.dk"
] | christian.heider@alexandra.dk |
42e17a6a17075549bcba19d12ccfd1b3f4983c35 | f525a67f7920d6d35077e60bbe3012ffd455ebdb | /sorting/reorder_data_log_files.py | d2cffa475979985bf5fd5fcbac86803e9689541e | [] | no_license | uma-c/CodingProblemSolving | c29671a76762ba34af0cab05d68e86f798616cab | b7d3b9e2f45ba68a121951c0ca138bf94f035b26 | refs/heads/master | 2023-05-02T05:38:43.666829 | 2021-05-19T02:23:13 | 2021-05-19T02:23:13 | 286,168,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | '''
You have an array of logs. Each log is a space delimited string of words.
For each log, the first word in each log is an alphanumeric identifier. Then, either:
Each word after the identifier will consist only of lowercase letters, or;
Each word after the identifier will consist only of digits.
We will call these two varieties of logs letter-logs and digit-logs. It is guaranteed that each log has at least one word after its identifier.
Reorder the logs so that all of the letter-logs come before any digit-log. The letter-logs are ordered lexicographically ignoring identifier, with the identifier used in case of ties. The digit-logs should be put in their original order.
Return the final order of the logs.
'''
from typing import List
def reorder_logs(logs: List[str]) -> List[str]:
    """Reorder logs so that all letter-logs precede all digit-logs.

    Letter-logs are sorted lexicographically by their content, with the
    identifier breaking ties; digit-logs keep their original relative
    order.

    Fix: the parameter annotation was ``List[int]`` although the function
    receives (and returns) a list of strings.

    :param logs: space-delimited log lines "<identifier> <words...>", where
        the words after the identifier are all digits or all lowercase
        letters (per the problem statement).
    :return: the reordered log lines.
    """
    letter_logs = []  # (content, identifier) pairs -> natural sort key
    digit_logs = []   # preserved in input order
    for log in logs:
        identifier, _, content = log.partition(' ')
        # Any word after the identifier decides the log kind; check the
        # first character of the content.
        if content[0].isdigit():
            digit_logs.append(log)
        else:
            letter_logs.append((content, identifier))
    letter_logs.sort()
    # Rebuild "<identifier> <content>" for the sorted letter-logs.
    return [identifier + ' ' + content
            for content, identifier in letter_logs] + digit_logs
"chowtoori@live.com"
] | chowtoori@live.com |
bccfed5d348f8c095814aa00c8d5e77feb4040ee | 05e454259b44882a1bfff0ba82475374b36b74f0 | /vision/utils/video_writer.py | 099a8a4933882341dd3d3cd0c9295757e019ac70 | [
"BSD-3-Clause"
] | permissive | TeamAutonomousCarOffenburg/TACO_2017 | ec49f539528388f28114cca9787c1ab7db880e64 | 724c37188209818c22046d2229f67d882c36e2f4 | refs/heads/master | 2021-08-14T18:33:24.203830 | 2017-11-16T13:48:57 | 2017-11-16T13:48:57 | 110,350,009 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | import os
import time as t
from threading import Thread
import utils.tools as tools
import cv2
class VideoWriter:
    """Writes frames pulled from a queue into an MJPG .avi file on a
    background thread.

    Lifecycle: construct, then ``init()`` (creates the cv2 writer and the
    output file name), ``start()`` (launches the drain thread), ``stop()``
    (waits for the queue to empty, stops the thread, releases the writer).
    """

    def __init__(self, im_width, im_height, folder, video_queue):
        self.video_queue = video_queue
        self.folder = folder
        self.im_width = im_width
        self.im_height = im_height
        self.writer = None   # cv2.VideoWriter, created in init()
        self.file = None     # output file path, set in init()
        self.fps = None
        self.stopped = True  # True until start() is called
        self.thread = Thread(target=self.update, args=())

    def init(self, file_prefix="output", fps=30):
        """Create the cv2.VideoWriter for a fresh timestamped .avi file."""
        filename = "{}_{}.avi".format(file_prefix, tools.get_timestamp_ms())
        self.file = os.path.join(self.folder, filename)
        self.fps = fps
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        self.writer = cv2.VideoWriter(self.file, fourcc,
                                      float(self.fps), (self.im_width,
                                                        self.im_height))
        return self

    def start(self):
        """Start the background writer thread (call init() first)."""
        self.stopped = False
        self.thread.start()
        print("[VIDEO WRITER] Thread for writing video started")
        return self

    def update(self):
        """Thread body: drain frames from the queue into the writer."""
        while True:
            # if self.stopped and self.video_queue.empty():
            if self.stopped:
                return
            # wait for element in queue
            try:
                image = self.video_queue.get_nowait()
            except Exception as e:
                # Queue empty: back off briefly, then poll again.
                t.sleep(0.02)
                continue
            self.writer.write(image)

    def stop(self):
        """Wait for the queue to drain, stop the thread, release the writer.

        NOTE(review): update() may still be inside writer.write() when
        release() runs, and a frame enqueued between empty() and the flag
        flip could be dropped -- confirm whether this race matters here.
        """
        while not self.video_queue.empty():
            t.sleep(0.1)
        self.stopped = True
        self.writer.release()
        print('[VIDEO WRITER] Video written to file: {}'.format(self.file))

    def is_running(self):
        # True between start() and stop().
        return not self.stopped

    def is_thread_alive(self):
        return self.thread.is_alive()

    def get_video_file_name(self):
        return self.file
| [
"jensfischer95@gmail.com"
] | jensfischer95@gmail.com |
a9ea65ef0f77600f090da1acf54b75f98d380c1c | c2643d37464d847facfaa39eca662578b6744c39 | /async_www/app.py | a365c66be8802682fdcaba1b4ff00589baf96892 | [] | no_license | Jelair/TMS_back_end | c85cd8dd74792a88354c8c2d85ff7e99dfd92677 | be267a70741cf7b6810bcc165fbe383c809f24ff | refs/heads/master | 2021-09-07T08:36:58.711793 | 2018-02-20T11:52:11 | 2018-02-20T11:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,866 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
-------------------------------------------------
File Name: app
Description :
Author : simplefly
date: 2018/2/3
-------------------------------------------------
Change Activity:
2018/2/3:
-------------------------------------------------
"""
__author__ = 'simplefly'
from jinja2 import Environment, FileSystemLoader
import asyncio, os, json, time
from datetime import datetime
from async_www import orm
from aiohttp import web
from async_www.config import configs
import logging; logging.basicConfig(level=logging.INFO)
from async_www.coreweb import add_routes, add_static
from async_www.handlers import cookie2user, COOKIE_NAME
# 初始化渲染模板
def init_jinja2(app, **kw):
logging.info('init jinja2...')
options = dict(
autoescape = kw.get('autoescape', True),
block_start_string = kw.get('block_start_string', '{%'),
block_end_string = kw.get('block_end_string', '%}'),
variable_start_string = kw.get('variable_start_string', '{{'),
variable_end_string = kw.get('variable_end_string', '}}'),
auto_reload = kw.get('auto_reload', True)
)
path = kw.get('path', None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
logging.info('set jinja2 template path:%s' % path)
env = Environment(loader=FileSystemLoader(path), **options)
filters = kw.get('filters', None)
if filters is not None:
for name, f in filters.items():
env.filters[name] = f
app['__templating__'] = env
@asyncio.coroutine
def logger_factory(app, handler):
@asyncio.coroutine
def logger(request):
logging.info('Request: %s %s' % (request.method, request.path))
return (yield from handler(request))
return logger
@asyncio.coroutine
def auth_factory(app, handler):
@asyncio.coroutine
def auth(request):
logging.info('check user: %s %s' % (request.method, request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
if cookie_str:
user = yield from cookie2user(cookie_str)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return (yield from handler(request))
return auth
@asyncio.coroutine
def data_factory(app, handler):
@asyncio.coroutine
def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = yield from request.json()
logging.info('request json: %s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencoded'):
request.__data__ = yield from request.post()
logging.info('request form: %s' % str(request.__data__))
return (yield from handler(request))
return parse_data
@asyncio.coroutine
def response_factory(app, handler):
@asyncio.coroutine
def response(request):
logging.info('Response handler...')
r = yield from handler(request)
if isinstance(r, web.StreamResponse):
return r
if isinstance(r, bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r, str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
resp.content_type = 'application/json;charset=utf-8'
return resp
else:
r['__user__'] = request.__user__
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, int) and t >= 100 and t < 600:
return web.Response(t)
if isinstance(r, tuple) and len(r) == 2:
t, m = r
if isinstance(t, int) and t >= 100 and t < 600:
return web.Response(t, str(m))
# default
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
return u'1分钟前'
if delta < 3600:
return u'%s分钟前' % (delta // 60)
if delta < 86400:
return u'%s小时前' % (delta // 3600)
if delta < 604800:
return u'%s天前' % (delta // 86400)
dt = datetime.fromtimestamp(t)
return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)
@asyncio.coroutine
def init(loop):
yield from orm.create_pool(loop=loop, **configs.db)
app = web.Application(loop=loop, middlewares=[
logger_factory, auth_factory, response_factory
])
init_jinja2(app, filters=dict(datetime=datetime_filter))
add_routes(app, 'handlers')
add_static(app)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | [
"1059229782@qq.com"
] | 1059229782@qq.com |
6823ae91cfcabb0d9c1f3bdc24adb4ffb866e73c | 3e30f89790a93e715ef7eb396575e28ae5849cf0 | /SurfaceTest.py | d4faf5549dc422114ab10b5dc19c567fb65d69b3 | [] | no_license | sulantha2006/Surface | 090d5d56fbe778de0b6c3a75cfb6cc3a2ebe12f3 | ccc58cbd206da5063c880927c8ba130b6fe6e097 | refs/heads/master | 2021-01-10T18:49:45.489158 | 2013-12-11T19:01:48 | 2013-12-11T19:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | __author__ = 'sulantha'
import numpy
from mayavi.mlab import *
from mayavi import mlab
def test_contour3d():
    """Build a 3-D scalar field and render four transparent iso-contours."""
    grid_x, grid_y, grid_z = numpy.ogrid[-100:100:1, -100:100:1, -100:100:1]
    # Scalar field 2*x*y*z^2 + x*y*z sampled on the open grid.
    field = (grid_x * 2 * grid_y * grid_z * grid_z) + (grid_x * grid_y * grid_z)
    return contour3d(field, contours=4, transparent=True)
# Render the contour scene and enter the interactive GUI event loop.
test_contour3d()
mlab.show()
| [
"sulantha.s@gmail.com"
] | sulantha.s@gmail.com |
81dac263c0eb19bc1f2482b80239c0c651db6ed4 | 45fd54ecc12334806b4a285ca3886f3fe0d191c4 | /tests/fixtures/entities.py | 3d1cb92dff353405a98989dc76ded7ab47a091a7 | [
"BSD-3-Clause"
] | permissive | azthief/pontoon | 124fcb4b36ecbe7dc288df8d49ac4ed8e02b9d71 | 14f9de9b020e45c375311181ed32e487e76d28f8 | refs/heads/master | 2021-08-23T21:05:16.511013 | 2017-12-01T15:01:30 | 2017-12-01T15:01:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # -*- coding: utf-8 -*-
import functools
import pytest
from pontoon.base.models import Entity
@pytest.fixture
def entity0(resource0):
"""Entity 0"""
return Entity.objects.get(resource=resource0, string="entity0")
@pytest.fixture
def entity1(resource1):
"""Entity 1"""
return Entity.objects.get(resource=resource1, string="entity1")
@pytest.fixture
def entity_factory(factory):
    """Entity factory

    create entities in a hurry!

    Provides an entity factory function that accepts the following args:

    :arg int `batch`: number of entities to instantiate, defaults to len of
        `batch_kwargs` or 1
    :arg list `batch_kwargs`: a list of kwargs to instantiate the entities
    """
    def _default_string(instance, index):
        # Give unnamed entities an index-based default string.
        if not instance.string:
            instance.string = "Entity %s" % index
    return functools.partial(
        factory, Model=Entity, instance_attrs=_default_string)
| [
"ryan@synca.io"
] | ryan@synca.io |
a00525930a6cd48eadc9c0a8846ad4b1f4204286 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_070/ch159_2020_06_21_20_15_37_551744.py | 5e4469af1a68b27e0cd0f94e4e1993cac9b58f68 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import json
# Load the inventory ("estoque") from disk and report the total stock value:
# the sum of quantity * unit price over every product.
with open('estoque.json', 'r') as arquivo:
    # json.load parses straight from the file handle -- no need to read()
    # the whole file into a string first.
    estoque = json.load(arquivo)
valorfinal = sum(
    produto["quantidade"] * produto["valor"]
    for produto in estoque["produtos"]
)
print(valorfinal)
"you@example.com"
] | you@example.com |
4ce36af0556c38c0b99aa0f46457cfeee2e0ccb1 | 318572c21d892155e7418e7eee88057a4f3c721d | /test/test_csr.py | a5958650d7ea97ccde7757fc32b9ade9bdb92cdc | [
"BSD-2-Clause"
] | permissive | goran-mahovlic/litex | 69a1b1d8b1e0c1e3788c5691888527ae7bc74506 | 8030c691137d294043d797ff140de3c65aefc086 | refs/heads/master | 2020-07-11T08:49:58.894980 | 2019-08-26T21:02:03 | 2019-08-26T21:02:03 | 204,495,234 | 1 | 0 | NOASSERTION | 2019-08-26T16:36:25 | 2019-08-26T14:36:25 | null | UTF-8 | Python | false | false | 3,085 | py | # This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
import unittest
from migen import *
from litex.soc.interconnect import csr
from litex.soc.interconnect import csr_bus
def csr32_write(dut, adr, dat):
    # Write a 32-bit value as four byte-wide CSR writes, most significant
    # byte first (big-endian over the byte-wide CSR bus).
    for i in range(4):
        yield from dut.csr.write(adr + 3 - i, (dat >> 8*i) & 0xff)
def csr32_read(dut, adr):
    # Read a 32-bit value back as four byte-wide CSR reads, mirroring the
    # byte order used by csr32_write.
    dat = 0
    for i in range(4):
        dat |= ((yield from dut.csr.read(adr + 3 - i)) << 8*i)
    return dat
class CSRModule(Module, csr.AutoCSR):
    # Small device-under-test: one pulse CSR, one 32-bit storage CSR (writable
    # from the device side) and one 32-bit status CSR, both reset to 0x12345678.
    def __init__(self):
        self._csr = csr.CSR()
        self._storage = csr.CSRStorage(32, reset=0x12345678, write_from_dev=True)
        self._status = csr.CSRStatus(32, reset=0x12345678)
        # # #
        # When csr is written:
        # - set storage to 0xdeadbeef
        # - set status to storage value
        self.comb += [
            If(self._csr.re,
                self._storage.we.eq(1),
                self._storage.dat_w.eq(0xdeadbeef)
            )
        ]
        self.sync += [
            If(self._csr.re,
                self._status.status.eq(self._storage.storage)
            )
        ]
class CSRDUT(Module):
    # Wires a CSRModule onto a CSR bus through a bank array + interconnect.
    def address_map(self, name, memory):
        # Single bank mapped at address 0; `memory` is unused here.
        return {"csrmodule": 0}[name]
    def __init__(self):
        self.csr = csr_bus.Interface()
        self.submodules.csrmodule = CSRModule()
        self.submodules.csrbankarray = csr_bus.CSRBankArray(
            self, self.address_map)
        self.submodules.csrcon = csr_bus.Interconnect(
            self.csr, self.csrbankarray.get_buses())
class TestCSR(unittest.TestCase):
    """Simulation tests for CSRStorage/CSRStatus behaviour over the CSR bus.

    NOTE(review): byte addresses 1 and 5 appear to map to the storage and
    status CSRs respectively -- confirm against the generated CSR map.
    """
    def test_csr_storage(self):
        def generator(dut):
            # check init value
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0x12345678))
            # check writes
            yield from csr32_write(dut, 1, 0x5a5a5a5a)
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0x5a5a5a5a))
            yield from csr32_write(dut, 1, 0xa5a5a5a5)
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0xa5a5a5a5))
            # check update from dev
            yield from dut.csr.write(0, 1)
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0xdeadbeef))
        dut = CSRDUT()
        run_simulation(dut, generator(dut))
    def test_csr_status(self):
        def generator(dut):
            # check init value
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0x12345678))
            # check writes (no effect)
            yield from csr32_write(dut, 5, 0x5a5a5a5a)
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0x12345678))
            yield from csr32_write(dut, 5, 0xa5a5a5a5)
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0x12345678))
            # check update from dev
            yield from dut.csr.write(0, 1)
            yield from dut.csr.write(0, 1)
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0xdeadbeef))
        dut = CSRDUT()
        run_simulation(dut, generator(dut))
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
85d1b8bfb200d8f6023817335f55d8cc0ce0daa0 | 6a0cb1571b72b3f5708bb861b303380cc57a9a16 | /English/prepare_flickr_train_chunks.py | 3fa5629d374d4e1aa59fb12e265eddbe8553ad13 | [
"Apache-2.0"
] | permissive | cltl/Spoken-versus-Written | 5bb8f5c46bba2594e86bcaeb12b63c29f78aa443 | 997024ae60a3f1dacf87162aa3c82439393c1bf2 | refs/heads/master | 2020-03-21T13:35:22.902840 | 2018-12-07T20:56:28 | 2018-12-07T20:56:28 | 138,614,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | from collections import defaultdict
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
# Only focus on the training data.
with open('./Resources/Flickr30K/splits/train_images.txt') as f:
    # Image identifiers with the file extension stripped.
    train_ids = {line.split('.')[0] for line in f}
# Compile index for the written Flickr30K descriptions
flickr_index = defaultdict(list)
with open('./Resources/Flickr30K/results_20130124.token') as f:
    for line in f:
        identifier, description = line.strip().split('\t')
        identifier = identifier.split('.')[0]
        if identifier in train_ids:
            flickr_index[identifier].append(description + '\n')
# One description list per training image.  NOTE(review): zip(*descriptions)
# below truncates to the shortest list -- presumably every image has the same
# number of captions; verify against the dataset.
descriptions = [flickr_index[imgid] for imgid in train_ids]
# Interleave: every image's 1st caption, then every image's 2nd caption, ...
flattened_descriptions = [description for split in zip(*descriptions)
                          for description in split]
# Write out 100 chunks of 1000 captions each (next() raises StopIteration
# if fewer than 100 * 1000 captions are available).
gen = chunks(flattened_descriptions, 1000)
for i in range(100):
    lines = next(gen)
    with open('./Resources/Flickr30K/train_chunks/flickr_chunk.{0:03}'.format(i), 'w') as f:
        f.writelines(lines)
| [
"emielonline@gmail.com"
] | emielonline@gmail.com |
0f736fdf633fa85c109716227574ac1e44c6a553 | bfb036667018dd50883f03ccc51b2d7cbe93b94e | /SignIn/urls.py | d7e0b30211a9534c025dc93ee04b8e09a3c42ea1 | [] | no_license | uniquehou/txj-php | 845589bd256237d133471d51e4501a06082ff6c7 | 05edbbcfac541a03754421850c7d0767d12030cc | refs/heads/master | 2021-07-21T05:10:01.513922 | 2017-10-30T23:27:50 | 2017-10-30T23:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from django.conf.urls import url
from . import views
app_name = "SignIn"
urlpatterns = [
    # App root serves the same view as the named 'index' route below.
    url(r'^$', views.index),
    # NOTE(review): these patterns are unanchored (r'index' matches any URL
    # containing "index"); consider r'^index$' / r'^submit$' if exact
    # matching is intended.
    url(r'index', views.index, name='index'),
    url(r'submit', views.submit, name='submit'),
]
| [
"919863463@qq.com"
] | 919863463@qq.com |
f465a631611beafd6ed28baa3a9cd236e84b711e | 45e03dd61493195cbbbce14fa54a787715c7c1fb | /Python_String_Methods/Encode().py | 66027a0d7c89956c85b20b4cacfba92d51cd82c2 | [] | no_license | VaishnaviReddyGuddeti/Python_programs | c55ee69c05d78a70a44385ee2e66365f69546035 | 309a1786fa5a3886d516cd49eb09f9cd847389df | refs/heads/master | 2023-02-17T03:41:31.480858 | 2021-01-14T15:39:52 | 2021-01-14T15:39:52 | 279,762,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # Encode() - Returns an encoded version of the string
# Syntax - string.encode(encoding=encoding, errors=errors)
# UTF-8 encode the string:
txt = "My name is Sushanth"
x = txt.encode()
print(x)
# These examples uses ascii encoding, and a character that cannot be encoded, showing the result with different errors:
txt = "My name is Ståle"
print(txt.encode(encoding="ascii",errors="backslashreplace"))
print(txt.encode(encoding="ascii",errors="ignore"))
print(txt.encode(encoding="ascii",errors="namereplace"))
print(txt.encode(encoding="ascii",errors="replace"))
print(txt.encode(encoding="ascii",errors="xmlcharrefreplace"))
print(txt.encode(encoding="ascii",errors="strict"))
| [
"vaishnavireddyguddeti@gmail.com"
] | vaishnavireddyguddeti@gmail.com |
390c4e261333eb5c95eb3d2d31bbf17f59221205 | 0ffed23713096d9034efdc44a9f1740f79ddc9e5 | /scripts/QMDP_RCNN/IRL_linear_decay.py | 3e763658e69cb3f7aea43ee969982ea4cbb4f5d1 | [] | no_license | rohitsemwal16/RCNN_MDP | 1f559a725195fbaf59e6f2f375695372251b8e55 | 1c493d17d71c470ebc1dbd1795a75d8ed11eb00a | refs/heads/master | 2021-09-04T17:24:04.348415 | 2018-01-20T10:52:17 | 2018-01-20T10:52:17 | 118,078,756 | 0 | 0 | null | 2018-01-19T04:52:16 | 2018-01-19T04:52:16 | null | UTF-8 | Python | false | false | 5,552 | py | #!/usr/bin/env python
import numpy as npy
from variables import *
action_size = 8  # number of discrete actions (8-connected moves)
def initialize_state():
    """Set the prior state belief to a delta at the observed start state."""
    # global current_pose, from_state_belief, observed_state
    global observed_state
    from_state_belief[observed_state[0],observed_state[1]] = 1.
def initialize_observation():
    """Build the 3x3 observation-likelihood kernel (floored and normalised)."""
    global observation_model
    # observation_model = npy.array([[0.05,0.05,0.05],[0.05,0.6,0.05],[0.05,0.05,0.05]])
    # observation_model = npy.array([[0.05,0.05,0.05],[0.05,0.6,0.05],[0.05,0.05,0.05]])
    observation_model = npy.array([[0.05,0.1,0.05],[0.1,0.4,0.1],[0.05,0.1,0.05]])
    epsilon=0.0001
    # Floor every entry so no observation has exactly zero likelihood, then
    # renormalise into a proper probability distribution.
    observation_model += epsilon
    observation_model /= observation_model.sum()
def display_beliefs():
    """Print a 10x10 window of the from/to/target beliefs around the pose."""
    global from_state_belief,to_state_belief,target_belief,current_pose
    print "From:"
    for i in range(current_pose[0]-5,current_pose[0]+5):
        print from_state_belief[i,current_pose[1]-5:current_pose[1]+5]
    print "To:"
    for i in range(current_pose[0]-5,current_pose[0]+5):
        print to_state_belief[i,current_pose[1]-5:current_pose[1]+5]
    print "Target:"
    for i in range(current_pose[0]-5,current_pose[0]+5):
        print target_belief[i,current_pose[1]-5:current_pose[1]+5]
def bayes_obs_fusion():
    """Bayes update: weight the propagated belief by the observation model."""
    global to_state_belief, current_pose, observation_model, obs_space, observed_state, corr_to_state_belief
    dummy = npy.zeros(shape=(discrete_size,discrete_size))
    h = obs_space/2
    # Multiply the prior by the observation likelihood over a window centred
    # on the observed state, then renormalise.
    for i in range(-h,h+1):
        for j in range(-h,h+1):
            dummy[observed_state[0]+i,observed_state[1]+j] = to_state_belief[observed_state[0]+i,observed_state[1]+j] * observation_model[h+i,h+j]
    corr_to_state_belief[:,:] = copy.deepcopy(dummy[:,:]/dummy.sum())
def initialize_all():
    """Convenience wrapper: reset the state belief and observation model."""
    initialize_state()
    initialize_observation()
def construct_from_ext_state():
    """Copy the belief into the centre of the zero-padded extended grid."""
    global from_state_ext, from_state_belief,discrete_size
    d=discrete_size
    from_state_ext[w:d+w,w:d+w] = copy.deepcopy(from_state_belief[:,:])
def belief_prop_extended(action_index):
    """Propagate the belief one step: convolve with the action's transition
    kernel, then fold probability mass from the padding rows/columns back
    into the grid so the distribution stays normalised."""
    global trans_mat, from_state_ext, to_state_ext, w, discrete_size
    to_state_ext = signal.convolve2d(from_state_ext,trans_mat[action_index],'same')
    d=discrete_size
    ##NOW MUST FOLD THINGS:
    for i in range(0,2*w):
        to_state_ext[i+1,:]+=to_state_ext[i,:]
        to_state_ext[i,:]=0
        to_state_ext[:,i+1]+=to_state_ext[:,i]
        to_state_ext[:,i]=0
        to_state_ext[d+2*w-i-2,:]+= to_state_ext[d+2*w-i-1,:]
        to_state_ext[d+2*w-i-1,:]=0
        to_state_ext[:,d+2*w-i-2]+= to_state_ext[:,d+2*w-i-1]
        to_state_ext[:,d+2*w-i-1]=0
    to_state_belief[:,:] = copy.deepcopy(to_state_ext[w:d+w,w:d+w])
def feedforward_recurrence():
    """Feed the propagated belief back as the next step's prior."""
    global from_state_belief, to_state_belief, corr_to_state_belief
    # from_state_belief = copy.deepcopy(corr_to_state_belief)
    from_state_belief = copy.deepcopy(to_state_belief)
def calc_softmax():
    """Softmax the global qmdp_values into qmdp_values_softmax.

    Equivalent to the original per-action loop, but computes exp() and the
    normalising sum once instead of once per action, and shifts by the max
    Q-value so exp() cannot overflow for large value estimates (the shift
    cancels out of the softmax).
    """
    global qmdp_values, qmdp_values_softmax
    exps = npy.exp(qmdp_values - npy.amax(qmdp_values))
    # Rebinding the global mirrors dummy_softmax(), which also rebinds it.
    qmdp_values_softmax = exps / npy.sum(exps)
def dummy_softmax():
    """Hard (argmax) alternative to calc_softmax: one-hot on the best action."""
    global qmdp_values, qmdp_values_softmax, action_size
    # for act in range(0,action_size):
    qmdp_values_softmax = npy.zeros(action_size)
    qmdp_values_softmax[npy.argmax(qmdp_values)]=1.
def update_QMDP_values():
    """QMDP action values: expectation of each Q map over the state belief."""
    global to_state_belief, q_value_estimate, qmdp_values, from_state_belief
    for act in range(0,action_size):
        # qmdp_values[act] = npy.sum(q_value_estimate[act]*to_state_belief)
        qmdp_values[act] = npy.sum(q_value_estimate[act]*from_state_belief)
# def IRL_backprop():
def Q_backprop():
    """Gradient step on the Q-value estimate towards the demonstrated action.

    Uses a linearly decaying learning rate
    (alpha = learning_rate - annealing_rate * time_index); the update is the
    softmax cross-entropy gradient weighted by the current state belief.
    """
    global to_state_belief, q_value_estimate, qmdp_values_softmax, learning_rate, annealing_rate
    global trajectory_index, length_index, target_actions, time_index
    update_QMDP_values()
    calc_softmax()
    # dummy_softmax()
    alpha = learning_rate - annealing_rate * time_index
    for act in range(0,action_size):
        q_value_estimate[act,:,:] = q_value_estimate[act,:,:] - alpha*(qmdp_values_softmax[act]-target_actions[act])*from_state_belief[:,:]
        # print "Ello", alpha*(qmdp_values_softmax[act]-target_actions[act])*from_state_belief[:,:]
def parse_data():
    """Load the current (trajectory, step) sample into the working globals."""
    global observed_state, trajectory_index, length_index, target_actions, current_pose, trajectories
    observed_state[:] = observed_trajectories[trajectory_index,length_index,:]
    target_actions[:] = 0
    target_actions[actions_taken[trajectory_index,length_index]] = 1
    current_pose[:] = trajectories[trajectory_index,length_index,:]
def master():
    """One training step: propagate the belief, backprop the Q-values, recurse."""
    global trans_mat_unknown, to_state_belief, from_state_belief, target_belief, current_pose
    global trajectory_index, length_index
    construct_from_ext_state()
    belief_prop_extended(actions_taken[trajectory_index,length_index])
    print observed_state, current_pose, target_actions, qmdp_values_softmax
    # bayes_obs_fusion()
    parse_data()
    Q_backprop()
    # display_beliefs()
    feedforward_recurrence()
def Inverse_Q_Learning():
    """Run inverse Q-learning over every (trajectory, step) demonstration."""
    global trajectories, trajectory_index, length_index, trajectory_length, number_trajectories, time_index
    time_index = 0
    for trajectory_index in range(0,number_trajectories):
        initialize_all()
        for length_index in range(0,trajectory_length):
            # Guard: the belief should always carry probability mass; a zero
            # sum indicates it has degenerated.
            if (from_state_belief.sum()>0):
                master()
                time_index += 1
                print time_index
            else:
                print "We've got a problem"
# Kick off training from the first demonstration sample.
trajectory_index = 0
length_index = 0
parse_data()
Inverse_Q_Learning()
# Value function = per-state max over the learnt per-action Q estimates.
value_function = npy.amax(q_value_estimate, axis=0)
plt.imshow(value_function, interpolation='nearest', origin='lower', extent=[0,50,0,50], aspect='auto')
plt.show(block=False)
plt.colorbar()
plt.show()
# NOTE(review): file() is Python 2 only -- consistent with the print
# statements above; this script will not run under Python 3 as-is.
with file('Q_Value_Estimate.txt','w') as outfile:
    for data_slice in q_value_estimate:
        outfile.write('#Q_Value_Estimate.\n')
        npy.savetxt(outfile,data_slice,fmt='%-7.2f')
"tanmay.shankar@gmail.com"
] | tanmay.shankar@gmail.com |
97852b5e54f297008951ce01fea42b20236751c7 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Algorithms/Pascal's Triangle grid.py | 7171af45039db025d7f6251f269ee3a26bdb34cf | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py |
n = 7
# (n+1) x (n+1) lattice of grid points seeded with 1s; cell [i][j] ends up
# holding the number of monotone lattice paths from (0, 0) to (i, j).
grid = [[1] * (n + 1) for _ in range(n + 1)]

for row in range(1, n + 1):
    for col in range(1, n + 1):
        grid[row][col] = grid[row - 1][col] + grid[row][col - 1]

print(grid)
print(str(grid[n][n]))

'''
1
1 1
1 2 1
1 3 3 1
1 4 6 4 1
1 5 10 10 5 1
1 6 15 20 15 6 1
1 7 21 35 35 21 7 1
'''

print('\n--------------')
def main():
    """Count monotone lattice paths across a 3x3-cell grid and print them."""
    size = 3
    points = size + 1
    # grid[x][y] = number of paths from the origin to grid point (x, y);
    # border points have exactly one path.
    grid = []
    for x in range(points):
        row = []
        for y in range(points):
            if x > 0 and y > 0:
                row.append(row[y - 1] + grid[x - 1][y])
            else:
                row.append(1)
        grid.append(row)
    print(grid)
    print(grid[size][size])

main()

print('\n-------------------- OnE MATRIX ROW - PASCAL TRIANGLE----------------------\n')
def generate_Pascal_Triangle(row_nr) :
    '''**©** Made by Bogdan Trif @ 2016-12-20, 21:20.

    :Description: Generates the Pascal Triangle , Binomial Coefficients.
        Row ``i`` of the result holds the first ``row_nr + 1 - i`` running
        prefix sums of the previous row (the triangle read along diagonals).
    :param row_nr: int, the row number, int
    :return: nested list, matrix in the form of Pascal's Triangle '''
    from itertools import accumulate
    triangle = [[1] * (row_nr + 1)]
    for step in range(row_nr):
        prev = triangle[-1]
        # accumulate() yields the running prefix sums in one pass, replacing
        # the original O(n) sum() per element (O(n^3) overall -> O(n^2)).
        triangle.append(list(accumulate(prev[:row_nr - step])))
    return triangle
# Print the generated triangle, one diagonal-row per line.
# (Removed ~20 lines of stale commented-out scratch code that duplicated
# earlier experiments with the same computation.)
print('\n-----------Pascal s Triangle --------------' )
Pasc = generate_Pascal_Triangle(7)
print(Pasc,'\n')
for i in range(len(Pasc)):
    print(Pasc[i])
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
50c850718ee41b0daaf57cbf5aa1c0f224458fa1 | 6aab2d11b3ab7619ee26319886dcfc771cbcaba5 | /0x0A-python-inheritance/4-inherits_from.py | 7c1e345e609de731fc003fa63903631ed93486f7 | [] | no_license | IhebChatti/holbertonschool-higher_level_programming | ef592f25eb077e182a0295cb5f2f7d69c7a8ab67 | ca58262c6f82f98b2022344818e20d382cf82592 | refs/heads/master | 2022-12-18T10:06:30.443550 | 2020-09-24T17:31:30 | 2020-09-24T17:31:30 | 259,174,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | #!/usr/bin/python3
"""check if obj inherited directly from a class
"""


def inherits_from(obj, a_class):
    """inherits_from definition

    Arguments:
        obj {[object]} -- [the object to check]
        a_class {[class]} -- [the class]

    Returns:
        [bool] -- [true if inherited directly, false if not]
    """
    # True only when obj's own class derives from a_class without being
    # exactly a_class ('is not' is the idiomatic identity check for classes).
    return issubclass(type(obj), a_class) and type(obj) is not a_class
| [
"iheb.chatti@holbertonschool.com"
] | iheb.chatti@holbertonschool.com |
0c79eacddb49731e119b453110e076bd5b9ca5da | 9ba2b89dbdeefa54c6b6935d772ce36be7b05292 | /devilry/devilry_group/cradmin_instances/crinstance_base.py | f01a0b0b7c01fb528306066de649c1c7909be4c5 | [] | no_license | kristtuv/devilry-django | 0ffcd9d2005cad5e51f6377484a83d778d65050f | dd2a4e5a887b28268f3a45cc3b25a40c0e313fd3 | refs/heads/master | 2020-04-27T06:02:45.518765 | 2019-02-15T13:28:20 | 2019-02-15T13:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,093 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.functions import Lower, Concat
from devilry.apps.core.models import Examiner, Candidate, AssignmentGroup
from devilry.devilry_dbcache.models import AssignmentGroupCachedData
class DevilryGroupCrInstanceMixin(object):
    """Shared cradmin-instance behaviour for devilry_group: builds
    AssignmentGroup role querysets with candidates, examiners and cached
    data prefetched, and formats the role title.
    """
    # The cradmin role object is an AssignmentGroup; its front page is the
    # feedbackfeed app.
    roleclass = AssignmentGroup
    rolefrontpage_appname = 'feedbackfeed'
    def _get_base_rolequeryset(self):
        """Get base rolequerysets used by subclasses.

        Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related
        :class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s.

        Returns:
            QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.
        """
        return AssignmentGroup.objects \
            .annotate_with_is_waiting_for_feedback_count() \
            .annotate_with_is_waiting_for_deliveries_count() \
            .annotate_with_is_corrected_count() \
            .select_related('parentnode__parentnode__parentnode') \
            .prefetch_related(
                models.Prefetch('candidates',
                                queryset=self._get_candidatequeryset())) \
            .prefetch_related(
                models.Prefetch('examiners',
                                queryset=self._get_examinerqueryset())) \
            .prefetch_related(
                models.Prefetch('cached_data',
                                queryset=self._get_assignment_group_cacheddata_queryset()))
    def _get_candidatequeryset(self):
        """Get candidates.

        Returns:
            QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s,
            ordered case-insensitively by full name then short name.
        """
        return Candidate.objects \
            .select_related('relatedstudent') \
            .order_by(
                Lower(Concat('relatedstudent__user__fullname',
                             'relatedstudent__user__shortname')))
    def _get_examinerqueryset(self):
        """Get examiners.

        Returns:
            QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s,
            ordered case-insensitively by full name then short name.
        """
        return Examiner.objects \
            .select_related('relatedexaminer') \
            .order_by(
                Lower(Concat('relatedexaminer__user__fullname',
                             'relatedexaminer__user__shortname')))
    def _get_assignment_group_cacheddata_queryset(self):
        # Pulls the cached-data rows with their group and feedbackset
        # relations joined in a single query.
        return AssignmentGroupCachedData.objects\
            .select_related(
                'group',
                'first_feedbackset',
                'last_feedbackset',
                'last_published_feedbackset')
    def get_titletext_for_role(self, role):
        """String representation for the role.

        Args:
            role: An :obj:`~devilry.apps.core.models.AssignmentGroup`
                instance of the roleclass for the crinstance.

        Returns:
            str: Formatted string reprensentation of the crinstance role.
        """
        return "{} - {}".format(role.period, role.assignment.short_name)
| [
"stianjul@gmail.com"
] | stianjul@gmail.com |
9ce297a89bcb7527f0066f244fe50fac15f47f23 | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part3/updated_part2_batch/jm/parser_errors_2/158698348.py | 07c5e587ea7fc17c711767e5940198e5050b7d40 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 3,078 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 158698348
"""
"""
random actions, total chaos
"""
# Machine-generated test vector: replays a fixed pseudo-random game on a
# 5x3 board (3 players, 7 areas) and asserts every intermediate result.
board = gamma_new(5, 3, 3, 7)
assert board is not None
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_move(board, 2, 4, 2) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 3, 2, 2) == 0
# Snapshot of the rendered board at this point in the game.
board413242309 = gamma_board(board)
assert board413242309 is not None
assert board413242309 == ("..132\n" "2211.\n" ".....\n")
del board413242309
board413242309 = None
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_busy_fields(board, 1) == 3
assert gamma_free_fields(board, 1) == 8
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 4, 1) == 1
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_free_fields(board, 1) == 3
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_golden_move(board, 2, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 4, 0) == 1
assert gamma_golden_move(board, 3, 2, 0) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_busy_fields(board, 1) == 4
assert gamma_free_fields(board, 1) == 2
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 4
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_busy_fields(board, 2) == 7
# Second board snapshot near the end of the game.
board793231563 = gamma_board(board)
assert board793231563 is not None
assert board793231563 == ("22132\n" "22111\n" "223.3\n")
del board793231563
board793231563 = None
assert gamma_move(board, 3, 0, 1) == 0
gamma_delete(board)
| [
"noreply@github.com"
] | kozakusek.noreply@github.com |
fb618bb99c8eb4fecc90504df2e15c24a4405d5e | 50eb4e3092fadb9af8f5ad33f2d37edce43633ed | /okfncart/tests/test_promotion_loader.py | fe9378704631eaf15f98a06f8b440fc913f5d10a | [] | no_license | tomwardill/okfncart | 8723cf42955f1393deeebadc4e7dbaa0de5b435e | 89759bf18efb7a49e16492dabdcf23fca41f49c9 | refs/heads/master | 2020-04-06T04:01:39.242094 | 2014-09-08T14:23:29 | 2014-09-08T14:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import unittest
from okfncart.promotions.promotion_loader import PromotionLoader
class TestPromotionLoader(unittest.TestCase):
    """Tests for PromotionLoader.load_promotions()."""
    def setUp(self):
        # Fresh loader per test.
        self.loader = PromotionLoader()
    def test_load_promotions_empty(self):
        promotions = self.loader.load_promotions()
        # NOTE(review): the test name says "empty" but the assertion requires
        # a truthy result -- confirm the intended behaviour of
        # load_promotions() with no configured promotions.
        self.assertTrue(promotions)
"tom@howrandom.net"
] | tom@howrandom.net |
12b07ab23fed1bee315b968b910789912c086c85 | e0d9844e123fa0706388814b9f29758258589487 | /torch/jit/_fuser.py | 349ecbea75621a03adfa9001d1d4c5bbd82370e0 | [] | no_license | pigpigman8686/seg | b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c | 61c3816f7ba76243a872fe5c5fc0dede17026987 | refs/heads/master | 2023-04-10T22:22:35.035542 | 2021-04-22T06:24:36 | 2021-04-22T06:24:36 | 360,398,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | import contextlib
import torch
@contextlib.contextmanager
def optimized_execution(should_optimize):
    """Context manager toggling whether the JIT graph executor runs
    optimizations before executing a function; the previous setting is
    restored on exit, even if the managed block raises.
    """
    previous_setting = torch._C._get_graph_executor_optimize()
    torch._C._set_graph_executor_optimize(should_optimize)
    try:
        yield
    finally:
        torch._C._set_graph_executor_optimize(previous_setting)
@contextlib.contextmanager
def fuser(name):
    """
    A context manager that facilitates switching between
    backend fusers.
    Valid names:
    * ``fuser0`` - enables only legacy fuser
    * ``fuser1`` - enables only NNC
    * ``fuser2`` - enables only nvFuser
    """
    # Snapshot the current fuser flags so they can be restored on exit.
    old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
    old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
    old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
    old_nvfuser_state = torch._C._jit_nvfuser_enabled()
    if name == 'fuser0': # legacy fuser
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(False)
    elif name == 'fuser1': # NNC
        # NNC also needs the profiling executor/mode; the setters return the
        # previous values, restored in the finally block below.
        old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        old_profiling_mode = torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
    elif name == 'fuser2': # nvFuser
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_nvfuser_enabled(True)
    else:
        raise Exception("unrecognized fuser option")
    try:
        yield
    finally:
        if name == 'fuser1': # NNC
            torch._C._jit_set_profiling_executor(old_profiling_executor)
            torch._C._jit_set_profiling_mode(old_profiling_mode)
        # recover the previous values
        torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
        torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
        torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
        torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
# Module-level alias so _graph_for can fetch the most recently executed graph.
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def _graph_for(self, *args, **kwargs):
    # Run the callable once so the executor records an optimized graph,
    # then return that graph.
    self(*args, **kwargs)
    return last_executed_optimized_graph()
| [
"952361195@qq.com"
] | 952361195@qq.com |
0beb595ac8b8afe6fe9f98094c63c6054d060ac7 | df1eea603a7adbdd3f81e06800f788ee97ecefe1 | /0x11-python-network_1/101-starwars.py | bf5bd2f574c972d35b0370cd4d2369fe11c16ebf | [] | no_license | ledbagholberton/holbertonschool-higher_level_programming | be0b4423beb8331bd5915f065870a2cbcd8c6008 | df937fd4888dc64470f0068323a9fa6ad400e56d | refs/heads/master | 2021-06-12T15:16:57.812663 | 2019-10-01T06:00:34 | 2019-10-01T06:00:34 | 184,110,453 | 0 | 0 | null | 2021-04-30T21:18:08 | 2019-04-29T16:59:29 | TSQL | UTF-8 | Python | false | false | 550 | py | #!/usr/bin/python3
""" With request ask for header"""
import requests
import sys
if __name__ == "__main__":
    # Search the Star Wars API for people matching argv[1] and print the
    # number of results followed by each matching name.
    url = "http://swapi.co/api/people/?all=true"
    if len(sys.argv) < 2:
        sys.exit(1)
    else:
        data = {'search': sys.argv[1]}
        html = requests.get(url, params=data)
        try:
            my_json = html.json()
            print("Number of results: ", my_json.get('count'))
            list_results = my_json.get('results')
            for dict_results in list_results:
                print(dict_results.get('name'))
        except (ValueError, TypeError):
            # ValueError: response body was not valid JSON;
            # TypeError: 'results' missing/None.  Stay silent, matching the
            # original best-effort behaviour -- but no longer swallow
            # SystemExit/KeyboardInterrupt like the bare `except:` did.
            pass
| [
"789@holbertonschool.com"
] | 789@holbertonschool.com |
18f4edd212936a2d9ad07e7a58d32021e5000f79 | cc619d6e81c39fe54d4875e3c6936e25bb8a7ebd | /Python/src/17 Scientific Python/SciKitImage/03_tablets.py | 35e9a6bcf6f9bf53efee9a38eb2c8fe75f4f809d | [] | no_license | joshua4289/python3-examples | cb01060f649c7dc97185566b00fa0d59a1ffdca3 | 70c1fd0b1e5bf25e82697257fb9f92cd06e922b7 | refs/heads/master | 2020-12-19T08:19:13.310071 | 2017-04-28T13:48:01 | 2017-04-28T13:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py | import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import skimage.morphology as morphology
import skimage.feature as feature
import PIL.Image as Image
def load_image( infilename ) :
    """Load an image file and return its pixels as an int32 numpy array.

    :param infilename: path to the image file
    :return: numpy array (height x width [x channels]) with dtype int32
    """
    # Context manager closes the underlying file handle; the original
    # Image.open() leaked it until garbage collection.
    with Image.open(infilename) as img:
        img.load()  # force pixel data to be read before the file closes
        return np.asarray(img, dtype="int32")
def set_title(title):
    """Set the window title of the current matplotlib figure."""
    plt.gcf().canvas.set_window_title(title)
# # use PIL to show true image
# from PIL import Image
img = Image.open("images/tablets.jpg")
img.show()
# image is an int numpy array [0 ... 255]
set_title("numpy int array 0 ... 255")
image = load_image("images/tablets.jpg")
plt.imshow(image, interpolation="none")
plt.show()
# convert image to floats
image = image / 256.0
set_title("numpy float array 0.0 ... 1.0")
plt.imshow(image, interpolation="none")
plt.show()
# algorithms expect monochrome images
# so just use the RED part of the image
image = image[:,:,0]
# use Canny algorith to detect edges
# vary sigma
for i in range(2,10):
    set_title("sigma = {}".format(i))
    edges = feature.canny(image, sigma=i, low_threshold=40/256.0, high_threshold=50/256.0)
    plt.imshow(edges, cmap=plt.cm.gray)
    plt.show()
# vary thresholds
for i in range(5, 60, 5):
    low = i / 256.0
    high = (i + 5) / 256.0
    set_title("low = {}, high = {}".format(low*256, high*256))
    edges = feature.canny(image, sigma=4, low_threshold=low, high_threshold=high)
    plt.imshow(edges, cmap=plt.cm.gray)
    plt.show()
# chose best parametrs
sigma = 4
low = 40/256.0
high = 45/256.0
set_title("Best choice: sigma = {}, low = {}, high = {}".format(sigma, low*256, high*256))
edges = feature.canny(image, sigma=sigma, low_threshold=low, high_threshold=high)
# close edges
# Morphological closing with a growing square element joins broken edge
# segments; filling the closed contours then yields solid tablet shapes.
for i in range(1,10):
    closed = morphology.binary_closing(edges, morphology.square(i)).astype(int)
    # fill circles
    set_title("fill factor = {}".format(i))
    filled = nd.binary_fill_holes(closed).astype(int)
    plt.imshow(filled, cmap=plt.cm.gray)
    plt.show()
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
6a300cad68c584580fc8f8d23564a9d3917e56de | 73d9b5664d6949140b13e92d8b91a01e8502752a | /good_spot/images/migrations/0006_auto_20180313_1440.py | d6a7bce62944f54512cae8f23ae1618d32f2648b | [
"MIT"
] | permissive | jasmine92122/NightClubBackend | 3ed46cce0f6b534b4b49829f53fe7cb6a42ae42e | 7f59129b78baaba0e0c25de2b493033b858f1b00 | refs/heads/master | 2022-11-23T00:42:25.606762 | 2019-10-02T01:56:29 | 2019-10-02T01:56:29 | 212,234,882 | 0 | 0 | MIT | 2022-11-22T02:10:16 | 2019-10-02T01:47:52 | JavaScript | UTF-8 | Python | false | false | 462 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-13 14:40
from __future__ import unicode_literals
from django.db import migrations
def reorder(apps, schema_editor):
    """Data-migration step: run the project's 'reorder' management command
    on images.PlaceImage to renumber its ordering values."""
    # Imported lazily so the command is resolved at migration run time.
    from django.core import management
    management.call_command('reorder', 'images.PlaceImage')
class Migration(migrations.Migration):
    dependencies = [
        ('images', '0005_auto_20180313_1438'),
    ]
    operations = [
        # No reverse function is supplied, so this migration is irreversible.
        migrations.RunPython(reorder)
    ]
| [
"jasminegarcia111@outlook.com"
] | jasminegarcia111@outlook.com |
20c0be5ceca17532092c08704ef8644540114ee4 | 936dc2666f27de7a7d1428c7ad2ded62a722b8fa | /src/geofr/tasks.py | cccc966051394561c8ed36e13ac6c969e30e66cd | [
"ISC"
] | permissive | MTES-MCT/aides-territoires | 03451a32bdeaab3812b8593bfe3a27c1b1d9a182 | af9f6e6e8b1918363793fbf291f3518ef1454169 | refs/heads/master | 2023-09-04T22:15:17.819264 | 2023-08-25T13:19:17 | 2023-08-25T13:19:17 | 124,301,398 | 21 | 11 | NOASSERTION | 2023-09-12T13:46:49 | 2018-03-07T22:19:11 | Python | UTF-8 | Python | false | false | 206 | py | from core.celery import app
from django.core import management
@app.task
def count_by_department():
    """Count backers and programs by department."""
    # Celery task wrapper around the management command of the same name.
    management.call_command("count_by_department")
| [
"noreply@github.com"
] | MTES-MCT.noreply@github.com |
df71e788bcfd8b63c8f6aabc31fca3443c9f04b4 | df716b2868b289a7e264f8d2b0ded52fff38d7fc | /tests/parsers/trendmicroav.py | a8f3cf80dcbcd5830b11ee17d920946a8d4d990f | [
"Apache-2.0"
] | permissive | ir4n6/plaso | 7dd3cebb92de53cc4866ae650d41c255027cf80a | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | refs/heads/master | 2021-04-25T05:50:45.963652 | 2018-03-08T15:11:58 | 2018-03-08T15:11:58 | 122,255,666 | 0 | 0 | Apache-2.0 | 2018-02-20T21:00:50 | 2018-02-20T21:00:50 | null | UTF-8 | Python | false | false | 1,777 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Trend Micro AV Log parser."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import trendmicroav as _ # pylint: disable=unused-import
from plaso.parsers import trendmicroav
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class TrendMicroUnitTest(test_lib.ParserTestCase):
"""Tests for the Trend Micro AV Log parser."""
@shared_test_lib.skipUnlessHasTestFile(['pccnt35.log'])
def testParse(self):
"""Tests the Parse function."""
parser = trendmicroav.OfficeScanVirusDetectionParser()
storage_writer = self._ParseFile(['pccnt35.log'], parser)
# The file contains 3 lines which results in 3 events.
self.assertEqual(storage_writer.number_of_events, 3)
# The order in which DSVParser generates events is nondeterministic
# hence we sort the events.
events = list(storage_writer.GetSortedEvents())
event = events[1]
self.CheckTimestamp(event.timestamp, '2018-01-30 14:45:32.000000')
# The third and last event has been edited to match the older, documented
# format for log lines (without a Unix timestamp).
event = events[2]
self.CheckTimestamp(event.timestamp, '2018-01-30 14:46:00.000000')
# Test the third event.
self.assertEqual(event.path, 'C:\\temp\\')
self.assertEqual(event.filename, 'eicar.com_.gstmp')
expected_message = (
r'Path: C:\temp\ File name: eicar.com_.gstmp '
r'Eicar_test_1 : Failure (clean), moved (Real-time scan)')
expected_short_message = r'C:\temp\ eicar.com_.gstmp Failure (clean), moved'
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| [
"onager@deerpie.com"
] | onager@deerpie.com |
4d1e24b85cf62a6aab7cfc79383b9f0d1481768f | 5b4fe473179b5fadaf59ec96d55b2ec4cb326f65 | /src/graph_transpiler/webdnn/backend/webgl/optimize_rules/insert_channel_mode_conversion.py | 121a0b3cbf32a9c693a52efa3c3fdef3de9bc1d1 | [
"Zlib",
"MIT"
] | permissive | TarrySingh/webdnn | 13d3f1ec4936916abacfb67e270f48571e2fcff2 | b31b19de0798d8ca198b78d19cb06e4fce1bc260 | refs/heads/master | 2021-05-07T02:24:47.500746 | 2017-11-13T13:00:24 | 2017-11-13T13:00:24 | 110,582,816 | 0 | 1 | null | 2017-11-13T18:03:46 | 2017-11-13T18:03:46 | null | UTF-8 | Python | false | false | 2,948 | py | from typing import Tuple
from webdnn.backend.webgl.attributes.channel_mode import ChannelModeEnum, ChannelMode
from webdnn.backend.webgl.attributes.texture_shape import TextureShape
from webdnn.backend.webgl.operators.convert_r_to_rgba import ConvertRtoRGBA, convert_r_to_rgba
from webdnn.backend.webgl.operators.convert_rgba_to_r import ConvertRGBAtoR, convert_rgba_to_r
from webdnn.graph import traverse
from webdnn.graph.graph import Graph
from webdnn.graph.operator import Operator
from webdnn.graph.operators.sgemm import Sgemm
from webdnn.graph.operators.tensordot import Tensordot
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.variable import Variable
def _replace_input(op: Operator, var_name: str, target: ChannelModeEnum):
"""
before)
v -{op}-
after)
v -{conversion}- v' -{op}-
"""
v = op.inputs[var_name]
if ChannelMode.get(v) == target:
return False
if target == ChannelModeEnum.RGBA:
v_new = convert_r_to_rgba(v)
else:
v_new = convert_rgba_to_r(v)
TextureShape.set(v_new, height=TextureShape.get(v)[0], width=TextureShape.get(v)[1])
op.replace_input(v, v_new)
return True
def _replace_output(op: Operator, var_name: str, target: ChannelModeEnum):
"""
before)
-{op}- v
after)
-{op}- v' -{conversion}- v
"""
v = op.outputs[var_name]
if ChannelMode.get(v) == target:
return False
v_new = Variable(v.shape, v.order)
ChannelMode.set(v_new, target)
op.replace_output(v, v_new)
if target == ChannelModeEnum.RGBA:
convert_rgba_to_r(v_new).change_order(v.order).replace(v)
else:
convert_r_to_rgba(v_new).change_order(v.order).replace(v)
return True
def _replace_input_all(op: Operator, target: ChannelModeEnum):
return any(_replace_input(op, var_name, target) for var_name in op.inputs.keys())
def _replace_output_all(op: Operator, target: ChannelModeEnum):
return any(_replace_output(op, var_name, target) for var_name in op.outputs.keys())
class InsertChannelModeConversion(OptimizeRule):
def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
flag_changed = False
for op in traverse.listup_operators(graph):
if isinstance(op, (Sgemm, Tensordot)):
pass
elif isinstance(op, ConvertRGBAtoR):
flag_changed |= _replace_input(op, "x0", ChannelModeEnum.RGBA)
flag_changed |= _replace_output(op, "y", ChannelModeEnum.R)
elif isinstance(op, ConvertRtoRGBA):
flag_changed |= _replace_input(op, "x0", ChannelModeEnum.R)
flag_changed |= _replace_output(op, "y", ChannelModeEnum.RGBA)
else:
flag_changed |= _replace_input_all(op, ChannelModeEnum.R)
flag_changed |= _replace_output_all(op, ChannelModeEnum.R)
return graph, flag_changed
| [
"y.kikura@gmail.com"
] | y.kikura@gmail.com |
cb0ecd20b0fdca9b9abf3647279afb77cc77ecbb | 318270aeab9182a42482c33167f90b3e6bb8a77b | /Pattern exercise/pattern 8.py | 69e53ceffc379c4d66b91586da5711cef2d1d07f | [] | no_license | Raj-kar/Python | b857214392384752855f6ab5d673b0218ce3ecd7 | 7eab4705eda566827ad01b3285095d253e55a7dc | refs/heads/master | 2023-01-21T07:11:03.054162 | 2020-11-30T08:47:11 | 2020-11-30T08:47:11 | 286,132,816 | 3 | 11 | null | 2020-10-13T21:47:31 | 2020-08-08T23:09:42 | Python | UTF-8 | Python | false | false | 428 | py | # * *
# ** **
# *** ***
# **** ****
# ***** *****
# ****** ******
# *************
num = int(input("Enter a range :: "))
symbol = input("Enter a symbol :: ") # user can enter any symbol for print !
num += 1
for i in range(1, num):
for j in range(1, (num*2)-2):
if j <= i or j >= ((num*2)-2)-i:
print(symbol, end="")
else:
print(" ", end="")
print()
| [
"rajkar921@gmail.com"
] | rajkar921@gmail.com |
0346683d74959bf1bc9cf8400043c01c34de5b01 | 8200122ad875e73f627f5d1eca29c778167cb5fb | /tests/test_documenter.py | a9b851c1b0a4b51e81f69479b92cd72e904c4922 | [
"ISC"
] | permissive | jaimergp/mkdocstrings | bb1a5ad2360f051e17e4af0c854119dcc6b652ac | 895c3192cb9328d0800234a8732745ecae840d97 | refs/heads/master | 2021-02-17T17:47:03.476674 | 2020-03-04T20:30:36 | 2020-03-04T20:30:36 | 245,115,235 | 1 | 0 | ISC | 2020-03-05T09:05:19 | 2020-03-05T09:05:19 | null | UTF-8 | Python | false | false | 862 | py | from mkdocstrings.documenter import Documenter
def test_getattr_dunder():
class Base:
def __init__(self):
pass
def __getitem__(self, item):
"""Written docstring."""
return item
class Child(Base):
def __init__(self):
super().__init__()
def __getitem__(self, item):
return item
doc = Documenter()
class_doc = doc.get_class_documentation(Child)
for child in class_doc.children:
if child.name == "__getitem__":
assert child.docstring.original_value == ""
def test_no_filter():
doc = Documenter()
assert not doc.filter_name_out("hello")
def test_filter():
doc = Documenter(["!^_[^_]", "!^__C$"])
assert doc.filter_name_out("_B")
assert doc.filter_name_out("__C")
assert not doc.filter_name_out("__A")
| [
"pawamoy@pm.me"
] | pawamoy@pm.me |
28a809f729a9e54614cfc5b64ccef5cd57046d51 | 5f73d71c47ecac793e2e1a1ce14ca5c24483d45a | /tools/vis_gt_stats.py | a64d6b583c3b35e1c28d921cf1f608683c3aa6c4 | [] | no_license | GeeeG/sixd_toolkit | 694db642c9f2a179353284dfcf7b16f45722aaae | ec914f3a9d2ced9a5b6d87722f342c6bef1b95b7 | refs/heads/master | 2021-08-08T13:32:29.018936 | 2017-11-10T12:10:36 | 2017-11-10T12:10:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,069 | py | # Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)
# Center for Machine Perception, Czech Technical University in Prague
# Plots statistics of the ground truth poses.
import os
import sys
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout
from params.dataset_params import get_dataset_params
# dataset = 'hinterstoisser'
dataset = 'tless'
# dataset = 'tudlight'
# dataset = 'rutgers'
# dataset = 'tejani'
# dataset = 'doumanoglou'
dataset_part = 'train'
# dataset_part = 'test'
delta = 15 # Tolerance used in the visibility test [mm]
# Load dataset parameters
dp = get_dataset_params(dataset)
if dataset_part == 'train':
data_ids = range(1, dp['obj_count'] + 1)
gt_mpath_key = 'obj_gt_mpath'
gt_stats_mpath_key = 'obj_gt_stats_mpath'
else: # 'test'
data_ids = range(1, dp['scene_count'] + 1)
gt_mpath_key = 'scene_gt_mpath'
gt_stats_mpath_key = 'scene_gt_stats_mpath'
# Load the GT statistics
gt_stats = []
for data_id in data_ids:
print('Loading GT stats: {}, {}'.format(dataset, data_id))
gts = inout.load_gt(dp[gt_mpath_key].format(data_id))
gt_stats_curr = inout.load_yaml(
dp[gt_stats_mpath_key].format(data_id, delta))
for im_id, gt_stats_im in gt_stats_curr.items():
for gt_id, p in enumerate(gt_stats_im):
p['data_id'] = data_id
p['im_id'] = im_id
p['gt_id'] = gt_id
p['obj_id'] = gts[im_id][gt_id]['obj_id']
gt_stats.append(p)
print('GT count: {}'.format(len(gt_stats)))
# Collect the data
px_count_all = [p['px_count_all'] for p in gt_stats]
px_count_valid = [p['px_count_valid'] for p in gt_stats]
px_count_visib = [p['px_count_visib'] for p in gt_stats]
visib_fract = [p['visib_fract'] for p in gt_stats]
bbox_all_x = [p['bbox_all'][0] for p in gt_stats]
bbox_all_y = [p['bbox_all'][1] for p in gt_stats]
bbox_all_w = [p['bbox_all'][2] for p in gt_stats]
bbox_all_h = [p['bbox_all'][3] for p in gt_stats]
bbox_visib_x = [p['bbox_visib'][0] for p in gt_stats]
bbox_visib_y = [p['bbox_visib'][1] for p in gt_stats]
bbox_visib_w = [p['bbox_visib'][2] for p in gt_stats]
bbox_visib_h = [p['bbox_visib'][3] for p in gt_stats]
f, axs = plt.subplots(2, 2)
f.canvas.set_window_title(dataset)
axs[0, 0].hist([px_count_all, px_count_valid, px_count_visib],
bins=20, range=(min(px_count_visib), max(px_count_all)))
axs[0, 0].legend([
'All object mask pixels',
'Valid object mask pixels',
'Visible object mask pixels'
])
axs[0, 1].hist(visib_fract, bins=50, range=(0.0, 1.0))
axs[0, 1].set_xlabel('Visible fraction')
axs[1, 0].hist([bbox_all_x, bbox_all_y, bbox_visib_x, bbox_visib_y], bins=20)
axs[1, 0].legend([
'Bbox all - x',
'Bbox all - y',
'Bbox visib - x',
'Bbox visib - y'
])
axs[1, 1].hist([bbox_all_w, bbox_all_h, bbox_visib_w, bbox_visib_h], bins=20)
axs[1, 1].legend([
'Bbox all - width',
'Bbox all - height',
'Bbox visib - width',
'Bbox visib - height'
])
f.tight_layout()
plt.show()
| [
"tom.hodan@gmail.com"
] | tom.hodan@gmail.com |
01986fb8b82dfc269f8acfe87fc88ab902ec5cd7 | a9868b17ddc5f7f28911c57870e327238a2432d8 | /python_Pandas_Numpy/Pandas/Pandas08_05_Oper_최윤종.py | 8e68d21c628a7da3d95fdb66df283dc46f2a3a4d | [] | no_license | ChoiYoonJong/DataScience | 439568a668307ed0cab0cffb688fd832b10047ab | 3cab98eacecd8c1782e6f91b2b7ffa0ecefe4ed1 | refs/heads/main | 2023-06-07T02:56:08.335411 | 2021-07-09T13:23:58 | 2021-07-09T13:23:58 | 378,833,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py |
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
scientists = pd.read_csv('../data/scientists.csv')
# In[3]:
ages = scientists['Age']
print(ages.max())
# In[5]:
print(ages + ages)
# In[6]:
print(ages * ages)
# In[7]:
print(ages + 100)
# In[8]:
print(ages * 2)
# In[9]:
print(pd.Series([1,100]))
# In[10]:
print(ages,"\n\n")
print(pd.Series([1,100]),"\n\n")
print(ages + pd.Series([1,100]))
# In[11]:
print(ages)
# In[14]:
rev_ages = ages.sort_index(ascending=False)
print(rev_ages)
# In[15]:
print(ages * 2)
# In[16]:
print(ages + rev_ages)
| [
"noreply@github.com"
] | ChoiYoonJong.noreply@github.com |
18843cd91a77a4b123c23a31259d14bb5f63f9a9 | f83934dd60d4961848c0a86f6d7fbe07b79a1d63 | /examples/skybox.py | 35338a6b839cf8d1a1925b6a5bd7f559fced491a | [] | no_license | brianholland/glumpy | 2a31e2f5fd039d1debb30dd010ad36c458f329cf | a691082385e02db9b1d461847b9e36d8534630fa | refs/heads/master | 2020-12-25T21:43:58.743259 | 2015-11-30T11:04:46 | 2015-11-30T11:04:46 | 46,670,630 | 0 | 0 | null | 2015-11-30T11:04:46 | 2015-11-22T17:10:24 | Python | UTF-8 | Python | false | false | 2,225 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gloo, gl, data
from glumpy.transforms import Trackball, Position
vertex = """
attribute vec3 position;
attribute vec3 texcoord;
varying vec3 v_texcoord;
void main()
{
gl_Position = <transform(position)> * vec4(-1,-1,1,1);
v_texcoord = texcoord;
}
"""
fragment = """
uniform samplerCube texture;
varying vec3 v_texcoord;
void main()
{
gl_FragColor = textureCube(texture, v_texcoord);
}
"""
window = app.Window(width=1024, height=1024)
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_TRIANGLES, indices)
@window.event
def on_init():
gl.glEnable(gl.GL_DEPTH_TEST)
vertices = np.array([[+1,+1,+1], [-1,+1,+1], [-1,-1,+1], [+1,-1,+1],
[+1,-1,-1], [+1,+1,-1], [-1,+1,-1], [-1,-1,-1]])
texcoords = np.array([[+1,+1,+1], [-1,+1,+1], [-1,-1,+1], [+1,-1,+1],
[+1,-1,-1], [+1,+1,-1], [-1,+1,-1], [-1,-1,-1]])
faces = np.array([vertices[i] for i in [0,1,2,3, 0,3,4,5, 0,5,6,1,
6,7,2,1, 7,4,3,2, 4,7,6,5]])
indices = np.resize(np.array([0,1,2,0,2,3], dtype=np.uint32), 36)
indices += np.repeat(4 * np.arange(6, dtype=np.uint32), 6)
indices = indices.view(gloo.IndexBuffer)
texture = np.zeros((6,1024,1024,3),dtype=np.float32).view(gloo.TextureCube)
texture.interpolation = gl.GL_LINEAR
program = gloo.Program(vertex, fragment, count=24)
program['position'] = faces*10
program['texcoord'] = faces
program['texture'] = texture
program['transform'] = Trackball(Position(), distance=0)
texture[2] = data.get("sky-left.png")/255.
texture[3] = data.get("sky-right.png")/255.
texture[0] = data.get("sky-front.png")/255.
texture[1] = data.get("sky-back.png")/255.
texture[4] = data.get("sky-up.png")/255.
texture[5] = data.get("sky-down.png")/255.
window.attach(program["transform"])
app.run()
| [
"Nicolas.Rougier@inria.fr"
] | Nicolas.Rougier@inria.fr |
8d14fad12f09747881d57daf744f6a3832bf66d5 | f4d0c26d3aa27c77a7c27d9002a08465a0638cbb | /csv_schema/apps.py | cc68a34da948dcfb3886e8505753ddb7fcf4e90f | [] | no_license | uk-gov-mirror/nhsengland.NCDR-reference-library | 3afe0711f47dc1b5fa25646bc870a806b3512ce5 | cac30ee0787e81fb9868731576c242c7ea3dbde8 | refs/heads/master | 2023-04-03T15:10:19.320708 | 2017-11-03T15:03:27 | 2017-11-03T15:03:27 | 356,799,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class CsvSchemaConfig(AppConfig):
name = 'csv_schema'
| [
"fredkingham@gmail.com"
] | fredkingham@gmail.com |
d41949b29005b96b1b15f022fe7c76b524793263 | f5d3d2f2a79b07bf71a0f1bbbb50385f135f5dd3 | /jina/peapods/runtimes/base.py | 504d65cff495cf7ce2b57c50bf5d6701964b5b87 | [
"Apache-2.0"
] | permissive | Rohitpandit021/jina | 5ab9be96eebeb6ec1a7cfae78a47e9b71789158e | f3db4d5e480375d8dc3bceda814ac1963dee76d7 | refs/heads/master | 2023-06-02T14:46:16.445246 | 2021-06-21T10:18:01 | 2021-06-21T10:18:01 | 378,832,389 | 0 | 0 | Apache-2.0 | 2021-06-21T10:22:10 | 2021-06-21T06:42:59 | Python | UTF-8 | Python | false | false | 3,683 | py | import argparse
from ...logging.logger import JinaLogger
class BaseRuntime:
"""A Jina Runtime is a procedure that blocks the main process once running (i.e. :meth:`run_forever`),
therefore must be put into a separated thread/process. Any program/library/package/module that blocks the main
process, can be formulated into a :class:`BaseRuntime` class and then be used in :class:`BasePea`.
In the sequel, we call the main process/thread as ``M``, the process/thread blocked :class:`Runtime` as ``S``.
In Jina, a :class:`BasePea` object is used to manage a :class:`Runtime` object's lifecycle. A :class:`BasePea`
acts as a :class:`multiprocessing.Process` or :class:`threading.Thread`, it starts from ``M`` and once the
``S`` is spawned, it calls :class:`Runtime` methods in the following order:
0. :meth:`__init__`
1. :meth:`run_forever`. Note that this will block ``S``, step 3 won't be
reached until it is unblocked by :meth:`cancel`
2. :meth:`teardown` in ``S``. Note that ``S`` is blocked by
:meth:`run_forever`, this step won't be reached until step 2 is unblocked by :meth:`cancel`
The :meth:`__init__` and :meth:`teardown` pair together, which defines instructions that will be executed before
and after. In subclasses, `teardown` is optional.
The :meth:`run_forever` and :meth:`cancel` pair together, which introduces blocking to ``S`` and then
unblocking from it. They are mandatory for all subclasses.
Note that, there is no "exclusive" relation between :meth:`run_forever` and :meth:`teardown`, :meth:`teardown`
is not about "cancelling", it is about "cleaning".
Unlike other three methods that get invoked inside ``S``, the :meth:`cancel` is invoked in ``M`` to unblock ``S``.
Therefore, :meth:`cancel` usually requires some special communication between ``M`` and ``S``, e.g.
- Use :class:`threading.Event` or `multiprocessing.Event`, while :meth:`run_forever` polls for this event
- Use ZMQ to send a message, while :meth:`run_forever` polls for this message
- Use HTTP/REST to send a request, while :meth:`run_forever` listens to this request
Note, another way to jump out from :meth:`run_forever` is raise exceptions from it. This will immediately move to
:meth:`teardown`.
.. note::
Rule of thumb on exception handling: if you are not sure if you should handle exception inside
:meth:`run_forever`, :meth:`cancel`, :meth:`teardown`, then DO NOT catch exception in them.
Exception is MUCH better handled by :class:`BasePea`.
.. seealso::
:class:`BasePea` for managing a :class:`Runtime` object's lifecycle.
"""
def __init__(self, args: 'argparse.Namespace', **kwargs):
super().__init__()
self.args = args
if args.name:
self.name = f'{args.name}/{self.__class__.__name__}'
else:
self.name = self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
def run_forever(self):
"""Running the blocking procedure inside ``S``. Note, once this method is called,
``S`` is blocked.
.. note::
If this method raises any exception, :meth:`teardown` will be called.
.. seealso::
:meth:`cancel` for cancelling the forever loop.
"""
raise NotImplementedError
def teardown(self):
"""Method called immediately after :meth:`run_forever` is unblocked.
You can tidy up things here. Optional in subclasses. The default implementation does nothing.
"""
self.logger.close()
| [
"rajashree.patil@embold.io"
] | rajashree.patil@embold.io |
0b283b8c8bf43055210cd3e22e33b88cdbe61862 | e4428f7635a978e3e68f0a94b736c3626f0ea66d | /src/basic/server/server.py | e17e91830e4dfa9c9aceece4d5eb3c8a59fa0211 | [] | no_license | rodrigoms2004/pythonWebSocketsNumpy | df297908f796dd5b9c7ff69d82cad3c06b240c46 | 353af18cc1303dfa0a0f2da7533eb603b7fe6bcb | refs/heads/master | 2020-08-04T04:39:28.198215 | 2019-10-01T04:08:30 | 2019-10-01T04:08:30 | 212,009,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/env python
# WS server example
import asyncio
import websockets
import numpy as np
from pickle import dumps, loads
async def inverseMatrix(websocket, path):
buffer_matrix = await websocket.recv()
matrix = loads(buffer_matrix)
print("Receiving matrix:\n", matrix)
inverse = np.linalg.inv(matrix)
buffer_inverse = dumps(inverse)
await websocket.send(buffer_inverse)
print("Sending inverse:\n", inverse)
start_server = websockets.serve(inverseMatrix, "localhost", 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever() | [
"rodrigoms2004@gmail.com"
] | rodrigoms2004@gmail.com |
148085e914624bff502e11b0827994b9858b1877 | 7d98c95227ff36e7735eaf05857507baa8ecfaff | /myproject/settings.py | 2521c71c6540df85adc9b41e75e259df36493067 | [] | no_license | KANISHK-VERMA/Blogapp | 55876b58f1a5cb9db252571ad0898fb92c1df85a | 87e384139c453bff3e1487992449fed2e3e6d045 | refs/heads/master | 2022-11-11T13:25:57.291910 | 2020-07-02T17:35:53 | 2020-07-02T17:35:53 | 254,918,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,326 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'js)^obc*7izn8512y#-^1_!g&7p^1+f20r3gw(p=$@&on#k-dx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'crispy_forms',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles','Blogapp','user.apps.UserConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
MEDIA_URL='/media/'
CRISPY_TEMPLATES_PACK='bootstrap4'
LOGIN_REDIRECT_URL='nblog'
LOGIN_URL='nlogin' | [
"you@example.com"
] | you@example.com |
3bb3f731ef2c9152d232482fc1e57fa643925b8e | ca59d18e503ef22fbc920c6de48ffc8eac5a1443 | /tools/Polygraphy/examples/api/02_using_real_data/example.py | c59a454b5dbe79668694119aaf766617c9d0051b | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | boh-inspur/TensorRT | 9fc0ae0ad4e31da040d10728b63d9dc284852b67 | e4d2f7f4406f1c8f4632cc67de33728cef90ca29 | refs/heads/master | 2023-04-13T21:24:13.912673 | 2021-04-23T09:55:18 | 2021-04-23T09:55:18 | 265,431,588 | 0 | 0 | Apache-2.0 | 2021-04-23T09:55:19 | 2020-05-20T02:49:58 | null | UTF-8 | Python | false | false | 2,068 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses the Polygraphy Runner API to validate the outputs
of an identity model using a trivial dataset.
"""
import os
import numpy as np
from polygraphy.backend.trt import (EngineFromNetwork, NetworkFromOnnxPath,
TrtRunner)
INPUT_SHAPE = (1, 1, 2, 2)
REAL_DATASET = [ # Definitely real data
np.ones(INPUT_SHAPE, dtype=np.float32),
np.zeros(INPUT_SHAPE, dtype=np.float32),
np.ones(INPUT_SHAPE, dtype=np.float32),
np.zeros(INPUT_SHAPE, dtype=np.float32),
]
# For our identity network, the golden output values are the same as the input values.
# Though this network appears to do nothing, it can be incredibly useful in some cases (like here!).
GOLDEN_VALUES = REAL_DATASET
MODEL = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "models", "identity.onnx")
build_engine = EngineFromNetwork(NetworkFromOnnxPath(MODEL))
# Activate the runner using a context manager. For TensorRT, this will build an engine,
# then destroy it upon exiting the context.
# NOTE: You can also use the activate() function for this, but you will need to make sure to
# deactivate() to avoid a memory leak. For that reason, a context manager is the safer option.
with TrtRunner(build_engine) as runner:
for (data, golden) in zip(REAL_DATASET, GOLDEN_VALUES):
outputs = runner.infer(feed_dict={"x": data})
assert np.all(outputs["y"] == golden)
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
2a897f6362fbdff9b1df3a593a276ff405e2436c | b2e1d96c0551b6b31ef85353f9b6e5b6354d64e8 | /datafaucet/spark/data.py | b701af9a518224f407232a2f190558970479dda2 | [
"MIT"
] | permissive | SylarCS/datafaucet-1 | 8bd7b96cecc5592e153b61367892e2a63a96119d | a63074ba1fb1a6d15f06e2bfff05df754aaaa452 | refs/heads/master | 2020-09-15T06:04:31.999012 | 2019-11-18T20:00:55 | 2019-11-18T20:00:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from pyspark.sql import DataFrame
from datafaucet.data import _Data
class Data(_Data):
def collect(self, n=1000, axis=0):
res = self.df.select(self.columns).limit(n).toPandas()
return res.T if axis else res
def _data(self):
return Data(self)
DataFrame.data = property(_data)
| [
"natalino.busa@gmail.com"
] | natalino.busa@gmail.com |
7c881f192f7bb5eea325cab96de8af8dd74bfdf8 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/Resnet50_Cifar_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py | d0b11c71bd70e06fedb1869a0fa5f51e24fc5d1b | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 928 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='RegNet', arch='regnetx_12gf'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=2240,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.