blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a49bed0ad6fb441cd8b332aad95442e6b04774ed
|
84bad7d10540d988e0a68051c0b9ff75a8a40b72
|
/agrigo/manage.py
|
329e08312ab6214c529e78643d9fb30594c0cd60
|
[
"BSD-3-Clause"
] |
permissive
|
cmdrspartacus/agrigo
|
cd22bac9c301c980259de65e71c60154c6a3f5f9
|
de6ae4980786ca29ff4ab743f64b1759016e1f57
|
refs/heads/master
| 2016-09-13T03:09:51.420817
| 2016-05-16T13:14:34
| 2016-05-16T13:14:34
| 58,933,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,463
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Management script."""
import os
from glob import glob
from subprocess import call
from flask_migrate import Migrate, MigrateCommand
from flask_script import Command, Manager, Option, Server, Shell
from flask_script.commands import Clean, ShowUrls
from agrigo.app import create_app
from agrigo.database import db
from agrigo.settings import DevConfig, ProdConfig
from agrigo.user.models import User
# Select the configuration class from the AGRIGO_ENV environment variable:
# 'prod' selects ProdConfig, anything else falls back to DevConfig.
CONFIG = ProdConfig if os.environ.get('AGRIGO_ENV') == 'prod' else DevConfig
HERE = os.path.abspath(os.path.dirname(__file__))  # directory containing this script
TEST_PATH = os.path.join(HERE, 'tests')  # test-suite location used by the `test` command
# Flask app plus the flask-script manager and flask-migrate wrapper around it.
app = create_app(CONFIG)
manager = Manager(app)
migrate = Migrate(app, db)
def _make_context():
    """Objects exposed by default in a ``manage.py shell`` session."""
    return dict(app=app, db=db, User=User)
@manager.command
def test():
    """Execute the pytest suite verbosely and return its exit status."""
    import pytest
    return pytest.main([TEST_PATH, '--verbose'])
class Lint(Command):
    """Lint and check code style with flake8 and isort."""

    def get_options(self):
        """Command line options."""
        return (
            Option('-f', '--fix-imports', action='store_true', dest='fix_imports', default=False,
                   help='Fix imports using isort, before linting'),
        )

    def run(self, fix_imports):
        """Run isort (optionally) then flake8 over all project sources.

        Targets every ``*.py`` file and non-hidden directory at the repo
        root, excluding anything listed in ``skip``. Exits the process with
        the tool's return code on the first failure.
        """
        skip = ['requirements']
        root_files = glob('*.py')
        root_directories = [name for name in next(os.walk('.'))[1] if not name.startswith('.')]
        files_and_directories = [arg for arg in root_files + root_directories if arg not in skip]

        def execute_tool(description, *args):
            """Execute a checking tool with its arguments; exit on failure."""
            command_line = list(args) + files_and_directories
            print('{}: {}'.format(description, ' '.join(command_line)))
            rv = call(command_line)
            # BUG FIX: original used `rv is not 0`, an identity (not value)
            # comparison that only worked because CPython caches small ints.
            if rv != 0:
                exit(rv)

        if fix_imports:
            execute_tool('Fixing import order', 'isort', '-rc')
        execute_tool('Checking code style', 'flake8')
# Register CLI sub-commands on the manager: server/shell are flask-script
# built-ins, db comes from flask-migrate, urls/clean are utility commands,
# and lint is defined above.
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
manager.add_command('urls', ShowUrls())
manager.add_command('clean', Clean())
manager.add_command('lint', Lint())
if __name__ == '__main__':
    manager.run()
|
[
"tobiasorlando@gmail.com"
] |
tobiasorlando@gmail.com
|
eabc4c1e37dc5cde8f65a964309ab319a2938e79
|
0862289d532ee9750d461e12b62583494987b39f
|
/run.py
|
3ace771921d0de564ec78e562f8654082c9f2aa5
|
[] |
no_license
|
habibah-adam/uyghur_school
|
71d99ce44b25e36fdf57c7ec23e88de5d47bea2d
|
9a93e1adb12820f7611a790d8bc108412dc39017
|
refs/heads/master
| 2023-03-23T19:49:31.458789
| 2021-03-13T02:02:53
| 2021-03-13T02:02:53
| 345,954,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
# Application entry point: build the Flask app via the factory and start the
# development server.
# NOTE(review): `db` is imported but unused here -- presumably so that
# `from run import db` works elsewhere (e.g. a shell); confirm before removing.
from school_app import create_app, db
app = create_app()
if __name__ == "__main__":
    # Debug server only; not suitable for production.
    app.run(debug=True)
|
[
"hebibe2s@gmail.com"
] |
hebibe2s@gmail.com
|
f4f95db04d1c22ec1a024380dac59917668cfa2d
|
d39eabbd338b6ad565c411784c62ba6cacbd88cc
|
/alfred/server/CommandLineInterface.py
|
5acd514f5a166f9d39c669de08dfe1d5f0b48557
|
[
"MIT"
] |
permissive
|
nakul225/alfred
|
f4d8680c614146c87824b14aedb7901a21e97136
|
0dfc90b830ca06403102db2de27c3f70607e976c
|
refs/heads/master
| 2021-01-01T06:03:27.866914
| 2017-07-17T01:37:19
| 2017-07-17T01:37:19
| 97,346,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,611
|
py
|
'''
Created on Mar 20, 2017
@author: nakul
'''
import traceback
import sys
class CommandLineInterface:
    """Command-line input/response front end for a life/goals tracker.

    Python 2 code (uses `print` statements). NOTE(review): this file never
    imports `Operation`, `Goal` or `Step`, and `_read_input_and_process`
    (called below) is not defined in this class -- presumably provided
    elsewhere in the package; confirm the imports/definitions exist.
    """
    # Implementation that provides cmd line input/response interaction
    def __init__(self, providedLife):
        # The life aggregate that owns the goals and categories shown below.
        self.life = providedLife
    def _show_usage(self):
        # Print the menu of supported commands and their argument shapes.
        print "\n==========================================================="
        print "Supported commands are:"
        print "Put Goals: \n\t pg <lowercase_goal_name_without_spaces> <lowercase_description_without_spaces>"
        print "Put Step: \n\t ps <goal_name> <name> <cost_in_hours>"
        print "Get Goals: \n\t gg"
        print "Mark Step Complete: \n\t msc <goal_name> <step_name>"
        print "Mark Step Incomplete: \n\t msi <goal_name> <step_name>"
        print "Get Progress Summary: \n\t gps"
        print "Exit Program: \n\t exit"
        print "===========================================================\n"
    def _show_progress(self):
        # Show progress for every goal, then for every category.
        self._show_progress_for_goals()
        self._show_progress_for_categories()
    def _show_progress_for_goals(self):
        # Iterates through each goal and shows its completion percentage.
        for goal in self.life.get_goals():
            print "Goal " + goal.name + " is " + str(goal.get_progress_percentage()) + "% complete"
    def _show_progress_for_categories(self):
        # Iterates through each category and shows its completion percentage.
        # NOTE(review): the output string lacks a space after the name
        # ("...Xhas completed...") -- looks like a typo in the message.
        for category in self.life.get_categories():
            print "Category " + category.name + "has completed " + str(category.get_progress_percentage())
    def _process_command(self, command):
        # Dispatch a single user command to its handler.
        # Returns False only for the exit command, True otherwise.
        lowercase_command = command.lower()
        operation = lowercase_command.split()[0]
        continue_program = True
        if operation == Operation.EXIT.value:
            continue_program = False
        elif operation == Operation.PUT_GOAL.value:
            self.put_goal(lowercase_command)
        elif operation == Operation.GET_GOALS.value:
            self.get_goals(lowercase_command)
        elif operation == Operation.PUT_STEP.value:
            self.put_step(lowercase_command)
        elif operation == Operation.GET_PROGRESS_SUMMARY.value:
            self.show_progress_summary()
        elif operation == Operation.MARK_STEP_COMPLETE.value:
            self.mark_step_complete(lowercase_command)
        elif operation == Operation.MARK_STEP_INCOMPLETE.value:
            self.mark_step_incomplete(lowercase_command)
        else:
            print "Operation not recognized. Please see usage:"
            self._show_usage()
        return continue_program
    def show_progress_summary(self):
        # Public entry point for the `gps` command.
        self._show_progress()
    def put_goal(self, command):
        # pg <lowercase_goal_name_without_spaces> <lowercase_description_without_spaces>
        elements = command.split()
        name = elements[1].lower()
        description = elements[2].lower()
        goal = Goal.build_new_goal(name, description)
        self.life.put_goal(goal)
    def get_goals(self, command):
        # `command` is unused; kept for dispatch-signature uniformity.
        print "You have following goals in the system: "
        for goal in self.life.get_goals():
            goal.print_details()
    def put_step(self, command):
        # ps <goal_name> <name> <cost_in_hours>
        # NOTE(review): the comment below describes a different argument
        # order than the parsing actually uses; description is always "".
        #PutStep <name> <description> <cost_in_hours> <name_of_goal>
        elements = command.split()
        goal_name = elements[1].lower()
        name = elements[2].lower()
        description = ""
        cost = int(elements[3])
        step = Step.build_new_step(name, description, cost)
        # Find the goal in life and add this step to it.
        success = False
        for goal in self.life.get_goals():
            if goal.name == goal_name:
                goal.put_step(step)
                success=True
        if success == False:
            print "Specified goal not found!"
    def _show_usage_and_accept_user_input(self):
        # Show usage, then read and process one command.
        # NOTE(review): _read_input_and_process is not defined in this class.
        self._show_usage()
        continue_flag = self._read_input_and_process()
        return continue_flag
    def mark_step_complete(self, command):
        # msc <goal_name> <step_name>
        elements = command.split()
        goal_name = elements[1]
        step_name = elements[2]
        print "Marking step "+ step_name + " in goal " + goal_name + " as COMPLETE"
        for goal in self.life.get_goals():
            if goal.name == goal_name:
                goal.mark_step_complete(step_name)
    def mark_step_incomplete(self, command):
        # msi <goal_name> <step_name>
        elements = command.split()
        goal_name = elements[1]
        step_name = elements[2]
        print "Marking step "+ step_name + " in goal " + goal_name + " as INCOMPLETE"
        for goal in self.life.get_goals():
            if goal.name == goal_name:
                goal.mark_step_incomplete(step_name)
    def main_menu_loop(self):
        # Interactive REPL: keeps prompting until the user exits.
        # A bare `except` keeps the loop alive on any error (prints traceback).
        should_keep_loop_running = True
        while(should_keep_loop_running):
            try:
                should_keep_loop_running = self._show_usage_and_accept_user_input()
            except:
                print "Exception raised\n"
                traceback.print_exc(file=sys.stdout)
    def _process_single_command(self):
        # One-shot mode: process the command given as program arguments
        # instead of running the continuous input loop.
        try:
            actual_command = " ".join(sys.argv[1:])
            self._process_command(actual_command)
        except:
            print "Exception raised while dealing with input command"
            self._show_usage()
|
[
"nakul225@gmail.com"
] |
nakul225@gmail.com
|
55c78f9adc431c314924891a2846056a21118d3d
|
f5ddc6122e361e9a6508ced36a3ebfc3c0814356
|
/beanstalkd/south_migrations/0001_initial.py
|
0feafbae89dbb4b68ad52f1d1664693fbbc7e076
|
[
"Apache-2.0"
] |
permissive
|
baitcode/django-beanstalkd
|
84c920bd8860a0af4fbbf1f06137f73390594c43
|
27696832fd5bffcabe96f787d2608fb1fbb0ec5a
|
refs/heads/master
| 2021-01-24T06:04:53.263333
| 2014-10-30T11:41:03
| 2014-10-30T11:41:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration: creates the `beanstalk_tube` and
    `beanstalk_job` tables for the Tube and Job models.

    Auto-generated South code -- the `models` dict below is the frozen ORM
    state and must not be edited by hand.
    """
    def forwards(self, orm):
        """Apply: create both tables and emit South create signals."""
        # Adding model 'Tube'
        db.create_table('beanstalk_tube', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=1000)),
            ('buried', self.gf('django.db.models.fields.IntegerField')(null=True)),
            ('delayed', self.gf('django.db.models.fields.IntegerField')(null=True)),
            ('ready', self.gf('django.db.models.fields.IntegerField')(null=True)),
            ('reserved', self.gf('django.db.models.fields.IntegerField')(null=True)),
            ('urgent', self.gf('django.db.models.fields.IntegerField')(null=True)),
        ))
        db.send_create_signal('beanstalkd', ['Tube'])
        # Adding model 'Job'
        db.create_table('beanstalk_job', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('beanstalk_id', self.gf('django.db.models.fields.IntegerField')()),
            ('instance_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
            ('instance_port', self.gf('django.db.models.fields.IntegerField')()),
            ('tube', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['beanstalk.Tube'], null=True)),
            ('tube_name', self.gf('django.db.models.fields.CharField')(max_length=1000)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('message', self.gf('django.db.models.fields.TextField')(default='{}')),
            ('state', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
        ))
        db.send_create_signal('beanstalkd', ['Job'])
    def backwards(self, orm):
        """Revert: drop both tables."""
        # Deleting model 'Tube'
        db.delete_table('beanstalk_tube')
        # Deleting model 'Job'
        db.delete_table('beanstalk_job')
    # Frozen ORM model definitions (South bookkeeping; auto-generated).
    models = {
        'beanstalk.job': {
            'Meta': {'object_name': 'Job'},
            'beanstalk_id': ('django.db.models.fields.IntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'instance_port': ('django.db.models.fields.IntegerField', [], {}),
            'message': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'tube': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beanstalk.Tube']", 'null': 'True'}),
            'tube_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'beanstalk.tube': {
            'Meta': {'ordering': "['name']", 'object_name': 'Tube'},
            'buried': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'delayed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'ready': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'reserved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'urgent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        }
    }
    complete_apps = ['beanstalkd']
|
[
"ilya@ostrovok.ru"
] |
ilya@ostrovok.ru
|
496a84c1d1d106e52d4af6fdde5b9432b262f5e0
|
da23df96d1d93d9b3526f7576e76e0a3d87aa17e
|
/exercises/3.tuple_exe.py
|
8e4471dac512ab7f5e17fea3b6f3488c06d2c834
|
[] |
no_license
|
zhoucong2/python-
|
3d27b9290cefc04d0179808d0c6b34bc6623eaef
|
3d0aa0ea1235c73b75b87742eda7d356cda1250c
|
refs/heads/master
| 2023-01-30T16:47:23.992700
| 2020-12-10T10:05:46
| 2020-12-10T10:05:46
| 312,167,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
# Tuple packing/unpacking exercise.
tuple1=(1,2,3)
x,y,z = tuple1  # tuple unpacking: bind each element to a name
tuple2 = x,y,z  # tuple packing: the bare comma expression builds a new tuple
print(x,y,z,tuple2)
|
[
"505614943@qq.com"
] |
505614943@qq.com
|
29307944764273393298ed15682e1772ffc0005a
|
eea7adb1221e39e949e9f13b92805f1f63c61696
|
/leetcode-04/solutions/python3/0026.py
|
cffe5152560aba1f85e3de6977536a78c4a951fd
|
[] |
no_license
|
zhangchunbao515/leetcode
|
4fb7e5ac67a51679f5ba89eed56cd21f53cd736d
|
d191d3724f6f4b84a66d0917d16fbfc58205d948
|
refs/heads/master
| 2020-08-02T21:39:05.798311
| 2019-09-28T14:44:14
| 2019-09-28T14:44:14
| 211,514,370
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Compact a sorted list in place so each value appears once.

        Returns the number of unique values; nums[:count] holds them.
        """
        if not nums:
            return 0
        write = 0  # index of the last unique value written so far
        for value in nums:
            if value != nums[write]:
                write += 1
                nums[write] = value
        return write + 1
|
[
"zhangchunbao515@163.com"
] |
zhangchunbao515@163.com
|
b5131dfd5ec3d93cfb93ae353873b30d8f0cf541
|
5eb51cc393c35be33d8faca9548df0170c29bfc7
|
/base/getMeminfo.py
|
c728252269b1408de3315cb285df3750bb113edc
|
[] |
no_license
|
wxpokay/autogui
|
965f3dc1aa908ab62347f6872365f54c84eb9ea9
|
3f128b663bff61428f344579faab62668281a1a9
|
refs/heads/master
| 2021-01-22T17:22:38.663439
| 2016-03-14T08:46:38
| 2016-03-14T08:46:38
| 49,067,292
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-3-7
@author: Administrator
'''
import os
import time
import subprocess
# Directory containing this script (Python 2: `print` statements below).
script_dir = os.path.dirname(os.path.realpath(__file__))
print script_dir
# Root directory where memory-info reports are written (Windows path).
result = "F:\\workspace\\autogui\\meminfo\\"
# Current system time, used in file and directory names.
now = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
day = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# Per-day report directory; relative paths are supported.
tdresult = result + day
class getAndroidMem:
    """Collect Android app memory data through adb (Python 2 script).

    Hard-coded to the package `com.jhd.help`; appends dumpsys meminfo lines
    to a CSV file under the per-day report directory.
    """
    def def_file(self):
        ''' Build and return the path of the CSV file that stores memory data. '''
        print '定义 存放内存数据的文件 '
        if os.path.exists(tdresult):
            filename = tdresult + "\\" + now + "_mem.csv"
        else:
            # NOTE(review): os.mkdir creates only the last path component;
            # this fails if the parent of `tdresult` does not exist.
            os.mkdir(tdresult)
            filename = tdresult + "\\" + now + "_mem.csv"
        if(os.path.exists(filename)):
            print "已经存在了"
        else:
            print "不存在"
        return filename
    def getMemDump(self,file_path):
        ''' Trigger an adb heap dump of the target package. '''
        print '获取操作的dump文件'
        # NOTE(review): the `>` redirection is shell syntax but shell=True is
        # not passed here -- confirm the redirection actually happens.
        subprocess.Popen('adb shell am dumpheap com.jhd.help>' +file_path)
    def getMemPic(self,file_path):
        ''' Continuously append meminfo Pss/Total/TOTAL lines to file_path to build a memory trend. '''
        print '获取内存趋势图 '
        #f = open(file_path, 'w')
        cmd1 = 'adb shell dumpsys meminfo com.jhd.help |findstr Pss>>' + file_path
        #cmd1 = 'adb shell logcat -v time |findstr jhd >>' + file_path
        print cmd1
        subprocess.Popen(cmd1,shell=True)
        cmd2 = 'adb shell dumpsys meminfo com.jhd.help |findstr Total>>' + file_path
        subprocess.Popen(cmd2,shell=True)
        cmd3 = 'adb shell dumpsys meminfo com.jhd.help |findstr TOTAL>>' + file_path
        # NOTE(review): endless loop with no sleep -- spawns adb processes as
        # fast as possible and never returns; confirm this is intended.
        while(True):
            subprocess.Popen(cmd3,shell=True)
if __name__ == "__main__":
    # Create the output file, then start the endless meminfo collection loop.
    filename = getAndroidMem().def_file()
    #print filename
    print '开始获取趋势'
    getAndroidMem().getMemPic(filename)
|
[
"wuxiaoping1120@126.com"
] |
wuxiaoping1120@126.com
|
a399756b98d4f56f840f7c66e636e9b73a4a9272
|
86a1aab74d3c1c991c8effa835ce8f9dfcf980b3
|
/utils/src/arguments.py
|
36b2e4e521b8431242ca3521587776f357f90e0d
|
[
"MIT"
] |
permissive
|
CBIIT/nci-hitif
|
a97e93c005c079d7650b2ef73bcd77e518a30fcd
|
2f825cbcba92ff2fdffac60de56604578f31e937
|
refs/heads/master
| 2023-04-15T04:55:05.703576
| 2021-04-21T20:11:06
| 2021-04-21T20:11:06
| 97,508,922
| 1
| 5
|
MIT
| 2023-03-24T23:35:20
| 2017-07-17T18:25:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
import argparse
def get_unet_parser():
    """Build the argparse parser for the U-Net spot-learner CLI.

    Positional arguments are the image and mask stacks; options control the
    network architecture, training augmentation, and optimizer settings.

    Note: numeric defaults are given as numeric literals rather than strings
    (the original mixed `default='3'`/`default='1e-5'` with numeric `type=`;
    argparse converts string defaults through `type`, so behavior is
    unchanged, but literals are unambiguous).

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(description="spot learner")
    parser.add_argument('images', help="The 2d numpy array image stack or 128 * 128")
    parser.add_argument('masks', help="The 2d numpy array mask (16bits) stack or 128 * 128")
    parser.add_argument('--nlayers', default=4, type=int, dest='nlayers',
                        help="The number of layer in the forward path ")
    parser.add_argument('--num_filters', default=32, type=int, dest='num_filters',
                        help="The number of convolution filters in the first layer")
    parser.add_argument('--conv_size', default=3, type=int, dest='conv_size',
                        help="The convolution filter size.")
    parser.add_argument('--dropout', default=None, type=float, dest='dropout',
                        help="Include a droupout layer with a specific dropout value.")
    parser.add_argument('--activation', default='relu', dest='activation',
                        help="Activation function.")
    parser.add_argument('--augmentation', default=1, type=float, dest='augmentation',
                        help="Augmentation factor for the training set.")
    parser.add_argument('--initialize', default=None, dest='initialize',
                        help="Numpy array for weights initialization.")
    parser.add_argument('--normalize_mask', action='store_true', dest='normalize_mask',
                        help="Normalize the mask in case of uint8 to 0-1 by dividing by 255.")
    parser.add_argument('--predict', action='store_true', dest='predict',
                        help="Use the model passed in initialize to perform segmentation")
    parser.add_argument('--loss_func', default='dice', dest='loss_func',
                        help="Keras supported loss function, or 'dice'. ")
    parser.add_argument('--last_act', default='sigmoid', dest='last_act',
                        help="The activation function for the last layer.")
    parser.add_argument('--batch_norm', default=False, action="store_true", dest='batch_norm',
                        help="Enable batch normalization")
    parser.add_argument('--lr', default=1e-5, type=float, dest='lr',
                        help="Initial learning rate for the optimizer")
    parser.add_argument('--rotate', default=False, action="store_true", dest='rotate',
                        help="")
    return parser
|
[
"george.zaki@nih.gov"
] |
george.zaki@nih.gov
|
db996257ef666016749abab744fca60cc7c79dc3
|
2d2fcc54af513a84bc624589dc7c6a0316848784
|
/microbe/lib/python3.6/hmac.py
|
3433dd988ff9ff0a1ec0203ba62a078b796dcc94
|
[] |
no_license
|
tatyana-perlova/microbe-x
|
9becf3a176e1277a3bb4ffcd96d4b25365038bb8
|
5b364c09dcf43c3ab237c8d9304a4eaa9ecff33f
|
refs/heads/master
| 2022-12-13T18:29:59.372327
| 2018-02-06T19:35:11
| 2018-02-06T19:35:11
| 120,374,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
/home/perlusha/anaconda3/lib/python3.6/hmac.py
|
[
"tatyana.perlova@gmail.com"
] |
tatyana.perlova@gmail.com
|
baae6fae01fff3f6aec29b4e4d2b1d0690ecc8d7
|
41c74240ef78070ee5ad19ece21672e629da6881
|
/elections/migrations/0001_initial.py
|
47ba1d9d4599cd1ce1a4b0c10cf6582b2cf65c5b
|
[] |
no_license
|
NamGungGeon/DjangoStudy
|
33d3f3d66bcc6a9dafa9cbeee10f55b705d1755f
|
7985d384f26538b78414148c485d4a126c199ad0
|
refs/heads/master
| 2021-01-23T20:07:23.033394
| 2017-09-08T11:03:46
| 2017-09-08T11:03:46
| 102,852,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-03 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the `Candidate` model."""
    # First migration for this app; no dependencies on other apps.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Candidate',
            fields=[
                # Auto-generated integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
                ('introduction', models.TextField()),
                ('area', models.CharField(max_length=15)),
                ('party_number', models.IntegerField(default=1)),
            ],
        ),
    ]
|
[
"rndrjs123@naver.com"
] |
rndrjs123@naver.com
|
454eb93dccb38e7fc7963f055b5cbdc78b1c6663
|
95544e6ac0847dd7b21e6ec180d31a1bc5dedaed
|
/H4/TypeX/WatchingApp(TYPEX)-H4/env.py
|
37bb28626a2cdaf685df1d5954a74e8c1c6c963b
|
[] |
no_license
|
meanJustin/Real-Time-Trade-Watching-app
|
f965731da8e01bb81b8517c2080506246073c78c
|
867dde7a5a8ad2f67c2a19b46b15283720884678
|
refs/heads/main
| 2023-01-04T13:27:54.179141
| 2020-10-23T02:05:30
| 2020-10-23T02:05:30
| 306,504,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# Source file path for incoming data (MetaTrader 4 terminal Files directory).
FILEPATH = "C:\\Users\\VMAK CAPITAL\\AppData\\Roaming\\MetaQuotes\\Terminal\\CEA95A93FC8D185DD2235895C53A5FFF\\MQL4\\Files\\"
#FILEPATH = ".\\Assets\\"
# Master-sheet file path.
MASTERFILEPATH = ".\\Assets\\"
# Cycle length in seconds (4 hours).
CYCLE_TIME = 3600 * 4
# Interval between index checks, in seconds (1 hour).
CHECK_INDEX_DURATION_TIME = 3600
# Refresh/update period in seconds.
REFRESH_TIME = 5
# Master-sheet workbook file name.
TYPEX_MASTER = "MasterTypeXSheet.xlsx"
# Output file names:
# TypeX print output.
TYPEX_PRINT = "TypeX.csv"
|
[
"69616732+meanJustin@users.noreply.github.com"
] |
69616732+meanJustin@users.noreply.github.com
|
1186de1cba914cdcc904a0e0a09520080aa16289
|
46492cc7429c83fe362b0ed566fc54982e52c46e
|
/pitches/main/forms.py
|
bb9c5b6a6c3f20f413c47970a696323c03307838
|
[
"MIT"
] |
permissive
|
jakhax/pitches
|
15c8d87825c879b56cd931d26d398e736636134f
|
e56358d00089bd46addd54192220bcca0478e0da
|
refs/heads/master
| 2020-03-18T00:36:09.254870
| 2018-05-20T14:48:14
| 2018-05-20T14:48:14
| 134,102,974
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,399
|
py
|
from flask import current_app, session
from flask_babel import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField, IntegerField
from wtforms import ValidationError
from wtforms.validators import DataRequired, InputRequired, Length, Email, Regexp
from ..models import Role, User, TopicGroup
class FormHelpersMixIn(object):
    """Mix-in with helpers shared by the forms in this module."""
    @property
    def submit_fields(self):
        # All SubmitField instances declared on the form (save/cancel/delete buttons).
        return [getattr(self, field) for field, field_type in self._fields.items()
                if isinstance(field_type, SubmitField)]
    @staticmethod
    def is_has_data(*fields):
        # True when at least one of the given fields received data.
        return any([field.data for field in fields])
    def get_flashed_errors(self):
        # Merge field errors stashed in the session by a previous request
        # into this form's errors, then clear them from the session.
        errors = session.pop('_form_errors') if '_form_errors' in session else {}
        # NOTE(review): WTForms normally exposes `errors` as a computed
        # property -- confirm this update actually persists on the form.
        self.errors.update(errors)
        # Rebinding `errors` per iteration is intentional: the outer dict's
        # items() was already materialized for the loop.
        for field, errors in errors.items():
            if hasattr(self, field):
                form_field = getattr(self, field)
                if form_field.errors:
                    form_field.errors.extend(errors)
                else:
                    form_field.errors = errors
class EditProfileForm(FlaskForm):
    """Profile fields a user can edit about themselves (all optional)."""
    name = StringField(lazy_gettext('Real name'), validators=[Length(0, 64)])
    homeland = StringField(lazy_gettext('Homeland'), validators=[Length(0, 64)])
    about = TextAreaField(lazy_gettext('About me'))
    avatar = StringField(lazy_gettext('Link to avatar'), validators=[Length(0, 256)])
    submit = SubmitField(lazy_gettext('Save'))
class EditProfileAdminForm(FlaskForm):
    """Admin-level profile editor: additionally exposes email, username,
    confirmation status and role, with uniqueness validation."""
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
    username = StringField(lazy_gettext('Username'), validators=[
        DataRequired(), Length(1, 32), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, lazy_gettext(
            'Usernames must have only letters, numbers, dots or underscores'))])
    confirmed = BooleanField(lazy_gettext('Confirmed'))
    role = SelectField(lazy_gettext('Role'), coerce=int)
    name = StringField(lazy_gettext('Real name'), validators=[Length(0, 64)])
    homeland = StringField(lazy_gettext('Homeland'), validators=[Length(0, 64)])
    about = TextAreaField(lazy_gettext('About me'))
    avatar = StringField(lazy_gettext('Link to avatar'), validators=[Length(0, 256)])
    submit = SubmitField(lazy_gettext('Save'))
    def __init__(self, user, *args, **kwargs):
        # `user` is the account being edited; the validators compare against it.
        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        # Role choices come from the Role table, ordered by name.
        self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
        self.user = user
    def validate_email(self, field):
        # Reject an email already registered to a different account.
        if (field.data.lower() != self.user.email
                and User.query.filter_by(email=field.data.lower()).first()):
            raise ValidationError(lazy_gettext('Email already registered.'))
    def validate_username(self, field):
        # Reject a username (case-insensitively) used by a different account.
        if (field.data.lower() != self.user.username_normalized
                and User.query.filter_by(username_normalized=field.data.lower()).first()):
            raise ValidationError(lazy_gettext('Username already in use.'))
class TopicForm(FlaskForm):
    """Create/edit a plain topic (no user-defined poll)."""
    title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 128)])
    group_id = IntegerField(lazy_gettext('Topic group ID'), validators=[InputRequired()])
    body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 20})
    # NOTE(review): plain class attributes, not form fields -- presumably
    # fixed default poll values read by the view; confirm they are intended.
    poll_question="Rank"
    poll_answers="Upvote\n Downvote"
    submit = SubmitField(lazy_gettext('Save'))
    cancel = SubmitField(lazy_gettext('Cancel'))
    delete = SubmitField(lazy_gettext('Delete'))
    def remove_edit_fields(self):
        # Drop fields that only apply when editing an existing topic.
        del self.group_id
        del self.delete
    def validate_group_id(self, field):
        # The target topic group must exist.
        if not TopicGroup.query.filter_by(id=field.data).first():
            raise ValidationError(lazy_gettext('Topic group with such ID is not exist.'))
class TopicWithPollForm(FlaskForm):
    """Create/edit a topic that carries a poll (question + answers)."""
    title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 128)])
    group_id = IntegerField(lazy_gettext('Topic group ID'), validators=[InputRequired()])
    body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 20})
    poll_question = StringField(lazy_gettext('Poll question'), validators=[DataRequired(), Length(0, 256)])
    poll_answers = TextAreaField(lazy_gettext('Poll answers'), validators=[DataRequired()], render_kw={'rows': 10})
    submit = SubmitField(lazy_gettext('Save'))
    cancel = SubmitField(lazy_gettext('Cancel'))
    delete = SubmitField(lazy_gettext('Delete'))
    def remove_edit_fields(self):
        # Drop fields that only apply when editing an existing topic.
        del self.group_id
        del self.delete
    def validate_group_id(self, field):
        # The target topic group must exist.
        if not TopicGroup.query.filter_by(id=field.data).first():
            raise ValidationError(lazy_gettext('Topic group with such ID is not exist.'))
class TopicGroupForm(FlaskForm):
    """Create/edit a topic group (a container for topics)."""
    title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 64)])
    group_id = IntegerField(lazy_gettext('Parent topic group ID'), validators=[InputRequired()])
    priority = SelectField(lazy_gettext('Priority'), coerce=int)
    protected = BooleanField(lazy_gettext('Moderators only'))
    submit = SubmitField(lazy_gettext('Save'))
    cancel = SubmitField(lazy_gettext('Cancel'))
    delete = SubmitField(lazy_gettext('Delete'))
    def __init__(self, *args, **kwargs):
        super(TopicGroupForm, self).__init__(*args, **kwargs)
        # Priority choices come from the application configuration.
        self.priority.choices = [(p, p) for p in current_app.config['TOPIC_GROUP_PRIORITY']]
    def remove_edit_fields(self):
        # Drop fields that only apply when editing an existing group.
        del self.group_id
        del self.delete
    def validate_group_id(self, field):
        # The parent topic group must exist.
        if not TopicGroup.query.filter_by(id=field.data).first():
            raise ValidationError(lazy_gettext('Topic group with such ID is not exist.'))
class CommentForm(FlaskForm):
    """New-comment form; the label is personalized with the commenter's username."""
    body = TextAreaField(lazy_gettext('Leave your comment, {username}:'), validators=[DataRequired()],
                         render_kw={'rows': 4})
    submit = SubmitField(lazy_gettext('Submit'))
    def __init__(self, user, *args, **kwargs):
        super(CommentForm, self).__init__(*args, **kwargs)
        # Substitute the commenter's username into the (translated) label.
        self.body.label.text = self.body.label.text.format(username=user.username)
class CommentEditForm(FlaskForm):
    """Edit an existing comment (larger text area; save/cancel/delete)."""
    body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 8})
    submit = SubmitField(lazy_gettext('Save'))
    cancel = SubmitField(lazy_gettext('Cancel'))
    delete = SubmitField(lazy_gettext('Delete'))
class MessageReplyForm(FlaskForm):
    """Reply to a private message (send/close/delete actions)."""
    title = StringField(lazy_gettext('Subject'), validators=[DataRequired(), Length(0, 128)])
    body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 4})
    send = SubmitField(lazy_gettext('Send'))
    close = SubmitField(lazy_gettext('Close'))
    delete = SubmitField(lazy_gettext('Delete'))
class MessageSendForm(FlaskForm):
    """Compose a new private message (send/cancel actions)."""
    title = StringField(lazy_gettext('Subject'), validators=[DataRequired(), Length(0, 128)])
    body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 4})
    send = SubmitField(lazy_gettext('Send'))
    cancel = SubmitField(lazy_gettext('Cancel'))
class SearchForm(FlaskForm):
    """Site search: a single required query field (1-64 chars)."""
    text = StringField('', validators=[DataRequired(), Length(1, 64)])
    search = SubmitField(lazy_gettext('Search'))
|
[
"jackogina60@gmail.com"
] |
jackogina60@gmail.com
|
f8d2154649e59afa419b79b4777684cdda82eb5c
|
56b4d00870af18752b4414495b08e2ec3adf3ae4
|
/src/clims/api/endpoints/process_assignments.py
|
c5fd2f83c03d0928f0637275f0b82856ee822b26
|
[
"BSD-2-Clause"
] |
permissive
|
commonlims/commonlims
|
26c3f937eaa18e6935c5d3fcec823053ab7fefd9
|
36a02ed244c7b59ee1f2523e64e4749e404ab0f7
|
refs/heads/develop
| 2021-07-01T17:20:46.586630
| 2021-02-02T08:53:22
| 2021-02-02T08:53:22
| 185,200,241
| 4
| 1
|
NOASSERTION
| 2021-02-02T08:53:23
| 2019-05-06T13:16:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
from __future__ import absolute_import
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from sentry.api.base import Endpoint, SessionAuthentication
class ProcessAssignmentsEndpoint(Endpoint):
    """REST endpoint that assigns entities (by global id) to a workflow."""
    # Session-authenticated, logged-in users only.
    authentication_classes = (SessionAuthentication, )
    permission_classes = (IsAuthenticated, )
    def post(self, request, organization_slug):
        """
        Assign one or more item to a workflow. The items are assigned by global_id.
        """
        # TODO-auth: Ensure that the user is only assigning samples that are under the organization
        # Entities is a list of global ids (e.g. Substance-100)
        entities = request.data["entities"]
        definition = request.data["definitionId"]
        variables = request.data["variables"]
        assignments = list()
        # Delegate to the workflows service; one assignment per entity.
        # NOTE(review): `self.app` is presumably provided by the Endpoint
        # base class -- confirm.
        assignments += self.app.workflows.batch_assign(
            entities, definition, request.user, variables)
        # 201 Created with the count of assignments made.
        return Response({"assignments": len(assignments)}, status=201)
|
[
"costeinar@gmail.com"
] |
costeinar@gmail.com
|
230c93a04644bae6fca2f3d207a8e00cba3a24de
|
beae5a43e5bf3d3627d49531e5cc8365c204d15c
|
/contactnetwork/migrations/0002_auto_20180117_1457.py
|
7da9fdc770627bede76a26f59e0e2291f2f612df
|
[
"Apache-2.0"
] |
permissive
|
protwis/protwis
|
e8bbe928a571bc9d7186f62963d49afe1ed286bd
|
75993654db2b36e2a8f67fa38f9c9428ee4b4d90
|
refs/heads/master
| 2023-09-01T18:16:34.015041
| 2023-04-06T11:22:30
| 2023-04-06T11:22:30
| 50,017,823
| 31
| 92
|
Apache-2.0
| 2023-07-28T06:56:59
| 2016-01-20T09:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,796
|
py
|
# Generated by Django 2.0.1 on 2018-01-17 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contactnetwork', '0001_initial'),
('structure', '0001_initial'),
('residue', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='interactingresiduepair',
name='referenced_structure',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Structure'),
),
migrations.AddField(
model_name='interactingresiduepair',
name='res1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='residue1', to='residue.Residue'),
),
migrations.AddField(
model_name='interactingresiduepair',
name='res2',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='residue2', to='residue.Residue'),
),
migrations.CreateModel(
name='FaceToEdgeInteraction',
fields=[
('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
('res1_has_face', models.BooleanField()),
],
options={
'db_table': 'interaction_aromatic_face_edge',
},
bases=('contactnetwork.aromaticinteraction',),
),
migrations.CreateModel(
name='FaceToFaceInteraction',
fields=[
('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
],
options={
'db_table': 'interaction_aromatic_face_face',
},
bases=('contactnetwork.aromaticinteraction',),
),
migrations.CreateModel(
name='PiCationInteraction',
fields=[
('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
('res1_has_pi', models.BooleanField()),
],
options={
'db_table': 'interaction_aromatic_pi_cation',
},
bases=('contactnetwork.aromaticinteraction',),
),
migrations.CreateModel(
name='PolarBackboneSidechainInteraction',
fields=[
('polarinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.PolarInteraction')),
('res1_is_sidechain', models.BooleanField()),
],
options={
'db_table': 'interaction_polar_backbone_sidechain',
},
bases=('contactnetwork.polarinteraction',),
),
migrations.CreateModel(
name='PolarSidechainSidechainInteraction',
fields=[
('polarinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.PolarInteraction')),
],
options={
'db_table': 'interaction_polar_sidechain_sidechain',
},
bases=('contactnetwork.polarinteraction',),
),
]
|
[
"christian@munk.be"
] |
christian@munk.be
|
9a2b4bd952a3bd412a603232556bd9cad7508e62
|
9638fccea89ece61f7ba1f985f488bf3e8671155
|
/venv/bin/jp.py
|
3187218827e61b01a87d6828b56a3e2045077914
|
[] |
no_license
|
ked66/ResearchNotes
|
7ada6bc14a54dd9c86719f901e090265738642b9
|
c653e02f78bf195dc417394baf0342033a9984e4
|
refs/heads/master
| 2023-03-04T05:00:48.261084
| 2021-02-12T20:15:27
| 2021-02-12T20:15:27
| 316,547,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
#!/Users/katie/PycharmProjects/ResearchNotes/venv/bin/python
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
|
[
"ked66@cornell.edu"
] |
ked66@cornell.edu
|
f2d8006fa9d4e809157de1688060502edc3218c4
|
2368972f5cd45704b5ab1b4877f6409fc38bf693
|
/app/app.py
|
15563cc268e2f61394e02d4b08bcdf53cec19708
|
[] |
no_license
|
sergiodias28/manobra
|
7d67498521aabb0d8c9a5d9ebce97d39099913cb
|
5f38eef2035547807ae8aaa095a76961cb372852
|
refs/heads/master
| 2021-01-19T00:24:57.666548
| 2016-08-11T23:12:26
| 2016-08-11T23:12:26
| 65,052,300
| 0
| 0
| null | 2016-08-11T23:12:27
| 2016-08-05T22:15:02
|
Python
|
ISO-8859-1
|
Python
| false
| false
| 2,234
|
py
|
# -*- coding: utf-8 -*-
"""
Autman
~~~~~~
Sistema de automanção de manobras.
:copyright: (c) 2016 by Sergio Dias.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, jsonify, redirect, url_for, abort, \
render_template, flash
from time import gmtime, strftime
import paramiko
import time
# create our little application :)
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
#DATABASE=os.path.join(app.root_path, 'autman.db'),
DEBUG=True,
SECRET_KEY='bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=',
USERNAME='admin',
PASSWORD='default',
IP_SAGE='192.168.0.18',
USER_SAGE='sage',
PASS_SAGE='sage'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
#Conecta ao banco
conn = sqlite3.connect('autman.db')
comandos = conn.execute('select c.codigo as equipamento, c.tipo as tipo, a.comando as comando, d.codigo as unidade, b.descricao AS Acao from roteiro_comando a inner join roteiro_manobra_item b on b.id=a.id_roteiro_manobra_item inner join equipamento c on c.id=a.id_equipamento inner join unidade d on d.id=b.id_unidade')
if comandos:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app.config['IP_SAGE'], username=app.config['USER_SAGE'], password=app.config['PASS_SAGE'])
for item_comando in comandos:
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("sage_ctrl %s:%s:%d %d" % (item_comando[3], item_comando[0], item_comando[1], item_comando[2]))
print "sage_ctrl %s:%s:%d %d" % (item_comando[3], item_comando[0], item_comando[1], item_comando[2]), "%s" % (item_comando[4])
time.sleep(4)
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("sage_ctrl %s:%s:%d %d" % (item_comando['unidade'], item_comando['equipamento'],item_comando['tipo'], item_comando['comando']))
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sage_ctrl JCD:14C1:52 0')
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('ls')
#for line in ssh_stdout:
# print '... ' + line.strip('\n')
ssh.close()
|
[
"engsergiodias28@gmail.com"
] |
engsergiodias28@gmail.com
|
b561d9b1c21f08c5647bd599c14beb24eee2dc86
|
e3d06e2f11e5afc623ffbd59143fa8b3dbd8f1f7
|
/DCGAN_train.py
|
056a21a18c7d859cb7c96536dd32ba00f620ae1e
|
[] |
no_license
|
yangpeiwen/implementation
|
931f6f1d8d475affcb95b6fd0baacfc0ec1325f5
|
a2bf3e1de98a78173f73e003bd888de9cd4a77e9
|
refs/heads/master
| 2020-04-29T15:18:00.296864
| 2019-03-27T12:47:09
| 2019-03-27T12:47:09
| 176,223,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#D网络中使用LeakyReLU作为激活函数
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from network_construction import DCGAN
#载入mnist数据集
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# 网路的两个输入,生成器需要的随机噪声和判别器需要的真实图片
noise_input = tf.placeholder(tf.float32, shape=[None, DCGAN.noise_dim])
real_image_input = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
#batch normalization需要的is_training参数
#is_training在这里需要还有DCGAN文件里面的函数也需要,直接调用DCGAN文件的placeholder创建
is_training = DCGAN.is_training
#训练网络实例化,返回的gen_vars与disc_vars暂时用不到
train_gen,train_disc,gen_loss,disc_loss,gen_vars,disc_vars = DCGAN.DCGAN_train(noise_input,real_image_input)
#开始训练
init = tf.global_variables_initializer()
sess = tf.Session()
#下面的训练参数与网络参数在DCGAN文件中都有,例:可以直接使用num_steps或者删去下面参数然后DCGAN.num_steps
# Training Params
num_steps = 10000
batch_size = 128
lr_generator = 0.002
lr_discriminator = 0.002
# Network Params
image_dim = 784 # 28*28 pixels * 1 channel
noise_dim = 100 # Noise data points
#初始化并且创建saver对象准备保存
sess.run(init)
saver = tf.train.Saver()
model_path = "/tmp/DCGAN_model.ckpt"
for i in range(1, DCGAN.num_steps+1):
batch_x, _ = mnist.train.next_batch(DCGAN.batch_size)
batch_x = np.reshape(batch_x, newshape=[-1, 28, 28, 1])
batch_x = batch_x * 2. - 1.
# 训练判别器
z = np.random.uniform(-1., 1., size=[DCGAN.batch_size, DCGAN.noise_dim])
_, dl = sess.run([train_disc, disc_loss], feed_dict={real_image_input: batch_x, noise_input: z, is_training:True})
# 训练生成器
z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
_, gl = sess.run([train_gen, gen_loss], feed_dict={noise_input: z, is_training:True})
if i % 500 == 0 or i == 1:
print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))
save_path = saver.save(sess,model_path)
print("Model saved in file: %s" % save_path)
|
[
"yangpeiwen"
] |
yangpeiwen
|
eef750f84f81a27c35f5f451faf9e9a1b93c1cc4
|
4c117ea3617a576ddd07d8ea8aaab1a925fc402f
|
/bin/individualization/VennPlot.py
|
18f444e66a82a4f9f64427b83e18f591f031b0f6
|
[] |
no_license
|
452990729/Rep-seq
|
7be6058ba3284bea81282f2db7fd3bd7895173ba
|
e217b115791e0aba064b2426e4502a5c1b032a94
|
refs/heads/master
| 2021-12-11T14:27:46.912144
| 2019-06-04T03:49:40
| 2019-06-04T03:49:40
| 190,124,555
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
#!/usr/bin/env python
import os
import sys
import re
import matplotlib
matplotlib.use('Agg')
import venn
from matplotlib import pyplot as plt
def HandleFq(file_in):
base = '_'.join(re.split('_', os.path.basename(file_in))[:2])
list_tmp = []
m = 0
with open(file_in, 'r') as in1:
for line in in1:
m += 1
if m%4 == 2:
list_tmp.append(line.strip())
return set(list_tmp), base
def ReadTab(file_in):
list_tmp = []
label = '_'.join(re.split('_', os.path.basename(file_in))[:2])
with open(file_in, 'r') as in1:
for line in in1.readlines()[1:]:
list_tmp.append(re.split('\t', line.strip())[36])
return set(list_tmp), label
def main():
len_arg = len(sys.argv)
if sys.argv[1] == 'fastq':
func = HandleFq
elif sys.argv[1] == 'tab':
func = ReadTab
list_l = []
list_lb = []
for i in range(len_arg-2):
l, lb = func(sys.argv[i+2])
list_l.append(l)
list_lb.append(lb)
labels = venn.get_labels(list_l, fill=['number',])
if len_arg == 4:
fig, ax = venn.venn2(labels, names=list_lb)
elif len_arg == 5:
fig, ax = venn.venn3(labels, names=list_lb)
elif len_arg == 6:
fig, ax = venn.venn4(labels, names=list_lb)
elif len_arg == 7:
fig, ax = venn.venn5(labels, names=list_lb)
elif len_arg == 8:
fig, ax = venn.venn6(labels, names=list_lb)
plt.savefig('{}wayvenn.png'.format(str(len_arg-2)))
if __name__ == '__main__':
main()
|
[
"452990729@qq.com"
] |
452990729@qq.com
|
ac0eac50d356d658ba3b95fa27707c44039e1d5d
|
a96b98aaec11160c0b9c5f3cee3471c2f50e8c1d
|
/flask_backend/question-classification.py
|
120ba1ab70086e7ddb7908cd6d156d938cf2b7b6
|
[] |
no_license
|
duvsr01/NLP-based-QA-System-for-custom-KG
|
ea486c5cdede0ef6a4882b3490e15b9be6e4ce97
|
ae7af74b21079b1cc441676064e9aa387d8177a2
|
refs/heads/main
| 2023-04-30T21:52:52.736928
| 2021-05-15T00:26:40
| 2021-05-15T00:26:40
| 305,169,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
import pickle
# Training data
# X is the sample sentences
X = [
'How many courses are taught by Dan Harkey?',
'What is number of faculty in SJSU?',
'How many machine learning courses are on Coursera?',
'How many students are in the world?',
'What is the email of Ram Shyam?',
'What is the email address of Albert Einstein?',
'What is the deadline to pay Fall 2021 Tuition Fee?',
'What are office hours of Vinodh Gopinath?',
'How many courses are offered by University of Hogwarts?',
'How to pay tuition fees?',
'Phone number of Mr Sam Igloo?',
'How can I get a bus pass?'
]
# y is the intent class corresponding to sentences in X
y = [
'aggregation_question',
'aggregation_question',
'aggregation_question',
'aggregation_question',
'factoid_question',
'factoid_question',
'factoid_question',
'factoid_question',
'aggregation_question',
'factoid_question',
'factoid_question',
'factoid_question'
]
# Define the classifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
clf = Pipeline(
[
('tfidf', TfidfVectorizer()),
('sgd', SGDClassifier())
]
)
## Train the classifier
#clf.fit(X, y)
# Test your classifier
## New sentences (that weren't in X and your model never seen before)
new_sentences = [
'What is number of students that study in CMPE department?',
'How can I reach CMPE department?',
'How to apply for graduation?',
'How many faulty in CS department?',
'Number of students CS department?',
'What is the address of CS department?'
]
#predicted_intents = clf.predict(new_sentences)
filename = 'finalized_model.sav'
#pickle.dump(clf, open(filename, 'wb'))
loaded_model = pickle.load(open(filename, 'rb'))
predicted_intents = loaded_model.predict(new_sentences)
print(predicted_intents)
|
[
"vijendersingh.aswal@sjsu.edu"
] |
vijendersingh.aswal@sjsu.edu
|
a29347fa5a55f754c48ba25f7b9c8c93f00f8db4
|
a45e45b5b3b706f369f586e7b03c5972eb21b926
|
/pythonsyntax/any7.py
|
61226e6cc4351b5b432ec75fe82928ac0cf3f5e7
|
[] |
no_license
|
khagerman/Python-Practice
|
44882bbcf876ab06536da0d4ec0e1a5d9b2bf10d
|
982dc7595691f32a6da6ef8fb918ec9dfdfdfd93
|
refs/heads/main
| 2023-03-27T15:27:51.889132
| 2021-03-31T21:38:08
| 2021-03-31T21:38:08
| 350,499,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def any7(nums):
"""Are any of these numbers a 7? (True/False)"""
# YOUR CODE HERE
for num in nums:
if num == 7:
return True
return False
print("should be true", any7([1, 2, 7, 4, 5]))
print("should be false", any7([1, 2, 4, 5]))
|
[
"71734063+khagerman@users.noreply.github.com"
] |
71734063+khagerman@users.noreply.github.com
|
1bff440e67a7189665b42fe0833a0c9b007950e7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_defenders.py
|
bb7548df4efbbe4fec4aeb39f3eec118e52a2ba7
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.nouns._defender import _DEFENDER
#calss header
class _DEFENDERS(_DEFENDER, ):
def __init__(self,):
_DEFENDER.__init__(self)
self.name = "DEFENDERS"
self.specie = 'nouns'
self.basic = "defender"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
705c2db27a5d0906938b557caab4e18133150a24
|
19ac1d0131a14ba218fd2c55d585170222eb9400
|
/social_login/wsgi.py
|
9523f947cda705e24cea5e1c828e7fb9ee17044c
|
[] |
no_license
|
oereo/social-login
|
4ed27658c585dd0a24484e628e053070fe012518
|
41e67b889354189c986da45bcf03c20c1f1063e3
|
refs/heads/master
| 2023-01-15T22:38:06.667909
| 2020-11-22T12:12:08
| 2020-11-22T12:12:08
| 303,985,281
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for social_login project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'social_login.settings')
application = get_wsgi_application()
|
[
"dlstpgns0406@gmail.com"
] |
dlstpgns0406@gmail.com
|
15d614e5ec83637c824c55ec0c2d7c4291482954
|
55877a854a6325b0ba8265645b94184f56839480
|
/spider/settings.py
|
e54a1a60ae1f076b59f6850ee210e7d072d32e79
|
[] |
no_license
|
xiaowuwuwuwuwu/scrapy_pager_frame
|
cc48cee4daaa655d78be336678ed18aa6e9037ca
|
bc3d9bd26b842fe66dba98ca3982ffd2fa1b8d39
|
refs/heads/master
| 2020-09-22T15:44:49.016852
| 2019-12-02T02:17:38
| 2019-12-02T02:17:38
| 225,263,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,803
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'spider'
SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
#不按照rebots进行爬取
ROBOTSTXT_OBEY = False
# Redis
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
#SCHEDULER_IDLE_BEFORE_CLOSE = 10
#REDIS_HOST = 'localhost'
#REDIS_PORT = 6379
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#处理并发数
#CONCURRENT_REQUESTS = 100
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = True
TELNETCONSOLE_HOST = '127.0.0.1'
TELNETCONSOLE_PORT = '6023'
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# 中间件
# KEY=中间件;VALUE=中间件顺序
SPIDER_MIDDLEWARES = {
#'spider.middlewares.TutorialSpiderMiddleware': 543,
#'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 531,
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': 500,
# 'tutorial.openextension.SpiderOpenCloseLogging': 501
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# 执行的优先度
# KEY=管道文件;VALUE=管道顺序
ITEM_PIPELINES = {
'spider.pipelines.SpiderPipeline': 300,
#'scrapy_redis.pipelines.RedisPipeline': 301
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#开启爬取速度间隔
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#开始间隔时间为3秒
#AUTOTHROTTLE_START_DELAY = 3
# The maximum download delay to be set in case of high latencies
#如果请求未响应,最大延迟时间为20秒
#AUTOTHROTTLE_MAX_DELAY = 20
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#爬取时Debug信息
#AUTOTHROTTLE_DEBUG = True
DOWNLOAD_DELAY = 5
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
#测试扩展
#MYEXT_ENABLED = True
################日志################
#开启日志
#LOG_ENABLED = True
#日志文件位置
#LOG_FILE = "日志路径"
#日志编码
#LOG_ENCODING = "utf-8"
#日志级别
#LOG_LEVEL = "DEBUG"
#标准输出
#LOG_STDOUT = False
#开启Cookie追踪
COOKIES_ENABLED = True
COOKIES_DEBUG = True
##################Web###############
#开启web服务
WEBSERVICE_ENABLED = True
#日志文件位置
WEBSERVICE_LOGFILE = "日志路径"
#端口
WEBSERVICE_PORT = [6080, 7030]
#主机
WEBSERVICE_HOST = '127.0.0.1'
#################自动限速###############
#启用AutoThrottle扩展
#AUTOTHROTTLE_ENABLED = True
#初始下载延迟(单位:秒)
#AUTOTHROTTLE_START_DELAY = 1.0
#在高延迟情况下最大的下载延迟(单位秒)
#AUTOTHROTTLE_MAX_DELAY = 60.0
#起用AutoThrottle调试(debug)模式,展示每个接收到的response
#AUTOTHROTTLE_DEBUG = True
#DOWNLOAD_DELAY = 1.0
|
[
"1059174412@qq.com"
] |
1059174412@qq.com
|
fb1dc48dad15f690de8d830e797d4ab28dc0f404
|
e247ce1a6e98772ad1fd7593f01d21971da7e738
|
/AlgorithmTest/Test/MatplotlibTest/TestMatlab.py
|
5706795c7c76e2b650e1c9ce131fc42f51dcb704
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
A666666685A/Multi-constrainedQoSRouting
|
a394d82d3acd71032918a8ffd651e42934f49baa
|
8ea2bd2a8602ed51379c7a89ea1fdf370b8f1ca7
|
refs/heads/master
| 2023-08-11T19:57:59.550777
| 2021-09-19T04:01:41
| 2021-09-19T04:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
s=raw_input("Input your age:")
if s =="":
raise Exception("Input must no be empty.")
try:
i=int(s)
except ValueError:
print "Could not convert data to an integer."
except:
print "Unknown exception!"
else: # It is useful for code that must be executed if the try clause does not raise an exception
print "You are %d" % i," years old"
finally: # Clean up action
print "Goodbye!"
|
[
"yueludanfeng@gmail.com"
] |
yueludanfeng@gmail.com
|
c423950c678b966d72c428c4dadd7d1045308bbb
|
c536c764aab4170c64f3f8b78bd91593dcb161a3
|
/vigenereb62.py
|
037292215097560084e9451db9c5655b7c2fb996
|
[] |
no_license
|
numberly/vigenere-b62
|
63bbc95c1f9390e9623a5882a9c2a14d110851b4
|
3dea3394ee557ba2e589af014cbc4454ebbbc874
|
refs/heads/master
| 2023-02-16T02:13:31.254670
| 2021-01-11T15:24:58
| 2021-01-11T15:24:58
| 328,698,862
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
def iter_reverse_digits(number, base):
while number != 0:
digit = number % base
yield digit
number -= digit
number //= base
def encode(alphabets, seed, size=6):
if len(alphabets) < size:
raise ValueError("There should be an alphabet per character you want")
secret = "".join(
alphabets[i][digit]
for i, digit in enumerate(iter_reverse_digits(seed, len(alphabets[0])))
)
secret += "".join(alphabets[i][0] for i in range(len(secret), size))
return secret
|
[
"julien@thebault.co"
] |
julien@thebault.co
|
5cd5782af0c7af2c6f90c48001a91cd1e255da08
|
c9e95974e3f3320f2da36ba23403d46e00ac884d
|
/projects/mars/model_classes/MarsSurface.py
|
00b902223fb2354c3cade7b796303c0febaf41b3
|
[
"MIT"
] |
permissive
|
ModelFlow/modelflow
|
877ff8d80ab2987b0572bebcf3753ae0942a5ae2
|
c2b720b2da8bb17462baff5c00bbe942644474b0
|
refs/heads/master
| 2023-07-12T17:22:49.540043
| 2021-08-26T03:51:26
| 2021-08-26T03:51:26
| 280,748,869
| 8
| 0
|
MIT
| 2021-08-18T19:48:57
| 2020-07-18T22:17:52
|
Python
|
UTF-8
|
Python
| false
| false
| 729
|
py
|
class MarsSurface:
name = "Mars Surface"
params = []
states = [
{
"key": "atmospheric_co2",
"label": "Atmospheric CO2",
"units": "kg",
"private": False,
"value": 999999999,
"confidence": 0,
"notes": "",
"source": "fake"
},
{
"key": "temperature",
"label": "Temperature",
"units": "c",
"private": False,
"value": 0,
"confidence": 0,
"notes": "",
"source": "fake"
}
]
@staticmethod
def run_step(states, params, utils):
# TODO: Have temperature change with time
pass
|
[
"1890491+adamraudonis@users.noreply.github.com"
] |
1890491+adamraudonis@users.noreply.github.com
|
aecd6191686bd841066715f69f2dbd3ae327fd10
|
6c55174a3ecfc0757ed04700ea4c549e6b9c45d2
|
/lib/koala/utils/mail.py
|
7b1dd4c0909e722872e3f53c3cf673b8b6b516a3
|
[] |
no_license
|
adefelicibus/koala-server
|
ce7cbc584b0775482b60e2eb72794104f2fe0cf3
|
defec28c30a9fc4df2b81efeb8df4fc727768540
|
refs/heads/master
| 2020-05-25T15:43:43.772302
| 2016-04-26T02:08:56
| 2016-04-26T02:08:56
| 38,928,778
| 2
| 3
| null | 2016-02-23T20:37:11
| 2015-07-11T14:38:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from email.Utils import formatdate
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import datetime
import os
from koala.utils import show_error_message
# TODO: take the smtp configuration from galaxy's config.ini file
# TODO: review exception rules
def get_message_email(tool_name):
try:
now = datetime.datetime.now()
tupla = now.timetuple()
data = str(
tupla[2]) + '/' + str(tupla[1]) + '/' + \
str(tupla[0]) + ' ' + str(tupla[3]) + ':' + str(tupla[4]) + ':' + str(tupla[5])
tool_name = tool_name.replace('_', ' ')
messageEmail = '''Hi,
Your simulation has been conclued at ''' + data + '''.
You have to go to your History and download it.
Best Regards.
%s''' % tool_name
return messageEmail
except Exception, e:
show_error_message("Error while getMessageEmail email!\n%s" % e)
def send_email(de, para, assunto, mensagem, arquivos, servidor):
try:
# Cria o objeto da mensagem
msg = MIMEMultipart()
# Define o cabeçalho
msg['From'] = de
msg['To'] = para
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = assunto
# Atacha o texto da mensagem
msg.attach(MIMEText(mensagem))
# Atacha os arquivos
for arquivo in arquivos:
parte = MIMEBase('application', 'octet-stream')
parte.set_payload(open(arquivo, 'rb').read())
encoders.encode_base64(parte)
parte.add_header(
'Content-Disposition', 'attachment; filename="%s"' % os.path.basename(arquivo)
)
msg.attach(parte)
# Conecta ao servidor SMTP
smtp = smtplib.SMTP(servidor, 587)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
# Faz login no servidor
smtp.login('adefelicibus@gmail.com', 'mami1752@')
try:
# Envia o e-mail
smtp.sendmail(de, para, msg.as_string())
finally:
# Desconecta do servidor
smtp.close()
except Exception, e:
show_error_message("Error when SendEmail:\n%s" % e)
|
[
"adefelicibus@gmail.com"
] |
adefelicibus@gmail.com
|
23206587aae4835dbc893edeaad63d67170d75c3
|
23e877d2e65cdc49cf9a456845470f97194674bc
|
/src/main/resources/http/http_request.py
|
e9a3e1cdc87380b5ff871b18466c069841a84cdd
|
[
"MIT"
] |
permissive
|
xebialabs-community/xld-github-dynamic-dictionaries-plugin
|
77da6a4fea1ca2b96207d77b0396011e088ac850
|
67c3a596f4a7f58f9d0a939bb57091d1f82c51ee
|
refs/heads/master
| 2021-07-13T17:15:15.222551
| 2020-11-02T12:49:14
| 2020-11-02T12:49:14
| 68,606,897
| 2
| 2
|
MIT
| 2021-03-26T22:14:23
| 2016-09-19T13:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 9,826
|
py
|
#
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import re
import urllib
from java.lang import String
from org.apache.commons.codec.binary import Base64
from org.apache.http import HttpHost
from org.apache.http.client.config import RequestConfig
from org.apache.http.client.methods import HttpGet, HttpPost, HttpPut, HttpDelete
from org.apache.http.util import EntityUtils
from org.apache.http.impl.client import HttpClients
from http.http_response import HttpResponse
class HttpRequest:
def __init__(self, params, username = None, password = None):
"""
Builds an HttpRequest
:param params: an HttpConnection
:param username: the username
(optional, it will override the credentials defined on the HttpConnection object)
:param password: an password
(optional, it will override the credentials defined on the HttpConnection object)
"""
self.params = params
self.username = username
self.password = password
def do_request(self, **options):
"""
Performs an HTTP Request
:param options: A keyword arguments object with the following properties :
method: the HTTP method : 'GET', 'PUT', 'POST', 'DELETE'
(optional: GET will be used if empty)
context: the context url
(optional: the url on HttpConnection will be used if empty)
body: the body of the HTTP request for PUT & POST calls
(optional: an empty body will be used if empty)
contentType: the content type to use
(optional, no content type will be used if empty)
headers: a dictionary of headers key/values
(optional, no headers will be used if empty)
:return: an HttpResponse instance
"""
request = self.build_request(
options.get('method', 'GET'),
options.get('context', ''),
options.get('entity', ''),
options.get('contentType', None),
options.get('headers', None))
return self.execute_request(request)
def do_request_without_headers(self, **options):
"""
Performs an HTTP Request
:param options: A keyword arguments object with the following properties :
method: the HTTP method : 'GET', 'PUT', 'POST', 'DELETE'
(optional: GET will be used if empty)
context: the context url
(optional: the url on HttpConnection will be used if empty)
body: the body of the HTTP request for PUT & POST calls
(optional: an empty body will be used if empty)
contentType: the content type to use
(optional, no content type will be used if empty)
headers: a dictionary of headers key/values
(optional, no headers will be used if empty)
:return: an HttpResponse instance
"""
request = self.build_request_without_headers(
options.get('method', 'GET'),
options.get('context', ''),
options.get('entity', ''))
return self.execute_request(request)
def get(self, context, **options):
"""
Performs an Http GET Request
:param context: the context url
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'GET'
options['context'] = context
return self.do_request(**options)
def put(self, context, entity, **options):
"""
Performs an Http PUT Request
:param context: the context url
:param body: the body of the HTTP request
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'PUT'
options['context'] = context
options['entity'] = entity
return self.do_request(**options)
def post(self, context, entity, **options):
    """Perform an HTTP POST of *entity* to *context*; see do_request()."""
    merged = dict(options, method='POST', context=context, entity=entity)
    return self.do_request(**merged)
def post_without_headers(self, context, entity, **options):
    """POST *entity* to *context* with no custom headers applied.

    See do_request_without_headers() for the remaining options.
    """
    merged = dict(options, method='POST', context=context, entity=entity)
    return self.do_request_without_headers(**merged)
def delete(self, context, **options):
    """Perform an HTTP DELETE request against *context*; see do_request()."""
    merged = dict(options, method='DELETE', context=context)
    return self.do_request(**merged)
def build_request(self, method, context, entity, contentType, headers):
    """Create an Apache HttpClient request object for the given verb.

    :param method: HTTP verb ('GET', 'POST', 'PUT', 'DELETE'); case-insensitive
    :param context: context URL joined onto the configured base URL
    :param entity: request body entity, attached for POST/PUT only
    :param contentType: value used for both Content-Type and Accept headers
    :param headers: optional dict of extra header key/values
    :raises Exception: for any unsupported verb
    """
    url = self.quote(self.create_path(self.params.getUrl(), context))
    method = method.upper()
    if method == 'GET':
        request = HttpGet(url)
    elif method == 'POST':
        request = HttpPost(url)
        request.setEntity(entity)
    elif method == 'PUT':
        request = HttpPut(url)
        request.setEntity(entity)
    elif method == 'DELETE':
        request = HttpDelete(url)
    else:
        raise Exception('Unsupported method: ' + method)
    # NOTE(review): Content-Type/Accept are added even when contentType is
    # None — confirm addHeader tolerates a None value.
    request.addHeader('Content-Type', contentType)
    request.addHeader('Accept', contentType)
    self.set_credentials(request)
    self.set_proxy(request)
    self.setHeaders(request, headers)
    return request
def build_request_without_headers(self, method, context, entity):
    """Like build_request(), but sets no Content-Type/Accept/custom headers.

    Credentials and proxy settings are still applied.
    :raises Exception: for any unsupported verb
    """
    url = self.quote(self.create_path(self.params.getUrl(), context))
    method = method.upper()
    if method == 'GET':
        request = HttpGet(url)
    elif method == 'POST':
        request = HttpPost(url)
        request.setEntity(entity)
    elif method == 'PUT':
        request = HttpPut(url)
        request.setEntity(entity)
    elif method == 'DELETE':
        request = HttpDelete(url)
    else:
        raise Exception('Unsupported method: ' + method)
    self.set_credentials(request)
    self.set_proxy(request)
    return request
def create_path(self, url, context):
    """Join *context* onto *url*, normalising slashes at the seam."""
    base = re.sub('/*$', '', url)  # drop any trailing slashes
    if context is None:
        return base
    separator = '' if context.startswith('/') else '/'
    return base + separator + context
def quote(self, url):
    # Percent-encode the URL while leaving URL-structural characters
    # (':/?&=%') intact.  Python 2 / Jython API: urllib.quote.
    return urllib.quote(url, ':/?&=%')
def set_credentials(self, request):
    """Attach HTTP Basic auth, preferring explicit credentials over params."""
    if self.username:
        username = self.username
        password = self.password
    elif self.params.getUsername():
        username = self.params.getUsername()
        password = self.params.getPassword()
    else:
        return  # no credentials configured: send the request anonymously
    # Java interop: Base64-encode "user:password" per the Basic auth scheme.
    encoding = Base64.encodeBase64String(String(username + ':' + password).getBytes())
    request.addHeader('Authorization', 'Basic ' + encoding)
def set_proxy(self, request):
    """Route *request* through the proxy configured in params, if any."""
    if not self.params.getProxyHost():
        return  # no proxy configured
    proxy = HttpHost(self.params.getProxyHost(), int(self.params.getProxyPort()))
    config = RequestConfig.custom().setProxy(proxy).build()
    request.setConfig(config)
def setHeaders(self, request, headers):
    """Copy every entry of the optional *headers* mapping onto *request*."""
    if not headers:
        return
    for name in headers:
        request.setHeader(name, headers[name])
def execute_request(self, request):
    """Execute *request*, drain the response body and wrap the result.

    :return: HttpResponse(status, body, headers); body is None when the
             response carries no entity.
    The client and the underlying response are always closed, even when
    execution fails.
    """
    client = None
    response = None
    try:
        client = HttpClients.createDefault()
        response = client.execute(request)
        status = response.getStatusLine().getStatusCode()
        entity = response.getEntity()
        # Decode the body eagerly so the connection can be released below.
        result = EntityUtils.toString(entity, "UTF-8") if entity else None
        headers = response.getAllHeaders()
        EntityUtils.consume(entity)
        return HttpResponse(status, result, headers)
    finally:
        if response:
            response.close()
        if client:
            client.close()
|
[
"bmoussaud@xebialabs.com"
] |
bmoussaud@xebialabs.com
|
4dbac7a2a1cb6e13f4d8d326dca4790eaae5658c
|
2715a573e2faf4d52af2578c40e4fd3cbac80c05
|
/analysis/spectrum.py
|
9134c26ebdcf3d665cf13ef2876cc2d3e022a42b
|
[] |
no_license
|
legend-exp/CAGE
|
9a67d945727831c3b084e177db3a2ff28e4599b1
|
71dfd9f27b6125853e2d3e09d07db7836bf10348
|
refs/heads/master
| 2023-08-03T21:45:57.955025
| 2023-08-03T20:18:33
| 2023-08-03T20:18:33
| 198,919,238
| 0
| 15
| null | 2022-07-01T17:00:15
| 2019-07-26T00:35:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
import sys, h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pygama.io.io_base as io
def main():
    """Entry point: histogram the trapE spectrum from a hard-coded run file."""
    # Hard-coded local path to a pygama DSP output (LH5) file.
    filename = '/Users/gothman/Data/CAGE/pygama_dsp/dsp_run42.lh5'
    plot_spectrum(filename)
def plot_spectrum(filename):
    """Plot a 1000-bin histogram of the 'trapE' column of an LH5 file.

    Reads the 'data' object from *filename* via pygama's LH5Store and
    shows the matplotlib window (blocks until it is closed).
    """
    lh5 = io.LH5Store()
    df = lh5.read_object('data', filename).get_dataframe()
    df['trapE'].plot.hist(bins=1000)
    plt.show()
if __name__ == '__main__':
main()
|
[
"gulden.othman@gmail.com"
] |
gulden.othman@gmail.com
|
e4603076015ad9b338c87de21b02807faa509853
|
91948d5be26636f1f2b941cb933701ea626a695b
|
/amazon_longest_substring_with_no_repeat.py
|
30208e55e14fb6ba9b3eabe03ddda30851bc6a3b
|
[
"MIT"
] |
permissive
|
loghmanb/daily-coding-problem
|
4ae7dd201fde5ee1601e0acae9e9fc468dcd75c9
|
b2055dded4276611e0e7f1eb088e0027f603aa7b
|
refs/heads/master
| 2023-08-14T05:53:12.678760
| 2023-08-05T18:12:38
| 2023-08-05T18:12:38
| 212,894,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
'''
Longest Substring Without Repeat
Asked in: Amazon
https://www.interviewbit.com/problems/longest-substring-without-repeat/
Given a string,
find the length of the longest substring without repeating characters.
Example:
The longest substring without repeating letters for "abcabcbb" is "abc", which the length is 3.
For "bbbbb" the longest substring is "b", with the length of 1.
'''
# @param A : string
# @return an integer
def lengthOfLongestSubstring(A):
    """Length of the longest substring of A with all-distinct characters.

    Two-pointer sliding window over a set of the characters currently
    inside the window: O(len(A)) time, O(alphabet) extra space.
    """
    if not A:
        return 0
    seen = set()
    best = 0
    left = 0
    for right, ch in enumerate(A):
        # shrink from the left until ch can join the window
        while ch in seen:
            seen.remove(A[left])
            left += 1
        seen.add(ch)
        best = max(best, right - left + 1)
    return best
if __name__ == "__main__":
data = [
['abcabcbb', 3],
['Wnb9z9dMc7E8v1RTUaZPoDNIAXRlzkqLaa97KMWLzbitaCkRpiE4J4hJWhRcGnC8H6mwasgDfZ76VKdXhvEYmYrZY4Cfmf4HoSlchYWFEb1xllGKyEEmZOLPh1V6RuM7Mxd7xK72aNrWS4MEaUmgEn7L4rW3o14Nq9l2EN4HH6uJWljI8a5irvuODHY7A7ku4PJY2anSWnfJJE1w8p12Ks3oZRxAF3atqGBlzVQ0gltOwYmeynttUmQ4QBDLDrS4zn4VRZLosOITo4JlIqPD6t4NjhHThOjJxpMp9fICkrgJeGiDAwsb8a3I7Txz5BBKV9bEfMsKNhCuY3W0ZHqY0MhBfz1CbYCzwZZdM4p65ppP9s5QJcfjadmMMi26JKz0TVVwvNA8LP5Vi1QsxId4SI19jfcUH97wmZu0pbw1zFtyJ8GAp5yjjQTzFIboC1iRzklnOJzJld9TMaxqvBNBJKIyDjWrdfLOY8FGMOcPhfJ97Dph35zfxYyUf4DIqFi94lm9J0skYqGz9JT0kiAABQZDazZcNi80dSSdveSl6h3dJjHmlK8qHIlDsqFd5FMhlEirax8WA0v3NDPT8vPhwKpxcnVeu14Gcxr3h1wAXXV0y7Xy9qqB2NQ5HQLJ7cyXAckEYHsLCPSy28xcdNJatx1KLWohOQado4WywJbGvsFR17rKmvOPABweXnFD3odrbSMD4Na4nuBBswvMmFRTUOcf7jZi4z5JnJqXz6hitaPnaEtjoSEBq82a52nvqYy7hhldBoxen2et2OMadVEHeTYLL7GLsIhTP6UizHIuzcJMljo4lFgW5AyrfUlIBPAlhwaSiJtTvcbVZynDSM6RO1PqFKWKg2MHIgNhjuzENg2oFCfW7z5KJvEL9qWqKzZNc0o3BMRjS04NCHFvhtsteQoQRgz84XZBHBJRdekCdcVVXu9c01gYRAz7oIAxN3zKZb64EFKssfQ4HW971jv3H7x5E9dAszA0HrKTONyZDGYtHWt4QLhNsIs8mo4AIN7ecFKewyvGECAnaJpDn1MTTS4yTgZnm6N6qnmfjVt6ZU51F9BxH0jVG0kovTGSjTUkmb1mRTLQE5mTlVHcEz3yBOh4WiFFJjKJdi1HBIBaDL4r45HzaBvmYJPlWIomkqKEmQ4rLAbYG7C5rFfpMu8rHvjU7hP0JVvteGtaGn7mqeKsn7CgrJX1tb8t0ldaS3iUy8SEKAo5IZHNKOfEaij3nI4oRVzeVOZsH91pMsA4jRYgEohubPW8ciXwVrFi1qEWjvB8gfalyP60n1fHyjsiLW0T5uY1JzQWHKCbLVh7QFoJFAEV0L516XmzIo556yRH1vhPnceOCjebqgsmO78AQ8Ir2d4pHFFHAGB9lESn3OtJye1Lcyq9D6X93UakA3JKVKEt6JZDLVBMp4msOefkPKSw59Uix9d9kOQm8WCepJTangdNSOKaxblZDNJ5eHvEroYacBhd9UdafEitdF3nfStF7AhkSfQVC61YWWkKTNdx96OoJGTnxuqt4oFZNFtO7aMuN3IJAkw3m3kgZFRGyd3D3wweagNL9XlYtvZwejbjpkDOZz33C0jbEWaMEaUPw6BG49XqyQoUwtriguO0yvWyaJqD4ye3o0E46huKYAsdKAq6MLWMxF6tfyPVaoqOGd0eOBHbAF89XXmDd4AIkoFPXkAOW8hln5nXnIWP6RBbfEkPPbxoToMbV', 27]
]
for d in data:
print('input', d[0], lengthOfLongestSubstring(d[0]))
|
[
"loghmanb@gmail.com"
] |
loghmanb@gmail.com
|
65069c192bdcfc8bf792f8d1e63112e0837c7ea7
|
708e17ad98f3143abaf811357883e680991d711f
|
/python3/happyNum.py
|
26195bfb19651e99a7333f4f60b484243ba43fcc
|
[] |
no_license
|
yichuanma95/leetcode-solns
|
a363cc8e85f2e8cdd5d2cde6e976cd76d4c4ea93
|
6812253b90bdd5a35c6bfba8eac54da9be26d56c
|
refs/heads/master
| 2021-05-24T18:05:02.588481
| 2020-10-08T00:39:58
| 2020-10-08T00:39:58
| 253,690,413
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,132
|
py
|
'''
Problem 202: Happy Number
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with any positive
integer, replace the number by the sum of the squares of its digits, and repeat the process
until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does
not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example:
Input: 19
Output: true
Explanation:
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
Solution runtime: 24ms, faster than 99.77% of Python3 submissions
Solution memory usage: 12.7 MB, less than 100% of Python3 submissions
'''
class Solution:
    """LeetCode 202: detect 'happy' numbers via cycle detection on a set."""

    def isHappy(self, n: int) -> bool:
        """Return True iff repeatedly replacing n by the sum of the squares
        of its decimal digits eventually reaches 1.

        A repeated intermediate value means the process entered a cycle
        that never reaches 1, so n is not happy.

        >>> Solution().isHappy(19)
        True
        """
        seen = set()  # digit-square sums produced so far
        while True:
            if n in seen:
                return False  # cycle detected without hitting 1
            seen.add(n)
            n = self.sum_of_digit_squares(n)
            if n == 1:
                return True

    def sum_of_digit_squares(self, n):
        """Return the sum of the squares of n's decimal digits.

        >>> Solution().sum_of_digit_squares(19)
        82
        >>> Solution().sum_of_digit_squares(82)
        68
        """
        total = 0
        while n > 0:
            n, digit = divmod(n, 10)
            total += digit * digit
        return total
|
[
"ma.yich@husky.neu.edu"
] |
ma.yich@husky.neu.edu
|
5f6965f66911a55288b83b23515ceb2fe17157db
|
9303cc8be6a467be84ff03a1e476c299d7001077
|
/main.py
|
9e2c4a87e3f5afc2f68c3148a7bf9ada1678b59f
|
[
"MIT"
] |
permissive
|
AuthFailed/nCoV-tgbot
|
8c5908983c7f299ae17f134756d87306e5c5acf4
|
d2ecea97b76b6d733d38573cce1a72b8c4a9868e
|
refs/heads/master
| 2022-09-04T23:04:32.702175
| 2022-08-25T00:40:00
| 2022-08-25T00:40:00
| 237,244,007
| 0
| 0
|
MIT
| 2020-01-30T17:14:49
| 2020-01-30T15:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,474
|
py
|
from aiogram import executor
from aiogram.types import *
import info_handler
import keyboard as kb
from config import dp, bot
@dp.message_handler(commands=["start"])
async def start_message(msg: Message):
await msg.reply(
text="Привет! Я отслеживаю статистику заражения 2019-nCoV.\n"
"Используйте /menu чтобы получить всю информацию.")
@dp.message_handler(commands=["menu"])
async def menu_message(msg: Message):
await msg.reply(
text="Используйте *кнопки ниже*:",
reply_markup=kb.main_menu(),
)
@dp.callback_query_handler(lambda _call: True)
async def handle_callbacks(call: CallbackQuery):
"""Отлавливаем кэллбэки телеграма."""
if call.data == "current_stats":
info = info_handler.get_main_info()
await call.message.edit_text(
f"*Статистика 2019-nCoV*:\n\n"
f"Зараженных ☣️: *{info['Infected']}*\n\n"
f"На подозрении ❓: *{info['Possible']}*\n\n"
f"На карантине ☢️: *{info['Quarantine']} ({info['Quarantined_Cities']} городов)\n\n*"
f"Вылечившихся 💊: *{info['Recovered']}*\n\n"
f"Смерти ☠️: *{info['Deaths']}*\n\n"
f"_Смертность составляет {info['Death_Rate']}%_\n"
f"Последнее обновление: *{info['Date']} MSK*",
reply_markup=kb.main_menu(),
)
await call.answer()
elif call.data == "quarantined_cities":
table = info_handler.get_table_cities()
answer_message = "*Города на карантине*\n(Город\t\t|\t\t дата закрытия\t\t|\t\tНаселение)__\n\n"
for i in range(len(table) - 1):
answer_message += f"{table[i][0]} - {table[i][1]} - {table[i][2]}\n"
await call.message.edit_text(
answer_message + "__",
reply_markup=kb.main_menu())
await call.answer()
elif call.data == "disease_forecast":
table = info_handler.disease_forecast()
answer_message = "*Прогноз заражения по Китаю на ближайшие 5 дней:*\n\n" \
"*Дата* |\t\t\t*Кол-во инфицированных*\n"
for i in range(len(table)):
answer_message += f"{table[i][0]}\t\t\t|\t\t\t{table[i][1]}\n"
answer_message = answer_message.replace("(Прогноз)", "`(Прогноз)`")
await call.message.edit_text(answer_message +
"\n\n_На основании данных статистики за последние 5 дней по Китаю (текущий день не учитывается)"
"\nСтатистика актуальна при среднем модификаторе заражения в 1.304180_",
reply_markup=kb.main_menu())
await call.answer()
elif call.data == "back_to_home":
await call.message.edit_text("Используйте *кнопки ниже*:",
reply_markup=kb.main_menu())
await call.answer()
@dp.inline_handler()
async def inline_stats(inline_query: InlineQuery):
info = info_handler.get_main_info()
text = (f"*Статистика 2019-nCoV*:\n\n"
f"Зараженных ☣️: *{info['Infected']}*\n\n"
f"На подозрении ❓: *{info['Possible']}*\n\n"
f"На карантине ☢️: *{info['Quarantine']} ({info['Quarantined_Cities']} городов)\n\n*"
f"Вылечившихся 💊: *{info['Recovered']}*\n\n"
f"Смерти ☠️: *{info['Deaths']}*\n\n"
f"_Смертность составляет {info['Death_Rate']}%_\n"
f"Последнее обновление: *{info['Date']} MSK*")
input_content = InputTextMessageContent(text)
item = InlineQueryResultArticle(
id="1", title="2019-nCoV stats", input_message_content=input_content
)
await bot.answer_inline_query(inline_query.id, results=[item], cache_time=1)
# @dp.errors_handler()
# async def error_handler():
if __name__ == "__main__":
executor.start_polling(dp, skip_updates=True)
|
[
"lenz1e973nyro"
] |
lenz1e973nyro
|
8339c4b6670fe18b61771e18903739838373f58c
|
01ce2eec1fbad3fb2d98085ebfa9f27c7efb4152
|
/itertools/itertools-combinations.py
|
b32166fe2a76aece52bb636b0b8705a63f17c3ce
|
[
"MIT"
] |
permissive
|
anishLearnsToCode/hackerrank-python
|
4cfeaf85e33f05342df887896fa60dae5cc600a5
|
7d707c07af051e7b00471ebe547effd7e1d6d9d9
|
refs/heads/master
| 2023-01-01T23:39:01.143328
| 2020-11-01T07:31:15
| 2020-11-01T07:31:15
| 265,767,347
| 8
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# https://www.hackerrank.com/challenges/itertools-combinations/problem
import itertools

# Input: a word and an integer K on one line; print every combination of
# the word's (sorted) letters of each size 1..K, one per line.
tokens = input().split()
letters = sorted(tokens[0])
max_size = int(tokens[1])
for size in range(1, max_size + 1):
    for combo in itertools.combinations(letters, size):
        print(''.join(combo))
|
[
"anish_bt2k16@dtu.ac.in"
] |
anish_bt2k16@dtu.ac.in
|
4082075c20005fab8b339bf42d30021fa63be367
|
efdc94781d5be9e018c84d5ac5d1b988c2806c68
|
/images_dialog.py
|
0d5eaf1776a5d2229eca96f68c63264926d00079
|
[] |
no_license
|
vadimmpog/PyCalib
|
bf0a8d46a086feef4bca5d33d7222578c1e98ff0
|
0508dd1745ef341f86f5d9b7977f05d7dc3c031b
|
refs/heads/main
| 2023-08-20T03:07:56.594670
| 2021-10-27T06:43:37
| 2021-10-27T06:43:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,255
|
py
|
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QDialog
from PyQt5 import QtWidgets
import imutils
class ImagesDialog(QDialog):
    """Modal frame browser: page through `frames` with <</>> buttons.

    With show=False the dialog also lets the user tick individual frames;
    the choices end up in `self.selected_frames` (or None after cancel).
    """

    def __init__(self, frames, show=False):
        super().__init__()
        self.current_image = 0      # index of the frame currently displayed
        self.frames = frames
        self.frames_num = len(frames)
        # NOTE(review): this shadows QWidget.show(); confirm nothing needs
        # to call .show() on this dialog.
        self.show = show
        if not show:
            # one selection flag per frame
            self.selected_frames = [False for _ in range(self.frames_num)]
        self.setWindowTitle("Добавление")
        self.setFixedSize(724, 519)
        # OK/Cancel button box in the bottom-right corner
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setGeometry(QtCore.QRect(480, 470, 211, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        # central label that displays the current frame's pixmap
        self.label = QtWidgets.QLabel(self)
        self.label.setMargin(30)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        # navigation area: <</>> buttons, "i/N" counter, optional checkbox
        self.gridLayoutWidget = QtWidgets.QWidget(self)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(260, 430, 195, 80))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.pushButton_2 = QtWidgets.QPushButton(self.gridLayoutWidget)  # "next"
        self.pushButton_2.setObjectName("pushButton_2")
        self.gridLayout.addWidget(self.pushButton_2, 1, 1, 1, 1)
        self.pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)  # "previous"
        self.pushButton.setObjectName("pushButton")
        self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)  # "i/N" counter
        self.label_2.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        if not show:
            # selection checkbox is only present in selection mode
            self.checkBox = QtWidgets.QCheckBox(self.gridLayoutWidget)
            self.checkBox.setObjectName("checkBox")
            self.gridLayout.addWidget(self.checkBox, 0, 1, 1, 1)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        QtCore.QMetaObject.connectSlotsByName(self)
        self.set_logic()

    def set_logic(self):
        """Wire up captions and signal handlers, then render the first frame."""
        _translate = QtCore.QCoreApplication.translate  # unused; kept as-is
        self.setWindowTitle("Просмотр фреймов")
        self.label.setText("Пустой кадр")
        self.pushButton_2.setText(">>")
        self.pushButton_2.clicked.connect(self.next_image)
        self.pushButton.setText("<<")
        self.pushButton.clicked.connect(self.previous_image)
        if not self.show:
            self.checkBox.setText("выбрать")
            self.checkBox.clicked.connect(self.select_frame)
        self.choose_frames()

    def choose_frames(self, i=0):
        """Render frame *i* into the label and refresh the 'i/N' counter."""
        self.label_2.setText(f"{i + 1}/{self.frames_num}")
        image = imutils.resize(self.frames[i], width=550)
        height, width, channel = image.shape
        bytesPerLine = 3 * width  # 3 bytes (channels) per pixel
        # assumes the frame is an RGB ndarray — TODO confirm (OpenCV is BGR)
        qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format_RGB888)
        pix = QPixmap.fromImage(qImg)
        self.label.setPixmap(pix)

    def next_image(self):
        """Advance one frame (if not at the end) and sync the checkbox state."""
        if self.current_image < self.frames_num-1:
            self.current_image += 1
            self.choose_frames(i=self.current_image)
        if not self.show:
            self.checkBox.setChecked(self.selected_frames[self.current_image])

    def previous_image(self):
        """Step back one frame (if not at the start) and sync the checkbox."""
        if self.current_image > 0:
            self.current_image -= 1
            self.choose_frames(i=self.current_image)
        if not self.show:
            self.checkBox.setChecked(self.selected_frames[self.current_image])

    def select_frame(self):
        # Toggle the selection flag of the frame currently on screen.
        self.selected_frames[self.current_image] = not self.selected_frames[self.current_image]

    def reject(self):
        """Cancel: discard any selection by resetting it to None."""
        super().reject()
        if not self.show:
            self.selected_frames = None
|
[
"vadimmm120@yandex.ru"
] |
vadimmm120@yandex.ru
|
a7f1b70b6ba4951bee8aab80789e69f1581b33d1
|
c6bf1b52dce9eff35a91f261aa3c33f83c887d3a
|
/bai 4.15.py
|
63f32d74a7b6c67b8ee870d15e735a7cfe4a8ca7
|
[] |
no_license
|
bachdinhthang59ktdh/b-i-t-p-ktlt-tr-n-l-p
|
bfc88fe8a97a0524680d1063daa8d5283a38f8e1
|
7500173e45d0ac032d8657c82e53742de43f1b15
|
refs/heads/master
| 2022-08-31T22:55:29.845869
| 2020-05-25T06:22:38
| 2020-05-25T06:22:38
| 262,918,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
# Read whitespace-separated tokens from stdin, then print them in
# sorted order, one per line.
for token in sorted(input('nhap chuoi s').split()):
    print(token)
|
[
"noreply@github.com"
] |
bachdinhthang59ktdh.noreply@github.com
|
f50d553f88129bfc29a4c1bc98e9a6ddfe0af18b
|
090bceb6c9418b39056f8aa0204051da621eef01
|
/app/views.py
|
b24b5d9864c45018044b7a0e75b6974701d0c3e8
|
[] |
no_license
|
panasevychol/beetroot-test
|
627a1bb7b2935d908ed9b4da530ee77d21ae21fa
|
102b4dc1616f83038c5851da3f2c9dd83b8b2723
|
refs/heads/master
| 2021-05-01T22:42:43.723675
| 2016-12-30T13:13:51
| 2016-12-30T13:13:51
| 77,614,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import json
import sys
import time
from flask import render_template, request
from . import app
from .utils import find_games
@app.route('/')
def index():
    """Render the home page with games matching the `keywords` query arg."""
    search_terms = request.args.get('keywords', '')
    return render_template('index.html', games=find_games(search_terms))
|
[
"panasevychol@gmail.com"
] |
panasevychol@gmail.com
|
566302b568f0103bd3c6c2d54e6988ac6dd06f4b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/JD9vSKZGrxQhLbA9r_11.py
|
8153c6d8cc99992256ea1d82f8771cd6328f44f3
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
def pile_of_cubes(m):
    """Return n such that 1**3 + 2**3 + ... + n**3 == m, or None.

    Uses the identity sum-of-cubes == T_n**2 with T_n = n*(n+1)/2, so m is
    a valid pile exactly when sqrt(m) is a triangular number.  All checks
    use exact integer math (math.isqrt), which removes the original
    float-precision hack that rejected every m >= ~1.02e22 and also makes
    non-positive inputs return None instead of raising.

    :param m: candidate total number of cubes (int)
    :return: the pile height n, or None when m is not a cube pile
    """
    from math import isqrt
    if m < 1:
        return None
    root = isqrt(m)
    if root * root != m:
        return None  # m is not a perfect square, so not T_n**2
    # Solve n*(n+1)/2 == root exactly: n = (-1 + sqrt(1 + 8*root)) / 2.
    disc = isqrt(8 * root + 1)
    if disc * disc != 8 * root + 1:
        return None  # root is not triangular
    n = (disc - 1) // 2
    return n if n * (n + 1) // 2 == root else None
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
5722c5bd79ba59802f5e4174de590823f9b31f54
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631989306621952_1/Python/Hotshot8325/Q2.py
|
c61b1a46284a8ff8a0e7daff7477923bbd7b7f0f
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# CodeJam "Last Word": for each input string, build the lexicographically
# largest word by prepending each letter when it ranks >= the current first
# letter, otherwise appending it.
# (Python 2 source: print statement, string.uppercase.)
import csv
import string
# Parse the test file: row 0 holds the number of cases, each following row
# holds one input string.
with open('a-large.in') as csvfile:
    testCase = csv.reader(csvfile, delimiter = ' ', quotechar='|')
    rowNum = 0
    inputText = []
    #swapCount = []
    for row in testCase:
        #row = [str(i) for i in row]
        if rowNum == 0:
            numTestCases = int(row[0])
        else:
            inputText.append(row)
        rowNum = rowNum + 1
for i in range(0,numTestCases):
    letterInput = inputText[i][0]
    lastWord = letterInput[0]
    for j in range(1,len(letterInput)):
        # Prepend when the new letter ranks >= the current leading letter.
        if string.uppercase.index(letterInput[j])>=string.uppercase.index(lastWord[0]):
            lastWord = letterInput[j]+lastWord
        else:
            lastWord = lastWord +letterInput[j]
    print "Case #"+str(i+1)+": "+lastWord
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
b3c4bd9dc92f583c4160e397ad5aca581ce33ed0
|
a14e3faea802cbe20e0c65995bf67b84c41bf0f4
|
/tests/test_car_generator.py
|
7f6bf58e5bcc413c4cd1624b849d2bdd5335d003
|
[
"MIT"
] |
permissive
|
DrimTim32/py_proj_lights
|
aafdc4b1a0d8de8926c56f92682a9058b3b92db7
|
a056e7292b0b81db95316d5d0f517c69a0d473e8
|
refs/heads/master
| 2020-07-29T00:37:29.021483
| 2017-02-07T15:51:09
| 2017-02-07T15:51:09
| 73,689,047
| 0
| 0
|
MIT
| 2020-07-14T19:00:44
| 2016-11-14T09:35:24
|
Python
|
UTF-8
|
Python
| false
| false
| 769
|
py
|
"""This file contains tests for car generator"""
import sys
from simulation import Directions, TurnDirection
from simulation.generators import CarProperGenerator
if "core" not in sys.path[0]:
sys.path.insert(0, 'core')
def test_lights_generator():
    """CarProperGenerator should honour per-direction probability rows."""
    spawn_probs = {Directions.TOP: [[0, 0, 0]],
                   Directions.BOTTOM: [[0, 0, 0], [1, 0, 0]],
                   Directions.RIGHT: [[0, 1, 0]],
                   Directions.LEFT: [[0, 0, 1]]}
    generator = CarProperGenerator(spawn_probs)
    # All-zero row spawns nothing; a 1 in a column forces that turn.
    assert generator.generate(Directions.TOP, 0) is None
    assert generator.generate(Directions.BOTTOM, 1).turn_direction == TurnDirection.RIGHT
    assert generator.generate(Directions.RIGHT, 0).turn_direction == TurnDirection.STRAIGHT
    assert generator.generate(Directions.LEFT, 0).turn_direction == TurnDirection.LEFT
|
[
"barteks95@gmail.com"
] |
barteks95@gmail.com
|
22dbc2be582ff1eae04ea4b6343fb46b0511f014
|
20552c79d92593ab8c574a61ac0dcbd25aa09e2e
|
/Account/models.py
|
825a6a29f4baa7fb9b2f27a207b867a72a95be82
|
[] |
no_license
|
junaidgirkar/Unicode_REST-API
|
85580f2c85148c1b11ee2fffaae8d8b40aa91def
|
d9f812f867aabec7df9458511dfb03e7794d7de7
|
refs/heads/master
| 2023-01-04T11:27:17.964846
| 2020-10-29T13:21:04
| 2020-10-29T13:21:04
| 297,365,250
| 1
| 2
| null | 2020-10-29T13:21:06
| 2020-09-21T14:29:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils.translation import ugettext_lazy as _
from .managers import UserManager, StudentManager, TeacherManager
# Create your models here.
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model keyed on e-mail address instead of a username."""

    email = models.EmailField(_('email address'), unique = True)
    first_name = models.CharField(_('first_name'), max_length = 40)
    last_name = models.CharField(_('last name'), max_length = 40)
    date_joined = models.DateTimeField(_('date joined'), auto_now_add = True)
    is_active = models.BooleanField(_('active'), default = True)
    is_staff = models.BooleanField(_('staff status'), default=False)
    is_superuser = models.BooleanField(_('is superuser'), default = False)
    is_admin = models.BooleanField(_('is admin'), default=False)
    # role flags referenced by the Student/Teacher subclasses below
    is_student = models.BooleanField(_('is student'), default = False)
    is_teacher = models.BooleanField(_('is teacher'), default = False)
    objects = UserManager()
    # authenticate with the e-mail field instead of a username
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
    def get_short_name(self):
        """Return just the first name."""
        return self.first_name
    def get_full_name(self):
        """Return 'first_last' (underscore-joined)."""
        return self.first_name + "_" + self.last_name
    def save(self, *args, **kwargs):
        # NOTE(review): AbstractBaseUser declares no `username` field here,
        # so this only sets an in-memory attribute — confirm it is needed.
        self.username = self.email
        super(User, self).save(*args, **kwargs)
    def __str__(self):
        return self.email
class Student(User):
    """Student profile; multi-table inheritance with a parent link to User."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)
    # NOTE(review): these assign attributes on the *field object* at class
    # definition time, not on saved instances — they most likely never set
    # the is_student/is_teacher columns; verify (e.g. set them in save()).
    user.is_student = True
    user.is_teacher = False
    branch = models.CharField(max_length=40)
    # NOTE(review): default=0 on a CharField — presumably should be '0' or ''.
    sap_id = models.CharField(max_length=12, default=0, blank=True)
    objects = StudentManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    def __str__(self):
        return self.user.email
class Teacher(User):
    """Teacher profile; multi-table inheritance with a parent link to User."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)
    # NOTE(review): attribute assignment on the field object, not instances —
    # same concern as in Student; the role columns are probably never set.
    user.is_student = False
    user.is_teacher = True
    subject = models.CharField(max_length=40)
    objects = TeacherManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    def __str__(self):
        return self.user.email
|
[
"60307291+junaidgirkar@users.noreply.github.com"
] |
60307291+junaidgirkar@users.noreply.github.com
|
7da8e44c7b81b0928a7aa944b72042d967acb70c
|
34f3d3c01a29b05e58d7dccca2ac5776e2324d0f
|
/files/zipModule.py
|
c6eb11ca46c953549e13a40ae56467d84e0acd7d
|
[] |
no_license
|
nethirangasai/pythonpgms
|
d50c485c7f13ba0bdd78b79508d4792caf5e7a20
|
c0bfddfea95b22e32cfa53ee8b531b6535b1df42
|
refs/heads/master
| 2020-05-27T09:55:00.094520
| 2019-05-26T07:56:01
| 2019-05-26T07:56:01
| 188,574,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from zipfile import ZipFile, ZIP_DEFLATED

# Bundle the course files into one compressed archive.  The original
# closed the archive manually, leaking the handle (and potentially the
# ZIP central directory) if any write raised; `with` guarantees closure.
with ZipFile('files.zip', 'w', ZIP_DEFLATED) as archive:
    archive.write('csvReading.py')
    archive.write('csvWriting.py')
    archive.write('students.csv')
|
[
"rangasai.nethi@gmail.com"
] |
rangasai.nethi@gmail.com
|
c9062fbe8e75b4749ea59e439897d1de93808c00
|
a88ac040aa274d94ac8decbbf43a585af56cf825
|
/src/perftest.py
|
d8bb9fedffb0ea4c58157bde543ff3c510c1343f
|
[] |
no_license
|
s7evinkelevra/Agent-Model
|
6dd0544326502c00572db2c2f4cf9785092e9ef3
|
f25cde7190736778dbf0d0a5a45fa3a3f3f1efc3
|
refs/heads/master
| 2023-08-13T02:26:04.232434
| 2021-09-30T12:24:50
| 2021-09-30T12:24:50
| 403,736,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,984
|
py
|
from Bio.Seq import Seq
from matplotlib.ticker import LinearLocator
from matplotlib import cm
import random
from pprint import pprint
import itertools
from collections import deque
import uuid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
rng = np.random.default_rng()
# Random base sequence of length
def randomDNAseq(length):
    """Return a random DNA string (letters G, C, A, T) of *length* bases."""
    alphabet = 'GCAT'
    return ''.join(random.choice(alphabet) for _ in range(length))
# Random proteinogenic amino acids sequence of length
def randomASseq(length):
    """Return a random amino-acid string of *length* one-letter codes."""
    alphabet = 'ACDEFGHIKLMNOPQRSTUVWY'
    return ''.join(random.choice(alphabet) for _ in range(length))
# Random bitstring
def randomBitseq(length):
    """Return a random bitstring ('0'/'1' characters) of *length*."""
    return ''.join(random.choice('01') for _ in range(length))
# Generate allele with unique id and random position in peptide space
def randomPSallele(peptide_space_length):
    """Return an allele dict: random x/y in [1, peptide_space_length) and a UUID."""
    x_coord = rng.integers(low=1, high=peptide_space_length)
    y_coord = rng.integers(low=1, high=peptide_space_length)
    return {"x": x_coord, "y": y_coord, "id": uuid.uuid4()}
def sliding_window_iter(seq, width):
    """Yield successive *width*-tuples sliding over *seq*.

    Yields nothing when *seq* has fewer than *width* items.
    """
    iterator = iter(seq)
    win = tuple(itertools.islice(iterator, width))
    if len(win) != width:
        return
    yield win
    for item in iterator:
        win = win[1:] + (item,)
        yield win
# Sliding window iterator over sequence seq and of window width of n
def window(seq, n=2):
    """Yield a deque sliding over *seq* with window width *n*.

    The window is padded with None when *seq* is shorter than *n*.
    NOTE: the SAME deque object is yielded each time and mutated in place;
    consumers must copy it if they keep references across iterations.
    """
    iterator = iter(seq)
    buf = deque((next(iterator, None) for _ in range(n)), maxlen=n)
    yield buf
    for item in iterator:
        buf.append(item)
        yield buf
start_time = time.perf_counter_ns()
peptide_space_length = 1000
host_n = 10000
host_allele_initial_n = 150
host_allele_length = 9
host_fitness_initial = 1
host_fitness_increment = 0.2
host_species_n = 1
pathogen_n = 100000
pathogen_haplotype_initial_n = 400
pathogen_haplotype_length = 100
pathogen_fitness_initial = 1
pathogen_fitness_increment = 1
pathogen_species_n = 1
host_allele_pool = [[randomPSallele(peptide_space_length) for _ in range(
host_allele_initial_n)] for _ in range(host_species_n)]
def generateHost():
    """Draw a random host: a species plus two alleles from that species' pool."""
    species = random.choice(range(host_species_n))
    first_allele = random.choice(host_allele_pool[species])
    second_allele = random.choice(host_allele_pool[species])
    return {
        "species": species,
        "fitness": host_fitness_initial,
        "allele_1_id": first_allele["id"],
        "allele_1_x": first_allele["x"],
        "allele_1_y": first_allele["y"],
        "allele_2_id": second_allele["id"],
        "allele_2_x": second_allele["x"],
        "allele_2_y": second_allele["y"],
    }
host_data = [generateHost() for _ in range(host_n)]
hosts = pd.DataFrame(host_data)
pathogen_haplotype_pool = [[randomPSallele(peptide_space_length) for _ in range(
pathogen_haplotype_initial_n)] for _ in range(pathogen_species_n)]
def generatePathogen():
    """Draw a random pathogen: a species plus one haplotype from its pool."""
    species = random.choice(range(pathogen_species_n))
    chosen = random.choice(pathogen_haplotype_pool[species])
    return {
        "species": species,
        "fitness": pathogen_fitness_initial,
        "haplotype_id": chosen["id"],
        "haplotype_x": chosen["x"],
        "haplotype_y": chosen["y"],
    }
pathogen_data = [generatePathogen() for _ in range(pathogen_n)]
pathogens = pd.DataFrame(pathogen_data)
print(f'host count - {len(hosts)}')
print(f'host allele count (unique) - {len(hosts.allele_1_id.unique())}')
print(f'pathogen count - {len(pathogens)}')
print(
f'pathogen haplotype count (unique) - {len(pathogens.haplotype_id.unique())}')
sim_gen_n = 10000
sim_logging_interval = 50
sim_allele_subsample_n = 100
def uniqueAlleleCount():
    """Stub: the allele-frequency report below is disabled; prints a marker."""
    print("yeee")
    # Disabled draft of the real implementation (kept for reference):
    # print(hosts[['allele_1_id', 'allele_2_id']].value_counts())
    # print(hosts[['allele_1_id', 'allele_2_id']].values.ravel('K'))
    # print(len(pd.unique(hosts[['allele_1_id', 'allele_2_id']].values.ravel('K'))))
    # host_allele_all = hosts[['allele_1_id', 'allele_2_id']].values.ravel('K')
    # unique, counts = np.unique(host_allele_all, return_counts=True)
    # # print(np.asarray((unique,counts)).T)
    # print(counts)
    # plt.bar([str(i)[10:15] for i in unique], counts)
def eucDist(x0, y0, x1, y1):
    """Euclidean distance between points (x0, y0) and (x1, y1)."""
    dx = x1 - x0
    dy = y1 - y0
    return np.sqrt(dx * dx + dy * dy)
def infect(host):
    """Infection trial for a single host row (used via `hosts.apply(..., axis=1)`).

    Samples one pathogen from the global `pathogens` frame and, when either
    of the host's two alleles lies within distance 200 of that pathogen's
    haplotype, returns the host's fitness reduced by `host_fitness_increment`;
    otherwise returns the fitness unchanged.
    """
    attacker = pathogens.sample()
    d_first = eucDist(host["allele_1_x"], host["allele_1_y"],
                      attacker["haplotype_x"], attacker["haplotype_y"])
    d_second = eucDist(host["allele_2_x"], host["allele_2_y"],
                       attacker["haplotype_x"], attacker["haplotype_y"])
    # np.min kept (rather than builtin min) — operands are pandas objects.
    closest = np.min([d_first, d_second])
    if closest < 200:
        return host["fitness"] - host_fitness_increment
    return host["fitness"]
"""
for i in range(sim_gen_n):
# log every sim_logging_interval'th generation
if(i % sim_logging_interval == 0):
print("logging data")
# infection regieme
## each host is infected between 1 and n times
infecting_pathogen_species = 0
hosts["fitness"] = hosts.apply(infect, axis=1)
print(hosts)
break
"""
end_time = time.perf_counter_ns()
print((end_time-start_time) / 1000)
|
[
"kelevra.1337@gmail.com"
] |
kelevra.1337@gmail.com
|
b48dcc67a5875823dc15b6cb4f7142b0cdc08af1
|
64cea21dc4834cc876b6788f4cb8572982d2f60a
|
/product_pricelist_report_qweb/tests/common.py
|
34ce1ca81ed41750f6ce505f83679205947fda18
|
[] |
no_license
|
yelizariev/addons-vauxoo
|
708463f847a75898d99fd8c2045d20ab9083b703
|
511dc410b4eba1f8ea939c6af02a5adea5122c92
|
refs/heads/8.0
| 2020-12-11T09:04:04.912471
| 2016-03-17T06:00:36
| 2016-03-17T06:00:36
| 53,125,976
| 3
| 2
| null | 2016-03-04T10:01:48
| 2016-03-04T10:01:48
| null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# coding: utf-8
# ##########################################################################
# Module Writen to ODOO, Open Source Management Solution
#
# Copyright (c) 2015 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
# ###########################################################################
# Coded by: Luis Torres (luis_t@vauxoo.com)
# ###########################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# #############################################################################
from openerp.tests import common
import logging
_logger = logging.getLogger(__name__)
class TestXLSProductCommon(common.TransactionCase):
def setUp(self):
super(TestXLSProductCommon, self).setUp()
self.product_price_obj = self.env['product.price_list']
self.attachment_obj = self.env['ir.attachment']
self.price_list_id = self.ref('product.list0')
self.product = self.env.ref('product.product_product_7').copy()
|
[
"hbto@vauxoo.com"
] |
hbto@vauxoo.com
|
564f8f9e85d4c8a6057469a98f58669f1dfe7534
|
ae22eebfadfdeb33f5c972702a92be266248c5f7
|
/Project2_Flask/main_functions.py
|
a1d535862e0995209a154d4e27cb6ac53a887988
|
[] |
no_license
|
ecaru003/COP4813_Project2
|
964831ad9a50634dbaf0b2a397a18b3a76316b63
|
2cfc87d373340c36de11bb7c856addf4dcb905bc
|
refs/heads/master
| 2023-07-24T12:58:28.901004
| 2021-09-01T21:33:38
| 2021-09-01T21:33:38
| 315,692,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import json
def read_from_file(file_name):
    """Load and return the JSON document stored in *file_name*.

    Prints a confirmation message after a successful read.
    """
    with open(file_name, "r") as handle:
        payload = json.load(handle)
    print("You successfully read from {}.".format(file_name))
    return payload
def save_to_file(data, file_name):
    """Serialize *data* to *file_name* as pretty-printed (indent=2) JSON.

    Prints a confirmation message after a successful write.
    """
    with open(file_name, "w") as sink:
        json.dump(data, sink, indent=2)
    print("You successfully saved to {}.".format(file_name))
|
[
"ecaru003@fiu.edu"
] |
ecaru003@fiu.edu
|
83a5e8277279567beb43b9117f28f6b87142acf6
|
9f1a165798a13b4fd24b94d23eb137a6763a1bed
|
/tickets/migrations/0001_initial_squashed_0006_auto_20200610_1403.py
|
36b5098cc5ffd8d4476caa47cb08b33bf448b406
|
[
"MIT"
] |
permissive
|
AdamCottrill/ticket_tracker
|
42455ed9e4b0439df08694b0f73713163aace68a
|
72fad3cf9c0e7f44ca62650a2338a5ac7696bcbf
|
refs/heads/master
| 2023-03-04T11:15:55.097923
| 2022-08-25T20:10:46
| 2022-08-25T20:10:46
| 198,422,697
| 1
| 3
|
MIT
| 2023-02-15T18:25:54
| 2019-07-23T12:07:00
|
Python
|
UTF-8
|
Python
| false
| false
| 9,882
|
py
|
# Generated by Django 2.2.13 on 2020-06-10 18:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import taggit.managers
class Migration(migrations.Migration):
    # Squashed migration: stands in for the six original tickets migrations
    # listed in `replaces`, so fresh databases apply only this file.

    replaces = [
        ("tickets", "0001_initial"),
        ("tickets", "0002_auto_20190209_2214"),
        ("tickets", "0003_auto_20190210_1052"),
        ("tickets", "0004_auto_20190210_1942"),
        ("tickets", "0005_auto_20190723_1134"),
        ("tickets", "0006_auto_20200610_1403"),
    ]

    dependencies = [
        ("taggit", "0003_taggeditem_add_unique_index"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Application: the product a ticket is filed against.
        migrations.CreateModel(
            name="Application",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("application", models.CharField(max_length=20)),
                ("slug", models.SlugField(editable=False, unique=True)),
            ],
        ),
        # Ticket: the core issue record (status/type/priority plus FKs to
        # application, users, optional parent ticket, and taggit tags).
        migrations.CreateModel(
            name="Ticket",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("active", models.BooleanField(default=True)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("new", "New"),
                            ("accepted", "Accepted"),
                            ("assigned", "Assigned"),
                            ("re-opened", "Re-Opened"),
                            ("closed", "Closed"),
                            ("duplicate", "Closed - Duplicate"),
                            ("split", "Closed - Split"),
                        ],
                        db_index=True,
                        # NOTE(review): default=True on a CharField looks like a
                        # generator artifact — confirm against the model source.
                        default=True,
                        max_length=20,
                    ),
                ),
                (
                    "ticket_type",
                    models.CharField(
                        choices=[
                            ("feature", "Feature Request"),
                            ("bug", "Bug Report"),
                            ("task", "Task"),
                        ],
                        db_index=True,
                        # NOTE(review): same default=True oddity as `status`.
                        default=True,
                        max_length=10,
                    ),
                ),
                ("title", models.CharField(max_length=80)),
                ("description", models.TextField()),
                ("description_html", models.TextField(blank=True, editable=False)),
                (
                    "priority",
                    models.IntegerField(
                        choices=[
                            (1, "Critical"),
                            (2, "High"),
                            (3, "Normal"),
                            (4, "Low"),
                            (5, "Very Low"),
                        ],
                        db_index=True,
                    ),
                ),
                (
                    "created_on",
                    models.DateTimeField(
                        auto_now_add=True, verbose_name="date created"
                    ),
                ),
                (
                    "updated_on",
                    models.DateTimeField(auto_now=True, verbose_name="date updated"),
                ),
                ("votes", models.IntegerField(default=0)),
                (
                    "application",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="tickets.Application",
                    ),
                ),
                (
                    "assigned_to",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="assigned_tickets",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    # Self-FK: set when a ticket is split off another ticket.
                    "parent",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="tickets.Ticket",
                    ),
                ),
                (
                    "submitted_by",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="submitted_tickets",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "tags",
                    taggit.managers.TaggableManager(
                        blank=True,
                        help_text="A comma-separated list of tags.",
                        through="taggit.TaggedItem",
                        to="taggit.Tag",
                        verbose_name="Tags",
                    ),
                ),
            ],
            options={
                "ordering": ["-created_on"],
            },
            managers=[
                # Default manager renamed to `all_tickets` in the model.
                ("all_tickets", django.db.models.manager.Manager()),
            ],
        ),
        # TicketDuplicate: links a duplicate ticket to the original it copies.
        migrations.CreateModel(
            name="TicketDuplicate",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "original",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="original",
                        to="tickets.Ticket",
                    ),
                ),
                (
                    "ticket",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="duplicate",
                        to="tickets.Ticket",
                    ),
                ),
            ],
        ),
        # UserVoteLog: records which user voted for which ticket.
        migrations.CreateModel(
            name="UserVoteLog",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "ticket",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket"
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # FollowUp: a threaded comment on a ticket, optionally carrying a
        # workflow action (close / re-open / split) and a privacy flag.
        migrations.CreateModel(
            name="FollowUp",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "created_on",
                    models.DateTimeField(
                        auto_now_add=True, verbose_name="date created"
                    ),
                ),
                ("comment", models.TextField()),
                ("comment_html", models.TextField(blank=True, editable=False)),
                (
                    "action",
                    models.CharField(
                        choices=[
                            ("no_action", "No Action"),
                            ("closed", "Closed"),
                            ("re-opened", "Re-Opened"),
                            ("split", "Split"),
                        ],
                        db_index=True,
                        default="no_action",
                        max_length=20,
                    ),
                ),
                ("private", models.BooleanField(default=False)),
                (
                    "parent",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="tickets.FollowUp",
                    ),
                ),
                (
                    "submitted_by",
                    models.ForeignKey(
                        # NOTE(review): default=1 hard-codes the first user's pk.
                        default=1,
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "ticket",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket"
                    ),
                ),
            ],
        ),
    ]
|
[
"adam.cottrill@ontario.ca"
] |
adam.cottrill@ontario.ca
|
9eb23f2fb0bdb9407531c0cc21444f0cba5aaead
|
aa1b98be1dabf14752750999b35aec8d819122fe
|
/utils.py
|
382c10d19fc66a245748c89531951d5c14186ced
|
[] |
no_license
|
tevonsb/a5
|
8fe8df7461c8515b649e3d3b601befc968c694d1
|
8d183228ed280582c45dba589f413405a49a49c4
|
refs/heads/master
| 2020-04-25T00:42:54.714606
| 2019-02-24T20:33:58
| 2019-02-24T20:33:58
| 172,386,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,328
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 5
nmt.py: NMT Model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
"""
import math
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def pad_sents_char(sents, char_pad_token):
    """Pad sentences to the batch max length and words to max_word_length.

    @param sents (list[list[list[int]]]): list of sentences, result of
        `words2charindices()` from `vocab.py`
    @param char_pad_token (int): index of the character-padding token
    @returns sents_padded (list[list[list[int]]]): every sentence padded with
        all-pad words up to the longest sentence in the batch, and every word
        padded — or truncated — to exactly max_word_length characters.
        Output shape: (batch_size, max_sentence_length, max_word_length)
    """
    # Words longer than 21 characters are truncated (the original code only
    # padded, so over-long words silently kept their full length).
    max_word_length = 21
    if not sents:
        # Empty batch: nothing to pad (max() below would raise otherwise).
        return []
    max_sentence_length = max(len(sent) for sent in sents)
    sents_padded = []
    for sent in sents:
        # Pad-or-truncate each word to exactly max_word_length characters.
        padded_words = [
            (word + [char_pad_token] * max_word_length)[:max_word_length]
            for word in sent
        ]
        # Append a *fresh* all-pad word per missing slot — the original reused
        # one shared pad_word list, so mutating one padded word aliased others.
        padded_words += [
            [char_pad_token] * max_word_length
            for _ in range(max_sentence_length - len(sent))
        ]
        sents_padded.append(padded_words)
    return sents_padded
def pad_sents(sents, pad_token):
    """Pad every sentence out to the longest sentence in the batch.

    @param sents (list[list[int]]): sentences, each a list of word indices
    @param pad_token (int): padding token appended to short sentences
    @returns sents_padded (list[list[int]]): new lists, all of equal length
        Output shape: (batch_size, max_sentence_length)
    """
    longest = max(len(sent) for sent in sents)
    # Concatenation always builds a fresh list, so inputs are never aliased.
    return [sent + [pad_token] * (longest - len(sent)) for sent in sents]
def read_corpus(file_path, source):
    """Read a corpus file with one space-tokenized sentence per line.

    @param file_path (str): path to the corpus file
    @param source (str): "src" or "tgt"; target-side sentences are wrapped
        with the <s> ... </s> boundary markers
    @returns data (list[list[str]]): tokenized sentences
    """
    data = []
    with open(file_path) as corpus:
        for line in corpus:
            tokens = line.strip().split(' ')
            if source == 'tgt':
                # Boundary markers only on the target side.
                tokens = ['<s>'] + tokens + ['</s>']
            data.append(tokens)
    return data
def batch_iter(data, batch_size, shuffle=False):
    """Yield (src_sents, tgt_sents) batches, each sorted by source length desc.

    @param data (list of (src_sent, tgt_sent)): parallel sentence pairs
    @param batch_size (int): number of pairs per batch (last may be smaller)
    @param shuffle (boolean): randomly permute the data order before batching
    """
    num_batches = math.ceil(len(data) / batch_size)
    order = list(range(len(data)))
    if shuffle:
        np.random.shuffle(order)
    for b in range(num_batches):
        chunk = [data[idx] for idx in order[b * batch_size:(b + 1) * batch_size]]
        # Longest source first — required by packed-sequence RNN consumers.
        chunk.sort(key=lambda pair: len(pair[0]), reverse=True)
        yield [pair[0] for pair in chunk], [pair[1] for pair in chunk]
|
[
"tevon.strandbrown@gmail.com"
] |
tevon.strandbrown@gmail.com
|
3a829b2c788daa3d8a5b5cdfa4c5b6ccd3daabd7
|
fefa88dd63533ed36ec4f86c029b5d9a00a3ad82
|
/monapi/serializers.py
|
987d80eaf5e66ca2d97f70f100a17e9b7334545c
|
[] |
no_license
|
jeremyguiller/Api-mairie
|
f8fee21610acfb2ec20fdb761d5cb854a82480e5
|
7f1a6173e5ef0c25f2971f8a7e41adf8e88b8d8c
|
refs/heads/master
| 2023-04-06T15:33:48.750300
| 2021-04-26T11:44:03
| 2021-04-26T11:44:03
| 361,729,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
from rest_framework import serializers , fields
from .models import Location,Texte,Image,Administrateur
class Locationserializer(serializers.HyperlinkedModelSerializer):
    # Serializes Location rows. `date` is declared explicitly so it is a plain
    # DateTimeField rather than whatever DRF would infer from the model.
    date = serializers.DateTimeField()
    class Meta:
        model = Location
        fields = ('date','name','confirmer')
class AdministrateurSerializers(serializers.HyperlinkedModelSerializer):
    # NOTE(review): exposes `mdp` (password) in API output — confirm intended.
    class Meta:
        model = Administrateur
        fields = ('name','email','mdp')
class TexteSerializers(serializers.HyperlinkedModelSerializer):
    # Serializes Texte rows: a label plus its text body.
    class Meta:
        model = Texte
        fields = ('intitule','texte')
class ImageSerializers(serializers.HyperlinkedModelSerializer):
    # Serializes Image rows: a caption plus the image field itself.
    class Meta:
        model = Image
        fields = ('description','image')
|
[
"guillerjeremy@gmail.com"
] |
guillerjeremy@gmail.com
|
cd75f26df497e0e47746786f0197f8dc9b218f06
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/FCU_Return_Air_Temperature_Sensor.py
|
d4ac39c9698a57051d03037b2f79dc41b5511c4b
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Return_Air_Temperature_Sensor import Return_Air_Temperature_Sensor
class FCU_Return_Air_Temperature_Sensor(Return_Air_Temperature_Sensor):
    # Generated RDFAlchemy binding: pins this class to the Brick 1.0.2
    # FCU_Return_Air_Temperature_Sensor URI; behavior comes from the parent.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').FCU_Return_Air_Temperature_Sensor
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
fa76acace0c4cd47c3cdb6b96aa8b5eed60ae7bf
|
8a41ef3e60355b867116754444d3b844721b7ff9
|
/how2pizza/pizza/admin.py
|
ea329c4b7c6a2a8d549786f75503dd73fe4627be
|
[
"MIT"
] |
permissive
|
ianonavy/how2pizza
|
12cc99b1f8adc6aa5513d396cb67ecb62039554e
|
ebac7b0cd2ea3be851eddb3fe221c11d1a2a426a
|
refs/heads/master
| 2021-01-23T22:07:46.822089
| 2015-05-28T05:55:59
| 2015-05-28T05:55:59
| 31,512,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from django.contrib import admin
from pizza.models import PizzaOrder, PizzaOrderUserChoice, PizzaType
@admin.register(PizzaOrder)
class PizzaOrderAdmin(admin.ModelAdmin):
    # Show id and creation time in the order changelist.
    list_display = ('id', 'created_at')
@admin.register(PizzaOrderUserChoice)
class PizzaOrderUserChoiceAdmin(admin.ModelAdmin):
    # Default admin; registration only.
    pass
@admin.register(PizzaType)
class PizzaTypeAdmin(admin.ModelAdmin):
    # Default admin; registration only.
    pass
|
[
"ianonavy@gmail.com"
] |
ianonavy@gmail.com
|
32dab3a9805a876cadd1c98c55ad23f5d16cff81
|
2a58920968814b87ee93decf2b887747dbb56c12
|
/helpers/create_module/find_path.py
|
d81d21182ecc2c75dade8d606fbc7d80fa6d75c5
|
[] |
no_license
|
chrysa/gae-toolbox-2
|
5e52b2c2ce66358feb82bdd078d6b9ab9f08da2e
|
b666567359888ff29d2c3dddb0453b762a65d75a
|
refs/heads/master
| 2020-03-29T20:53:11.495776
| 2014-04-16T12:10:17
| 2014-04-16T12:10:17
| 15,781,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# disco-toolbox-2.helpers.find_path -- fichier de génération du chemin relatif du script en cours d'éxécution
import os
def find_path(module_type, mod_folder):
    """Build the relative path of the module folder for the running script.

    :param module_type: module kind; 1 selects the 'admin' subfolder,
        anything else selects 'front'
    :param mod_folder: name of the folder holding the admin/front modules
    :returns: <base>/src/<mod_folder>/<admin|front>, where <base> is the
        current working directory, or its parent when the script runs from
        the 'install' or 'helpers' subdirectory
    :rtype: string
    """
    cwd = os.getcwd()
    # Last 7 characters tell us whether we run from install/ or helpers/
    # (the original comment said 8, but 7 is what the slice takes; the 8th
    # stripped character below is the path separator).
    tail = cwd[len(cwd) - 7:len(cwd)]
    if tail == 'install' or tail == 'helpers':
        base = cwd[0:len(cwd) - 8]
    else:
        base = cwd
    leaf = 'admin' if module_type == 1 else 'front'
    return base + os.sep + 'src' + os.sep + mod_folder + os.sep + leaf
|
[
"agreau@student.42.fr"
] |
agreau@student.42.fr
|
01056432f916ec5052c06f42038020cc0f7a42d4
|
27b2cee1701a2e3073ecf020065f697c5b145de0
|
/txboto/auth_handler.py
|
86da5f1288ca2ecd9647ca8feb619d35631317b4
|
[
"ADSL",
"BSD-3-Clause"
] |
permissive
|
2mf/txboto
|
25209b2d5c465ca093581dda281ae65e3e17103e
|
3ecc5c5e86b650edc6c3b42064a07d42faa210e4
|
refs/heads/master
| 2020-04-25T08:40:47.640350
| 2017-02-22T11:38:21
| 2017-02-22T11:38:21
| 45,603,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
# Copyright 2010 Google Inc.
# Copyright (c) 2015 Silver Egg Technology, Co., Ltd.
# Copyright (c) 2015 Michael Franke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Defines an interface which all Auth handlers need to implement.
"""
from txboto.plugin import Plugin
class NotReadyToAuthenticate(Exception):
    # Raised by a handler's constructor when it will not authenticate for the
    # given provider/config combination (see AuthHandler.__init__ below).
    pass
class AuthHandler(Plugin):
    """Interface that all concrete auth handlers implement."""

    # Capabilities this handler advertises; concrete subclasses override.
    capability = []

    def __init__(self, host, config, provider):
        """Constructs the handlers.

        :type host: string
        :param host: The host to which the request is being sent.

        :type config: txboto.pyami.Config
        :param config: TxBoto configuration.

        :type provider: txboto.provider.Provider
        :param provider: Provider details.

        Raises:
            NotReadyToAuthenticate: if this handler is not willing to
                authenticate for the given provider and config.
        """
        pass

    def add_auth(self, http_request):
        """Invoked to add authentication details to request.

        :type http_request: txboto.connection.HTTPRequest
        :param http_request: HTTP request that needs to be authenticated.
        """
        pass
|
[
"mf33456@gmail.com"
] |
mf33456@gmail.com
|
d292bf9b5228884b9307bbd114fbf6aae0eda93e
|
19b2856c718dab5380d381053c0f1d664faeab53
|
/Login/migrations/0001_initial.py
|
56a3b97852967f2bab2bfc9395c58579d0fbc9da
|
[] |
no_license
|
ywl1584/ywl1584.GraduationProject.io
|
7f62c50c939274039f304ccee378345fd083a2bf
|
18d6b1d199d3ba56ebee8de1c5551e01c7ab5bd5
|
refs/heads/master
| 2020-04-25T02:22:49.021355
| 2019-02-25T04:59:27
| 2019-02-25T04:59:27
| 172,437,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
# Generated by Django 2.1.2 on 2018-10-28 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the Login app: creates the custom `User` table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                # NOTE(review): stored as a plain CharField — hashing (if any)
                # must happen in application code.
                ('password', models.CharField(max_length=256)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
                ('c_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'ordering': ['c_time'],
            },
        ),
    ]
|
[
"your email address1270834936@qq.com"
] |
your email address1270834936@qq.com
|
b9862bab92c4aa791fbc0851e03b13c965d9dff8
|
8ee86008310da9954e3c200dd4711d295d449329
|
/blog/urls.py
|
0e0724f937f23bacaf9bde8904f4b9f53f37edd2
|
[] |
no_license
|
madp3e/Blog
|
846fef127330b9f600c7b0c15a080efb5de4a148
|
1379041c68c6e4045d25a5f1bf9ff325457788e7
|
refs/heads/master
| 2022-11-26T22:39:36.391205
| 2019-12-12T11:47:25
| 2019-12-12T11:47:25
| 227,589,494
| 0
| 0
| null | 2022-11-22T04:37:45
| 2019-12-12T11:19:41
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
from django.urls import path
from . import views
from .views import (PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView)
# Blog URL map: class-based views for post CRUD, plus the about page and a
# per-user post listing.
urlpatterns = [
    path("", PostListView.as_view(), name="blog-home"),
    path("post/<int:pk>/", PostDetailView.as_view(), name="post-detail"),
    # NOTE(review): update/delete lack the trailing slash the other routes
    # have — confirm whether that asymmetry is intentional.
    path("post/<int:pk>/update", PostUpdateView.as_view(), name="post-update"),
    path("post/<int:pk>/delete", PostDeleteView.as_view(), name="post-delete"),
    path("post/new/", PostCreateView.as_view(), name="post-create"),
    path("about/", views.about, name="blog-about"),
    path("user/<str:username>/", UserPostListView.as_view(), name="user-posts")
]
|
[
"ahmadfaizuddin17@gmail.com"
] |
ahmadfaizuddin17@gmail.com
|
029e8d41228f8d09c6e0cb103693dbf48021707d
|
eb008a137a8da49d48985240bea8c29e0966293a
|
/tools/config.py
|
5d5a6fcb30acfb04f0bf90925fd32b94d98ea154
|
[] |
no_license
|
Kukushenok/GunParty
|
4a5f7de407b68061c46cc645658b11cba3edd2d8
|
acac4ea8bd80ec9101a8a2f64a08f594f0edf31c
|
refs/heads/master
| 2021-05-12T11:09:17.202229
| 2018-02-24T18:32:15
| 2018-02-24T18:32:15
| 117,379,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
import configparser
import os
import pygame
class Config():
    """Layered INI configuration.

    Values are looked up in `config.ini` first and fall back to
    `default_config.ini` (located in *defaultPath*) when the key is missing
    from the user config.
    """

    def __init__(self, defaultPath=None, path=None):
        """Load the user config and, when available, the default config.

        :param defaultPath: directory holding default_config.ini (fallbacks)
        :param path: directory holding config.ini; defaults to the CWD
        """
        self.defaultPath = defaultPath
        self.config = configparser.ConfigParser()
        self.defaultConfig = configparser.ConfigParser()
        if path:
            self.config.read(os.path.join(path, 'config.ini'))
        else:
            self.config.read('config.ini')
        # The original duplicated this read in both branches; it only depends
        # on defaultPath, so it is done once here.
        if self.defaultPath:
            self.defaultConfig.read(os.path.join(self.defaultPath, 'default_config.ini'))

    def get(self, item):
        """Return the raw string for *item* from [SETTINGS].

        Falls back to the default config when the user config lacks the key.
        """
        try:
            return self.config["SETTINGS"][item]
        except KeyError:
            return self.defaultConfig["SETTINGS"][item]

    def getAsDict(self, item):
        """Parse a "key:value,key:value" setting into a dict.

        Keys and values must be Python literals (e.g. "'a':1,'b':2").
        The original built the dict via exec(), which would execute arbitrary
        code embedded in a config file; ast.literal_eval accepts literals only.
        """
        import ast  # local import keeps this fix self-contained
        try:
            raw = self.config["SETTINGS"][item]
        except KeyError:
            raw = self.defaultConfig["SETTINGS"][item]
        result = {}
        for pair in raw.split(","):
            parts = pair.split(":")
            # Same indexing as the original: only the first two ':' parts used.
            result[ast.literal_eval(parts[0])] = ast.literal_eval(parts[1])
        return result
|
[
"mrcoolmoder@gmail.com"
] |
mrcoolmoder@gmail.com
|
c8e453ae1f4aa67ae58b7f6d6dd39e2b6c2afb3d
|
0367d2c25de1584fd064522e9b9efc8fa52d1478
|
/odd_eve_list.py
|
c97aec023602a1865225836fc042828587cb288f
|
[] |
no_license
|
sk013/Python_Basic_Programs
|
9d69698f28246f6787c695e20d5b2b4a45417019
|
c44ed384e8185261ef4fd715694362269837d6c8
|
refs/heads/main
| 2023-05-03T12:04:20.144301
| 2021-05-26T17:43:22
| 2021-05-26T17:43:22
| 371,121,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
def odd_eve(l):
    """Partition *l* into [evens, odds], keeping each group's original order."""
    evens = [n for n in l if n % 2 == 0]
    odds = [n for n in l if n % 2 != 0]
    return [evens, odds]
# Demo: split a sample list into [evens, odds] and print the result.
numbers = [1,2,4,3,5,6,54,2,36,43,31]
print(odd_eve(numbers))
|
[
"noreply@github.com"
] |
sk013.noreply@github.com
|
b66bdcf6efc1e3d36d06876d5a98947743683ff5
|
95a05bee4ef9a16da7185e7651685d7df71d55af
|
/metadata.py
|
4daf5b1243803c996a12e9c057b935b032fb26d4
|
[
"Unlicense"
] |
permissive
|
ArniDagur/auto-rental
|
0f0b342c1a0d320100f4bcaba4a881f78358b76e
|
8b7fcf724c7501c0414454771addbd36be185b26
|
refs/heads/master
| 2020-04-10T16:44:39.510794
| 2018-12-10T10:06:25
| 2018-12-10T10:06:25
| 161,154,249
| 0
| 0
|
Unlicense
| 2018-12-10T10:01:58
| 2018-12-10T10:01:58
| null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
import os
from appdirs import user_data_dir
# Information for humans:
# -----------------------------------------------------------------------------
APPNAME = 'Auto-Rental'
AUTHOR = 'hopur-32'

# Information for computers:
# -----------------------------------------------------------------------------
DATA_DIR = user_data_dir(APPNAME, AUTHOR) # OS specific directory to store data
# Create the data directory at import time so callers can assume it exists.
if not os.path.isdir(DATA_DIR):
    os.makedirs(DATA_DIR)
|
[
"arnidg@protonmail.ch"
] |
arnidg@protonmail.ch
|
338f0fba5917e4ae0b096d9a4b4b41e5389d4123
|
05e2452e154806455d2d829466055f0ac8a11f92
|
/Name/wsgi.py
|
64efb28a6a226a33c4fe67a9c9bcc6ede1cd3dee
|
[] |
no_license
|
WesamAlmasri/Translator
|
35a295ca8aa2ded1ccc315e19494201475491cf4
|
875a324a4cb7a75c7b80f51ba420c3efc2306092
|
refs/heads/main
| 2023-04-03T03:48:56.830044
| 2021-04-03T13:20:30
| 2021-04-03T13:20:30
| 353,406,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for Name project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Name.settings')

# Module-level WSGI callable imported by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
[
"mr0virus@gmail.com"
] |
mr0virus@gmail.com
|
1158acb79cf822c0ded1ea29f10b77727305c073
|
cd142a4e15d3576546fcb44841417039f0b8fb00
|
/build/double/catkin_generated/pkg.installspace.context.pc.py
|
9b014836f2e3e476722b6c40aa901294660dad37
|
[] |
no_license
|
mgou123/rplidar
|
4389819eb1998d404d1066c7b4a983972d236ce7
|
608c1f6da2d3e5a8bac06e8d55d8569af828a40b
|
refs/heads/master
| 2022-11-10T05:51:56.403293
| 2020-06-29T04:16:14
| 2020-06-29T04:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Values below were substituted at configure time by catkin; the
# '"" != ""' guards split only when the substituted string was non-empty.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "double"
PROJECT_SPACE_DIR = "/home/xu/dogkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"492798337@qq.com"
] |
492798337@qq.com
|
05f1c23936d977e70fdef1e44fc27ab9f069cadf
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/common/Lib/encodings/gbk.py
|
4b4a46dcbfdea9c2f98724c76a52405e54febf9c
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,136
|
py
|
# 2016.11.19 19:58:56 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/encodings/gbk.py
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gbk')
class Codec(codecs.Codec):
    # Stateless GBK encode/decode, bound straight from the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    # Chunked encoding; all logic lives in the multibyte base class.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    # Chunked decoding; all logic lives in the multibyte base class.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reader wrapping the GBK codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writer wrapping the GBK codec.
    codec = codec
def getregentry():
    # CodecInfo consumed by the codecs registry when 'gbk' is looked up.
    return codecs.CodecInfo(name='gbk', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\encodings\gbk.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:58:56 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
6a2758f58f6ef665dec7ea80ebf419557651d695
|
1443c180718ea74cb0862d112a7c08d6ec5d1828
|
/flaskfundamental/DojoSurvey2/DojoSurvey.py
|
2061cf3d5200337887342ecb7b0ebbf99da85a33
|
[] |
no_license
|
Dragonlizard1/Python_Project
|
7ca7e7f4245f1d1394542127c107fe5f79e0cafe
|
be83d84dddc6b1c30fd231a0e15f60da5a5bceb2
|
refs/heads/master
| 2020-03-12T23:41:16.856306
| 2018-04-24T16:39:50
| 2018-04-24T16:39:50
| 130,871,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
from flask import Flask, render_template, request, session, redirect, flash
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
@app.route("/")
def index():
    # Landing page: render the survey form.
    return render_template("form.html")
@app.route("/result", methods = ["POST"])
def infoprocess():
    # Handle the survey POST: validate the submitted fields, flash any
    # errors, and either bounce back to the form or render the results page.
    name = request.form["name"]
    location = request.form["location"]
    language = request.form["language"]
    comment = request.form["comment"]
    if name == "":
        flash("The name field is empty.")
    if comment == "":
        flash("Please add comment in.")
    elif len(comment) > 120:
        flash("Please put in less than 120 characters.")
        return redirect ("/")
    # NOTE(review): the block below repeats the comment checks above, and the
    # empty-comment branch above never redirects while this one does —
    # presumably leftover code; confirm intent before cleaning up. An empty
    # name also falls through to the result page with only a flash message.
    if comment == "":
        flash("Please add comment in.")
        return redirect ("/")
    elif len(comment) > 120:
        flash("Please put in less than 120 characters.")
        return redirect ("/")
    #print name
    return render_template("result.html", name1 = name, location1 = location, language1 = language, comment1 = comment)
app.run(debug=True)
|
[
"bobbyimaging@gmail.com"
] |
bobbyimaging@gmail.com
|
488243e5d4538da2bac8bd00083dfb737797e000
|
4dcee7dff58a6f0364283787aa7ad1dff16721e1
|
/pre_pred_bert.py
|
85a565945bd985a6dbd43cc220760b93320738a5
|
[] |
no_license
|
karthikpuranik11/Masked-LM
|
ead8bcb5bcaedb8b62b627cc6dab2ce3c5fefcbe
|
bb049e493bc9968e3c50cac1fe88ebe7c436523f
|
refs/heads/main
| 2023-03-18T21:51:27.842906
| 2021-03-07T17:37:54
| 2021-03-07T17:37:54
| 342,780,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
# NOTE(review): flat script fragment. Relies on predict_masked_sent() and
# pos_tag() (presumably nltk.pos_tag -- TODO confirm) defined/imported
# elsewhere in the project. It prints the first candidate sentence that
# has a preposition-like tag before the [MASK] token, then stops.
a=predict_masked_sent('The animals came to the meeting.', top_k=5)
for j in range(len(a)):
  x=0  # flag: set to 1 once an acceptable candidate has been printed
  a[j]=a[j].split()
  #print(a[j])
  tok = pos_tag(a[j])  # POS-tag the tokenised candidate sentence
  for k in range(len(tok)):
    if tok[k][0]=='[MASK]':
      # Reached the mask itself without finding IN/TO: reject candidate.
      break
    elif tok[k][1]=='IN' or tok[k][1]=='TO':
      # Preposition (IN) or 'to' (TO) appears before the mask: accept.
      pred=' '.join(a[j])
      print(pred)
      x=1
      break
  if x==1:
    break
|
[
"noreply@github.com"
] |
karthikpuranik11.noreply@github.com
|
46f9807e15556efa7d2439bee101b14f588ee791
|
44413721791e00e5e0d728d2063cce9d072680bc
|
/env/bin/jupyter-nbextension
|
f272010110d6ad96be430c20709bbda7f2ea6cb7
|
[] |
no_license
|
andriyka/term-extraction-and-ontology-learning
|
5174ba52db93bc3dd22b75a41c998c5e23a3bcd5
|
2fa478f1f6f28949d461331f6e8348f86bd344e1
|
refs/heads/master
| 2020-03-21T19:05:07.755413
| 2018-07-09T16:46:58
| 2018-07-09T16:46:58
| 138,929,875
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
#!/home/ankus/Documents/ucu/terms/ate/env/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim (setuptools entry point) that launches
# the `jupyter nbextension` command from this virtualenv. Do not edit by hand.
import re
import sys
from notebook.nbextensions import main
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" or ".exe" from argv[0] so the program
    # name looks clean (relevant for Windows wrapper executables).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ankus@ciklum.com"
] |
ankus@ciklum.com
|
|
b52563bc708de755093f4abaf4427720c8741e1c
|
654acf62f757435f11afe3edb784c19ba9a996b5
|
/Cmimid/src/generalizetokens.py
|
b8d1ba37156b9a633f7d37b92cf041e669f90ced
|
[] |
no_license
|
anonymous-scientist/anonymous-scientist.github.io
|
92337f97ed48f68f2b8de0f2a23de31fac6ee702
|
b699788fc0c44d03e4d3e172428202f52a57fd08
|
refs/heads/master
| 2020-07-05T21:10:15.055470
| 2020-03-11T10:22:38
| 2020-03-11T10:22:38
| 202,777,252
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,277
|
py
|
#!/usr/bin/env python
import sys
import pudb
import grammartools
# ulimit -s 100000
sys.setrecursionlimit(99000)
import random
import string
import util
import copy
import json
import re
import fuzz as F
import subprocess
b = pudb.set_trace
def is_nt(token):
    """Return True iff *token* is a nonterminal, i.e. wrapped in <angle brackets>."""
    return token[:1] == '<' and token[-1:] == '>'
def generalize_tokens(grammar):
    """Return a copy of *grammar* in which every terminal token is exploded
    into single-character tokens; nonterminal tokens are kept whole."""
    exploded = {}
    for key, rules in grammar.items():
        new_rules = []
        for rule in rules:
            pieces = []
            for token in rule:
                # Inlined nonterminal test: <...> tokens stay intact.
                if token.startswith('<') and token.endswith('>'):
                    pieces.append(token)
                else:
                    pieces.extend(token)
            new_rules.append(pieces)
        exploded[key] = new_rules
    return exploded
def get_list_of_single_chars(grammar):
    """Collect (key, rule_index, token_index, char) tuples for every
    single-character terminal token in *grammar*, in grammar order."""
    singles = []
    for key, rules in grammar.items():
        for rule_idx, rule in enumerate(rules):
            for tok_idx, token in enumerate(rule):
                # Skip nonterminals (inlined is_nt test).
                if token.startswith('<') and token.endswith('>'):
                    continue
                if len(token) == 1:
                    singles.append((key, rule_idx, tok_idx, token))
    return singles
def remove_recursion(d):
    """Return a copy of *d* with direct self-references removed: each value
    list keeps only the entries that differ from their own key."""
    return {key: [tok for tok in toks if tok != key] for key, toks in d.items()}
def replaceable_with_kind(stree, orig, parent, gk, command):
    """Check whether the token *orig* (hanging under the placeholder key *gk*
    in skeleton tree *stree*) can be replaced by the character class *parent*.

    Builds a concrete tree from *stree* (random concrete characters for any
    ASCII_MAP class leaves), then asks util.is_a_replaceable_with_b whether
    the substitution is accepted by *command* (an external oracle program --
    semantics defined in util, not visible here).
    """
    my_node = None
    def fill_tree(node):
        # Recursively concretize the skeleton; remembers the node created
        # for the placeholder key gk so it can be compared later.
        nonlocal my_node
        name, children = node
        if name == gk:
            my_node = [name, [[parent, []]]]
            return my_node
        elif not children:
            if name in ASCII_MAP:
                # Leaf is a character class: pick a random concrete member.
                return (random.choice(ASCII_MAP[name]), [])
            return (name, [])
        else:
            return (name, [fill_tree(c) for c in children])
    tree0 = fill_tree(stree)
    sval = util.tree_to_str(tree0)
    assert my_node is not None
    a1 = my_node, '', tree0
    if parent == orig:
        # Degenerate case: "replacing" orig by itself -- single oracle query.
        aX = ((gk, [[orig, []]]), '', tree0)
        val = util.is_a_replaceable_with_b(a1, aX, command)
        if val:
            return True
        else:
            return False
    else:
        # Every member of the candidate class must be accepted, otherwise
        # the generalization to *parent* is rejected.
        for pval in ASCII_MAP[parent]:
            aX = ((gk, [[pval, []]]), '', tree0)
            val = util.is_a_replaceable_with_b(a1, aX, command)
            if val:
                continue
            else:
                return False
        return True
# string.ascii_letters The concatenation of the ascii_lowercase and ascii_uppercase constants described below. This value is not locale-dependent.
# string.ascii_lowercase The lowercase letters 'abcdefghijklmnopqrstuvwxyz'. This value is not locale-dependent and will not change.
# string.ascii_uppercase The uppercase letters 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'. This value is not locale-dependent and will not change.
# string.digits The string '0123456789'.
# string.hexdigits The string '0123456789abcdefABCDEF'.
# string.octdigits The string '01234567'.
# string.punctuation String of ASCII characters which are considered punctuation characters in the C locale: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~.
# string.printable String of ASCII characters which are considered printable. This is a combination of digits, ascii_letters, punctuation, and whitespace.
# string.whitespace A string containing all ASCII characters that are considered whitespace. This includes the characters space, tab, linefeed, return, formfeed, and vertical tab.
def parent_map():
    """Build the child -> parent lattice of character classes.

    Each concrete character maps to its immediate class (whitespace, digit,
    lower, upper, punctuation), and each class maps to the next more general
    class, topping out at [__ASCII_PRINTABLE__].

    NOTE(review): the original maps '[__PUNCT__]' (not '[__ASCII_PUNCT__]',
    the key actually used in ASCII_MAP) to printable -- looks like a typo,
    preserved here for behavioral fidelity; confirm before changing.
    """
    lattice = {}
    groups = [
        (string.whitespace, '[__WHITESPACE__]'),
        (string.digits, '[__DIGIT__]'),
        (string.ascii_lowercase, '[__ASCII_LOWER__]'),
        (string.ascii_uppercase, '[__ASCII_UPPER__]'),
        (string.punctuation, '[__ASCII_PUNCT__]'),
    ]
    for chars, klass in groups:
        for ch in chars:
            lattice[ch] = klass
    lattice.update({
        '[__WHITESPACE__]': '[__ASCII_PRINTABLE__]',
        '[__DIGIT__]': '[__ASCII_ALPHANUM__]',
        '[__ASCII_LOWER__]': '[__ASCII_LETTER__]',
        '[__ASCII_UPPER__]': '[__ASCII_LETTER__]',
        '[__ASCII_LETTER__]': '[__ASCII_ALPHANUM__]',
        '[__ASCII_ALPHANUM__]': '[__ASCII_PRINTABLE__]',
        '[__PUNCT__]': '[__ASCII_PRINTABLE__]',
    })
    return lattice
# Character-class name -> member characters, used when generalising single
# terminal characters into classes (and to sample concrete members).
ASCII_MAP = {
    '[__WHITESPACE__]': string.whitespace,
    '[__DIGIT__]': string.digits,
    '[__ASCII_LOWER__]': string.ascii_lowercase,
    '[__ASCII_UPPER__]': string.ascii_uppercase,
    '[__ASCII_PUNCT__]': string.punctuation,
    '[__ASCII_LETTER__]': string.ascii_letters,
    '[__ASCII_ALPHANUM__]': string.ascii_letters + string.digits,
    '[__ASCII_PRINTABLE__]': string.printable
}
# Child -> parent lattice over the same class names (see parent_map()).
PARENT_MAP = parent_map()
def find_max_generalized(tree, kind, gk, command):
    """Climb the PARENT_MAP lattice from *kind* as far as the oracle allows,
    returning the most general class that is still replaceable."""
    current = kind
    while current in PARENT_MAP:
        candidate = PARENT_MAP[current]
        if not replaceable_with_kind(tree, current, candidate, gk, command):
            break
        current = candidate
    return current
def do_n(tree, kind, gk, command, n):
    """Build a gk-node with *n* random concrete children drawn from the
    character class *kind* (used to probe repetition widening)."""
    children = [[random.choice(ASCII_MAP[kind]), []] for _ in range(n)]
    return (gk, children)
def find_max_widened(tree, kind, gk, command):
    """Probe whether the single-character class *kind* can repeat.

    Concretizes *tree*, then asks the oracle whether the gk node can hold
    2 and then 4 random members of *kind*; if both are accepted, the token
    is marked repeatable by appending '+' to its class name.
    """
    my_node = None
    def fill_tree(node):
        # Same skeleton-concretization helper as in replaceable_with_kind.
        nonlocal my_node
        name, children = node
        if name == gk:
            my_node = [name, [[kind, []]]]
            return my_node
        elif not children:
            if name in ASCII_MAP:
                return (random.choice(ASCII_MAP[name]), [])
            return (name, [])
        else:
            return (name, [fill_tree(c) for c in children])
    tree0 = fill_tree(tree)
    sval = util.tree_to_str(tree0)
    assert my_node is not None
    a1 = my_node, '', tree0
    # this is a single character. Now, try 2, 4 etc.
    pvals = do_n(tree, kind, gk, command, 2)
    aX = (pvals, '', tree0)
    val = util.is_a_replaceable_with_b(a1, aX, command)
    if not val: return kind
    pvals = do_n(tree, kind, gk, command, 4)
    aX = (pvals, '', tree0)
    val = util.is_a_replaceable_with_b(a1, aX, command)
    if not val: return kind
    # Both widenings accepted: mark the class as "one or more".
    return kind + '+'
# Temporary placeholder key spliced into the grammar while a token is probed.
GK = '<__GENERALIZE__>'
# Maximum number of fuzzing attempts to find a usable derivation tree.
MAX_CHECKS = 1000
def generalize_single_token(grammar, start, k, q, r, command, blacklist):
    """Generalize the single character at grammar[k][q][r] in place.

    Replaces the character with a character class (and possibly a '+'
    repetition marker) when the external oracle *command* accepts the
    substitution. Tokens that cannot be exercised within MAX_CHECKS
    fuzzing attempts are appended to *blacklist* and left unchanged.
    Returns the (mutated) grammar.
    """
    # first we replace the token with a temporary key
    gk = GK
    # If the previous token was already widened (ends in '+') and this
    # character belongs to that widened class, merge into it.
    char = grammar[k][q][r]
    if r > 0 and grammar[k][q][r-1][-1] == '+':
        # Strip the '+' to recover the class name.
        last_char = grammar[k][q][r-1][0:-1]
        if last_char in ASCII_MAP and char in ASCII_MAP[last_char]:
            # This character is covered by the previous repetition.
            grammar[k][q][r] = last_char + '+'
            return grammar
    g_ = copy.deepcopy(grammar)
    g_[k][q][r] = gk
    g_[gk] = [[char]]
    #reachable_keys = grammartools.reachable_dict(g_)
    # Build a grammar focused on reaching the placeholder key.
    fg = grammartools.get_focused_grammar(g_, (gk, []))
    fuzzer = F.LimitFuzzer(fg)
    #skel_tree = find_path_key(g_, start, gk, reachable_keys, fuzzer)
    tree = None
    check = 0
    # Fuzz until we obtain a derivation tree that the oracle validates
    # for the original character, or we exhaust MAX_CHECKS attempts.
    while tree is None:
        #tree = flush_tree(skel_tree, fuzzer, gk, char)
        #tree = fuzzer.gen_key(grammartools.focused_key(start), depth=0, max_depth=1)
        tree = fuzzer.iter_gen_key(grammartools.focused_key(start), max_depth=1)
        val = util.check(char, char, '<__CHECK__(%d/%d)>' % (check, MAX_CHECKS), tree, command, char, char)
        check += 1
        if not val:
            tree = None
            if check > MAX_CHECKS:
                print("Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char), file=sys.stderr)
                blacklist.append((k, q, r, char))
                #raise "Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char)
                return grammar
    # Generalize the character up the class lattice; if anything changed,
    # also probe whether the class may repeat ('+').
    gen_token = find_max_generalized(tree, char, gk, command)
    if gen_token != char:
        # try widening
        gen_token = find_max_widened(tree, gen_token, gk, command)
    del g_[gk]
    g_[k][q][r] = gen_token
    # preserve the order
    grammar[k][q][r] = gen_token
    return grammar
def remove_duplicate_repetitions(g):
    """Collapse immediately repeated '+'-suffixed tokens inside each rule.

    A token like 'X+' already means "one or more X", so a run of equal
    'X+' tokens is redundant: only the first of each run is kept.
    """
    collapsed = {}
    for key, rules in g.items():
        out_rules = []
        for rule in rules:
            kept = []
            keep_idx = -1  # index (within rule) of the last token we kept
            for idx, tok in enumerate(rule):
                if keep_idx >= 0 and tok.endswith('+') and tok == rule[keep_idx]:
                    # Same '+' token as the one we just kept: drop it.
                    continue
                keep_idx = idx
                kept.append(tok)
            out_rules.append(kept)
        collapsed[key] = out_rules
    return collapsed
def main(args):
    """Entry point: generalize the tokens of a mined grammar.

    args[0] is a JSON file with '[grammar]', '[start]' and '[command]'
    keys. Prints the generalized grammar (plus a '[blacklist]' of tokens
    that could not be exercised) as JSON on stdout; progress/diagnostics
    go to stderr.
    """
    gfname = args[0]
    with open(gfname) as f:
        gf = json.load(fp=f)
    grammar = gf['[grammar]']
    start = gf['[start]']
    command = gf['[command]']
    # now, what we want to do is first regularize the grammar by splitting each
    # multi-character tokens into single characters.
    generalized_grammar = generalize_tokens(grammar)
    # next, we want to get the list of all such instances
    list_of_things_to_generalize = get_list_of_single_chars(generalized_grammar)
    #print(len(list_of_things_to_generalize), file=sys.stderr)
    # next, we generalize each single character in turn
    # finally, we want to generalize the length.
    #reachable_keys = reachable_dict(grammar)
    g_ = generalized_grammar
    blacklist = []
    for k, q, r, t in list_of_things_to_generalize:
        assert g_[k][q][r] == t
        bl = []
        g_ = generalize_single_token(g_, start, k, q, r, command, bl)
        if bl:
            print("Blacllisted:", bl, file=sys.stderr)
            blacklist.extend(bl)
    g = remove_duplicate_repetitions(g_)
    g = grammartools.remove_duplicate_rules_in_a_key(g)
    # finally, we want to generalize the length.
    #g = generalize_size(g_)
    print(json.dumps({'[start]': start, '[grammar]':g, '[command]': command, '[blacklist]': blacklist}, indent=4))
if __name__ == '__main__':
    main(sys.argv[1:])
|
[
"anonymous@anonymous.net"
] |
anonymous@anonymous.net
|
7421e6059aeff1e3016934fea7f9e2910344351e
|
83648babb83497ff162ccfa6104c1f09029bcb37
|
/local_global.py
|
aa1da55f52fde40d694f0c3e6e2fb5b0626ebf22
|
[] |
no_license
|
seeni-eldho/pythonProgram
|
aeeb5ec559049feb4d331b3a40e09f21f9b799b2
|
3361c4673d85e0bfb0df93414c573bdd3a4944b0
|
refs/heads/master
| 2023-08-07T17:54:24.405327
| 2021-09-22T09:40:04
| 2021-09-22T09:40:04
| 402,530,682
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
# Demonstrates the `global` statement: foo() assigns the module-level
# variable y via `global`, so y remains visible after the call returns.
x=5
def foo():
    global y
    y=7
    # bug fix: the printed label was misspelled 'loccal'
    print('local',y)
foo()
print('local',y)
|
[
"seenieldho85@gmail.com"
] |
seenieldho85@gmail.com
|
782612e4635027ea04a2431e6dc0a11bcc45d1ee
|
e82ba9e19c415e5eeff4a48f52dbd7efc4ae4d6b
|
/9.sort/BubbleSort2.py
|
a43b5016f4aa1cde02f156f1bd522421ff774c94
|
[] |
no_license
|
GoldK11/dataSKKU
|
8a4dbbd5adb6b766a28cdfaba3b9a744992d4e41
|
24b5e82e5456daf3c07db271e1b6932661c967a3
|
refs/heads/master
| 2021-08-23T01:33:16.984279
| 2017-12-02T05:39:34
| 2017-12-02T05:39:34
| 112,315,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# 0 부터 ( 좀 이상함)
def bubbleSort(l):
    """Sort *l* in place (despite the name this is an exchange/selection
    sort) and return the number of comparisons performed, which is always
    len(l) * (len(l) - 1) // 2."""
    comparisons = 0
    n = len(l)
    for left in range(n):
        for right in range(left + 1, n):
            comparisons += 1
            if l[right] < l[left]:
                l[left], l[right] = l[right], l[left]
    return comparisons
l = [53, 112, 174, 200, 258, 123, 184, 254, 232, 136, 198, 3, 286, 6, 62, 57, 110, 10, 17, 189, 291, 2, 245, 118, 226, 154, 33, 211, 285, 191, 289, 161, 56, 74, 241, 297, 249, 9, 208, 251, 63, 214, 145, 97, 75, 149, 158, 59, 275, 68, 95, 124, 32, 99, 167, 224, 197, 79, 296, 152, 171, 98, 30, 148, 26, 50, 266, 93, 293, 182, 181, 153, 88, 66, 210, 100, 127, 94, 247, 277, 44, 262, 77, 121, 138, 71, 82, 119, 37, 140, 233, 206, 237, 212, 231, 11, 248, 209, 271, 234, 255, 51, 25, 243, 163, 146, 172, 142, 238, 263, 114, 104, 253, 236, 4, 273, 54, 151, 73, 250, 204, 227, 107, 18, 92, 60, 187, 120, 102, 64, 128, 173, 281, 279, 282, 144, 219, 244, 269, 40, 180, 283, 126, 288, 45, 143, 91, 178, 157, 96, 70, 129, 109, 85, 147, 35, 90, 195, 261, 19, 22, 55, 267, 280, 299, 15, 199, 168, 108, 235, 105, 196, 135, 58, 155,
162, 101, 218, 24, 246, 207, 89, 132, 192, 14, 290, 1, 295, 188, 270, 201, 78, 229, 39, 274, 49, 13, 28, 65, 72, 52, 81, 217, 252, 220, 34, 31, 216, 139, 256, 169, 166, 27, 160, 12, 284, 111, 228, 0, 159, 8, 298, 122, 87, 41, 205, 215, 193, 165, 203, 221, 84, 7, 176, 80, 20, 125, 179, 141, 29, 134, 5, 257, 16, 268, 194, 202, 225, 23, 185, 36, 21, 117, 48, 76, 260, 186, 156, 170, 47, 223, 265, 287, 103, 42, 113, 38, 239, 115, 278, 230, 259, 61, 150, 69, 130, 133, 116, 164, 242, 213, 183, 67, 175, 131, 240, 264, 46, 276, 43, 86, 83, 106, 294, 177, 137, 292, 190, 222, 272]
print(bubbleSort(l))
print(l)
|
[
"ssori113@gmail.com"
] |
ssori113@gmail.com
|
57d8840f3ae45365005e9730310b3b9956021a54
|
eace995a65e1029cfb88c9a2764a831717b7b4cb
|
/rpn.py
|
30dcecb9a78d6373463dd42462d47f1d69f267b8
|
[
"MIT"
] |
permissive
|
HoangTuan110/rpn-calc
|
010115637c80417aefa088db04532c602ad0810e
|
8418999cd039cb0f63b828844e34b291e768533b
|
refs/heads/main
| 2023-04-08T13:06:35.181722
| 2021-04-11T11:27:06
| 2021-04-11T11:27:06
| 356,848,524
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
"""
This is a simple RPN (Reverse Polish Notation) calculator written in Python.
It may be quite slow, but I don't care lol.
"""
def calc(code):
    """Evaluate an RPN expression and print its result.

    Accepts decimal, hex (0x..) and binary (0b..) integer literals and
    the binary operators + - * / (true division). Tokens are separated
    by single spaces; extra spaces at either end are tolerated.

    Bug fixes vs. the original:
    * operand order: in RPN "a b op" means a op b, so the value popped
      SECOND is the left operand (the original computed b op a, and its
      infix `result` string reconstruction printed wrong values, e.g.
      6 for "3 4 +").
    * literals are parsed with int(token, 0) instead of eval(), which
      must never be applied to untrusted input.
    """
    import operator
    ops = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
    }
    stack = []
    for token in code.split(" "):
        if token == "":
            # Extra spaces at the start/end of the input produce empty tokens.
            continue
        if token in ops:
            if len(stack) < 2:
                print(f"Not enough operands for: {token}")
                return
            right = stack.pop()  # popped first  -> RIGHT operand
            left = stack.pop()   # popped second -> LEFT operand
            stack.append(ops[token](left, right))
        else:
            try:
                # int(token, 0) parses "10", "0x1f" and "0b101" safely.
                stack.append(int(token, 0))
            except ValueError:
                print(f"Illegal character: {token}")
                break
    if stack:
        print(stack[-1])
def repl():
    """Read-eval-print loop: evaluate one RPN expression per line, forever
    (terminate with Ctrl-C / EOF, which raises out of input())."""
    while True:
        calc(input(">> "))
repl()
|
[
"noreply@github.com"
] |
HoangTuan110.noreply@github.com
|
b74ebd69ba2428966df06b67ec9e088623bd0bc7
|
b7b2728bcfeda781ef79540dc46577f4a772e471
|
/django_hbase/models/exceptions.py
|
6cfcf27b10ce6d62f13764fc2cea8fbbda7e7c11
|
[] |
no_license
|
Veronica1026/django-twitter
|
7dd8e0efe84d50654bc92f83bf6ac0bb0c6b432e
|
e28e8fe5443db48b761cd2e4e6a43e0d0c3590ff
|
refs/heads/main
| 2023-08-25T16:15:36.092192
| 2021-10-23T10:41:00
| 2021-10-23T10:41:00
| 364,218,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
class BadRowKeyError(Exception):
    """Raised for an invalid row key; the exact validation rules live at the raise sites."""
    pass
class EmptyColumnError(Exception):
    """Raised when a required column value is empty; conditions defined by the raise sites."""
    pass
|
[
"543587590@qq.com"
] |
543587590@qq.com
|
7ff7ebba377cd3e6d83e88368536f529b763202f
|
e966ac971af90faff55fce232620f3d0ad7f7fb8
|
/com/swj/OOP/Fundamental.py
|
a0241ea4e38f9bd58018cf65265a64b4d8590778
|
[] |
no_license
|
shouguouo/PythonDemo
|
f987b9849e01806ccb6c370bbd4d4ba9675629ec
|
d9011506e3474054e2f5b1246f8e014facea7961
|
refs/heads/master
| 2021-09-21T23:55:53.258819
| 2018-09-03T15:47:27
| 2018-09-03T15:47:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,441
|
py
|
# -*- coding: utf-8 -*-
# class Student(object): # 表示从object类中继承
# pass
# bart = Student()
# print(bart)
# print(Student)
# bart.name = 'swj'
# print(bart.name)
# class Student(object):
# def __init__(self, name, score): # 第一个参数永远是self表示创建的实例本身 不用传 其他参数需要传
# self.name = name
# self.score = score
# def print_score(self):
# print('%s:%s'%(self.name, self.score))
# def get_grade(self):
# if self.score >= 90:
# return 'A'
# elif self.score >= 60:
# return 'B'
# else:
# return 'C'
# bart = Student('swj', 99)
# print(bart.get_grade())
#
# # 数据封装
# bart.print_score()
# 访问权限 实例变量以__开头就变成了私有变量
# class Student(object):
# def __init__(self, name, score):
# self.__name = name
# self.__score = score
# def print_score(self):
# print('%s:%s'%(self.__name, self.__score))
# def get_name(self):
# return self.__name
# def get_score(self):
# return self.__score
# def set_score(self, score):
# if 0 <= score <= 100:
# self.__score = score
# else:
# raise ValueError('bad score')
#
# s = Student('swj', 99)
# # print(s.__name) # 无法访问
# print(s.get_name())
# s.print_score()
#
# # 类似__xx__的实例变量名 是特殊变量(以双下划线开头以双下划线结尾) 可以直接访问 但是不能用__name__ __score__这样的变量名
# # 以单下划线开头的变量 外部可以访问 但是“虽然我可以被访问,但是,请把我视为私有变量,不要随意访问”
# print(s._Student__name) # 私有变量也可以访问 但是强烈建议不要这么做 私有变量被内部包装为_Student__name
# s._Student__name = 'xhy'
# print(s.get_name())
# 继承与多态
class Animal(object):
    """Base class of the inheritance/polymorphism demo."""

    def run(self):
        # Subclasses override this with their own message.
        print('animal is running...')
class Dog(Animal):
    """Animal subclass that overrides run() and adds eat()."""

    def run(self):
        print('dog is running...')

    def eat(self):
        print('dog is eating...')
class Cat(Animal):
    """Animal subclass that overrides run() and adds eat().

    Bug fix: eat() previously printed 'cat is running...' (a copy-paste
    slip from run()); it now reports eating, matching Dog.eat().
    """

    def run(self):
        print('cat is running...')

    def eat(self):
        print('cat is eating...')
def run_twice(animal):
    """Call animal.run() twice -- duck typing in action: any object with a
    run() method is accepted, not just Animal instances."""
    for _ in range(2):
        animal.run()
# Demo calls: each class resolves run() to its own override.
Dog().run()
Cat().run()
run_twice(Animal())
run_twice(Dog()) # polymorphism: the Dog override is dispatched at run time
# Open/Closed principle -- open for extension: new Animal subclasses can be added;
# closed for modification: functions like run_twice() that accept an Animal need no change.
# Static vs dynamic typing: a static language would require an Animal (or
# subclass) instance to call run(); a dynamic language only requires that the
# passed object HAS a run() method -- duck typing (cf. file-like objects).
# Inspecting object information:
type(123) # int
type('str') # str
type(None) # NoneType
type(abs) # builtin_function_or_method
type(Animal()) # __main__.Animal
# More type constants are defined in the `types` module.
# For class-hierarchy checks type() is awkward; use isinstance() -- and prefer it.
# dir() returns a list of str naming all attributes and methods of an object.
dir('dir')
# len('ABC') and 'ABC'.__len__() are equivalent; __len__ can also be user-defined.
class MyDog(Dog):
    """Dog subclass defining __len__ so that len(MyDog()) works."""
    def __len__(self):
        # Arbitrary fixed length used by the len() demo below.
        return 100
print(len(MyDog()))
# getattr()/setattr()/hasattr() can manipulate an object's state directly.
# Class attributes vs instance attributes: never give them the same name --
# the instance attribute would shadow (hide) the class attribute.
|
[
"1132331056@qq.com"
] |
1132331056@qq.com
|
bb49d8dd28b9c93d2856e8511907f5a8c6efa6fb
|
ade3b5a88b2129d2e305d7be1a36dcda283a4c59
|
/Lab3/utils.py
|
8d718c9de473a5d7800d0b6f86c650cb1ac74dc5
|
[] |
no_license
|
jelenab98/DL_FER
|
0e299003d1a41a7b502853b0643cb9e0bf8138a9
|
258eba86c708b53f96e92f2c2f5e9cb458e093ef
|
refs/heads/master
| 2023-08-21T21:51:47.256101
| 2021-10-26T14:44:54
| 2021-10-26T14:44:54
| 347,167,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,674
|
py
|
"""Dataset, vocabulary, embedding and train/eval utilities (Lab 3).

NOTE(review): this file contained an unresolved git merge conflict
(<<<<<<< HEAD ... ======= ... >>>>>>>) whose two sides were byte-identical.
Resolved by keeping a single copy; two small fixes are flagged inline.
"""
from sklearn.metrics import confusion_matrix as conf_matrix
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import Dataset
from torch.nn import Embedding
from pathlib import Path
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
PADDING_TOKEN = "<PAD>"  # always index 0 in non-target vocabularies
UNKNOWN_TOKEN = "<UNK>"  # always index 1 in non-target vocabularies


class Instance:
    """One (tokenised text, label) example."""

    def __init__(self, input_text: [str], target: str):
        self.text = input_text
        self.label = target


class Vocab:
    """Token <-> index mapping built from a token-frequency dict.

    Non-target vocabularies reserve index 0 for <PAD> and 1 for <UNK>.
    Tokens are inserted most-frequent first; ``max_size == -1`` means no
    size cap, and insertion stops at the first token below ``min_freq``
    (valid because frequencies are iterated in descending order).
    """

    def __init__(self, frequencies: dict, max_size: int = -1, min_freq: int = 0, is_target: bool = False):
        if is_target:
            self.stoi = dict()
            self.itos = dict()
        else:
            self.stoi = {PADDING_TOKEN: 0, UNKNOWN_TOKEN: 1}
            self.itos = {0: PADDING_TOKEN, 1: UNKNOWN_TOKEN}
        self.is_target = is_target
        self.max_size = max_size
        self.min_freq = min_freq
        i = len(self.itos)
        for key, value in sorted(frequencies.items(), key=lambda x: x[1], reverse=True):
            if (self.max_size != -1) and (len(self.itos) >= self.max_size):
                break
            if value >= self.min_freq:
                self.stoi[key] = i
                self.itos[i] = key
                i += 1
            else:
                # Descending frequency order: nothing further can qualify.
                break

    def __len__(self):
        return len(self.itos)

    def encode(self, inputs: [str]):
        """Numericalise a token list; unknown tokens map to <UNK>."""
        numericalized_inputs = []
        for token in inputs:
            if token in self.stoi:
                numericalized_inputs.append(self.stoi[token])
            else:
                numericalized_inputs.append(self.stoi[UNKNOWN_TOKEN])
        return torch.tensor(numericalized_inputs)

    def reverse_numericalize(self, inputs: list):
        """Map indices back to tokens; unknown indices become <UNK>."""
        tokens = []
        for numericalized_item in inputs:
            if numericalized_item in self.itos:
                tokens.append(self.itos[numericalized_item])
            else:
                tokens.append(UNKNOWN_TOKEN)
        return tokens


class NLPDataset(Dataset):
    """CSV-backed dataset of (numericalised text, numericalised label) pairs.

    The CSV has no header; column 0 is the raw text, column 1 the label.
    """

    def __init__(self, text_vocab: Vocab, target_vocab: Vocab, path: Path):
        self.vocab_input_text = text_vocab
        self.vocab_targets = target_vocab
        self.instances = []
        data = pd.read_csv(path, header=None)
        for i in range(len(data)):
            text = data[0][i]
            label = data[1][i]
            self.instances.append(Instance(space_tokenizer(text), label.strip()))

    def __len__(self):
        return len(self.instances)

    def __getitem__(self, item):
        instance_item = self.instances[item]
        text = instance_item.text
        label = [instance_item.label]
        return self.vocab_input_text.encode(text), self.vocab_targets.encode(label)


def space_tokenizer(raw_text: str):
    """Split on single spaces after stripping trailing newline characters."""
    return raw_text.strip("\n").strip("\r").split(" ")


def get_embedding_matrix(vocab: Vocab, dim: int = 300, freeze: bool = True, path: Path = None):
    """Build an Embedding layer for *vocab*.

    Rows default to N(0, 1) noise (row 0, the padding row, is zeros); when
    *path* points to a space-separated pretrained-vector file, vectors for
    in-vocabulary tokens overwrite the random rows.
    """
    matrix = torch.normal(mean=0, std=1, size=(len(vocab), dim))
    matrix[0] = torch.zeros(size=[dim])
    if path is not None:
        data = pd.read_csv(path, header=None, delimiter=" ")
        for i in range(len(data)):
            row = data.loc[i]
            token = row.loc[0]
            if token in vocab.stoi:
                tmp_array = []
                for j in range(1, len(row)):
                    tmp_array.append(row[j])
                matrix[vocab.stoi[token]] = torch.tensor(tmp_array)
    return Embedding.from_pretrained(matrix, padding_idx=0, freeze=freeze)


def pad_collate_fn(batch, pad_index=0):
    """Collate (text, label) pairs: pad texts to equal length and return
    (padded_texts, labels, original_lengths)."""
    texts, labels = zip(*batch)
    lengths = torch.tensor([len(text) for text in texts])
    return pad_sequence(texts, batch_first=True, padding_value=pad_index), torch.tensor(labels), lengths


def get_frequencies(path, is_target=False):
    """Count token frequencies in column 0 (or column 1 for targets) of a
    header-less CSV file."""
    frequencies = {}
    data = pd.read_csv(path, header=None)
    idx = 1 if is_target else 0
    for i in range(len(data)):
        inputs = data[idx][i].strip().split(" ")
        for token in inputs:
            if token in frequencies:
                frequencies[token] += 1
            else:
                frequencies[token] = 1
    return frequencies


def train_valid(model, train_data, valid_data, optimizer, criterion, train_logger,
                valid_logger, save_path: Path = None, epochs=100, gradient_clip=0.25):
    """Train *model* with per-epoch validation; saves the best-F1 checkpoint
    to save_path / 'best_model.pth' and logs loss/acc/F1 per epoch."""
    best_f1 = -1
    for epoch in range(epochs):
        model.train()
        confusion_matrix = np.zeros(shape=(2, 2))
        losses = []
        for idx, batch in tqdm(enumerate(train_data), total=len(train_data)):
            model.zero_grad()
            x, y, lengths = batch
            x = x.to(device)
            y = y.to(device)
            output = model(x).reshape(y.shape)
            loss = criterion(output, y.float())
            loss.backward()
            clip_grad_norm_(model.parameters(), max_norm=gradient_clip)
            optimizer.step()
            predictions = torch.sigmoid(output).round().int().detach().cpu().numpy()
            confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions)
            losses.append(loss.item())
        acc, p, r, f1 = calculate_stats(confusion_matrix)
        train_stats = f"Loss: {np.average(losses):.4f}, Acc: {100 * acc:.2f}%, F1: {100 * f1:.2f}%"
        train_stats2 = f"{np.average(losses)}, {acc}, {f1}"
        print("[TRAIN STATS:] " + train_stats)
        train_logger.update(train_stats2)
        acc_v, p_v, r_v, f1_v, loss_v = evaluate(model, valid_data, criterion)
        valid_stats = f"Loss: {np.average(loss_v):.4f}, Acc: {100 * acc_v:.2f}%, F1: {100 * f1_v:.2f}%"
        valid_stats2 = f"{np.average(loss_v)}, {acc_v}, {f1_v}"
        print("[VALID STATS:] " + valid_stats)
        valid_logger.update(valid_stats2)
        if f1_v > best_f1:
            # Bug fix: best_f1 was never updated, so every epoch "won" and
            # the checkpoint was overwritten unconditionally.
            best_f1 = f1_v
            torch.save(model, save_path / "best_model.pth")
            print(f"Best model saved at {epoch} epoch.")


def calculate_stats(confusion_matrix):
    """Return (accuracy, precision, recall, F1) from a 2x2 confusion matrix
    where class 0 is treated as the positive class."""
    acc = np.sum(confusion_matrix.diagonal()) / np.sum(confusion_matrix)
    p = confusion_matrix[0, 0] / np.sum(confusion_matrix[0, :])
    r = confusion_matrix[0, 0] / np.sum(confusion_matrix[:, 0])
    f1 = 2 * p * r / (p + r)
    return acc, p, r, f1


def evaluate(model, data, criterion):
    """Evaluate *model* on *data*; returns (acc, precision, recall, f1, loss)."""
    confusion_matrix = np.zeros(shape=(2, 2))
    losses = list()
    model.eval()
    with torch.no_grad():
        for idx, batch in tqdm(enumerate(data), total=len(data)):
            x, y, lengths = batch
            x = x.to(device)
            y = y.to(device)
            # Consistency fix: use positional reshape(y.shape) as in
            # train_valid (Tensor.reshape is documented as reshape(*shape)).
            output = model(x).reshape(y.shape)
            loss = criterion(output, y.float())
            losses.append(loss.item())
            predictions = torch.sigmoid(output).round().int().detach().cpu().numpy()
            confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions)
    acc, p, r, f1 = calculate_stats(confusion_matrix)
    loss = np.average(losses)
    return acc, p, r, f1, loss


class Logger:
    """Append-only text logger: the constructor truncates the file and
    writes *start_message*; update() appends one line per call."""

    def __init__(self, path: Path, start_message: str):
        with path.open(mode="w") as f:
            f.write(f"{start_message}\n")
        self.path = path

    def update(self, message):
        with self.path.open(mode="a") as f:
            f.write(f"{message}\n")
|
[
"jelena.bratulic@gmail.hr"
] |
jelena.bratulic@gmail.hr
|
76755ff963dbd261a204a635342afde89fe3cf1b
|
f12ca610566e7249c892811bafc37594abe7895a
|
/orangecontrib/text/country_codes.py
|
17a5b1ff2687507b4e62449ea0e34095ab18856a
|
[
"BSD-2-Clause"
] |
permissive
|
nagyistoce/orange3-text
|
d04e6dfa68a7e86a4947c08bc2a078b4c0e772f5
|
fbdc3320b00a88c62ba866a671f28694958f6921
|
refs/heads/master
| 2021-01-21T09:43:25.598139
| 2015-06-27T14:32:09
| 2015-06-27T14:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,422
|
py
|
"""Country names to ISO3166_alpha2 codes mapping
Roughly generated by the following bash script on GNU/Linux:
while read cc name; do
[ ! "$cc" ] &&
continue
out=$(isoquery $cc | cut -f3 --complement);
[ ! "$out" ] &&
out="$cc"
[ "$(echo $out | cut -f3)" = "$name" ] &&
name=''
echo -e "$out\t$name" |
sed -r 's/\s+$//' |
sed -r "s/\t/': ['/" |
sed -r "s/\t/', '/g" |
sed -r "s/^/'/" |
sed -r 's/$/'"'"',],/'
done < input/cc.list # cc.list from jVectorMap; format: lines start with ISO3166_alpha2_code else copied as is
Certain details updated by hand.
"""
# ISO 3166-1 alpha-2 code -> [alpha-3 code, official name, *common aliases].
# Includes Mediterranean-rim / Middle-East entries (DZ, EG, IL, ...) that the
# Europe map renders, plus pseudo-codes ('_0' Kosovo, '-99' North Cyprus)
# used by the jVectorMap data this was generated from.
CC_EUROPE = {
    '_0': ['Kosovo', 'Kosovo, Republic of'],
    '-99': ['N. Cyprus', 'North Cyprus'],
    'AD': ['AND', 'Andorra'],
    'AL': ['ALB', 'Albania'],
    'AT': ['AUT', 'Austria'],
    'AX': ['ALA', 'Åland Islands', 'Aland'],
    'BA': ['BIH', 'Bosnia and Herzegovina', 'Bosnia and Herz.'],
    'BE': ['BEL', 'Belgium'],
    'BG': ['BGR', 'Bulgaria'],
    'BY': ['BLR', 'Belarus'],
    'CH': ['CHE', 'Switzerland'],
    'CY': ['CYP', 'Cyprus'],
    'CZ': ['CZE', 'Czech Republic', 'Czech Rep.'],
    'DE': ['DEU', 'Germany'],
    'DK': ['DNK', 'Denmark'],
    'DZ': ['DZA', 'Algeria'],
    'EE': ['EST', 'Estonia'],
    'EG': ['EGY', 'Egypt'],
    'ES': ['ESP', 'Spain'],
    'FI': ['FIN', 'Finland'],
    'FO': ['FRO', 'Faroe Islands', 'Faeroe Is.'],
    'FR': ['FRA', 'France'],
    'GB': ['GBR', 'United Kingdom'],
    'GE': ['GEO', 'Georgia'],
    'GG': ['GGY', 'Guernsey'],
    'GR': ['GRC', 'Greece'],
    'HR': ['HRV', 'Croatia'],
    'HU': ['HUN', 'Hungary'],
    'IE': ['IRL', 'Ireland'],
    'IL': ['ISR', 'Israel'],
    'IM': ['IMN', 'Isle of Man'],
    'IQ': ['IRQ', 'Iraq'],
    'IS': ['ISL', 'Iceland'],
    'IT': ['ITA', 'Italy'],
    'JE': ['JEY', 'Jersey'],
    'JO': ['JOR', 'Jordan'],
    'LB': ['LBN', 'Lebanon'],
    'LI': ['LIE', 'Liechtenstein'],
    'LT': ['LTU', 'Lithuania'],
    'LU': ['LUX', 'Luxembourg'],
    'LV': ['LVA', 'Latvia'],
    'LY': ['LBY', 'Libya'],
    'MA': ['MAR', 'Morocco'],
    'MD': ['MDA', 'Moldova, Republic of', 'Moldova'],
    'ME': ['MNE', 'Montenegro'],
    'MK': ['MKD', 'Macedonia, Republic of', 'Macedonia'],
    'MT': ['MLT', 'Malta'],
    'NL': ['NLD', 'Netherlands'],
    'NO': ['NOR', 'Norway'],
    'PL': ['POL', 'Poland'],
    'PS': ['PSE', 'Palestine, State of', 'Palestine'],
    'PT': ['PRT', 'Portugal'],
    'RO': ['ROU', 'Romania'],
    'RS': ['SRB', 'Serbia'],
    'RU': ['RUS', 'Russian Federation', 'Russia'],
    'SA': ['SAU', 'Saudi Arabia'],
    'SE': ['SWE', 'Sweden'],
    'SI': ['SVN', 'Slovenia'],
    'SK': ['SVK', 'Slovakia'],
    'SM': ['SMR', 'San Marino'],
    'SY': ['SYR', 'Syrian Arab Republic', 'Syria'],
    'TN': ['TUN', 'Tunisia'],
    'TR': ['TUR', 'Turkey'],
    'UA': ['UKR', 'Ukraine'],
}
# Same layout as CC_EUROPE: alpha-2 -> [alpha-3, official name, *aliases].
# '_1' is a pseudo-code for the unrecognized territory of Somaliland.
CC_WORLD = {
    # Does NOT include CC_EUROPE
    '_1': ['Somaliland',],
    'AE': ['ARE', 'United Arab Emirates'],
    'AF': ['AFG', 'Afghanistan'],
    'AM': ['ARM', 'Armenia'],
    'AO': ['AGO', 'Angola'],
    'AR': ['ARG', 'Argentina'],
    'AU': ['AUS', 'Australia'],
    'AZ': ['AZE', 'Azerbaijan'],
    'BD': ['BGD', 'Bangladesh'],
    'BF': ['BFA', 'Burkina Faso'],
    'BI': ['BDI', 'Burundi'],
    'BJ': ['BEN', 'Benin'],
    'BN': ['BRN', 'Brunei Darussalam', 'Brunei'],
    'BO': ['BOL', 'Bolivia, Plurinational State of', 'Bolivia'],
    'BR': ['BRA', 'Brazil'],
    'BS': ['BHS', 'Bahamas'],
    'BT': ['BTN', 'Bhutan'],
    'BW': ['BWA', 'Botswana'],
    'BZ': ['BLZ', 'Belize'],
    'CA': ['CAN', 'Canada'],
    'CD': ['COD', 'Congo, The Democratic Republic of the', 'Dem. Rep. Congo'],
    'CF': ['CAF', 'Central African Republic', 'Central African Rep.'],
    'CG': ['COG', 'Congo'],
    'CI': ['CIV', "Côte d'Ivoire"],
    'CL': ['CHL', 'Chile'],
    'CM': ['CMR', 'Cameroon'],
    'CN': ['CHN', 'China'],
    'CO': ['COL', 'Colombia'],
    'CR': ['CRI', 'Costa Rica'],
    'CU': ['CUB', 'Cuba'],
    'DJ': ['DJI', 'Djibouti'],
    'DO': ['DOM', 'Dominican Republic', 'Dominican Rep.'],
    'EC': ['ECU', 'Ecuador'],
    'EH': ['ESH', 'Western Sahara', 'W. Sahara'],
    'ER': ['ERI', 'Eritrea'],
    'ET': ['ETH', 'Ethiopia'],
    'FJ': ['FJI', 'Fiji'],
    'FK': ['FLK', 'Falkland Islands [Malvinas]', 'Falkland Is.'],
    'GA': ['GAB', 'Gabon'],
    'GH': ['GHA', 'Ghana'],
    'GL': ['GRL', 'Greenland'],
    'GM': ['GMB', 'Gambia'],
    'GN': ['GIN', 'Guinea'],
    'GQ': ['GNQ', 'Equatorial Guinea', 'Eq. Guinea'],
    'GT': ['GTM', 'Guatemala'],
    'GW': ['GNB', 'Guinea-Bissau'],
    'GY': ['GUY', 'Guyana'],
    'HN': ['HND', 'Honduras'],
    'HT': ['HTI', 'Haiti'],
    'ID': ['IDN', 'Indonesia'],
    'IN': ['IND', 'India'],
    'IR': ['IRN', 'Iran, Islamic Republic of', 'Iran'],
    'JM': ['JAM', 'Jamaica'],
    'JP': ['JPN', 'Japan'],
    'KE': ['KEN', 'Kenya'],
    'KG': ['KGZ', 'Kyrgyzstan'],
    'KH': ['KHM', 'Cambodia'],
    'KP': ['PRK', "Korea, Democratic People's Republic of", 'Dem. Rep. Korea', 'North Korea'],
    'KR': ['KOR', 'Korea, Republic of', 'Korea', 'South Korea'],
    'KW': ['KWT', 'Kuwait'],
    'KZ': ['KAZ', 'Kazakhstan'],
    'LA': ['LAO', "Lao People's Democratic Republic", 'Lao PDR'],
    'LK': ['LKA', 'Sri Lanka'],
    'LR': ['LBR', 'Liberia'],
    'LS': ['LSO', 'Lesotho'],
    'MG': ['MDG', 'Madagascar'],
    'ML': ['MLI', 'Mali'],
    'MM': ['MMR', 'Myanmar'],
    'MN': ['MNG', 'Mongolia'],
    'MR': ['MRT', 'Mauritania'],
    'MW': ['MWI', 'Malawi'],
    'MX': ['MEX', 'Mexico'],
    'MY': ['MYS', 'Malaysia'],
    'MZ': ['MOZ', 'Mozambique'],
    'NA': ['NAM', 'Namibia'],
    'NC': ['NCL', 'New Caledonia'],
    'NE': ['NER', 'Niger'],
    'NG': ['NGA', 'Nigeria'],
    'NI': ['NIC', 'Nicaragua'],
    'NP': ['NPL', 'Nepal'],
    'NZ': ['NZL', 'New Zealand'],
    'OM': ['OMN', 'Oman'],
    'PA': ['PAN', 'Panama'],
    'PE': ['PER', 'Peru'],
    'PG': ['PNG', 'Papua New Guinea'],
    'PH': ['PHL', 'Philippines'],
    'PK': ['PAK', 'Pakistan'],
    'PR': ['PRI', 'Puerto Rico'],
    'PY': ['PRY', 'Paraguay'],
    'QA': ['QAT', 'Qatar'],
    'RW': ['RWA', 'Rwanda'],
    'SB': ['SLB', 'Solomon Islands', 'Solomon Is.'],
    'SD': ['SDN', 'Sudan'],
    'SL': ['SLE', 'Sierra Leone'],
    'SN': ['SEN', 'Senegal'],
    'SO': ['SOM', 'Somalia'],
    'SR': ['SUR', 'Suriname'],
    'SS': ['SSD', 'South Sudan', 'S. Sudan'],
    'SV': ['SLV', 'El Salvador'],
    'SZ': ['SWZ', 'Swaziland'],
    'TD': ['TCD', 'Chad'],
    'TF': ['ATF', 'French Southern Territories', 'Fr. S. Antarctic Lands'],
    'TG': ['TGO', 'Togo'],
    'TH': ['THA', 'Thailand'],
    'TJ': ['TJK', 'Tajikistan'],
    'TL': ['TLS', 'Timor-Leste'],
    'TM': ['TKM', 'Turkmenistan'],
    'TT': ['TTO', 'Trinidad and Tobago'],
    'TW': ['TWN', 'Taiwan, Province of China', 'Taiwan'],
    'TZ': ['TZA', 'Tanzania, United Republic of', 'Tanzania'],
    'UG': ['UGA', 'Uganda'],
    'US': ['USA', 'United States', 'United States of America'],
    'UY': ['URY', 'Uruguay'],
    'UZ': ['UZB', 'Uzbekistan'],
    'VE': ['VEN', 'Venezuela, Bolivarian Republic of', 'Venezuela'],
    'VN': ['VNM', 'Viet Nam', 'Vietnam'],
    'VU': ['VUT', 'Vanuatu'],
    'YE': ['YEM', 'Yemen'],
    'ZA': ['ZAF', 'South Africa'],
    'ZM': ['ZMB', 'Zambia'],
    'ZW': ['ZWE', 'Zimbabwe'],
}
# Merge the European entries so CC_WORLD covers the whole world.
CC_WORLD.update(CC_EUROPE)
# US states: ISO 3166-2:US code -> [USPS two-letter abbreviation, state name].
CC_USA = {
    'US-AK': ['AK', 'Alaska'],
    'US-AL': ['AL', 'Alabama'],
    'US-AR': ['AR', 'Arkansas'],
    'US-AZ': ['AZ', 'Arizona'],
    'US-CA': ['CA', 'California'],
    'US-CO': ['CO', 'Colorado'],
    'US-CT': ['CT', 'Connecticut'],
    'US-DC': ['DC', 'District of Columbia'],
    'US-DE': ['DE', 'Delaware'],
    'US-FL': ['FL', 'Florida'],
    'US-GA': ['GA', 'Georgia'],
    'US-HI': ['HI', 'Hawaii'],
    'US-IA': ['IA', 'Iowa'],
    'US-ID': ['ID', 'Idaho'],
    'US-IL': ['IL', 'Illinois'],
    'US-IN': ['IN', 'Indiana'],
    'US-KS': ['KS', 'Kansas'],
    'US-KY': ['KY', 'Kentucky'],
    'US-LA': ['LA', 'Louisiana'],
    'US-MA': ['MA', 'Massachusetts'],
    'US-MD': ['MD', 'Maryland'],
    'US-ME': ['ME', 'Maine'],
    'US-MI': ['MI', 'Michigan'],
    'US-MN': ['MN', 'Minnesota'],
    'US-MO': ['MO', 'Missouri'],
    'US-MS': ['MS', 'Mississippi'],
    'US-MT': ['MT', 'Montana'],
    'US-NC': ['NC', 'North Carolina'],
    'US-ND': ['ND', 'North Dakota'],
    'US-NE': ['NE', 'Nebraska'],
    'US-NH': ['NH', 'New Hampshire'],
    'US-NJ': ['NJ', 'New Jersey'],
    'US-NM': ['NM', 'New Mexico'],
    'US-NV': ['NV', 'Nevada'],
    'US-NY': ['NY', 'New York'],
    'US-OH': ['OH', 'Ohio'],
    'US-OK': ['OK', 'Oklahoma'],
    'US-OR': ['OR', 'Oregon'],
    'US-PA': ['PA', 'Pennsylvania'],
    'US-RI': ['RI', 'Rhode Island'],
    'US-SC': ['SC', 'South Carolina'],
    'US-SD': ['SD', 'South Dakota'],
    'US-TN': ['TN', 'Tennessee'],
    'US-TX': ['TX', 'Texas'],
    'US-UT': ['UT', 'Utah'],
    'US-VA': ['VA', 'Virginia'],
    'US-VT': ['VT', 'Vermont'],
    'US-WA': ['WA', 'Washington'],
    'US-WI': ['WI', 'Wisconsin'],
    'US-WV': ['WV', 'West Virginia'],
    'US-WY': ['WY', 'Wyoming'],
}
def _invert_mapping(dict):
return {v:k for k in dict for v in dict[k]}
# Inverse lookups: any alpha-3 code / name / alias -> the alpha-2 (or US-xx) key.
INV_CC_EUROPE = _invert_mapping(CC_EUROPE)
INV_CC_WORLD = _invert_mapping(CC_WORLD)
INV_CC_USA = _invert_mapping(CC_USA)
# Flat sets of every recognized token (aliases/names plus primary codes)
# for fast membership tests.
SET_CC_EUROPE = set(INV_CC_EUROPE.keys()) | set(INV_CC_EUROPE.values())
SET_CC_USA = set(INV_CC_USA.keys()) | set(INV_CC_USA.values())
|
[
"kerncece@gmail.com"
] |
kerncece@gmail.com
|
0d3f672dc0e572c955fb17809d11692cbcc434be
|
c01e107f3b781df76f83ca470c22c32cacf7ddb3
|
/src/qsimulator.py
|
1d6549fd4668f7e4d098970cfbe3b45a9491cc92
|
[] |
no_license
|
UB-Quantic/EG-VQClass
|
593d24d10da3295532fa2064d098b59de433e91e
|
ff3ae612d666c80d6dbc38d461ecae79e3c82208
|
refs/heads/master
| 2020-04-27T17:28:11.308759
| 2019-03-26T20:28:21
| 2019-03-26T20:28:21
| 174,520,998
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,336
|
py
|
import numpy as np
import math
import cmath
Pi = math.pi
class QC(object):
    """Pure-Python state-vector simulator for a small quantum register.

    The state is a flat list of 2**size complex amplitudes; qubit ``m``
    corresponds to bit ``m`` of the basis-state index (qubit 0 is the least
    significant bit).
    """

    def __init__(self, qubits):
        self.size = qubits
        # The quantum state is initialized with all qubits at 0 (|00...0>).
        self.state = [0]*2**self.size
        self.state[0] = 1.

    def initialize(self):
        """Brings the state vector back to its initial state.
        """
        self.state = [0]*2**self.size
        self.state[0] = 1.

    ###############################
    # 1-Qubit Gates
    ###############################
    def h(self, m):
        """Apply the Hadamard Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        s = 1/np.sqrt(2)
        if m>=self.size: raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            # I/J are the paired basis indices that differ only in bit m.
            I = 2*i-i%(2**m)
            J = I+2**m
            a = s*self.state[I] + s*self.state[J]
            b = s*self.state[I] - s*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def x(self, m):
        """Apply the X Pauli Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            I = 2*i-i%(2**m)
            J = I+2**m
            a = self.state[I]
            self.state[I] = self.state[J]
            self.state[J] = a

    def y(self, m):
        """Apply the Y Pauli Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            I = 2*i -i%(2**m)
            J = I+2**m
            a = -1.j * self.state[I]
            self.state[I] = 1.j*self.state[J]
            self.state[J] = a

    def z(self, m):
        """Apply the Z Pauli Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            # Only amplitudes with bit m set pick up the -1 phase.
            J = 2*i - i%(2**m) + 2**m
            self.state[J] *= -1

    def s(self, m):
        """Apply the Phase Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            J = 2*i - i%(2**m) + 2**m
            self.state[J] *= 1.j

    def t(self, m):
        """Apply the pi/8 Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        aux = cmath.exp(0.25j*math.pi)
        for i in range(2**(self.size-1)):
            J = 2*i - i%(2**m) + 2**m
            self.state[J] *= aux

    def rx(self, m, th):
        """Apply a x-rotation on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
            th (float): angle we rotate.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        th2 = 0.5*th
        c = math.cos(th2)
        s = -1.j * math.sin(th2) # beware of conventions
        for i in range(2**(self.size-1)):
            I = 2*i - i%2**m
            J = I + 2**m
            a = c*self.state[I] + s*self.state[J]
            b = s*self.state[I] + c*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def ry(self, m, th):
        """Apply a y-rotation on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
            th (float): angle we rotate.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        th2 = 0.5*th
        c = math.cos(th2)
        s = math.sin(th2) # beware of conventions
        for i in range(2**(self.size-1)):
            I = 2*i - i%2**m
            J = I + 2**m
            a = c*self.state[I] - s*self.state[J]
            b = s*self.state[I] + c*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def rz(self, m, th):
        """Apply a z-rotation on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
            th (float): angle we rotate.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        aux1 = cmath.exp(0.5j*th)
        aux2 = cmath.exp(-0.5j*th)
        for i in range(2**(self.size-1)):
            I = 2*i - i%2**m
            J = I + 2**m
            self.state[I] *= aux1
            self.state[J] *= aux2

    #######################################
    # 2-Qubit Gates, Entanglement
    #######################################
    def cnot(self, c, t):
        """Apply a Controlled-NOT gate.

        Args.
            c (int): control qubit.
            t (int): target qubit.
        """
        if c>=self.size: raise ValueError('Control does not exist.')
        if t>=self.size: raise ValueError('Target does not exist.')
        if c==t: raise ValueError('Control and Target cannot be the same.')
        for i in range(2**(self.size-2)):
            # I enumerates indices with the control bit set and target clear;
            # J is the partner index with the target bit set.
            I = (2**c + i%2**c + ((i-i%2**c)*2)%2**t + 2*((i-i%2**c)*2 -
                 ((2*(i-i%2**c))%2**t)))
            J = I + 2**t
            self.state[I], self.state[J] = self.state[J], self.state[I]

    def cz(self, c, t):
        """Apply a Controlled-Z gate.

        Args.
            c (int): control qubit.
            t (int): target qubit.
        """
        if c>=self.size: raise ValueError('Control does not exist.')
        if t>=self.size: raise ValueError('Target does not exist.')
        if c==t: raise ValueError('Control and Target cannot be the same.')
        # CZ is symmetric, so normalize to c < t for the index formula.
        if t<c: t,c = c,t
        for i in range(2**(self.size-2)):
            I = (2**c + i%2**c + ((i-i%2**c)*2)%2**t + 2*((i-i%2**c)*2 -
                 ((2*(i-i%2**c))%2**t)) + 2**t)
            self.state[I] *= -1

    def swap(self, m, n):
        """Apply a SWAP gate.

        Args.
            m (int): first qubit.
            n (int): second qubit.
        """
        if m>=self.size: raise ValueError('First Qubit does not exist.')
        if n>=self.size: raise ValueError('Second Qubit does not exist.')
        if m==n: raise ValueError('Both Qubits cannot be the same.')
        for i in range(2**(self.size-2)):
            I = (i%2**m + ((i-i%2**m)*2)%2**n + 2*((i-i%2**m)*2 -
                 ((2*(i-i%2**m))%2**n)) + 2**n)
            J = I + 2**m - 2**n
            self.state[I], self.state[J] = self.state[J], self.state[I]

    ############################################
    # Circuits
    ############################################
    # The following were created for classification using 4-qubits
    def encode(self, point):
        """Creates the encoding layer.

        Args.
            point (dim=2 float): coordinates of one input point.
        """
        for i in range(self.size):
            self.h(i)
            self.rz(i, point[i%2])

    # NOTE: the qubits defaults below are tuples instead of lists to avoid
    # the mutable-default-argument pitfall; they are only ever indexed.
    def blocka(self, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of type a (rx layer, ry layer).

        Args.
            angles (dim=8 float): rotation angles for each gate .
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.rx(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.ry(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockb(self, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of type b (ry layer, rx layer).

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.ry(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.rx(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockc(self, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of type c (alternating rx/ry, swapped per layer).

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        self.rx(qubits[0], angles[0])
        self.ry(qubits[1], angles[1])
        self.rx(qubits[2], angles[2])
        self.ry(qubits[3], angles[3])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        self.ry(qubits[0], angles[4])
        self.rx(qubits[1], angles[5])
        self.ry(qubits[2], angles[6])
        self.rx(qubits[3], angles[7])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockd(self, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of type d (alternating rx/ry, same per layer).

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        self.rx(qubits[0], angles[0])
        self.ry(qubits[1], angles[1])
        self.rx(qubits[2], angles[2])
        self.ry(qubits[3], angles[3])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        self.rx(qubits[0], angles[4])
        self.ry(qubits[1], angles[5])
        self.rx(qubits[2], angles[6])
        self.ry(qubits[3], angles[7])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockx(self, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of type x (two rx layers).

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.rx(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.rx(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blocky(self, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of type y (two ry layers).

        Args.
            angles(dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.ry(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.ry(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def add(self, typ, angles, qubits=(0, 1, 2, 3)):
        """Adds a block of a certain type in a given position.

        Args.
            typ (char): type of circuit 'a', 'b', 'c', 'd', 'x' or 'y'.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): which qubits the block acts on.
        Rets.
            success (int): 0 on success, 1 if `typ` is not a known type.
        """
        if(typ not in 'abcdxy'):
            print("Wrong key for type.")
            return 1
        # BUG FIX: the previous dict-literal dispatch *called* every block
        # builder while building the dict, so all six blocks were applied to
        # the state and the selected value was always None. Look up the bound
        # method first, then call only the requested one.
        dispatch = {
            'a': self.blocka,
            'b': self.blockb,
            'c': self.blockc,
            'd': self.blockd,
            'x': self.blockx,
            'y': self.blocky,
        }
        dispatch[typ](angles, qubits)
        return 0

    # The following are intended to be used with 1-qubit circuits.
    def unitary(self, m, theta, phi, lamb):
        """Apply an arbitrary unitary gate on the m'th qubit.

        Every unitary gate is characterized by three angles (U3 convention):
        U = [[cos(theta/2),            -e^{i lamb} sin(theta/2)],
             [e^{i phi} sin(theta/2),   e^{i (phi+lamb)} cos(theta/2)]]

        Args.
            m (int): qubit the gate is applied on.
            theta (float): rotation angle.
            phi (float): first phase angle.
            lamb (float): second phase angle.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        # BUG FIX: the body previously used an undefined name `gamma`
        # (NameError at runtime); the rotation angle parameter is `theta`.
        c = math.cos(0.5*theta)
        s = math.sin(0.5*theta)
        ephi = cmath.exp(1j*phi)
        elamb = cmath.exp(1j*lamb)
        for i in range(2**(self.size-1)):
            I = 2*i -i%(2**m)
            J = I+2**m
            a = c*self.state[I] - s*elamb*self.state[J]
            b = s*ephi*self.state[I] + c*ephi*elamb*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def block(self, m, point, angles, style=0):
        """Apply a learning block on the m'th qubit.

        Args.
            m (int): qubit the block is applied on.
            point (dim=2 float): coordinates of input.
            angles (dim=3 float): angles that determine a unitary gate.
            style (int): customizes the block.
        """
        if m>=self.size: raise ValueError('Qubit does not exist.')
        if style:
            # Data re-uploading style: fold the input into the gate angles.
            self.unitary(m, point[0]+angles[0], point[1]+angles[1], angles[2])
        else:
            # Encode the point first, then apply the trainable unitary.
            self.ry(m, point[0]*0.5*Pi)
            self.rz(m, (1+point[1])*Pi)
            self.unitary(m, angles[0], angles[1], angles[2])
|
[
"emgilfuster@gmail.com"
] |
emgilfuster@gmail.com
|
2847baf0977045d715c296153c4a804ffd01798a
|
0592c83ef8bed931d310c1233a0e329a21876cbe
|
/tests/test_dataset.py
|
d4f1a84980c48f6536ff0ed15f5ab4dc09a3e1f3
|
[] |
no_license
|
datastory-org/frame2package
|
81d12439715f42dce8cdbd80853c16bba481da28
|
bea7e7d45ced2e9792078088b1e6271360bc86f8
|
refs/heads/master
| 2020-04-17T04:50:40.112731
| 2019-06-18T10:58:18
| 2019-06-18T10:58:18
| 166,248,711
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,867
|
py
|
import unittest
import pandas as pd
from frame2package import Dataset, Concept
class DatasetTestCase(unittest.TestCase):
    """Unit tests for Dataset construction from a frame plus concept specs."""

    _COLUMNS = ('country', 'capital', 'year', 'population')

    def setUp(self):
        # Two countries observed at two points in time.
        rows = [
            ('Sweden', 'Stockholm', 2000, 9_000_000),
            ('Sweden', 'Stockholm', 2019, 10_000_000),
            ('Norway', 'Oslo', 2000, 5_000_000),
            ('Norway', 'Oslo', 2019, 6_000_000),
        ]
        data = [dict(zip(self._COLUMNS, row)) for row in rows]
        # Concept declarations matching the columns above (order matters:
        # test_fails_if_missing_concepts drops the last one, 'year').
        concept_types = {
            'country': 'entity_domain',
            'capital': 'string',
            'population': 'measure',
            'year': 'time',
        }
        concepts = [{'concept': name, 'concept_type': ctype}
                    for name, ctype in concept_types.items()]
        self.data = data
        self.concepts = concepts
        self.dataset = Dataset(pd.DataFrame(data), concepts)

    def _assert_attr(self, name):
        # Helper: the dataset object must expose the given attribute.
        self.assertTrue(hasattr(self.dataset, name))

    def test_has_concepts(self):
        self._assert_attr('concepts')

    def test_has_entities(self):
        self._assert_attr('entities')

    def test_has_tables(self):
        self._assert_attr('tables')

    def test_has_data(self):
        self._assert_attr('data')

    def test_data_is_frame(self):
        self.assertTrue(type(self.dataset.data) is pd.DataFrame)

    def test_concept_type(self):
        self.assertTrue(all(type(x) is Concept
                            for x in self.dataset.concepts))

    def test_has_correct_number_of_entities(self):
        self.assertEqual(len(self.dataset.entities), 1)

    def test_fails_if_missing_concepts(self):
        frame = pd.DataFrame(self.data)
        self.assertRaises(ValueError, lambda: Dataset(frame, self.concepts[:-1]))

    def test_creates_correct_table_name(self):
        name, _ = self.dataset.tables[0]
        expected = 'ddf--datapoints--population--by--country--year.csv'
        self.assertEqual(name, expected)

    def test_creates_correct_table_size(self):
        _, table = self.dataset.tables[0]
        self.assertEqual(table.shape, (4, 3))

    def test_records_extra_string_concepts(self):
        self.assertIn('capital', self.dataset.concepts)
|
[
"robin.linderborg@gmail.com"
] |
robin.linderborg@gmail.com
|
237743cb29e83580cbade37977253888764a05b4
|
f4f54015298eedfbbdfcaaf5e2a9603112f803a5
|
/sachin/gocept.filestore-0.3/gocept.filestore-0.3/src/gocept/filestore/tests.py
|
39487c46c2cf44f18a2df60610d46b4e1e9848c4
|
[] |
no_license
|
raviramawat8/Old_Python_Codes
|
f61e19bff46856fda230a096aa789c7e54bd97ca
|
f940aed0611b0636e1a1b6826fa009ceb2473c2b
|
refs/heads/master
| 2020-03-22T22:54:50.964816
| 2018-06-16T01:39:43
| 2018-06-16T01:39:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# Copyright (c) 2007 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id: tests.py 5111 2007-08-30 11:27:23Z zagy $
import unittest
from zope.testing import doctest
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocFileSuite(
'README.txt',
optionflags=doctest.ELLIPSIS))
return suite
|
[
"sachinyadav3496@gmail.com"
] |
sachinyadav3496@gmail.com
|
a2dd70fc69879a4648eb45dac4bea8dae1233790
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/application/effect.py
|
40b158532e97911174a83a5334610da7b7a1310a
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129
| 2017-12-03T19:03:15
| 2017-12-03T19:03:15
| 112,512,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54,649
|
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.apphandler import AppHandler
from platinumegg.app.cabaret.util.api import BackendApi
import settings
from platinumegg.app.cabaret.util.url_maker import UrlMaker
from platinumegg.lib.pljson import Json
import settings_sub
from urlparse import urlparse
import urllib
from defines import Defines
from platinumegg.app.cabaret.util.scout import ScoutEventNone
from platinumegg.app.cabaret.util.card import CardUtil
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.app.cabaret.util.present import PresentSet
import datetime
from platinumegg.app.cabaret.util.datetime_util import DateTimeUtil
from platinumegg.app.cabaret.util.rediscache import LoginBonusTimeLimitedAnimationSet
from platinumegg.app.cabaret.views.application.loginbonus.base import LoginBonusHandler
class Handler(AppHandler):
"""演出のパラメータを取得.
"""
@classmethod
def get_default_status(cls):
"""デフォルトで返すHttpStatus.
"""
return 500
def processError(self, error_message):
self.response.set_status(500)
self.response.end()
def __sendErrorResponse(self, status):
self.response.set_status(status)
self.response.end()
    def checkUser(self):
        # Intentionally a no-op: this handler overrides the base class's user
        # check with nothing, presumably so effect playback is reachable
        # without the usual user validation -- TODO confirm against AppHandler.
        pass
def check_process_pre(self):
if settings_sub.IS_LOCAL:
return True
elif self.osa_util.is_dbg_user:
pass
elif not settings_sub.IS_DEV and self.osa_util.viewer_id in ('10814964', '11404810', '39121', '12852359', '1412759', '11830507', '11467913', '10128761', '11868885', '434009', '23427632', '10918839', '21655464', '17279084', '24500573', '28774432', '11739356','2588824','28978730','20174324'):
pass
elif not self.checkMaintenance():
return False
return True
def process(self):
args = self.getUrlArgs('/effect/')
ope = args.get(0)
f = getattr(self, 'proc_%s' % ope, None)
if f is None:
self.__sendErrorResponse(404)
return
f(args)
def writeResponseBody(self, params):
if self.isUsePCEffect():
body = Json.encode({
'flashVars' : self.makeFlashVars(params)
})
else:
body = Json.encode(params)
self.response.set_header('Content-Type', 'plain/text')
self.response.set_status(200)
self.response.send(body)
    def proc_battle(self, args):
        """Battle animation: return the parameters for rendering a battle result.

        Loads the viewer's stored battle result and its animation payload,
        then responds with the animation parameters plus a back-link URL.
        Responds 404 when the player or the result/animation is missing.
        """
        model_mgr = self.getModelMgr()
        v_player = self.getViewerPlayer(True)
        if v_player is None:
            # No player record for this viewer.
            self.osa_util.logger.error('Player is None. opensocial_viewer_id=%s' % self.osa_util.viewer_id)
            self.__sendErrorResponse(404)
            return
        # Result data.
        battleresult = BackendApi.get_battleresult(model_mgr, v_player.id, using=settings.DB_READONLY)
        if battleresult is None or not battleresult.anim:
            # No result exists.
            self.osa_util.logger.error('result is None')
            self.__sendErrorResponse(404)
            return
        # Parameters for the animation.
        animationdata = battleresult.anim
        params = animationdata.to_animation_data(self)
        if BackendApi.get_current_battleevent_master(model_mgr, using=settings.DB_READONLY):
            params['feverFlag'] = 0 # Fever is not shown during an event.
        # Build the absolute back-link URL on the global web host.
        urldata = urlparse(self.url_cgi)
        url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        url = url + UrlMaker.battleresultanim()
        url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        params['backUrl'] = url
        self.writeResponseBody(params)
    def proc_battleevent(self, args):
        """Event-battle animation: parameters for an event battle result.

        URL args: 1=event id, 2=rarity, 3=piece number, 4=complete flag
        (the latter three are forwarded into the back-link URL).
        """
        model_mgr = self.getModelMgr()
        v_player = self.getViewerPlayer(True)
        if v_player is None:
            # No player record for this viewer.
            self.osa_util.logger.error('Player is None. opensocial_viewer_id=%s' % self.osa_util.viewer_id)
            self.__sendErrorResponse(404)
            return
        uid = v_player.id
        try:
            eventid = int(args.get(1))
        except:
            # Invalid arguments.
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        # Result data.
        battleresult = BackendApi.get_battleevent_battleresult(model_mgr, eventid, uid, using=settings.DB_READONLY)
        if battleresult is None or not battleresult.anim:
            # No result exists.
            self.osa_util.logger.error('result is None')
            self.__sendErrorResponse(404)
            return
        # Parameters for the animation.
        animationdata = battleresult.anim
        params = animationdata.to_animation_data(self)
        params['feverFlag'] = 0 # Fever is not shown during an event.
        rarity = args.getInt(2)
        piecenumber = args.getInt(3)
        is_complete = args.getInt(4)
        # Build the absolute back-link URL on the global web host.
        urldata = urlparse(self.url_cgi)
        url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        url = url + UrlMaker.battleevent_battleresultanim(eventid, rarity, piecenumber, is_complete)
        url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        params['backUrl'] = url
        self.writeResponseBody(params)
    def proc_scout(self, args):
        """Scout animation: parameters for a scout's result animation.

        URL args: 1=scout id, 2=url-quoted scout key. The key must match the
        stored progress record's `alreadykey` (retried once against the
        primary DB before giving up with 404).
        """
        try:
            scoutid = int(args.get(1))
            scoutkey = urllib.unquote(args.get(2))
        except:
            # Invalid arguments.
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        v_player = self.getViewerPlayer()
        model_mgr = self.getModelMgr()
        uid = v_player.id
        using = settings.DB_READONLY
        # Progress information.
        playdata = BackendApi.get_scoutprogress(model_mgr, uid, [scoutid], using=using).get(scoutid, None)
        if playdata is None or playdata.alreadykey != scoutkey:
            # The read replica may be stale; re-fetch from the primary DB.
            playdata = BackendApi.get_scoutprogress(model_mgr, uid, [scoutid], using=settings.DB_DEFAULT, reflesh=True).get(scoutid, None)
            if playdata is None or playdata.alreadykey != scoutkey:
                self.osa_util.logger.error('Not Found')
                self.__sendErrorResponse(404)
                return
        eventlist = playdata.result.get('event', [])
        if eventlist:
            # Only the first event matters here.
            event = eventlist[0]
        else:
            # Nothing happened.
            event = ScoutEventNone.create()
        eventKind = event.get_type()
        backUrl = None
        # Per-event-type destination.
        if eventKind == Defines.ScoutEventType.NONE:
            # Retry the same scout.
            backUrl = UrlMaker.scoutdo(scoutid, playdata.confirmkey)
        elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
            # Go to the result animation.
            backUrl = UrlMaker.scoutresultanim(scoutid, scoutkey, 0)
        # Fall back to the plain result page.
        backUrl = backUrl or UrlMaker.scoutresult(scoutid, scoutkey)
        # Animation parameters.
        scoutmaster = BackendApi.get_scouts(model_mgr, [scoutid], using=using)[0]
        resultlist = playdata.result.get('result', [])
        params = BackendApi.make_scoutanim_params(self, scoutmaster, eventlist, resultlist)
        if params is None:
            self.osa_util.logger.error('Not Found')
            self.__sendErrorResponse(404)
            return
        urldata = urlparse(self.url_cgi)
        url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        url = self.osa_util.makeLinkUrl(self.addTimeStamp(url + backUrl))
        params['backUrl'] = url
        self.writeResponseBody(params)
    def __make_eventscoutanim_params(self, stagemaster, playdata, backUrl):
        """Build the shared animation parameter dict for event scouts.

        Returns the params dict on success. On failure it sends a 404 and
        returns None implicitly; callers detect this via self.response.isEnd.
        """
        eventlist = playdata.result.get('event', [])
        # Animation parameters.
        resultlist = playdata.result.get('result', [])
        params = BackendApi.make_scoutanim_params(self, stagemaster, eventlist, resultlist, feveretime=getattr(playdata, 'feveretime', None))
        if params is None:
            self.osa_util.logger.error('Not Found')
            self.__sendErrorResponse(404)
            return
        urldata = urlparse(self.url_cgi)
        url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
        url = self.osa_util.makeLinkUrl(self.addTimeStamp(url + backUrl))
        params['backUrl'] = url
        return params
    def proc_scoutevent(self, args):
        """Scout-event animation: parameters for the current scout event.

        URL args: 1=stage id, 2=url-quoted scout key. Requires a currently
        running scout event; 404 otherwise.
        """
        try:
            stageid = int(args.get(1))
            scoutkey = urllib.unquote(args.get(2))
        except:
            # Invalid arguments.
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        v_player = self.getViewerPlayer()
        model_mgr = self.getModelMgr()
        using = settings.DB_READONLY
        eventmaster = BackendApi.get_current_scouteventmaster(model_mgr, using=using)
        if eventmaster is None:
            # No scout event is currently running.
            self.osa_util.logger.error('Event Not Found')
            self.__sendErrorResponse(404)
            return
        mid = eventmaster.id
        # Progress information.
        playdata = BackendApi.get_event_playdata(model_mgr, mid, v_player.id, using)
        if playdata is None or playdata.alreadykey != scoutkey:
            self.osa_util.logger.error('Not Found')
            self.__sendErrorResponse(404)
            return
        eventlist = playdata.result.get('event', [])
        if eventlist:
            # Only the first event matters here.
            event = eventlist[0]
        else:
            # Nothing happened.
            event = ScoutEventNone.create()
        eventKind = event.get_type()
        backUrl = None
        # Per-event-type destination.
        if eventKind == Defines.ScoutEventType.NONE:
            # Retry the same stage.
            backUrl = UrlMaker.scouteventdo(stageid, playdata.confirmkey)
        else:
            if playdata.result.get('feverstart'):
                # Fever-start animation.
                backUrl = UrlMaker.scouteventfever(stageid, scoutkey)
            elif playdata.result.get('lovetime_start'):
                # Rendezvous-time animation.
                backUrl = UrlMaker.scouteventlovetime(stageid, scoutkey)
            elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
                # Go to the result animation.
                backUrl = UrlMaker.scouteventresultanim(stageid, scoutkey, 0)
        # Fall back to the plain result page.
        backUrl = backUrl or UrlMaker.scouteventresult(stageid, scoutkey)
        stagemaster = BackendApi.get_event_stage(model_mgr, stageid, using=using)
        params = self.__make_eventscoutanim_params(stagemaster, playdata, backUrl)
        if self.response.isEnd:
            return
        self.writeResponseBody(params)
def proc_raideventscout(self, args):
    """Scout-event effect (animation) page for the raid event.

    args: (1) stage id, (2) URL-encoded scout key.
    Responds 400 on bad arguments; 404 when no raid event is running or
    the player's play data is missing / the key does not match.
    """
    try:
        stageid = int(args.get(1))
        scoutkey = urllib.unquote(args.get(2))
    except:
        # Invalid arguments.
        self.osa_util.logger.error('Invalid arguments')
        self.__sendErrorResponse(400)
        return
    v_player = self.getViewerPlayer()
    uid = v_player.id
    model_mgr = self.getModelMgr()
    using = settings.DB_READONLY
    eventmaster = BackendApi.get_current_raideventmaster(model_mgr, using=using)
    if eventmaster is None:
        # No raid event is currently running.
        self.osa_util.logger.error('Event Not Found')
        self.__sendErrorResponse(404)
        return
    mid = eventmaster.id
    # Player progress for this event.
    playdata = BackendApi.get_raideventstage_playdata(model_mgr, mid, uid, using)
    if playdata is None or playdata.alreadykey != scoutkey:
        self.osa_util.logger.error('Not Found')
        self.__sendErrorResponse(404)
        return
    eventlist = playdata.result.get('event', [])
    if eventlist:
        # Only the first event entry is needed here.
        event = eventlist[0]
    else:
        # Nothing happened during the scout.
        event = ScoutEventNone.create()
    eventKind = event.get_type()
    backUrl = None
    # Choose the follow-up page per event kind.
    if eventKind == Defines.ScoutEventType.NONE:
        # Nothing special: straight back to another scout attempt.
        backUrl = UrlMaker.raidevent_scoutdo(stageid, playdata.confirmkey)
    elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
        # To the result animation.
        backUrl = UrlMaker.raidevent_scoutresultanim(stageid, scoutkey, 0)
    # Default: plain result page.
    backUrl = backUrl or UrlMaker.raidevent_scoutresult(stageid, scoutkey)
    stagemaster = BackendApi.get_raidevent_stagemaster(model_mgr, stageid, using=using)
    params = self.__make_eventscoutanim_params(stagemaster, playdata, backUrl)
    if self.response.isEnd:
        return
    self.writeResponseBody(params)
def proc_produceeventscout(self, args):
    """Scout-event effect (animation) page for the produce event.

    args: (1) stage id, (2) URL-encoded scout key.
    Responds 400 on bad arguments; 404 when no produce event is running
    or the player's play data is missing / the key does not match.

    Fix: removed a stray call to ``get_raideventstage_playdata`` whose
    result was immediately overwritten by
    ``get_produceeventstage_playdata`` — a dead DB read copy-pasted from
    the raid-event handler.
    """
    try:
        stageid = int(args.get(1))
        scoutkey = urllib.unquote(args.get(2))
    except:
        # Invalid arguments.
        self.osa_util.logger.error('Invalid arguments')
        self.__sendErrorResponse(400)
        return
    v_player = self.getViewerPlayer()
    uid = v_player.id
    model_mgr = self.getModelMgr()
    using = settings.DB_READONLY
    eventmaster = BackendApi.get_current_produce_event_master(model_mgr, using=using)
    if eventmaster is None:
        # No produce event is currently running.
        self.osa_util.logger.error('Event Not Found')
        self.__sendErrorResponse(404)
        return
    mid = eventmaster.id
    # Player progress for this event (produce-event play data only).
    playdata = BackendApi.get_produceeventstage_playdata(model_mgr, mid, uid, using)
    if playdata is None or playdata.alreadykey != scoutkey:
        self.osa_util.logger.error('Not Found')
        self.__sendErrorResponse(404)
        return
    eventlist = playdata.result.get('event', [])
    if eventlist:
        # Only the first event entry is needed here.
        event = eventlist[0]
    else:
        # Nothing happened during the scout.
        event = ScoutEventNone.create()
    eventKind = event.get_type()
    backUrl = None
    # Choose the follow-up page per event kind.
    if eventKind == Defines.ScoutEventType.NONE:
        # Nothing special: straight back to another scout attempt.
        backUrl = UrlMaker.produceevent_scoutdo(stageid, playdata.confirmkey)
    elif eventKind in (Defines.ScoutEventType.LEVELUP, Defines.ScoutEventType.COMPLETE, Defines.ScoutEventType.HAPPENING):
        # To the result animation.
        backUrl = UrlMaker.produceevent_scoutresultanim(stageid, scoutkey, 0)
    # Default: plain result page.
    backUrl = backUrl or UrlMaker.produceevent_scoutresult(stageid, scoutkey)
    stagemaster = BackendApi.get_produceevent_stagemaster(model_mgr, stageid, using=using)
    params = self.__make_eventscoutanim_params(stagemaster, playdata, backUrl)
    if self.response.isEnd:
        return
    self.writeResponseBody(params)
def proc_gacha(self, args):
    """Gacha (lottery) draw animation page.

    args: (1) gacha master id, (2) URL-encoded request key,
    (3) optional page number, (4) lottery point (trade-shop gachas),
    (5) "more cast" flag (fixed-SR gachas).
    Builds the effect parameters (card thumbnails, texts, flags) for the
    stored draw result and the back URL, branching heavily on the
    gacha's consume type.
    """
    CONTENT_NUM_PER_PAGE = 10  # cards per page for paginated (fukubukuro) results
    try:
        mid = int(args.get(1))
        reqkey = urllib.unquote(args.get(2))
        page = int(args.get(3) or 0)
    except:
        # Invalid arguments.
        self.osa_util.logger.error('Invalid arguments')
        self.__sendErrorResponse(400)
        return
    model_mgr = self.getModelMgr()
    using = settings.DB_READONLY
    v_player = self.getViewerPlayer()
    uid = v_player.id
    gachamaster = BackendApi.get_gachamaster(model_mgr, mid, using)
    playdata = None
    gachamasterstep = None
    if gachamaster:
        if gachamaster.stepsid > 0:
            # Step-up gacha: resolve the master that defines the steps.
            if gachamaster.stepsid != gachamaster.id:
                gachamasterstep = BackendApi.get_gachamaster(model_mgr, gachamaster.stepsid, using=using)
                if gachamasterstep is None:
                    self.osa_util.logger.error('Not Found')
                    self.__sendErrorResponse(404)
                    return
            else:
                gachamasterstep = gachamaster
        playdata = BackendApi.get_gachaplaydata(model_mgr, uid, [gachamaster.boxid], using=using).get(gachamaster.boxid)
    if playdata is None or not playdata.result:
        # No stored draw result to animate.
        self.osa_util.logger.error('Not Found')
        self.__sendErrorResponse(404)
        return
    # Per-card caption format, chosen by consume type.
    if gachamaster.consumetype == Defines.GachaConsumeType.RANKING:
        cardtextformat_getter = lambda master : Defines.EffectTextFormat.RANKINGGACHA_CARDTEXT
    else:
        cardtextformat_getter = lambda master : Defines.EffectTextFormat.GACHA_CARDTEXT if master.ckind == Defines.CardKind.NORMAL else Defines.EffectTextFormat.GACHA_ITEMTEXT
    sep = Defines.ANIMATION_SEPARATE_STRING
    urlsep = Defines.ANIMATION_URLSEPARATE_STRING
    newFlag = []
    rarityFlag = []
    cardText = []
    image = []
    pointlist = []
    expectation = []
    is_first = page == 0
    is_last = True
    # Cards obtained in this draw.
    resultlist = playdata.result['result'] if isinstance(playdata.result, dict) else playdata.result
    if gachamaster.consumetype in (Defines.GachaConsumeType.FUKUBUKURO, Defines.GachaConsumeType.FUKUBUKURO2016, Defines.GachaConsumeType.FUKUBUKURO2017):
        # Fukubukuro (lucky bag): paginate the result list.
        page_last = int((len(resultlist) + CONTENT_NUM_PER_PAGE - 1) / CONTENT_NUM_PER_PAGE) - 1
        page = min(page, page_last)
        offset = page * CONTENT_NUM_PER_PAGE
        resultlist = resultlist[offset:(offset+CONTENT_NUM_PER_PAGE)]
        is_last = page == page_last
    if gachamaster.consumetype == Defines.GachaConsumeType.FIXEDSR:
        try:
            gachamorecast = int(args.get(5))
        except:
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        if gachamorecast == 0:
            # First pass: skip the rarity-fixed cards (shown separately).
            resultlist = resultlist[gachamaster.rarity_fixed_num:]
    cardidlist = [data['id'] for data in resultlist]
    cardmasters = BackendApi.get_cardmasters(cardidlist, model_mgr, using=settings.DB_READONLY)
    groupidlist = [data['group'] for data in resultlist]
    groupmaster_dict = BackendApi.get_gachagroupmaster_dict(model_mgr, groupidlist, using=settings.DB_READONLY)
    # Thumbnail / caption builders, chosen by consume type.
    rarityFlag_getter = None
    if gachamaster.consumetype == Defines.GachaConsumeType.CHRISTMAS:
        image_getter = lambda idx,master:(CardUtil.makeThumbnailUrlIcon(master) if idx < gachamaster.continuity-1 else CardUtil.makeThumbnailUrlMiddle(master))
        cardtext_getter = lambda idx,master:master.name
    elif gachamaster.consumetype in (Defines.GachaConsumeType.FUKUBUKURO, Defines.GachaConsumeType.FUKUBUKURO2016, Defines.GachaConsumeType.FUKUBUKURO2017):
        image_getter = lambda idx,master:CardUtil.makeThumbnailUrlMiddle(master)
        cardtext_getter = lambda idx,master:master.name
    elif gachamaster.consumetype == Defines.GachaConsumeType.XMAS_OMAKE:
        image_getter = lambda idx,master:CardUtil.makeThumbnailUrlIcon(master)
        cardtext_getter = lambda idx,master:master.name
    elif gachamaster.consumetype == Defines.GachaConsumeType.SCOUTEVENT and Defines.SCOUTEVENTGACHA_USE_EXCLUSIVE_USE_EFFECT:
        image_getter = lambda idx,master:CardUtil.makeThumbnailUrlMiddle(master)
        cardtext_getter = lambda idx,master:(cardtextformat_getter(master) % master.name)
    else:
        image_getter = lambda idx,master:self.makeAppLinkUrlImg(CardUtil.makeThumbnailUrlMiddle(master))
        cardtext_getter = lambda idx,master:(cardtextformat_getter(master) % master.name)
    rarityFlag_getter = rarityFlag_getter or (lambda master:'1' if Defines.Rarity.SUPERRARE <= master.rare else '0')
    max_rare = Defines.Rarity.NORMAL
    # Collect per-card effect arrays.
    for idx,data in enumerate(resultlist):
        master = cardmasters[data['id']]
        groupmaster = groupmaster_dict.get(data['group'])
        newFlag.append(str(int(bool(data['is_new']))))
        cardText.append(cardtext_getter(idx, master))
        image.append(image_getter(idx, master))
        pointlist.append(str(data['point']))
        expectation.append(str(groupmaster.expectation) if groupmaster else str(Defines.RankingGachaExpect.LOW))
        rarityFlag.append(rarityFlag_getter(master))
        if max_rare < master.rare:
            max_rare = master.rare
    v_player = self.getViewerPlayer()
    # Seat-gacha information (decides whether a seat animation follows).
    seatmodels = BackendApi.get_gachaseatmodels_by_gachamaster(model_mgr, uid, gachamasterstep or gachamaster, do_get_result=False, using=settings.DB_READONLY)
    urldata = urlparse(self.url_cgi)
    urlhead = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
    if seatmodels.get('playdata'):
        # To the seat animation.
        url = urlhead + UrlMaker.gachaseatanim(gachamaster.id, reqkey)
    else:
        url = urlhead + UrlMaker.gacharesult(gachamaster.id, reqkey)
    backUrl = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
    params = {
        'newFlag': sep.join(newFlag),
        'cardText' : sep.join(cardText),
        'image' : urlsep.join(image),
    }
    # Consume-type specific parameter tweaks.
    if gachamaster.consumetype == Defines.GachaConsumeType.CHRISTMAS:
        params['logoPre'] = self.url_static + 'effect/sp/v2/gachaxmas/data/'
        params['pre'] = self.url_static_img
        params['cardText'] = cardText[-1]
    elif gachamaster.consumetype == Defines.GachaConsumeType.RANKING:
        params.update({
            'point' : sep.join(pointlist),
            'expectation' : sep.join(expectation),
            'pre' : self.url_static + 'img/sp/large/gacha/ranking/rank_01/', # TODO: should come from the DB.
            'logo_img' : 'event_logo.png',
            'logo_w_img' : 'event_logo_w.png',
        })
    elif gachamaster.consumetype == Defines.GachaConsumeType.SCOUTEVENT and Defines.SCOUTEVENTGACHA_USE_EXCLUSIVE_USE_EFFECT:
        eventmaster = BackendApi.get_current_present_scouteventmaster(model_mgr, using=settings.DB_READONLY)
        if Defines.SCOUTEVENTGACHA_FOR_VALENTINE:
            params.update({
                'pre' : self.url_static_img,
                'effectPre' : self.url_static + 'effect/sp/v2/gachascev/data/scev_25/',
                'cardText' : params['cardText'].replace('が入店しました', ''), # quick hack to avoid touching js/flash.
            })
        else:
            params.update({
                'imagePre' : self.url_static_img,
                'rarityFlag' : sep.join(rarityFlag),
                'logoPre' : self.makeAppLinkUrlImg('event/scevent/%s/gacha/' % eventmaster.codename),
            })
    elif gachamaster.consumetype in (Defines.GachaConsumeType.FUKUBUKURO, Defines.GachaConsumeType.FUKUBUKURO2016, Defines.GachaConsumeType.FUKUBUKURO2017):
        url = None
        if is_last:
            if isinstance(playdata.result, dict) and playdata.result.get('omake'):
                # Bonus (omake) items attached to the lucky bag.
                prizelist = BackendApi.get_prizelist(model_mgr, playdata.result['omake'], using=settings.DB_READONLY)
                presentlist = BackendApi.create_present_by_prize(model_mgr, v_player.id, prizelist, 0, using=settings.DB_READONLY, do_set_save=False)
                presentsetlist = PresentSet.presentToPresentSet(model_mgr, presentlist, using=settings.DB_READONLY)
                thumblist = []
                omakeindexes = []
                for presentset in presentsetlist:
                    if presentset.present.itype in (Defines.ItemType.GOLD, Defines.ItemType.GACHA_PT):
                        num = 1
                    else:
                        num = presentset.num
                    if presentset.itemthumbnail in thumblist:
                        idx = thumblist.index(presentset.itemthumbnail)
                    else:
                        idx = len(thumblist)
                        thumblist.append(presentset.itemthumbnail)
                    omakeindexes.extend([str(idx)] * num)
                if thumblist:
                    params.update({
                        'itemImage' : urlsep.join(thumblist),
                        'itemImageIdx' : sep.join(omakeindexes),
                    })
        else:
            # Not the last page: chain to the next page of this animation.
            url = urlhead + UrlMaker.gachaanimsub(gachamaster.id)
            url = OSAUtil.addQuery(url, Defines.URLQUERY_PAGE, page + 1)
            url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        params.update({
            'skipUrl': backUrl,
            'pre' : self.url_static_img,
            # Seasonal logo path (updated each campaign):
            # 'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201604/data/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201605/data/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201607/data/',
            # 'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201608/data/',
            'logoPre' : self.url_static + 'effect/sp/v2/gachahappybag201701/data/',
            'isFirst' : is_first,
            'isLast' : is_last,
            'n' : gachamaster.continuity,
            'rarityFlag' : sep.join(rarityFlag),
        })
        del params['cardText']
        backUrl = url or backUrl
    elif gachamaster.consumetype == Defines.GachaConsumeType.SR_SSR_PROBABILITY_UP or gachamaster.consumetype == Defines.GachaConsumeType.PTCHANGE:
        # When the trade shop is open, carry the lottery point through.
        if gachamaster.trade_shop_master_id is not None and 0 < gachamaster.trade_shop_master_id:
            try:
                lottery_point = int(args.get(4))
                url = urlhead + UrlMaker.gacharesult(gachamaster.id, reqkey, lottery_point=lottery_point)
            except:
                # Invalid arguments.
                self.osa_util.logger.error('Invalid arguments')
                self.__sendErrorResponse(400)
                return
        else:
            url = urlhead + UrlMaker.gacharesult(gachamaster.id, reqkey)
        # Rebuild the back URL.
        backUrl = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
    elif gachamaster.consumetype == Defines.GachaConsumeType.FIXEDSR:
        try:
            gachamorecast = int(args.get(5))
        except:
            self.osa_util.logger.error('Invalid arguments')
            self.__sendErrorResponse(400)
            return
        if gachamorecast == 0:
            # First pass: go to the "more cast" page next.
            url = urlhead + UrlMaker.gachamorecast(gachamaster.id, reqkey)
            backUrl = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
        else:
            if 0 < gachamaster.rarity_fixed_num:
                # Second pass: animate the guaranteed (fixed-rarity) card.
                fixed_card_id = cardidlist[0]
                card = BackendApi.get_cardmasters([fixed_card_id], model_mgr).get(fixed_card_id)
                backUrl = self.makeAppLinkUrl(UrlMaker.gacharesult(gachamaster.id, reqkey))
                params = {
                    'cardText': Defines.EffectTextFormat.GACHA_CARDTEXT % card.name,
                    'image': self.makeAppLinkUrlImg(CardUtil.makeThumbnailUrlMiddle(card)),
                    'pre': 'img/',
                }
            else:
                self.osa_util.logger.error('Not set Gachamaster.rarity_fixed_num')
                self.__sendErrorResponse(400)
                return
    elif gachamaster.consumetype == Defines.GachaConsumeType.XMAS_OMAKE:
        params = {
            'pre' : self.url_static_img,
            'logoPre' : self.url_static + 'effect/sp/v2/gachaxmas2015/',
            'image' : urlsep.join(image),
            'newFlag': sep.join(newFlag)
        }
    params['backUrl'] = backUrl
    self.writeResponseBody(params)
def proc_panelmission(self, args):
    """Panel-mission clear/progress animation.

    args: (1) panel number. Responds 400 when the panel, its play data,
    or a newly-cleared mission cannot be resolved.
    """
    try:
        panel = int(args.get(1))
    except:
        # Invalid arguments.
        self.osa_util.logger.error('Invalid arguments')
        self.__sendErrorResponse(400)
        return
    model_mgr = self.getModelMgr()
    using = settings.DB_READONLY
    # Panel master data.
    panelmaster = None
    if panel:
        panelmaster = BackendApi.get_panelmission_panelmaster(model_mgr, panel, using=using)
    if panelmaster is None:
        self.osa_util.logger.error('Illigal panel number')
        self.__sendErrorResponse(400)
        return
    v_player = self.getViewerPlayer()
    uid = v_player.id
    now = OSAUtil.get_now()
    # Player progress on this panel.
    panelplaydata = BackendApi.get_panelmission_data(model_mgr, uid, panel, using=using, get_instance=False)
    if panelplaydata is None:
        self.osa_util.logger.error('Illigal panel number')
        self.__sendErrorResponse(400)
        return
    # Effect parameters.
    params = {
        # Seasonal effect-data path (updated each campaign):
        # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201412/',
        # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201610/',
        # 'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201612/',
        'logoPre' : self.url_static + 'effect/sp/v2/panel_mission/data/201702/',
        'pre' : self.url_static_img,
        'panel' : panel,
        'bg' : panelmaster.image,
    }
    # Mission masters belonging to the panel.
    missionmaster_list = BackendApi.get_panelmission_missionmaster_by_panelid(model_mgr, panel, using=using)
    # True while every mission on the panel is cleared.
    is_allend = True
    # Missions cleared this time (all missions sharing the latest clear time).
    max_time = None
    clearlist = []
    missionmaster_dict = {}
    for missionmaster in missionmaster_list:
        number = missionmaster.number
        missionmaster_dict[number] = missionmaster
        idx = number - 1
        data = panelplaydata.get_data(number)
        rtime = data['rtime']
        if now < rtime:
            # Not yet achieved: pre-clear image and mission name.
            params['m%d' % idx] = missionmaster.image_pre
            params['mtext%d' % idx] = missionmaster.name
            is_allend = False
            continue
        elif max_time and rtime < max_time:
            # Cleared in an earlier batch; skip.
            continue
        elif max_time is None or max_time < rtime:
            # Newer clear time found: restart the "just cleared" list.
            max_time = rtime
            clearlist = []
        clearlist.append(str(idx))
    if not clearlist:
        self.osa_util.logger.error('You can not view the effect.')
        self.__sendErrorResponse(400)
        return
    params['clear'] = ','.join(clearlist)
    # Images and names of the missions achieved this time.
    for idx in clearlist:
        missionmaster = missionmaster_dict[int(idx) + 1]
        params['m%s' % idx] = missionmaster.image_pre
        params['mtext%s' % idx] = missionmaster.name
    if is_allend:
        # Panel complete: show the obtained prize's image and name.
        prizelist = BackendApi.get_prizelist(model_mgr, panelmaster.prizes, using=using)
        if not prizelist:
            self.osa_util.logger.error('prize none.')
            self.__sendErrorResponse(400)
            return
        presentlist = BackendApi.create_present_by_prize(model_mgr, uid, prizelist, 0, using=using, do_set_save=False)
        presentset = PresentSet.presentToPresentSet(model_mgr, presentlist[:1], using=using)[0]
        params['card'] = presentset.itemthumbnail_middle
        params['cname'] = presentset.itemname
        # Preview of the next panel's missions.
        next_panelmaster = BackendApi.get_panelmission_panelmaster(model_mgr, panel + 1, using=using)
        if next_panelmaster:
            next_panelmissionmaster_list = BackendApi.get_panelmission_missionmaster_by_panelid(model_mgr, next_panelmaster.id, using=using)
            for next_panelmissionmaster in next_panelmissionmaster_list:
                idx = next_panelmissionmaster.number - 1
                params['next%s' % idx] = next_panelmissionmaster.image_pre
    urldata = urlparse(self.url_cgi)
    url = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
    url = url + UrlMaker.panelmissiontop()
    url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
    params['backUrl'] = url
    self.writeResponseBody(params)
def proc_loginbonustimelimited(self, args):
    """Time-limited login-bonus animation.

    args: (1) login-bonus master id, (2) flag: chain into the standard
    login-bonus animation afterwards. The URL query may carry a
    comma-separated id list to chain several bonuses in sequence.
    """
    mid = args.getInt(1)
    loginbonus = args.getInt(2)
    str_midlist = self.request.get(Defines.URLQUERY_ID) or ''
    midlist = [int(str_mid) for str_mid in str_midlist.split(',') if str_mid.isdigit()]
    model_mgr = self.getModelMgr()
    now = OSAUtil.get_now()
    master = BackendApi.get_loginbonustimelimitedmaster(model_mgr, mid, using=settings.DB_READONLY)
    if master is None:
        self.osa_util.logger.error('masterdata is not found.')
        self.__sendErrorResponse(400)
        return
    # Player info.
    v_player = self.getViewerPlayer()
    if BackendApi.check_lead_loginbonustimelimited(model_mgr, v_player.id, now):
        # The bonus has not been received yet, so there is nothing to animate.
        self.osa_util.logger.error('not received.')
        self.__sendErrorResponse(400)
        return
    logindata = BackendApi.get_logintimelimited_data(model_mgr, v_player.id, mid, using=settings.DB_READONLY)
    if logindata is None:
        self.osa_util.logger.error('logindata is None.')
        self.__sendErrorResponse(400)
        return
    # Day -> days-master table, used to pick the bonuses shown around today.
    table = BackendApi.get_loginbonustimelimiteddaysmaster_day_table_by_timelimitedmid(model_mgr, mid, using=settings.DB_READONLY)
    params = {
        'pre' : self.url_static_img,
    }
    # Campaign configuration.
    config = BackendApi.get_current_loginbonustimelimitedconfig(model_mgr, using=settings.DB_READONLY)
    config_data = dict(config.getDataList()).get(master.id)
    # Effect-specific parameter builders; default is the common layout.
    making_functions = {
        'monthly_login' : self.__makeMonthlyLoginBonusParams,
    }
    func = making_functions.get(master.effectname, self.__makeCommonLoginBonusParams)
    tmp, cur_bonusmaster, next_bonusmaster = func(master, logindata, table, config_data)
    params.update(**tmp)
    # Today's obtained item (day number, item text).
    if cur_bonusmaster:
        params['td'] = cur_bonusmaster.day
        params['tt'] = self.getBonusItemText(cur_bonusmaster)
    else:
        # No effect needed.
        self.osa_util.logger.error('can not view the effect.')
        self.__sendErrorResponse(400)
        return
    if next_bonusmaster:
        params['nt'] = self.getBonusItemText(next_bonusmaster)
    # Destination: next chained bonus, or the default follow-up page.
    url = None
    if mid in midlist:
        next_idx = midlist.index(mid)+1
        if next_idx < len(midlist):
            # Another chained bonus follows.
            url = UrlMaker.loginbonustimelimitedanim(midlist[next_idx], loginbonus)
            url = OSAUtil.addQuery(url, Defines.URLQUERY_ID, str_midlist)
    if url is None:
        if loginbonus:
            # Standard login-bonus animation.
            url = UrlMaker.loginbonusanim()
        else:
            url = LoginBonusHandler.getEffectBackUrl(self)
    # Anniversary countdown effects need the remaining-days digits split out.
    anniversary_data = {}
    if master.effectname == 'countdown_login_2ndanniversary':
        anniversary_data = {
            'ten_digit': params['day'] / 10,
            'one_digit': params['day'] % 10,
        }
    elif master.effectname == 'countdown_login_3rdanniversary':
        anniversary_data = {
            'one_digit': params['day'] % 10,
            'predata': self.url_static + 'effect/sp/v2/countdown_login_3rdanniversary/data/'
        }
    params.update(anniversary_data)
    urldata = urlparse(self.url_cgi)
    urlhead = '%s://%s%s' % (urldata.scheme, settings_sub.WEB_GLOBAL_HOST, urldata.path)
    url = urlhead + url
    url = self.osa_util.makeLinkUrl(self.addTimeStamp(url))
    params['backUrl'] = url
    self.writeResponseBody(params)
def __makeCommonLoginBonusParams(self, master, logindata, day_table, config_data):
    """Build effect parameters for the common (non-roulette) login bonus.

    Args:
        master: time-limited login-bonus master (effect/text settings).
        logindata: player progress (``days``, ``lbtltime``, ``uid``).
        day_table: {day number: days-master id} for this bonus.
        config_data: campaign config dict; ``'etime'`` is the end time.
    Returns:
        (params, cur_bonusmaster, next_bonusmaster).

    Fix: the MONTHLY date-label lambda formatted two values into a single
    ``%s`` (``u'%s日' % (month, day)``), which raises ``TypeError: not all
    arguments converted during string formatting``; it now formats the
    day only, matching its one-placeholder format string.
    """
    # Visible item count per bonus type…
    VIEW_ITEM_NUM_MAX_TABLE = {
        Defines.LoginBonusTimeLimitedType.TOTAL : 4,
        Defines.LoginBonusTimeLimitedType.FIXATION : 6,
        Defines.LoginBonusTimeLimitedType.MONTHLY : 3,
    }
    # …with per-effect-name overrides.
    VIEW_ITEM_NUM_MAX_TABLE_BY_EFFECTNAME = {
        'hinamatsuri_login' : 4,
        'countdown_login_2ndanniversary' : 4,
        'countdown_login_3rdanniversary' : 4,
        '2nd_anniversary_login' : 4,
        '3rd_anniversary_login' : 4,
        'valentine2016' : 6,
        'end_of_year_countdown' : 3,
        'newyear_login' : 7,
        'newbie_login' : 7,
    }
    item_num_max = VIEW_ITEM_NUM_MAX_TABLE_BY_EFFECTNAME.get(master.effectname, VIEW_ITEM_NUM_MAX_TABLE[master.lbtype])
    model_mgr = self.getModelMgr()
    cur_day = logindata.days
    params = {}
    cur_bonusmaster = None
    next_bonusmaster = None
    mid = master.id
    days = day_table.keys()
    days.sort()
    # Window start: one entry before today, clamped so item_num_max entries fit.
    tmp_days = list(set(days + [cur_day]))
    tmp_days.sort()
    start = max(0, min(tmp_days.index(cur_day) - 1, len(days) - item_num_max))
    bonusmidlist = []
    has_next = False
    for day in days[start:]:
        if not day_table.has_key(day):
            continue
        elif len(bonusmidlist) == item_num_max:
            # More bonuses exist past the visible window.
            has_next = True
            break
        bonusmidlist.append(day_table[day])
    bonusmaster_list = BackendApi.get_loginbonustimelimiteddaysmaster_by_idlist(model_mgr, bonusmidlist, using=settings.DB_READONLY)
    params.update(has_next=has_next)
    if master.lbtype == Defines.LoginBonusTimeLimitedType.FIXATION:
        # Fixation type: which dates of this month were already received.
        min_time = DateTimeUtil.strToDateTime(logindata.lbtltime.strftime("%Y%m01"), "%Y%m%d") - datetime.timedelta(seconds=1)
        min_time = DateTimeUtil.toLoginTime(min_time)
        receive_flags = BackendApi.get_loginbonustimelimited_fixation_received_dates(logindata.uid, mid, min_time).keys()
        params['logoPre'] = self.url_static + 'effect/sp/v2/%s/data/' % master.effectname
    else:
        params['logoPre'] = self.url_static + 'effect/sp/v2/%s/data/' % master.effectname
        receive_flags = None
    # Date-label builder per bonus type (FIX: MONTHLY takes the day only).
    make_date_string = {
        Defines.LoginBonusTimeLimitedType.FIXATION : lambda x:u'%s月%s日' % (logindata.lbtltime.month, x),
        Defines.LoginBonusTimeLimitedType.MONTHLY : lambda x:u'%s日' % x,
    }.get(master.lbtype, lambda x:'%d日目' % x)
    # Item list (day number, thumbnail, date label, received flag).
    bonusmaster_list.sort(key=lambda x:x.day)
    for idx, bonusmaster in enumerate(bonusmaster_list):
        params['i%d' % idx] = bonusmaster.thumb
        params['d%d' % idx] = bonusmaster.day
        params['date%d' % idx] = make_date_string(bonusmaster.day)
        if cur_day == bonusmaster.day:
            cur_bonusmaster = bonusmaster
            params['idx'] = idx
        elif cur_bonusmaster and not next_bonusmaster:
            # First entry after today's is the "next" bonus.
            next_bonusmaster = bonusmaster
        if receive_flags is not None:
            params['f%d' % idx] = 1 if bonusmaster.day in receive_flags else 0
    # Days until the campaign ends.
    td = config_data['etime'] - logindata.lbtltime
    params['day'] = td.days
    if next_bonusmaster and 0 < td.days:
        params['idxnext'] = params['idx'] + 1
    if master.lbtype == Defines.LoginBonusTimeLimitedType.TOTAL:
        # TOTAL type: everything before today counts as already received.
        for i in xrange(params['idx']):
            params['f%d' % i] = 1
    def getEffectDBValue(attname, default):
        # Per-day override from master data, falling back to the campaign default.
        v = getattr(cur_bonusmaster, attname, '') if cur_bonusmaster else ''
        return v or default
    # Effect copy/text.
    params['logo'] = master.logo
    params['preEffect'] = self.url_static_img + master.img_effect
    params['bg'] = getEffectDBValue(u'bg', u'bg.png')
    params['tlogo'] = getEffectDBValue(u'text_logo', master.text_logo)
    params['t0'] = getEffectDBValue(u'text_start', master.text_start)
    params['t1'] = getEffectDBValue(u'text_itemlist', master.text_itemlist)
    params['t2'] = getEffectDBValue(u'text_itemget', master.text_itemget)
    params['t3'] = getEffectDBValue(u'text_itemnext', master.text_itemnext)
    params['t4'] = getEffectDBValue(u'text_end', master.text_end)
    if cur_bonusmaster:
        # Placement of today's item on screen.
        params['ix'] = cur_bonusmaster.item_x
        params['iy'] = cur_bonusmaster.item_y
        params['gx'] = cur_bonusmaster.item_x
        params['gy'] = cur_bonusmaster.item_y
    return params, cur_bonusmaster, next_bonusmaster
def __makeMonthlyLoginBonusParams(self, master, logindata, day_table, config_data):
    """Build effect parameters for the month-end (roulette) login bonus.

    Distinct prizes are deduplicated onto a roulette of up to
    ITEM_NUM_MAX slots; the roulette spins LOOP_CNT full turns before
    stopping on today's prize. Returns
    (params, cur_bonusmaster, next_bonusmaster).
    """
    LOOP_CNT = 3      # full roulette revolutions before stopping
    ITEM_NUM_MAX = 3  # distinct prize slots on the roulette
    model_mgr = self.getModelMgr()
    mid = master.id
    cur_day = logindata.days
    params = {}
    params['logoPre'] = self.url_static + 'effect/sp/v2/monthly_login/data/default/' # TODO: should come from master data.
    # The next calendar day.
    tomorrow = logindata.lbtltime + datetime.timedelta(days=1)
    # Month-end handling is special.
    bonusmaster_list = BackendApi.get_loginbonustimelimiteddaysmaster_by_idlist(model_mgr, day_table.values(), using=settings.DB_READONLY)
    bonusmaster_list.sort(key=lambda x:x.id)
    cur_bonusmaster = BackendApi.get_loginbonustimelimiteddaysmaster(model_mgr, mid, cur_day, using=settings.DB_READONLY)
    next_bonusmaster = None
    if config_data['stime'] <= tomorrow < config_data['etime']:
        # Tomorrow is still within the campaign period.
        next_bonusmaster = BackendApi.get_loginbonustimelimiteddaysmaster(model_mgr, mid, tomorrow.day, using=settings.DB_READONLY)
    cur_prizeid = cur_bonusmaster.prizes[0] if cur_bonusmaster and cur_bonusmaster.prizes else 0
    next_prizeid = next_bonusmaster.prizes[0] if next_bonusmaster and next_bonusmaster.prizes else 0
    # Fill roulette slots with distinct prizes (first prize id of each day).
    prizeidlist = []
    for bonusmaster in bonusmaster_list:
        if not bonusmaster.prizes:
            continue
        prizeid = bonusmaster.prizes[0]
        if prizeid in prizeidlist:
            continue
        idx = len(prizeidlist)
        params['i%d' % idx] = bonusmaster.thumb
        prizeidlist.append(prizeid)
        if ITEM_NUM_MAX <= len(prizeidlist):
            break
    # Slot indices for today's and tomorrow's prizes, and the stop count.
    idx = prizeidlist.index(cur_prizeid)
    params['idx'] = idx
    if next_prizeid:
        params['idxnext'] = prizeidlist.index(next_prizeid)
    params['rouletteCnt'] = LOOP_CNT * ITEM_NUM_MAX + idx
    return params, cur_bonusmaster, next_bonusmaster
def getBonusItemText(self, master):
    """Return the joined display text for a login-bonus day's prize items.

    Results are cached per (master.mid, master.day) in
    LoginBonusTimeLimitedAnimationSet so the prize lookup only runs once.
    """
    if LoginBonusTimeLimitedAnimationSet.exists(master.mid, master.day):
        # Cache hit: reuse the previously resolved item texts.
        texts = LoginBonusTimeLimitedAnimationSet.get(master.mid, master.day)
        return Defines.STR_AND.join(texts)
    # Cache miss: resolve the prize names and store them for next time.
    mgr = self.getModelMgr()
    prizes = BackendApi.get_prizelist(mgr, master.prizes, using=settings.DB_READONLY)
    info = BackendApi.make_prizeinfo(self, prizes, using=settings.DB_READONLY)
    texts = []
    for entry in info['listitem_list']:
        texts.append(entry['text'])
    LoginBonusTimeLimitedAnimationSet.save(master.mid, master.day, texts)
    return Defines.STR_AND.join(texts)
#==============================================================
# イベントシナリオ.
def proc_eventscenario(self, args):
    """Event-scenario playback page.

    args: (1) scenario number, (2) optional edt token, (3..) parts of
    the back URL. Responds 404 when the scenario does not exist.
    """
    number = args.getInt(1)
    edt = args.get(2) or ''
    backUrl = '/'.join(args.args[3:])
    model_mgr = self.getModelMgr()
    data = BackendApi.get_eventscenario_by_number(model_mgr, number, using=settings.DB_READONLY)
    if not data:
        self.osa_util.logger.error('the scenario is not found...%s' % number)
        self.__sendErrorResponse(404)
        return
    # Rebuild the return link on the global web host.
    parsed = urlparse(self.url_cgi)
    head = '%s://%s%s' % (parsed.scheme, settings_sub.WEB_GLOBAL_HOST, parsed.path)
    back = self.osa_util.makeLinkUrl(self.addTimeStamp('%s/%s' % (head, backUrl)))
    # Scenario image prefix: explicit thumb or the conventional path.
    thumb = data.get('thumb') or 'event/scenario/%d/' % number
    params = {
        'backUrl' : back,
        'pre' : self.url_static_img + thumb,
        'edt' : edt,
    }
    params.update(data)
    self.writeResponseBody(params)
#==============================================================
# 双六.
def proc_sugoroku(self, args):
    """Sugoroku (board-game) login-bonus animation.

    args: (1) login-bonus master id, (2) optional page (one page per
    board map traversed). Responds 404 when the master id or the
    player's play data is missing.
    """
    mid = args.getInt(1)
    if mid is None:
        self.__sendErrorResponse(404)
        return
    page = args.getInt(2) or 0
    model_mgr = self.getModelMgr()
    # Player info.
    v_player = self.getViewerPlayer()
    viewer_id = v_player.id
    # Stored result of the dice roll(s).
    logindata = BackendApi.get_loginbonus_sugoroku_playerdata(model_mgr, viewer_id, mid, using=settings.DB_DEFAULT)
    if logindata is None:
        self.__sendErrorResponse(404)
        return
    # Squares the player stopped on.
    squares_id_list = logindata.result.get('square_id_list')
    squares_master_list = BackendApi.get_loginbonus_sugoroku_map_squares_master_list_by_id(model_mgr, squares_id_list, using=settings.DB_READONLY)
    squares_master_dict = dict([(squares_master.id, squares_master) for squares_master in squares_master_list])
    # Keep only the squares belonging to the requested page (= map),
    # plus the first square of the following map.
    page_cnt = 0
    arr = []
    mapid = None
    for squares_id in squares_id_list:
        squares_master = squares_master_dict[squares_id]
        if mapid is None:
            mapid = squares_master.mid
        elif mapid != squares_master.mid:
            page_cnt += 1
            if page < page_cnt:
                # Include the next map's square as well.
                arr.append(squares_master)
                break
            mapid = squares_master.mid
        if page_cnt == page:
            arr.append(squares_master)
    squares_master_list = arr
    # Map master for the current page.
    mapmaster = BackendApi.get_loginbonus_sugoroku_map_master(model_mgr, mapid, using=settings.DB_READONLY)
    # Effect parameters.
    params = dict(
        backUrl = self.request.get('backUrl'),
        logoPre = self.url_static_img + 'sugo6/{}/'.format(mapmaster.effectname),
        pre = self.url_static_img,
        lt = 0,
    )
    # Prizes: deduplicated list; get_prize_number interns a prize-id list
    # and returns its stable index.
    prizeidlist_list = []
    message_items = []
    def get_prize_number(prizeidlist):
        if prizeidlist in prizeidlist_list:
            return prizeidlist_list.index(prizeidlist)
        else:
            prizeidlist_list.append(prizeidlist)
            return len(prizeidlist_list) - 1
    # Current position.
    if 0 < page:
        params['continue'] = '1'
        params['cp'] = 0
    else:
        squares_master = squares_master_list.pop(0)
        params['cp'] = squares_master.number
        if len(squares_id_list) == 1:
            # The player did not move.
            if squares_master.last:
                # Final square: show the map-completion prize.
                params['completeitem'] = get_prize_number(mapmaster.prize)
                message_items.append(params['completeitem'])
            else:
                # Lost a turn.
                params['lt'] = logindata.lose_turns + 1
    # Full map layout (event type/value and prize per square).
    map_squares_master_list = BackendApi.get_loginbonus_sugoroku_map_squares_master_by_mapid(model_mgr, mapid, using=settings.DB_READONLY)
    for squares_master in map_squares_master_list:
        number = squares_master.number
        params['et{}'.format(number)] = squares_master.event_type
        params['ev{}'.format(number)] = squares_master.event_value
        if squares_master.prize:
            params['ei{}'.format(number)] = get_prize_number(squares_master.prize)
    # Squares stopped on (positions may extend into the previous/next map).
    params['pn'] = len(squares_master_list)
    pre_event_type = Defines.SugorokuMapEventType.NONE
    for i,squares_master in enumerate(squares_master_list):
        if squares_master.mid == mapid:
            params['p{}'.format(i)] = squares_master.number
            if squares_master.prize:
                message_items.append(get_prize_number(squares_master.prize))
        elif pre_event_type == Defines.SugorokuMapEventType.BACK:
            # Moved back onto the previous map.
            pre_map_squares_master_list = BackendApi.get_loginbonus_sugoroku_map_squares_master_by_mapid(model_mgr, squares_master.mid, using=settings.DB_READONLY)
            params['p{}'.format(i)] = squares_master.number - len(pre_map_squares_master_list)
        else:
            # Moved forward onto the next map.
            params['p{}'.format(i)] = len(map_squares_master_list) + squares_master.number
        pre_event_type = squares_master.event_type
    # Prize items: image for every slot, text only for landed-on slots.
    params['in'] = len(prizeidlist_list)
    for i,prizeidlist in enumerate(prizeidlist_list):
        if i in message_items:
            prizelist = BackendApi.get_prizelist(model_mgr, prizeidlist, using=settings.DB_READONLY)
            prizeinfo = BackendApi.make_prizeinfo(self, prizelist, using=settings.DB_READONLY)
            # Item name text.
            params['in{}'.format(i)] = Defines.STR_AND.join([listitem['text'] for listitem in prizeinfo['listitem_list']])
        else:
            prizelist = BackendApi.get_prizelist(model_mgr, [prizeidlist[0]], using=settings.DB_READONLY)
            prizeinfo = BackendApi.make_prizeinfo(self, prizelist, using=settings.DB_READONLY)
        # Item image (NOTE(review): placed at loop level since every
        # prize index is referenced by the map's 'ei{}' keys — confirm
        # against the original, whose indentation was lost).
        params['i{}'.format(i)] = prizeinfo['listitem_list'][0]['thumbUrl'].replace(params['pre'], '')
    self.writeResponseBody(params)
def main(request):
    """Request entry point: delegate handling to the Handler class."""
    return Handler.run(request)
|
[
"shangye@mail.com"
] |
shangye@mail.com
|
d371052b610c7808f4397cc46872d84018712958
|
78224a508b75e7958cec6a2759b8ba4c46cb4bfc
|
/exchange/okex/HttpMD5Util.py
|
a15b45816241c7353aa08cd55590e4fe1a805b91
|
[] |
no_license
|
80000v/CryptoArb
|
34e731b11c3b29a3643c1aa79b921e0ef879b4d9
|
5b9d3e05af99a70a09481f1370bc863f7ca84d66
|
refs/heads/master
| 2021-04-20T10:24:07.959747
| 2019-04-04T14:17:46
| 2019-04-04T14:17:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#用于进行http请求,以及MD5加密,生成签名的工具类
import requests
import hashlib
#初始化apikey,secretkey,url
apikey = '1cd704d7-d549-436b-a5ee-df7e401843d3'
secretkey = '1AE1EE7238F5485D35E128194B821181'
okcoinRESTURL = 'https://www.okcoin.cn'
BaseUrl = "/v2/auth/login"
DEFAULT_POST_HEADERS = {
# "Authorization":"eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiI5NjQ5MGI4Ni0zOWExLTQyMWEtYmEzYi03YTAxNTkwYTg1N2MiLCJhdWRpZW5jZSI6IndlYiIsImNyZWF0ZWQiOjE1MDE1NTkzMzE0MzEsImV4cCI6MTUwMjE2NDEzMX0.crVupk8Tc4ki_TIT-tLmTpBxEjdOt4Ww3b3GoP0TJebCUT_TIxvBjzeTFRnnchbGwUHvrSoqp0cVofVaENkA6Q"
"Authorization":None,
'Content-Type': 'application/json',
"User-Agent": "Chrome/39.0.2171.71",
"Accept": "application/json",
"authRequest":"authRequest"
}
def buildMySign(params, secretKey):
    """Build the exchange's request signature.

    Joins the parameters as ``key=value`` pairs in sorted key order, appends
    ``secret_key=<secretKey>``, and returns the upper-cased MD5 hex digest
    of the whole string (the exchange's required signing scheme).
    """
    parts = ['{}={}'.format(k, str(params[k])) for k in sorted(params.keys())]
    parts.append('secret_key=' + secretKey)
    data = '&'.join(parts)
    return hashlib.md5(data.encode("utf8")).hexdigest().upper()
def httpGet(url, resource, params=''):
    """Issue a GET request against ``url + resource``.

    Args:
        url: base URL of the REST endpoint.
        resource: API path appended to ``url``.
        params: query parameters, passed straight to ``requests.get``.

    Returns:
        Parsed JSON dict on HTTP 200, ``{"result": "fail"}`` on any other
        status code, and ``None`` when the request itself raises.
    """
    # BUGFIX: `resource` was accepted but silently ignored, so every caller
    # hit the bare base URL. Append it, consistent with httpPost (and with
    # the original http.client implementation this replaced).
    if resource:
        url = url + resource
    try:
        response = requests.get(url, params, timeout=5)
        if response.status_code == 200:
            return response.json()
        else:
            return {"result":"fail"}
    except Exception as e:
        print("httpGet failed, detail is:%s" % e)
        return
def httpPost(url, resource, params):
    """POST form-encoded ``params`` to ``url + resource``.

    Returns the parsed JSON body on HTTP 200; returns ``None`` on any other
    status code or when the request/parsing raises.
    """
    post_headers = {
        "Content-type" : "application/x-www-form-urlencoded",
    }
    try:
        target = url + resource if resource else url
        reply = requests.post(target, params, headers=post_headers, timeout=5)
        if reply.status_code == 200:
            return reply.json()
        return
    except Exception as exc:
        print("httpPost failed, detail is:%s" % exc)
        return
|
[
"huang.xinyu@wanlitechnologies.com"
] |
huang.xinyu@wanlitechnologies.com
|
691231d66568dfb3947334005eca7c99975d2ce9
|
32d4e716d6291b95716541e55e166e9b8fc87ef4
|
/parser.py
|
7650fbaadac29f60240aa6bd7799aca1dd83e175
|
[] |
no_license
|
ShamilyanOksana/Parser
|
d0555e8e27679fb3c9876e1b2eab5503e032013a
|
05dcf604ff14d8ff60f4c8cdb619bd9c540dfa3c
|
refs/heads/master
| 2021-09-01T12:08:17.433032
| 2017-12-26T22:25:25
| 2017-12-26T22:25:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,514
|
py
|
import requests
from bs4 import BeautifulSoup
class Phone:
    """Plain record for one ad; attributes (title, url, price, currency)
    are attached dynamically by get_page_data()."""
    pass
def get_html(url):
    """Download *url* and return the response body as text.

    BUGFIX: the previous version overwrote the ``url`` parameter with a
    hard-coded page-1 URL, so the per-page URLs built in main() were
    ignored and every request fetched the same page.
    """
    r = requests.get(url)
    return r.text
def get_total_pages(html):
    """Return the total number of result pages, read from the pagination bar."""
    soup = BeautifulSoup(html, 'lxml')
    pagination = soup.find('div', class_='pagination-pages')
    last_href = pagination.find_all('a', class_='pagination-page')[-1].get('href')
    # The page count is the first query parameter, e.g. ".../...?p=42&q=...".
    page_number = last_href.split('=')[1].split('&')[0]
    return int(page_number)
def print_information(all_info):
    """Print each ad (title, url, price, currency), cheapest first.

    Note: sorts the caller's list in place, matching the original behaviour.
    """
    all_info.sort(key=lambda ad: ad.price)
    for entry in all_info:
        print(entry.title)
        print(entry.url)
        print(entry.price)
        print(entry.currency)
def get_page_data(html):
    """Parse one results page and return a list of Phone records.

    BUGFIX: the previous version appended to a global ``all_info`` that is
    never defined before the first call (``main`` only assigns a *local*
    ``all_info`` from this function's return value), so the first call
    raised NameError. A local accumulator is used instead; the return-value
    contract is unchanged.
    """
    soup = BeautifulSoup(html, 'lxml')
    ads = soup.find('div', class_='catalog-list').find_all('div', class_='description')
    page_info = []
    for ad in ads:
        phone = Phone()
        phone.title = get_title(ad)
        phone.url = get_link(ad)
        pre_price = get_price(ad)
        phone.price = pre_price[0]
        phone.currency = pre_price[1]
        page_info.append(phone)
    return page_info
def get_title(current_ads):
    """Return the ad's title, or None when the title link is missing.

    Narrowed the former ``except Exception: pass`` to AttributeError — the
    only failure here is ``find`` returning None (ad without a title link).
    """
    try:
        return current_ads.find('a', class_='item-description-title-link').get('title')
    except AttributeError:
        return None
def get_link(current_ads):
    """Return the ad's absolute URL, or None when the link is missing.

    Narrowed the former ``except Exception: pass`` to AttributeError — the
    only failure here is ``find`` returning None.
    """
    try:
        return "https://www.avito.ru" + current_ads.find('a', class_='item-description-title-link').get('href')
    except AttributeError:
        return None
def get_price(current_ads):
    """Return ``[price, currency]`` parsed from the ad's "about" block.

    Prices like "12 300 руб." are split on spaces: two leading digit groups
    mean thousands + remainder; one digit group is taken as-is. Returns None
    when the block is missing or not in the expected shape (previously a
    silent ``except Exception: pass``; now narrowed to the failures this
    parsing can actually produce).
    """
    try:
        price = current_ads.find('div', class_='about').text.split(' ')[2:]
        if price[0].isdigit() and price[1].isdigit():
            currency = price[2]
            price = int(price[0])*1000 + int(price[1])
        else:
            currency = price[1]
            price = int(price[0])
        return [price, currency]
    except (AttributeError, IndexError, ValueError):
        # AttributeError: no "about" div; IndexError: too few tokens;
        # ValueError: non-numeric token where a digit group was expected.
        return None
def main():
    """Scrape Avito Samsung-phone listings and print them sorted by price.

    Fix: removed the dead ``all_info = []`` that followed the loop — it was
    assigned and never read (each iteration already rebinds ``all_info``
    from get_page_data's return value).
    """
    url = "https://www.avito.ru/taganrog/telefony/samsung?q=sumsung&p=1"
    base_url = "https://www.avito.ru/taganrog/telefony/samsung?"
    page_part = "p="
    query_part = "&q=sumsung"
    html = get_html(url)
    # Computed for the full crawl below; unused while the loop is capped at 1 page.
    total_pages = get_total_pages(html)
    # for i in range(1, total_pages+1):
    for i in range(1, 2):
        url_gen = base_url + page_part + str(i) + query_part
        html = get_html(url_gen)
        all_info = get_page_data(html)
        print_information(all_info)
if __name__ == "__main__":
    # Run the scraper only when executed as a script, not on import.
    main()
|
[
"shamilyanoksana@gmail.com"
] |
shamilyanoksana@gmail.com
|
d733e8db920ee09bf0f15babc827291aeda2b2a9
|
af6e9d54859eaa36742bd670da15ea5542793ca8
|
/5task/send.py
|
8833b1047975b74b572e5f9ffd283c0979f66fbe
|
[] |
no_license
|
gavritenkov/vezdecode
|
e5c068addfa56d0c5a277b861766330ad0c725e0
|
79c9dda1044dd69cbebb0cdf1e08030188251b4b
|
refs/heads/master
| 2023-04-11T09:31:14.123289
| 2021-04-24T18:07:59
| 2021-04-24T18:07:59
| 361,219,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
import string
import smtplib
import random
import urllib.request
from cryptography.fernet import Fernet
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
#Генератор ключей
def key_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random key of *size* characters drawn from *chars*.

    SECURITY FIX: this key protects an encrypted message (it seeds the
    PBKDF2-derived Fernet key below), so it must come from a CSPRNG.
    ``random.choice`` is predictable; use ``secrets.choice`` instead.
    """
    import secrets  # local import keeps this fix self-contained
    return ''.join(secrets.choice(chars) for _ in range(size))
# Read the message text and the recipient's address (prompts are in Russian:
# "Enter the message" / "Enter the recipient's e-mail").
msg=str(input('Введите сообщение : '))
mail = str(input('Введите почту получателя : '))
# Generate a short random password and derive a 32-byte Fernet key from it
# via PBKDF2-HMAC-SHA256.
password_provided = key_generator()
password = password_provided.encode()
# NOTE(review): the salt is a hard-coded constant, so the derived key depends
# only on the 6-character password — consider a per-message random salt.
salt = b'salt_'
kdf = PBKDF2HMAC(
    algorithm=hashes.SHA256(),
    length=32,
    salt=salt,
    iterations=100000,
    backend=default_backend()
)
key = base64.urlsafe_b64encode(kdf.derive(password))
# NOTE(review): bare attribute access — no call is made, this line is a no-op.
urllib.request.urlopen
# Encrypt the message and turn the ciphertext into its str() representation
# (i.e. "b'...'"), which is what gets printed and e-mailed.
msg=msg.encode()
f = Fernet(key)
msg=f.encrypt(msg)
msg=str(msg)
# "Your encrypted text:"
print("\nВаш зашифрованный текст: "+msg)
# SMTP delivery via Gmail with STARTTLS.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# Account created for VezdeKod; messages are sent from it.
# SECURITY NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file and rotate the password.
server.login("ExampleVezdehod@gmail.com", "VezdehodTula71")
# Send the ciphertext to the recipient.
server.sendmail("ExampleVezdehod@gmail.com", mail, msg)
# "Message sent! The recipient needs this key to decrypt:"
print("\nСообщение было отправлено!\nПолучателю необходим ключ для расшифровки: " +password_provided)
input("")
|
[
"kgavritenkov@gmail.com"
] |
kgavritenkov@gmail.com
|
6af4d1ec5bd8fce9532cd1238fb58d598e8ad97f
|
ad7dd3db001cbf322d0944c120b42e78b9fe00b9
|
/champakraja/ramu.py
|
febc73ee66c76c06e50b7ac645f3a8c690a56002
|
[
"MIT"
] |
permissive
|
jeldikk/champakraja
|
ebfd4ff04a0a1e48b2d6f31c4695e4ddae532e64
|
1462be4c8458b5bc2816b9aa69c1845482e702e1
|
refs/heads/master
| 2022-12-03T03:04:33.217318
| 2020-08-22T16:53:09
| 2020-08-22T16:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
from .base import character
class ramu(character):
    """Character profile for Ramu, built on the shared ``character`` base.

    Every trait method returns a fixed tuple of strings.
    """

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        """The character's display name (read-only)."""
        return self._name

    def books(self):
        """Titles Ramu reads."""
        return ('chandamama', 'swathi', 'ramayanam', 'Mahabharatham',)

    def hobbies(self):
        """Pastimes."""
        return ('respecting', 'worship god',)

    def activities(self):
        """Daily activities."""
        return ('job', 'early namaskara', 'orthodox rituals')

    def hairstyle(self):
        """Appearance: hair."""
        return ('long hair with a pony tail',)

    def nature(self):
        """Personality traits."""
        return ('cowardice', 'responsible',)
|
[
"jeldi.kamal2011@gmail.com"
] |
jeldi.kamal2011@gmail.com
|
1b6117c360304db090e45da73264909875f05ed9
|
5beb2410b95be9d26cfca2094a446ec2be16ce50
|
/ma/01.py
|
4d605bc065cc51c0c1723ede396f4b18f370e22a
|
[] |
no_license
|
1361217049/python
|
abbde08f88125aa21e6f24aa5183798972c02af3
|
ae92c33437e617203b28aaf6c644c26a0c17fb69
|
refs/heads/master
| 2020-04-01T06:42:34.757234
| 2018-10-14T10:00:33
| 2018-10-14T10:00:33
| 152,960,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
#定义一个类
class Student():
    """Minimal demo class exposing a single printing method."""

    def out(self):
        """Print a fixed greeting and return None."""
        print("我爱++++")
# Demo: instantiate the class and call its method.
han=Student()
han.out()
Student.__dict__  # NOTE(review): expression result is discarded — this line has no effect
print(1)
|
[
"1361217049@qq.com"
] |
1361217049@qq.com
|
1eb7d4b356ecdfbafd7359821f946512d7724998
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/generated_clients/apis/artifactregistry/v1beta2/resources.py
|
1c5440583e39b379a1c8a68cde0b2d6841f35146
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""
import enum
# NOTE: generated from apitools resource definitions — prefer regenerating
# over hand-editing the entries below.
BASE_URL = 'https://artifactregistry.googleapis.com/v1beta2/'
DOCS_URL = 'https://cloud.google.com/artifacts/docs/'
class Collections(enum.Enum):
  """Collections for all supported apis.

  Each member value is a tuple of:
    collection_name: dotted API collection name.
    path: URI template for the collection ('{+name}' expands the full
      resource name).
    flat_paths: mapping of flat-path variant names to full URI templates.
    params: list of parameter names expected in the path.
    enable_uri_parsing: whether URIs of this collection may be parsed back
      into their parameters.
  """
  PROJECTS = (
      'projects',
      'projects/{projectsId}',
      {},
      ['projectsId'],
      True
  )
  PROJECTS_LOCATIONS = (
      'projects.locations',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_OPERATIONS = (
      'projects.locations.operations',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}/operations/'
              '{operationsId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES = (
      'projects.locations.repositories',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_FILES = (
      'projects.locations.repositories.files',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/files/{filesId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES = (
      'projects.locations.repositories.packages',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/packages/{packagesId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_TAGS = (
      'projects.locations.repositories.packages.tags',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/packages/{packagesId}/tags/{tagsId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_VERSIONS = (
      'projects.locations.repositories.packages.versions',
      '{+name}',
      {
          '':
              'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/packages/{packagesId}/versions/{versionsId}',
      },
      ['name'],
      True
  )
  def __init__(self, collection_name, path, flat_paths, params,
               enable_uri_parsing):
    """Record the per-collection resource metadata on the enum member."""
    self.collection_name = collection_name
    self.path = path
    self.flat_paths = flat_paths
    self.params = params
    self.enable_uri_parsing = enable_uri_parsing
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
625ed010dc1eb9f52ce77596a5a4e7dfeafa600d
|
6226e852484e3ceaf27389a021b3215a6ee02e3d
|
/Entrega 1/balistica.py
|
884537fba547bed4c42d61deec34e11d28f84cec
|
[] |
no_license
|
DiegoAparicio/MCOC2020-P1
|
d7e7dd2cd1a66694c914d4f552bf0ba5e76f44d2
|
84e2b7c7a1d3dfdd9eddb3f8f3e6ff4a111240ff
|
refs/heads/master
| 2022-12-23T08:21:15.692241
| 2020-09-12T01:11:16
| 2020-09-12T01:11:16
| 289,975,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 09:48:20 2020
@author: 56977
"""
import scipy as sp
from scipy.integrate import odeint
# Projectile with quadratic drag in a horizontal wind, integrated with odeint
# for several wind speeds; all trajectories are drawn on one shared figure.
# parametros:
p = 1.225  # air density [kg/m3]
cd = 0.47  # drag coefficient
cm = 0.01  # metres per centimetre
inch = 2.54*cm  # metres per inch
D = 8.5*inch    # projectile diameter [m]
r = D/2
A = sp.pi*r**2      # cross-sectional area [m2]
CD = 0.5*p*cd*A     # lumped drag constant: |F| = CD*|v|^2
g = 9.81  # m/s2
m = 15    # mass [kg]
Vs = [0, 10., 20.]  # wind speeds to simulate [m/s], along +x
# funcion a integrar:
for V in Vs:
    def bala(z, t):
        """RHS dz/dt for state z = [x, y, vx, vy]; drag acts on the
        wind-relative velocity."""
        zp = sp.zeros(4)
        zp[0] = z[2]
        zp[1] = z[3]
        # BUGFIX: z[2:4] is a *view* into odeint's state vector, so the
        # original in-place subtraction of V corrupted the integrator's own
        # array on every RHS evaluation. Work on a copy instead.
        v = z[2:4].copy()
        v[0] = v[0] - V  # velocidad menos viento
        vnorm = sp.sqrt(sp.dot(v, v))
        FD = -CD*sp.dot(v, v)*(v/vnorm)
        zp[2] = FD[0]/m
        zp[3] = FD[1]/m - g
        return zp
    # vector de tiempo
    t = sp.linspace(0, 30, 1001)
    # parte en el origen con 100 km/h en ambas componentes
    vi = 100*1000/3600
    z0 = sp.array([0, 0, vi, vi])
    sol = odeint(bala, z0, t)
    import matplotlib.pylab as plt
    x = sol[:, 0]
    y = sol[:, 1]
    plt.figure(1)  # same figure for every wind speed
    plt.title("Trayectoria para distintos vientos")
    plt.grid()
    plt.axis([0, 150, 0, 50])
    plt.plot(x, y, label=f"V = {V} m/s")
    plt.ylabel("Y (m)")
    plt.xlabel("X (m)")
    plt.legend(loc="upper right")
    plt.savefig("trayectoria.png")  # se genera la imagen en formato png
# plt.show() omitted on purpose: the assignment asks for no display window.
|
[
"noreply@github.com"
] |
DiegoAparicio.noreply@github.com
|
4d9f3c3aaa1eb99f9250a21ad48e579ff04e13ed
|
211092990562ac699369246c59dff2bee9192a49
|
/hw2/T2_P3.py
|
233ca33a51ad90369b3a0ad7bccce2b706851567
|
[] |
no_license
|
haritoshpatel1997/Harvard_Course_CS181_2021
|
337b00211b6f34586d9c1fd7950bbeee56dae9eb
|
3bc223f1f022bd4e224298b6d299b42c45672100
|
refs/heads/main
| 2023-04-06T10:35:31.811861
| 2021-04-23T15:16:05
| 2021-04-23T15:16:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,703
|
py
|
# Don't change these imports. Note that the last two are the
# class implementations that you will implement in
# T2_P3_LogisticRegression.py and T2_P3_GaussianGenerativeModel.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as c
import matplotlib.patches as mpatches
from T2_P3_LogisticRegression import LogisticRegression
from T2_P3_GaussianGenerativeModel import GaussianGenerativeModel
from T2_P3_KNNModel import KNNModel
# These are the hyperparameters to the classifiers. You may need to
# adjust these as you try to find the best fit for each classifier.
# Logistic Regression hyperparameters
eta = 0.1 # Learning rate
lam = 0.1 # Lambda for regularization
# Whether or not you want the plots to be displayed
# (files are saved with plt.savefig either way).
show_charts = True
# DO NOT CHANGE ANYTHING BELOW THIS LINE!
# -----------------------------------------------------------------
# Visualize the decision boundary that a model produces
def visualize_boundary(model, X, y, title, width=2):
    """Plot *model*'s decision regions over the labelled points (X, y).

    Evaluates the model on a 0.05-spaced grid padded by *width* around the
    data, colours the regions, overlays the data points, and saves the
    figure as '<title>.png' (also shown when show_charts is True).
    """
    # Bounding box around the data, padded by `width` on every side.
    x_lo, x_hi = min(X[:, 0] - width), max(X[:, 0] + width)
    y_lo, y_hi = min(X[:, 1] - width), max(X[:, 1] + width)
    xx, yy = np.meshgrid(
        np.arange(x_lo, x_hi, 0.05),
        np.arange(y_lo, y_hi, 0.05)
    )
    # Flatten the grid into an (n_points, 2) matrix for self.predict.
    grid_points = np.vstack((xx.flatten(), yy.flatten())).T
    # Class predictions, reshaped back onto the grid.
    Y_hat = model.predict(grid_points).reshape(xx.shape)
    # Visualize them.
    cmap = c.ListedColormap(['r', 'b', 'g'])
    plt.figure()
    plt.title(title)
    plt.xlabel('Magnitude')
    plt.ylabel('Temperature')
    plt.pcolormesh(xx, yy, Y_hat, cmap=cmap, alpha=0.3)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, linewidths=1,
                edgecolors='black')
    # Legend mapping colours to star classes.
    red = mpatches.Patch(color='red', label='Dwarf')
    blue = mpatches.Patch(color='blue', label='Giant')
    green = mpatches.Patch(color='green', label='Supergiant')
    plt.legend(handles=[red, blue, green])
    # Always save; optionally display.
    plt.savefig(title + '.png')
    if show_charts:
        plt.show()
# A mapping from string name to id
star_labels = {
    'Dwarf': 0,       # also corresponds to 'red' in the graphs
    'Giant': 1,       # also corresponds to 'blue' in the graphs
    'Supergiant': 2   # also corresponds to 'green' in the graphs
}
# Read from file and extract X (magnitude, temperature) and y (class ids)
df = pd.read_csv('data/hr.csv')
X = df[['Magnitude', 'Temperature']].values
y = np.array([star_labels[x] for x in df['Type']])
# Setting up and evaluating a number of different classification models
nb1 = GaussianGenerativeModel(is_shared_covariance=False)
nb1.fit(X, y)
visualize_boundary(nb1, X, y, 'generative_result_separate_covariances')
print('Separate Covariance negative log-likelihood: {}\n'
      .format(nb1.negative_log_likelihood(X, y)))
nb2 = GaussianGenerativeModel(is_shared_covariance=True)
nb2.fit(X, y)
visualize_boundary(nb2, X, y, 'generative_result_shared_covariances')
print('Shared Covariance negative log-likelihood: {}\n'
      .format(nb2.negative_log_likelihood(X, y)))
# Logistic regression uses the eta/lam hyperparameters defined above.
lr = LogisticRegression(eta=eta, lam=lam)
lr.fit(X, y)
lr.visualize_loss('logistic_regression_loss', show_charts=show_charts)
visualize_boundary(lr, X, y, 'logistic_regression_result')
# KNN with three neighbourhood sizes.
knn1 = KNNModel(k=1)
knn1.fit(X, y)
visualize_boundary(knn1, X, y, 'knn1_result')
knn3 = KNNModel(k=3)
knn3.fit(X, y)
visualize_boundary(knn3, X, y, 'knn3_result')
knn5 = KNNModel(k=5)
knn5.fit(X, y)
visualize_boundary(knn5, X, y, 'knn5_result')
# Setting up some sample data
X_test = np.array([[6, 2]])
y_nb1 = nb1.predict(X_test)
y_nb2 = nb2.predict(X_test)
y_lr = lr.predict(X_test)
y_knn1 = knn1.predict(X_test)
y_knn3 = knn3.predict(X_test)
y_knn5 = knn5.predict(X_test)
# Predicting an unseen example
print('Test star type predictions for Separate Covariance Gaussian Model:')
print('magnitude 6 and temperature 2: {}\n'.format(y_nb1[0]))
print('Test star type predictions for Shared Covariance Gaussian Model:')
print('magnitude 6 and temperature 2: {}\n'.format(y_nb2[0]))
# NOTE(review): label says "Linear Regression" but this prints the *logistic*
# regression model's prediction.
print('Test star type predictions for Linear Regression:')
print('magnitude 6 and temperature 2: {}'.format(y_lr[0]))
print('Test star type predictions for KNN Model with k=1:')
print('magnitude 6 and temperature 2: {}'.format(y_knn1[0]))
print('Test star type predictions for KNN Model with k=3:')
print('magnitude 6 and temperature 2: {}'.format(y_knn3[0]))
print('Test star type predictions for KNN Model with k=5:')
print('magnitude 6 and temperature 2: {}'.format(y_knn5[0]))
|
[
"jonathanchu33@gmail.com"
] |
jonathanchu33@gmail.com
|
2ff9dcabc42e8fe5f217ef5bf6abf5b015fb7183
|
4f5513932010a81b0330917d2aa2f4fde39a04d6
|
/wall_app/models.py
|
948459eec11f5d2f4b83d54fc36e7e806e8f502e
|
[] |
no_license
|
pfuentea/the_wall
|
c58067f0219040900b4240ec71f50afcbb4ceff2
|
550f59945720d8b148aed12b7856cbc443dd8c60
|
refs/heads/main
| 2023-07-22T07:18:13.667222
| 2021-09-08T03:15:57
| 2021-09-08T03:15:57
| 402,581,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
from django.db import models
# Create your models here.
class UserManager(models.Manager):
    """Custom manager carrying form-validation helpers for the User model."""

    def basic_validator(self, postData):
        """Validate registration POST data.

        Returns a dict mapping field name to error message; empty when valid.
        """
        errors = {}
        # The two password fields must match.
        if postData['password'] != postData['password_confirm']:
            errors["password"] = "Las contraseñas deben coincidir"
        return errors
class User(models.Model):
    """A registered user of the wall app."""
    name = models.CharField(max_length=255)
    email= models.EmailField(unique=True)
    # NOTE(review): no hashing happens in this model — confirm the view layer
    # hashes passwords before saving.
    password=models.CharField(max_length=255)
    # NOTE(review): purpose of `allowed` is not visible here — looks like a
    # moderation/ban flag; confirm against the views.
    allowed= models.BooleanField(default =True)
    avatar = models.URLField(
        default=""
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Attach the validating manager as the default manager.
    objects = UserManager()
    def __repr__(self) -> str:
        return f'{self.id}:{self.name}'
    def __str__(self) -> str:
        return f'{self.id}:{self.name}'
class Mensaje(models.Model):
    """A wall message posted by a user."""
    texto= models.TextField()
    # Author; deleting the user cascades to their messages.
    escritor= models.ForeignKey(User, related_name="mensajes", on_delete = models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __repr__(self) -> str:
        return f'({self.id}){self.escritor.id} {self.escritor.name}:{self.texto}'
    def __str__(self) -> str:
        return f'({self.id}){self.escritor.id}:{self.texto}'
class Comentario(models.Model):
    """A comment attached to a wall message."""
    texto= models.TextField()
    # Comment author; cascades when the user is deleted.
    escritor= models.ForeignKey(User, related_name="comentarios", on_delete = models.CASCADE)
    # Parent message; cascades when the message is deleted.
    mensaje= models.ForeignKey(Mensaje, related_name="comentarios", on_delete = models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __repr__(self) -> str:
        return f'({self.id}){self.escritor.id}/{self.mensaje.id}:{self.texto}'
    def __str__(self) -> str:
        return f'({self.id}){self.escritor.id}{self.escritor.name}/{self.mensaje.id}:{self.texto}'
|
[
"patricio.fuentealba.feliu@gmail.com"
] |
patricio.fuentealba.feliu@gmail.com
|
fd8cfb47b2d8e17dae6ea7bb6a37a38a95978a58
|
ef5f8a1d7b098391b5e5fce57edc83870204fe69
|
/albert_model/clue_classifier_utils_char_no_space.py
|
b1755d70cbfbb75c08b321f41ecb2ab40f4d9ea6
|
[
"Apache-2.0"
] |
permissive
|
guome/subchar-transformers
|
9829ded6c312adabf481c11ea25a2eaa069a1aaa
|
54c3bfb5c197946fa5a8b6ed5524b81284259613
|
refs/heads/master
| 2022-07-04T16:21:12.589815
| 2020-05-13T12:49:54
| 2020-05-13T12:49:54
| 263,630,138
| 1
| 0
| null | 2020-05-13T12:57:25
| 2020-05-13T12:57:24
| null |
UTF-8
|
Python
| false
| false
| 21,002
|
py
|
# -*- coding: utf-8 -*-
# @Author: bo.shi
# @Date: 2019-12-01 22:28:41
# @Last Modified by: bo.shi
# @Last Modified time: 2019-12-02 18:36:50
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for GLUE classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import csv
import os
import six
import tensorflow as tf
def convert_to_unicode(text):
  """Coerce *text* to unicode, decoding byte strings as utf-8 (errors ignored)."""
  if six.PY3:
    if isinstance(text, str):
      return text
    if isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
  if six.PY2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    if isinstance(text, unicode):
      return text
    raise ValueError("Unsupported string type: %s" % (type(text)))
  raise ValueError("Not running on Python2 or Python 3?")
class InputExample(object):
  """A single training/test example for simple sequence classification.

  Attributes:
    guid: Unique id for the example.
    text_a: string. Untokenized text of the first sequence; always required.
    text_b: Optional string. Second sequence, for sequence-pair tasks only.
    label: Optional string. Gold label; set for train/dev examples but not
      for test examples.
  """

  def __init__(self, guid, text_a, text_b=None, label=None):
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label
class PaddingInputExample(object):
  """Placeholder example used to pad a batch to the TPU's fixed batch size.

  Eval/predict on TPU needs the example count to be a multiple of the batch
  size, since the TPU requires fixed-shape batches; the alternative — dropping
  the last partial batch — would silently lose outputs. A dedicated class is
  used instead of `None` so that padding is always handled explicitly rather
  than slipping through as a silent error.
  """
class DataProcessor(object):
  """Base class for sequence-classification data converters.

  Subclasses implement the get_*_examples methods; the _read_* classmethods
  are shared file-loading helpers.
  """

  def __init__(self, args):
    self.args = args

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, delimiter="\t", quotechar=None):
    """Read a delimiter-separated file into a list of rows."""
    with tf.gfile.Open(input_file, "r") as f:
      return list(csv.reader(f, delimiter=delimiter, quotechar=quotechar))

  @classmethod
  def _read_txt(cls, input_file):
    """Read a file whose lines are fields joined by the '_!_' marker."""
    with tf.gfile.Open(input_file, "r") as f:
      return [line.strip().split("_!_") for line in f.readlines()]

  @classmethod
  def _read_json(cls, input_file):
    """Read a file containing one JSON object per line."""
    with tf.gfile.Open(input_file, "r") as f:
      return [json.loads(line.strip()) for line in f.readlines()]
class XnliProcessor(DataProcessor):
  """Processor for the XNLI natural-language-inference data set."""

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "train.json")
    return self._create_examples(self._read_json(path), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "dev.json")
    return self._create_examples(self._read_json(path), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "test.json")
    return self._create_examples(self._read_json(path), "test")

  def _create_examples(self, lines, set_type):
    """Build InputExamples from parsed JSON rows."""
    examples = []
    for idx, row in enumerate(lines):
      guid = "%s-%s" % (set_type, idx)
      # Test rows carry no gold label; a fixed placeholder keeps shapes uniform.
      label = convert_to_unicode(row['label']) if set_type != 'test' else 'contradiction'
      examples.append(InputExample(
          guid=guid,
          text_a=convert_to_unicode(row['premise']),
          text_b=convert_to_unicode(row['hypo']),
          label=label))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
class TnewsProcessor(DataProcessor):
  """Processor for the TNEWS headline-classification data set."""

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "train.json")
    return self._create_examples(self._read_json(path), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "dev.json")
    return self._create_examples(self._read_json(path), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "test.json")
    return self._create_examples(self._read_json(path), "test")

  def get_labels(self):
    """See base class: label ids 100..116 with 105 and 111 unused."""
    return [str(100 + i) for i in range(17) if i not in (5, 11)]

  def _create_examples(self, lines, set_type):
    """Build single-sentence InputExamples from parsed JSON rows."""
    examples = []
    for idx, row in enumerate(lines):
      sent = row['sentence'].strip()
      # Optional truncation / lower-casing driven by the run configuration.
      if hasattr(self.args, "max_sent_length"):
        sent = sent[: self.args.max_sent_length]
      if self.args.do_lower_case:
        sent = sent.lower()
      label = convert_to_unicode(row['label']) if set_type != 'test' else "100"
      examples.append(InputExample(
          guid="%s-%s" % (set_type, idx),
          text_a=convert_to_unicode(sent),
          text_b=None,
          label=label))
    return examples
class iFLYTEKDataProcessor(DataProcessor):
  """Processor for the iFLYTEK app-description classification data set."""

  def __init__(self, args):
    super(iFLYTEKDataProcessor, self).__init__(args)
    self.args = args

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "train.json")
    return self._create_examples(self._read_json(path), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "dev.json")
    return self._create_examples(self._read_json(path), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "test.json")
    return self._create_examples(self._read_json(path), "test")

  def get_labels(self):
    """See base class: 119 numeric class ids."""
    return [str(i) for i in range(119)]

  def _create_examples(self, lines, set_type):
    """Build single-sentence InputExamples; echoes the first five for debugging."""
    examples = []
    for idx, row in enumerate(lines):
      sent = row['sentence'].strip()
      if hasattr(self.args, "max_sent_length"):
        sent = sent[: self.args.max_sent_length]
      if self.args.do_lower_case:
        sent = sent.lower()
      sent = convert_to_unicode(sent)
      text_b = None
      label = convert_to_unicode(row['label']) if set_type != 'test' else "0"
      examples.append(InputExample(
          guid="%s-%s" % (set_type, idx), text_a=sent, text_b=text_b, label=label))
      if idx < 5:
        print(sent)
        print(text_b)
    return examples
class ChnSentiCorpDataProcessor(DataProcessor):
  """Processor for the ChnSentiCorp binary sentiment data set."""

  def __init__(self, args):
    super(ChnSentiCorpDataProcessor, self).__init__(args)
    self.args = args

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "train.json")
    return self._create_examples(self._read_json(path), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "dev.json")
    return self._create_examples(self._read_json(path), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "test.json")
    return self._create_examples(self._read_json(path), "test")

  def get_labels(self):
    """See base class: binary labels "0"/"1"."""
    return [str(i) for i in range(2)]

  def _create_examples(self, lines, set_type):
    """Build single-sentence InputExamples; echoes the first five for debugging."""
    examples = []
    for idx, row in enumerate(lines):
      sent = row['sentence'].strip()
      if hasattr(self.args, "max_sent_length"):
        sent = sent[: self.args.max_sent_length]
      if self.args.do_lower_case:
        sent = sent.lower()
      sent = convert_to_unicode(sent)
      text_b = None
      label = convert_to_unicode(row['label']) if set_type != 'test' else "0"
      examples.append(InputExample(
          guid="%s-%s" % (set_type, idx), text_a=sent, text_b=text_b, label=label))
      if idx < 5:
        print(sent)
        print(text_b)
    return examples
class LCQMCProcessor(DataProcessor):
  """Processor for the LCQMC sentence-pair matching data set."""

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "train.json")
    return self._create_examples(self._read_json(path), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "dev.json")
    return self._create_examples(self._read_json(path), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "test.json")
    return self._create_examples(self._read_json(path), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Build sentence-pair InputExamples; echoes the first five pairs."""

    def _prep(raw):
      # Shared per-sentence normalisation: strip, truncate, lower-case, coerce.
      sent = raw.strip()
      if hasattr(self.args, "max_sent_length"):
        sent = sent[: self.args.max_sent_length]
      if self.args.do_lower_case:
        sent = sent.lower()
      return convert_to_unicode(sent)

    examples = []
    for idx, row in enumerate(lines):
      text_a = _prep(row['sentence1'])
      text_b = _prep(row['sentence2'])
      label = convert_to_unicode(row['label']) if set_type != 'test' else '0'
      examples.append(InputExample(
          guid="%s-%s" % (set_type, idx), text_a=text_a, text_b=text_b, label=label))
      if idx < 5:
        print(text_a)
        print(text_b)
    return examples
class AFQMCProcessor(DataProcessor):
  """Processor for the AFQMC sentence-pair matching data set."""

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "train.json")
    return self._create_examples(self._read_json(path), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "dev.json")
    return self._create_examples(self._read_json(path), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "test.json")
    return self._create_examples(self._read_json(path), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Build sentence-pair InputExamples from parsed JSON rows."""
    examples = []
    for idx, row in enumerate(lines):
      label = convert_to_unicode(row['label']) if set_type != 'test' else '0'
      examples.append(InputExample(
          guid="%s-%s" % (set_type, idx),
          text_a=convert_to_unicode(row['sentence1']),
          text_b=convert_to_unicode(row['sentence2']),
          label=label))
    return examples
class CMNLIProcessor(DataProcessor):
    """Processor for the CMNLI natural-language-inference data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples_json(os.path.join(data_dir, "train.json"), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples_json(os.path.join(data_dir, "dev.json"), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples_json(os.path.join(data_dir, "test.json"), "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples_json(self, file_name, set_type):
        """Read a JSON-lines file and build one InputExample per usable line."""
        examples = []
        reader = tf.gfile.Open(file_name, "r")
        count = 0
        for raw in reader:
            record = json.loads(raw)
            count = count + 1
            guid = "%s-%s" % (set_type, count)
            premise = convert_to_unicode(record["sentence1"])
            hypothesis = convert_to_unicode(record["sentence2"])
            if set_type != 'test':
                label = convert_to_unicode(record["label"])
            else:
                label = 'neutral'
            # Lines labelled "-" carry no gold annotation and are skipped.
            if label != "-":
                examples.append(InputExample(guid=guid, text_a=premise, text_b=hypothesis, label=label))
        return examples
class CslProcessor(DataProcessor):
    """Processor for the CSL keyword/abstract matching data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train.json")
        return self._create_examples(self._read_json(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "dev.json")
        return self._create_examples(self._read_json(path), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "test.json")
        return self._create_examples(self._read_json(path), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Pair the space-joined keywords with the abstract text."""
        examples = []
        for idx, record in enumerate(lines):
            guid = "%s-%s" % (set_type, idx)
            keywords = convert_to_unicode(" ".join(record['keyword']))
            abstract = convert_to_unicode(record['abst'])
            label = convert_to_unicode(record['label']) if set_type != 'test' else '0'
            examples.append(
                InputExample(guid=guid, text_a=keywords, text_b=abstract, label=label))
        return examples
class WSCProcessor(DataProcessor):
    """Processor for the internal data set. sentence pair classification"""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "train.json")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "test.json")), "test")

    def get_labels(self):
        """See base class."""
        return ["true", "false"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Marks the coreference query span with "_..._" and the pronoun span
        with "[...]" directly inside the sentence, producing a single
        annotated sequence (text_b stays None).
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = convert_to_unicode(line['text'])
            text_a_list = list(text_a)
            target = line['target']
            query = target['span1_text']        # candidate antecedent
            query_idx = target['span1_index']
            pronoun = target['span2_text']      # pronoun to resolve
            pronoun_idx = target['span2_index']
            # Sanity-check that the provided indices really point at the spans.
            assert text_a[pronoun_idx: (pronoun_idx + len(pronoun))
                          ] == pronoun, "pronoun: {}".format(pronoun)
            assert text_a[query_idx: (query_idx + len(query))] == query, "query: {}".format(query)
            # Insert the earlier span's markers first; that shifts every later
            # index by the 2 characters just inserted, hence the "+ 2" offsets.
            if pronoun_idx > query_idx:
                text_a_list.insert(query_idx, "_")
                text_a_list.insert(query_idx + len(query) + 1, "_")
                text_a_list.insert(pronoun_idx + 2, "[")
                text_a_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
            else:
                text_a_list.insert(pronoun_idx, "[")
                text_a_list.insert(pronoun_idx + len(pronoun) + 1, "]")
                text_a_list.insert(query_idx + 2, "_")
                text_a_list.insert(query_idx + len(query) + 2 + 1, "_")
            text_a = "".join(text_a_list)
            if set_type == "test":
                # Test split has no labels; use a placeholder.
                label = "true"
            else:
                label = line['label']
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
class COPAProcessor(DataProcessor):
    """Processor for the internal data set. sentence pair classification"""

    def __init__(self):
        # Data set language tag.
        self.language = "zh"

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "train.json")), "train")
    # dev_0827.tsv

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "test.json")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    @classmethod
    def _create_examples_one(self, lines, set_type):
        # Alternative single-example formulation: both choices go into one
        # example (choice0 -> text_a, choice1 -> text_b), each prefixed with
        # the premise plus a Chinese connective ("what is the cause?" /
        # "what effect did it have?").
        # NOTE(review): decorated @classmethod but first parameter is named
        # `self` -- it actually receives the class object.
        examples = []
        for (i, line) in enumerate(lines):
            guid1 = "%s-%s" % (set_type, i)
            # try:
            if line['question'] == 'cause':
                text_a = convert_to_unicode(line['premise'] + '原因是什么呢?' + line['choice0'])
                text_b = convert_to_unicode(line['premise'] + '原因是什么呢?' + line['choice1'])
            else:
                text_a = convert_to_unicode(line['premise'] + '造成了什么影响呢?' + line['choice0'])
                text_b = convert_to_unicode(line['premise'] + '造成了什么影响呢?' + line['choice1'])
            # NOTE(review): label is inverted relative to the raw field --
            # presumably "1" means "choice0 is the gold answer"; confirm
            # against the downstream scorer.
            label = convert_to_unicode(str(1 if line['label'] == 0 else 0)) if set_type != 'test' else '0'
            examples.append(
                InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label))
            # except Exception as e:
            # print('###error.i:',e, i, line)
        return examples

    @classmethod
    def _create_examples(self, lines, set_type):
        # Expands each COPA item into TWO sentence-pair examples (one per
        # choice) with even/odd guids; for 'cause' questions the choice comes
        # first so each pair always reads (cause, effect).
        examples = []
        for (i, line) in enumerate(lines):
            i = 2 * i
            guid1 = "%s-%s" % (set_type, i)
            guid2 = "%s-%s" % (set_type, i + 1)
            # try:
            premise = convert_to_unicode(line['premise'])
            choice0 = convert_to_unicode(line['choice0'])
            # NOTE(review): label appears to mean "this pair's choice is the
            # answer" (line['label'] == 0 selects choice0) -- confirm upstream.
            label = convert_to_unicode(str(1 if line['label'] == 0 else 0)) if set_type != 'test' else '0'
            #text_a2 = convert_to_unicode(line['premise'])
            choice1 = convert_to_unicode(line['choice1'])
            label2 = convert_to_unicode(
                str(0 if line['label'] == 0 else 1)) if set_type != 'test' else '0'
            if line['question'] == 'effect':
                text_a = premise
                text_b = choice0
                text_a2 = premise
                text_b2 = choice1
            elif line['question'] == 'cause':
                text_a = choice0
                text_b = premise
                text_a2 = choice1
                text_b2 = premise
            else:
                print('wrong format!!')
                return None
            examples.append(
                InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label))
            examples.append(
                InputExample(guid=guid2, text_a=text_a2, text_b=text_b2, label=label2))
            # except Exception as e:
            # print('###error.i:',e, i, line)
        return examples
|
[
"michael_wzhu91@163.com"
] |
michael_wzhu91@163.com
|
8927c9bfdeb3e5161e03c5bbfb20291758317781
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2367/60791/254956.py
|
3891197249694bfc95edf61b7fdb4f59e0c7209d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
# Length of the smallest repunit (1, 11, 111, ...) divisible by k, or -1.
k = int(input())
if k % 2 == 0 or k % 5 == 0:
    # A repunit is odd and never ends in 0/5, so no multiple can exist.
    print(-1)
else:
    # Track the repunit modulo k instead of growing a huge integer string:
    # same printed result, O(k) arithmetic on small ints instead of O(k^2).
    length = 1
    remainder = 1 % k
    while remainder != 0:
        remainder = (remainder * 10 + 1) % k
        length += 1
    print(length)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
672931fd1ee8dae6d584fb3ff8d812002ab628cc
|
e62a8c1ee3ac295f8028164d6ba4993c189fd774
|
/btpython/testbikieu.py
|
4397d561992f473bb43fea830ed8f408eaa117f4
|
[] |
no_license
|
thanhthai3457/Linux
|
8ac32919a59189ff35e9c2c3883303893bd245f7
|
55fd16be99922a1c6c9958ae3c1f0af40879b5a7
|
refs/heads/master
| 2020-03-11T18:43:05.248945
| 2018-06-13T15:45:50
| 2018-06-13T15:45:50
| 130,185,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
# Demo script: build a `thai` record from the local `bikeu` module,
# fill it interactively, then print it.
from bikeu import thai

sv1 = thai()
sv1.set_ten()   # prompt for name
sv1.set_sdt()   # prompt for phone number
print("Thông tin")  # "Information" header (Vietnamese)
sv1.In()        # print the collected fields
|
[
"thanh@example.com"
] |
thanh@example.com
|
85daa9a73cfbe7b2a17557ab40ced26375f501d9
|
911fc2c6bc552d83fb0d2481d556e0979cd20101
|
/mdb.py
|
1756c62bf6bbe864e38ce14e929d13411a37b47c
|
[] |
no_license
|
riyasleo10/AM_filter_bot
|
75ed5b67632efa8c3d18911b6fdeb5437ad190c5
|
4193148a0cc4e5e2eaf7caf94943d2a44f4cb3f6
|
refs/heads/main
| 2023-03-22T11:31:36.098569
| 2021-03-18T17:42:54
| 2021-03-18T17:42:54
| 348,805,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,001
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
import re
import pymongo
from pymongo.errors import DuplicateKeyError
from marshmallow.exceptions import ValidationError
from config import DATABASE_URI, DATABASE_NAME
# Single shared MongoDB client/database handle used by every helper below.
myclient = pymongo.MongoClient(DATABASE_URI)
mydb = myclient[DATABASE_NAME]
async def savefiles(docs, group_id):
    """Bulk-insert `docs` into the per-group collection, ignoring failures.

    `ordered=False` lets non-duplicate documents land even when some _ids
    already exist; the resulting BulkWriteError is deliberately swallowed.
    """
    collection = mydb[str(group_id)]
    try:
        collection.insert_many(docs, ordered=False)
    except Exception:
        pass
async def channelgroup(channel_id, channel_name, group_id, group_name):
    """Link a source channel to a group in the "ALL DETAILS" collection.

    Creates the group document on first use; afterwards appends the channel
    to the group's `channel_details` array.
    """
    mycol = mydb["ALL DETAILS"]
    channel_details = {
        "channel_id": channel_id,
        "channel_name": channel_name
    }
    data = {
        '_id': group_id,
        'group_name': group_name,
        'channel_details': [channel_details],
    }
    if mycol.count_documents({"_id": group_id}) == 0:
        # First channel for this group: insert a fresh document.
        try:
            mycol.insert_one(data)
        except:
            print('Some error occured!')
        else:
            print(f"files in '{channel_name}' linked to '{group_name}' ")
    else:
        # Group already known: append this channel to its list.
        try:
            mycol.update_one({'_id': group_id}, {"$push": {"channel_details": channel_details}})
        except:
            print('Some error occured!')
        else:
            print(f"files in '{channel_name}' linked to '{group_name}' ")
async def ifexists(channel_id, group_id):
    """Return True when `channel_id` is already linked to `group_id`."""
    details = mydb["ALL DETAILS"]
    if details.count_documents({"_id": group_id}) == 0:
        return False
    for doc in details.find({'_id': group_id}):
        for entry in doc['channel_details']:
            if entry['channel_id'] == channel_id:
                return True
    return False
async def deletefiles(channel_id, channel_name, group_id, group_name):
    """Unlink a channel from a group and drop its cached file documents.

    Removes the channel entry from "ALL DETAILS" (best-effort), then deletes
    every matching file from the per-group collection.  Returns True on
    success, False when the per-group delete failed.
    """
    mycol1 = mydb["ALL DETAILS"]
    try:
        mycol1.update_one(
            {"_id": group_id},
            {"$pull": {"channel_details": {"channel_id": channel_id}}}
        )
    except:
        # Best-effort: the group document may already be gone.
        pass
    mycol2 = mydb[str(group_id)]
    query2 = {'channel_id': channel_id}
    try:
        mycol2.delete_many(query2)
    except:
        print("Couldn't delete channel")
        return False
    else:
        print(f"filters from '{channel_name}' deleted in '{group_name}'")
        return True
async def deletealldetails(group_id):
    """Best-effort removal of the group's entry from "ALL DETAILS"."""
    details = mydb["ALL DETAILS"]
    try:
        details.delete_one({"_id": group_id})
    except:
        # Mirror original behaviour: ignore any delete failure.
        pass
async def deletegroupcol(group_id):
    """Drop the whole per-group filter collection.

    Returns:
        0 -- collection dropped successfully
        1 -- collection was already empty (nothing to drop)
        2 -- drop failed
    """
    mycol = mydb[str(group_id)]
    # Collection.count() was deprecated and removed in pymongo 4;
    # count_documents({}) is the supported equivalent (pymongo >= 3.7).
    if mycol.count_documents({}) == 0:
        return 1
    try:
        mycol.drop()
    except Exception as e:
        print(f"delall group col drop error - {str(e)}")
        return 2
    else:
        return 0
async def channeldetails(group_id):
    """List "name ( <code>id</code> )" strings for the group's channels.

    Returns False when the group is unknown.
    """
    details = mydb["ALL DETAILS"]
    if details.count_documents({"_id": group_id}) == 0:
        return False
    return [
        "{} ( <code>{}</code> )".format(chid['channel_name'], chid['channel_id'])
        for doc in details.find({'_id': group_id})
        for chid in doc['channel_details']
    ]
async def countfilters(group_id):
    """Return the number of stored filters for the group, or False when empty."""
    mycol = mydb[str(group_id)]
    # Collection.count() was removed in pymongo 4; use count_documents({}).
    total = mycol.count_documents({})
    if total == 0:
        return False
    return total
async def findgroupid(channel_id):
    """Return the id of every group that has `channel_id` linked to it."""
    details = mydb["ALL DETAILS"]
    return [
        doc['_id']
        for doc in details.find()
        for entry in doc['channel_details']
        if entry['channel_id'] == channel_id
    ]
async def searchquery(group_id, name):
mycol = mydb[str(group_id)]
filenames = []
filelinks = []
# looking for a better regex :(
pattern = name.lower().strip().replace(' ','.*'
|
[
"noreply@github.com"
] |
riyasleo10.noreply@github.com
|
e6dfd9cb391b1dc09795b1911c78d7980a0ff1ee
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/ImageJ/py/Wayne-blob-example.py
|
610a35e6e5ddb80455ce608015ed6b1efdfc7ff2
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743
| 2022-05-30T19:29:28
| 2022-05-30T19:29:28
| 11,463,325
| 5
| 8
| null | 2019-12-18T16:24:02
| 2013-07-17T00:16:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Jython script for ImageJ: outline the particles in the classic blobs image.
from org.python.core import codecs
codecs.setDefaultEncoding('utf-8')
import os
from ij import IJ, WindowManager

IJ.run("Close All")  # start from a clean slate
img = IJ.openImage("http://wsr.imagej.net/images/blobs.gif")
IJ.setAutoThreshold(img, "Default")
# in_situ replaces the image content with bare particle outlines.
IJ.run(img, "Analyze Particles...", " show=[Bare Outlines] include in_situ")
img.show()
|
[
"jrminter@gmail.com"
] |
jrminter@gmail.com
|
9805ffe4daef50c8bdfe737999913fe9357c8479
|
e4da82e4beb9b1af7694fd5b49824a1c53ee59ff
|
/AutoWorkup/SEMTools/registration/averagebraingenerator.py
|
b206faa7d7b842adead8675771f35338e6d91db4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ipekoguz/BRAINSTools
|
c8732a9206525adb5779eb0c2ed97f448e2df47f
|
dc32fa0820a0d0b3bd882fa744e79194c9c137bc
|
refs/heads/master
| 2021-01-18T08:37:03.883250
| 2013-05-14T21:08:33
| 2013-05-14T21:08:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class AverageBrainGeneratorInputSpec(CommandLineInputSpec):
    # Command-line input traits for the AverageBrainGenerator executable.
    # The `argstr` spellings (including the "resolusion" typo) mirror the
    # underlying tool's flags and must not be "fixed" here.
    inputDirectory = File(desc="Image To Warp", exists=True, argstr="--inputDirectory %s")
    templateVolume = File(desc="Reference image defining the output space", exists=True, argstr="--templateVolume %s")
    resolusion = traits.Str(desc="The resolusion.", argstr="--resolusion %s")
    iteration = traits.Str(desc="The iteration.", argstr="--iteration %s")
    pixelType = traits.Enum("uchar", "short", "ushort", "int", "uint", "float", desc="Specifies the pixel type for the input/output images", argstr="--pixelType %s")
    outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Resulting deformed image", argstr="--outputVolume %s")
class AverageBrainGeneratorOutputSpec(TraitedSpec):
    # Single output: the synthesized average-brain volume on disk.
    outputVolume = File(desc="Resulting deformed image", exists=True)
class AverageBrainGenerator(SEMLikeCommandLine):
    """title: Average Brain Generator

    category: Registration

    description:
    This programs creates synthesized average brain.

    version: 0.1

    documentation-url: http:://mri.radiology.uiowa.edu/mriwiki

    license: NEED TO ADD

    contributor: This tool was developed by Yongqiang Zhao.
    """
    input_spec = AverageBrainGeneratorInputSpec
    output_spec = AverageBrainGeneratorOutputSpec
    # NOTE(review): _cmd keeps its original leading/trailing spaces -- verify
    # nipype's command assembly tolerates them before normalizing.
    _cmd = " AverageBrainGenerator "
    _outputs_filenames = {'outputVolume': 'outputVolume'}
|
[
"hans-johnson@uiowa.edu"
] |
hans-johnson@uiowa.edu
|
4ff8a625e52e7a2fc0f40fd40fdb70a36086c6e2
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/sunData/SType/ST_facets/ST_facets00201m/ST_facets00201m9_p.py
|
6b09bb1b8dd9512268b76bbd79e2c658e0d3fc7d
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Autogenerated W3C XSD test instance: constructs Test with an integer value
# to exercise the simple-type facet declared in st_facets00201m9.
from output.models.sun_data.stype.st_facets.st_facets00201m.st_facets00201m9_xsd.st_facets00201m9 import Test

obj = Test(
    value=10
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
172576681e45df0d4e6966c9a2513b6ebdfbff4e
|
846cbb8cc97c667c1f2969fca12b835c3843f170
|
/magpy/lib/format_sfs.py
|
a36994e4972d2df583ae37a95c7050c0af4825eb
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
geomagpy/magpy
|
f33a4a7ae95f95d2e5e3d09b571d2fa6f2905174
|
79f3420c4526c735869715e8c358848d790e982b
|
refs/heads/master
| 2023-08-17T08:39:48.757501
| 2023-07-19T11:25:00
| 2023-07-19T11:25:00
| 47,394,862
| 40
| 20
|
BSD-3-Clause
| 2021-01-26T12:29:02
| 2015-12-04T09:38:09
|
Python
|
UTF-8
|
Python
| false
| false
| 5,996
|
py
|
"""
MagPy
Auxiliary input filter - WIC/WIK
Written by Roman Leonhardt June 2012
- contains test and read function, toDo: write function
"""
from magpy.stream import *
def isSFDMI(filename):
    """
    Checks whether a file is spanish DMI format.
    Time is in seconds relative to one day.

    Returns True only when the first line has exactly 6 whitespace-separated
    fields and the first field is numeric.
    """
    try:
        # Use a context manager so the handle is closed (it was leaked before).
        with open(filename, 'rt') as fh:
            temp = fh.readline()
    except:
        return False
    # temp[9] needs at least 10 characters; the original guard (len >= 9)
    # raised IndexError on a line of exactly 9 characters.
    if len(temp) > 9:
        if temp[9] in ['o', '+', '-']:  # Prevent errors with GFZ kp
            return False
    sp = temp.split()
    if not len(sp) == 6:
        return False
    if not isNumber(sp[0]):
        return False
    #logging.info(" Found SFS file")
    return True
def isSFGSM(filename):
    """
    Checks whether a file is spanish GSM format.
    Time is in seconds relative to one day.

    Returns True when the first line is "<number> <value>" with the value in
    the plausible total-field range (20000, 80000) nT.
    """
    try:
        # Use a context manager so the handle is closed (it was leaked before).
        with open(filename, 'rt') as fh:
            temp = fh.readline()
    except:
        return False
    sp = temp.split()
    if len(sp) != 2:
        return False
    if not isNumber(sp[0]):
        return False
    try:
        if not 20000 < float(sp[1]) < 80000:
            return False
    except:
        return False
    return True
def readSFDMI(filename, headonly=False, **kwargs):
    """
    Reading SF DMI format data.
    Looks like:
    0.03   99.11  -29.76   26.14   22.05  30.31
    5.04   98.76  -29.78   26.20   22.04  30.31
    10.01   98.85  -29.76   26.04   22.04  30.31
    15.15   98.63  -29.79   26.20   22.04  30.31
    20.12   98.85  -29.78   26.11   22.04  30.31
    first column are seconds of day

    Supported kwargs: starttime/endtime restrict reading to files whose date
    (parsed from the DDMMYYYY filename) falls inside the range.
    Returns a DataStream with x/y/z in nT and two temperature columns.
    """
    starttime = kwargs.get('starttime')
    endtime = kwargs.get('endtime')
    getfile = True

    fh = open(filename, 'rt')
    # read file and split text into channels
    stream = DataStream()
    if stream.header is None:
        headers = {}
    else:
        headers = stream.header
    data = []
    key = None

    # get day from filename (platform independent)
    splitpath = os.path.split(filename)
    daystring = splitpath[1].split('.')
    try:
        day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"), "%Y-%m-%d")
    except:
        logging.warning("Wrong dateformat in Filename %s" % daystring[0])
        fh.close()
        return DataStream([], headers)

    # Select only files within eventually defined time range
    if starttime:
        if not datetime.strptime(day, '%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime), '%Y-%m-%d'), '%Y-%m-%d'):
            getfile = False
    if endtime:
        if not datetime.strptime(day, '%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime), '%Y-%m-%d'), '%Y-%m-%d'):
            getfile = False

    if getfile:
        for line in fh:
            if line.isspace():
                # blank line
                continue
            else:
                row = LineStruct()
                elem = line.split()
                if (len(elem) == 6):
                    # Seconds-of-day converted to a fraction of the file's day.
                    row.time = date2num(datetime.strptime(day, "%Y-%m-%d")) + float(elem[0]) / 86400
                    xval = float(elem[1])
                    yval = float(elem[2])
                    zval = float(elem[3])
                    row.x = xval
                    row.y = yval
                    row.z = zval
                    row.t1 = float(elem[4])
                    row.t2 = float(elem[5])
                    stream.add(row)
        stream.header['col-x'] = 'x'
        stream.header['col-y'] = 'y'
        stream.header['col-z'] = 'z'
        stream.header['col-t1'] = 'T1'
        stream.header['col-t2'] = 'T2'
        stream.header['unit-col-x'] = 'nT'
        stream.header['unit-col-y'] = 'nT'
        stream.header['unit-col-z'] = 'nT'
        stream.header['unit-col-t1'] = 'deg C'
        stream.header['unit-col-t2'] = 'deg C'
    else:
        # Out of requested range: return an empty stream with headers only.
        headers = stream.header
        stream = []

    fh.close()
    return DataStream(stream, headers)
def readSFGSM(filename, headonly=False, **kwargs):
    """
    Reading SF GSM format data.
    Looks like:
    22 42982.35
    52 42982.43
    82 42982.47
    first column are seconds of day

    Supported kwargs: starttime/endtime restrict reading to files whose date
    (parsed from the DDMMYYYY filename) falls inside the range.
    Returns a DataStream with a single f column in nT.
    """
    starttime = kwargs.get('starttime')
    endtime = kwargs.get('endtime')
    getfile = True

    fh = open(filename, 'rt')
    # read file and split text into channels
    stream = DataStream()
    if stream.header is None:
        headers = {}
    else:
        headers = stream.header
    data = []
    key = None

    # get day from filename (platform independent)
    splitpath = os.path.split(filename)
    daystring = splitpath[1].split('.')
    try:
        day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"), "%Y-%m-%d")
    except:
        logging.warning("Wrong dateformat in Filename %s" % daystring[0])
        # FIX: close the handle before bailing out (it was leaked here).
        fh.close()
        # NOTE(review): kept the original `return []` even though readSFDMI
        # returns DataStream([], headers) in the same situation -- confirm
        # callers before unifying.
        return []

    # Select only files within eventually defined time range
    if starttime:
        if not datetime.strptime(day, '%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime), '%Y-%m-%d'), '%Y-%m-%d'):
            getfile = False
    if endtime:
        if not datetime.strptime(day, '%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime), '%Y-%m-%d'), '%Y-%m-%d'):
            getfile = False

    if getfile:
        for line in fh:
            if line.isspace():
                # blank line
                continue
            else:
                row = LineStruct()
                elem = line.split()
                if (len(elem) == 2):
                    # Seconds-of-day converted to a fraction of the file's day.
                    row.time = date2num(datetime.strptime(day, "%Y-%m-%d")) + float(elem[0]) / 86400
                    row.f = float(elem[1])
                    stream.add(row)
        stream.header['col-f'] = 'f'
        stream.header['unit-col-f'] = 'nT'
    else:
        # Out of requested range: return an empty stream with headers only.
        headers = stream.header
        stream = []

    fh.close()
    return DataStream(stream, headers)
|
[
"roman.leonhardt@zamg.ac.at"
] |
roman.leonhardt@zamg.ac.at
|
3477ca40e3be9c089491a0edef84de178170c43e
|
533c1ccd1eb1c4c735c6473381c64770d8103246
|
/lbpi/wrappers/adt_ens.py
|
29a1a2ef5f96d665989172968bf34b8e0216c90f
|
[
"MIT"
] |
permissive
|
nairvinayv/random_scripts
|
fc9278ce4be4908368311993918854de8330e032
|
6e1cc8d82cf61ae245108a69ffa1359f636f37f7
|
refs/heads/master
| 2022-02-14T09:01:20.866580
| 2022-02-02T21:33:05
| 2022-02-02T21:33:05
| 39,131,991
| 0
| 0
|
MIT
| 2021-09-27T04:50:05
| 2015-07-15T10:58:07
|
Shell
|
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 09:14:50 2017
This script is for making sure two uploaded receptor conformations are of the
same ensembles which will be used for the calculation of average SNR of multiple
conformations
@author: nabina
"""
#import re
import sys,os
import subprocess as sb
import shutil
# Positional CLI arguments (no argparse); extra args are read inside
# work_files() depending on the mode flag.
arg1 = sys.argv[1]  # flag arguments for defining the modules to be operated
arg2 = sys.argv[2]  # wrapper files
arg3 = sys.argv[3]  # working folder
def work_files():
    """
    This function is to find out the folders present in ensembles and prepare receptor
    in each folders and also to copy ligands to each folders. Finally, depending
    on the flag defined it helps in running autodock or LIBSA
    """
    pdb_path = '{}protein.pdb'.format(arg3)
    ligand_path = '{}ligand.pdbqt'.format(arg3)
    chnatm_path = '{}chnatm.dat'.format(arg3)
    protein_path = []
    protein_pdbqt = []
    receptor_path = []
    aname = {};
    # One entry per ensemble member: ensembles/folder0/, ensembles/folder1/, ...
    a_count = list(range(len(os.listdir('{}ensembles'.format(arg3)))))
    for i in range(len(a_count)):aname[(i)] = '{}ensembles/folder{}/'.format(arg3, a_count[i])
    dirr = list(aname.values())
    for i in range(len(dirr)):
        protein_path.append('{}protein.pdb'.format(dirr[i]))
        protein_pdbqt.append('{}protein.pdbqt'.format(dirr[i]))
        receptor_path.append('{}receptor.pdb'.format(dirr[i]))
    for i in range(len(dirr)):
        if arg1 == 'adt':
            # AutoDock mode: extra CLI args carry the grid/search parameters.
            space = sys.argv[4]
            points = sys.argv[5]
            evals = sys.argv[6]
            gens = sys.argv[7]
            run = sys.argv[8]
            sb.call(['python', '{}pdb_prepare.py'.format(arg2), pdb_path, chnatm_path, protein_path[i], protein_pdbqt[i], receptor_path[i]])
            shutil.copy(ligand_path, dirr[i])  # copying of ligand_pdbqt to ensemble folders
            sb.call(['python', '{}adt.py'.format(arg2), dirr[i], space, points, evals, gens, run])
        if arg1 == 'libsa_none':
            LIBSA = sys.argv[4]
            sb.call(['python', '{}libsa_none.py'.format(arg2), dirr[i], LIBSA, 'none', '0.05','1','0.4','4'])
            sb.call(['python', '{}libsa_none.py'.format(arg2), dirr[i], LIBSA, 'affinity_only', '0.05','1','0.4','4'])
        if arg1 == 'libsa':
            LIBSA = sys.argv[4]
            arg5 = sys.argv[5]
            arg6 = sys.argv[6]
            arg7 = sys.argv[7]
            arg8 = sys.argv[8]
            arg9 = sys.argv[9]
            sb.call(['python', '{}libsa.py'.format(arg2), dirr[i], LIBSA, arg5,arg6, arg7, arg8, arg9])
    """
    Subprocess call for views file to automate adt and LIBSA for ensembles
    sb.call(['python', '{}adt_ens.py'.format(wrappers), 'adt', wrappers, current_dir, space, points, evals, gens, run])
    sb.call(['python', '{}adt_ens.py'.format(wrappers), 'libsa_none', wrappers, current_dir,LIBSA])
    sb.call(['python', '{}adt_ens.py'.format(wrappers), 'libsa', wrappers, current_dir, LIBSA, libsa_flags, energy_steps, percentchange, aux_peak, cutoff])
    """
# Entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    work_files()
|
[
"noreply@github.com"
] |
nairvinayv.noreply@github.com
|
c7ffbc120879b204d210b4e4d5cc28f0f5f98edd
|
086722e5e0a7a88654ad78c696d5e22e6b700e1a
|
/pythonwithcsv.py
|
be793bbffb4556da3531ea3dc7e4b33b3c8fe6d2
|
[] |
no_license
|
84karandeepsingh/datavizwithcsv
|
5541840d3e7e3b126c1017fd28c6b0865f164415
|
7cb82d8063802aebdf59a647bfc19f9b32063850
|
refs/heads/master
| 2020-04-06T19:51:51.419261
| 2018-11-15T21:01:34
| 2018-11-15T21:01:34
| 157,752,241
| 0
| 0
| null | 2018-11-15T21:01:35
| 2018-11-15T18:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
"""Pie-chart visualization of Google Play app ratings read from a CSV dump."""
import csv
import numpy as np
import matplotlib.pyplot as plt

# figure out what data we want to use
categories = []  # these are the column headers in the CSV file
installs = []    # this is the installs column
ratings = []     # this is the ratings column

with open('data/googeplaystore.csv') as csvfile:
    reader = csv.reader(csvfile)
    line_count = 0
    for row in reader:
        # Move the header row out of the actual data to get a clean dataset.
        # FIX: was `line_count is 0` -- identity comparison with an int
        # literal only works by CPython small-int caching; use `==`.
        if line_count == 0:
            print('pushing categories into a separate array')
            categories.append(row)  # push the header text into this array
            line_count += 1
        else:
            # grab the ratings and push them into the ratings array
            ratingsData = row[2]
            ratingsData = ratingsData.replace("NaN", "0")
            ratings.append(float(ratingsData))  # float turns text into a number
            installData = row[5]
            installData = installData.replace(",", "")  # get rid of the commas
            # get rid of the trailing "+"
            installs.append(np.char.strip(installData, "+"))
            line_count += 1

# get some values we can work with:
# how many ratings are 4+? how many below 4? how many in between?
np_ratings = np.array(ratings)  # turn a plain Python list into a Numpy array
popular_apps = np_ratings > 4
print("popular apps:", len(np_ratings[popular_apps]))
percent_popular = int(len(np_ratings[popular_apps]) / len(np_ratings) * 100)
print(percent_popular)

unpopular_apps = np_ratings < 4
print("unpopular apps:", len(np_ratings[unpopular_apps]))
percent_unpopular = int(len(np_ratings[unpopular_apps]) / len(np_ratings) * 100)
print(percent_unpopular)

# Remainder (ratings exactly 4, plus int() truncation slack).
kinda_popular = int(100 - (percent_popular + percent_unpopular))
print(kinda_popular)

# do a visualization with our shiny new data
labels = "Sucks", "Meh", "Love it!"
sizes = [percent_unpopular, kinda_popular, percent_popular]
colors = ['yellowgreen', 'lightgreen', 'lightskyblue']
explode = (0.1, 0.1, 0.15)
plt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.legend(labels, loc=1)
plt.title("Do we love us some apps?")
plt.xlabel("User Ratings - App Install (10,000+ apps")
plt.show()

print(categories)
print('first row of data:', installs[0])
print('last row of data:', installs[-1])
|
[
"k_thind92494@tss-hr420-dm29.fc.ca"
] |
k_thind92494@tss-hr420-dm29.fc.ca
|
500aef746d79ed087cee6c69260b5b7ab0ba585d
|
12c15a95f6105f58cce4595db4541e2967abc86a
|
/PyFury/CodeMonk V2/XOR.py
|
dec64e446ce09fcec64e31f94dcc7013cd2395c2
|
[] |
no_license
|
avinash28196/PyFury-V1.0
|
da5c85fd561ee7edc01f7ece9f4657191ae0f015
|
84ed41c13e2fdd96fc1556915709f0c87655af56
|
refs/heads/master
| 2021-07-05T08:42:10.291793
| 2019-02-25T17:06:46
| 2019-02-25T17:06:46
| 125,209,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
# For each test case, count unordered pairs (i, j) with 1 <= i < j <= N
# whose XOR does not exceed N.  O(N^2) brute force per case.
Test = int(input())
for _ in range(Test):
    N = int(input())
    count = 0
    for i in range(1, N + 1):
        for j in range(i + 1, N + 1):
            # FIX: `xor <= N or xor == N` was redundant -- equality is
            # already covered by <=.
            if i ^ j <= N:
                count += 1
    print(count)
|
[
"nextbitgeek@Avinashs-MacBook-Pro.local"
] |
nextbitgeek@Avinashs-MacBook-Pro.local
|
eddff0d30d84daa619346f62be32cd51bd14262c
|
702c8a229ec80537e9864959220c75aaabb28548
|
/taobao.py
|
01f8d6dd7e92faac959d6bc370761b440d2e7af4
|
[] |
no_license
|
17181370591/wode
|
67de606298da7daf9e73dae8822a03ade9065ddc
|
4c574ec33f17c2b65f1fec7eb0adfb6dd05f141e
|
refs/heads/master
| 2021-06-23T18:21:18.796955
| 2019-06-13T09:56:48
| 2019-06-13T09:56:48
| 114,825,512
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# Fetch the Taobao "bought items" page and dump the raw response body.
# NOTE(review): presumably requires a logged-in session; anonymously this URL
# likely serves a login page -- confirm how cookies/auth are provided.
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re

p = urlopen('https://buyertrade.taobao.com/trade/itemlist/list_bought_items.htm?spm=a3204.7139825.a2226mz.9.I5133L&t=20110530')
print(p.read())
|
[
"noreply@github.com"
] |
17181370591.noreply@github.com
|
a0a6e2b478307867d176521ffe24feb3a9ea24cb
|
382c3368b5a8a13d57bcff7951334e57f919d964
|
/remote-scripts/samples/APC40_20/SpecialMixerComponent.py
|
c157f77a22c018cdeaf7228a4fc43b005f301133
|
[
"Apache-2.0"
] |
permissive
|
jim-cooley/abletonremotescripts
|
c60a22956773253584ffce9bc210c0804bb153e1
|
a652c1cbe496548f16a79bb7f81ce3ea3545649c
|
refs/heads/master
| 2021-01-22T02:48:04.820586
| 2017-04-06T09:58:58
| 2017-04-06T09:58:58
| 28,599,515
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
# emacs-mode: -*- python-*-
# -*- coding: utf-8 -*-
from _Framework.MixerComponent import MixerComponent
from SpecialChannelStripComponent import SpecialChannelStripComponent
from _Framework.ButtonElement import ButtonElement #added
from _Framework.EncoderElement import EncoderElement #added
class SpecialMixerComponent(MixerComponent):
    ' Special mixer class that uses return tracks alongside midi and audio tracks, and only maps prehear when shifted '
    __module__ = __name__

    def __init__(self, num_tracks):
        MixerComponent.__init__(self, num_tracks)
        self._shift_button = None  # added
        self._shift_pressed = False  # added

    def set_shift_button(self, button):  # added
        # Move the value listener from the previous shift button to `button`
        # (either may be None), then refresh the mapping.
        assert ((button == None) or (isinstance(button, ButtonElement) and button.is_momentary()))
        if (self._shift_button != button):
            if (self._shift_button != None):
                self._shift_button.remove_value_listener(self._shift_value)
            self._shift_button = button
            if (self._shift_button != None):
                self._shift_button.add_value_listener(self._shift_value)
            self.update()

    def _shift_value(self, value):  # added
        # MIDI value 0 means released; any non-zero value means held.
        assert (self._shift_button != None)
        assert (value in range(128))
        self._shift_pressed = (value != 0)
        self.update()

    def update(self):  # added override
        # Map the prehear (cue) volume only while shift is held; otherwise
        # release it so the physical control stays free for other mappings.
        if self._allow_updates:
            master_track = self.song().master_track
            if self.is_enabled():
                if (self._prehear_volume_control != None):
                    if self._shift_pressed:  # added
                        self._prehear_volume_control.connect_to(master_track.mixer_device.cue_volume)
                    else:
                        self._prehear_volume_control.release_parameter()  # added
                if (self._crossfader_control != None):
                    self._crossfader_control.connect_to(master_track.mixer_device.crossfader)
            else:
                # Component disabled: detach controls and darken nav buttons.
                if (self._prehear_volume_control != None):
                    self._prehear_volume_control.release_parameter()
                if (self._crossfader_control != None):
                    self._crossfader_control.release_parameter()
                if (self._bank_up_button != None):
                    self._bank_up_button.turn_off()
                if (self._bank_down_button != None):
                    self._bank_down_button.turn_off()
                if (self._next_track_button != None):
                    self._next_track_button.turn_off()
                if (self._prev_track_button != None):
                    self._prev_track_button.turn_off()
            self._rebuild_callback()
        else:
            # Updates suppressed: remember the request for a later flush.
            self._update_requests += 1

    def tracks_to_use(self):
        # Return tracks are appended after the visible midi/audio tracks.
        return (self.song().visible_tracks + self.song().return_tracks)

    def _create_strip(self):
        return SpecialChannelStripComponent()

    def disconnect(self):  # added
        # Detach the shift listener before the framework tears us down.
        MixerComponent.disconnect(self)
        if (self._shift_button != None):
            self._shift_button.remove_value_listener(self._shift_value)
            self._shift_button = None
# local variables:
# tab-width: 4
|
[
"jim@ubixlabs.com"
] |
jim@ubixlabs.com
|
6d89a520c12d1cf396cf97386448d6f6738ff2d8
|
2597f120ba197ec63497263b75b003f17dd41d37
|
/manage.py
|
54c3bd9664497fe87eee913f00da595bf9bbee72
|
[] |
no_license
|
vir-mir/reactjs-test
|
e86be29b939d77b9d5be5ea5c7ffe47dd7a54293
|
8e2da61fb7c13ae85a2ce835fb84893ad855d547
|
refs/heads/master
| 2021-01-02T09:26:14.815846
| 2014-07-30T04:11:26
| 2014-07-30T04:11:26
| 29,864,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/env python
"""Django management entry point: dispatches CLI commands to django-admin."""
import os
import sys

if __name__ == "__main__":
    # Fall back to this project's settings unless the caller overrides them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "virmir.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
[
"virmir49@gmail.com"
] |
virmir49@gmail.com
|
e6e7dda1c960f07e3ef950b406a97d1171f4fa8d
|
fe0edb968d9d20c8dcdd994e293db418c451ce53
|
/amazon/LCAOfBinaryTree/solution.py
|
5d3bd6f8a9f7c77189b09b2403dbaeac851e2ceb
|
[] |
no_license
|
childxr/lintleetcode
|
d079b1a01fb623f2cb093b0fe665c21a18ec1b6a
|
e8d472ab237d61fed923df25c91823371c63445b
|
refs/heads/master
| 2020-03-20T08:39:10.473582
| 2018-08-04T22:56:55
| 2018-08-04T22:56:55
| 137,315,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param: root: The root of the binary search tree.
    @param: A: A TreeNode in a Binary.
    @param: B: A TreeNode in a Binary.
    @return: Return the least common ancestor(LCA) of the two nodes.
    """

    def findNodes(self, root, A, B):
        """Post-order search that bubbles up A, B, or their ancestor."""
        # Empty subtree, or this node is one of the targets: report it upward.
        if root is None or root == A or root == B:
            return root
        hit_left = self.findNodes(root.left, A, B)
        hit_right = self.findNodes(root.right, A, B)
        # A target found on each side means this node is the lowest ancestor.
        if hit_left is not None and hit_right is not None:
            return root
        # Otherwise propagate whichever side (if any) located a target.
        return hit_right if hit_left is None else hit_left

    def lowestCommonAncestor(self, root, A, B):
        """Return the least common ancestor of nodes A and B under root."""
        return self.findNodes(root, A, B)
|
[
"rxie@juniper.net"
] |
rxie@juniper.net
|
edbda326ea8cc86ed561de36cac7f9cfb7b215e5
|
97763df96bc21d91e46e3a98f9ee2b55f557035e
|
/qa/rpc-tests/wallet.py
|
096b0a373b81c77ca811d0f202f25b8aea30c591
|
[
"MIT"
] |
permissive
|
jaagcoin/JAAGCoin-Core
|
2f0138c38e28b98878bbcd5f011ab84d1441bb57
|
87073dbff406e2d95a6e9d81521973c3c8cef350
|
refs/heads/master
| 2020-03-26T05:34:39.790028
| 2018-08-30T15:46:16
| 2018-08-30T15:46:16
| 144,563,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,705
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
    """Functional test of core wallet RPCs on a small regtest network.

    Exercises balances and fees, locked/watch-only/zero-value UTXOs,
    -walletbroadcast behaviour, unicode account labels, wallet maintenance
    startup flags, listsinceblock, and long chains of unconfirmed payments.
    """

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
        return curr_balance

    def __init__(self):
        """Configure the run: clean chain, 4 nodes, HD wallets on even nodes."""
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]

    def setup_network(self, split=False):
        """Start the first three nodes fully meshed; node 3 joins later."""
        self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3], redirect_stderr=True)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.is_network_split=False
        self.sync_all()

    def run_test (self):
        """Drive the full wallet scenario end to end (order matters)."""
        # Check that there's no UTXO on none of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        print("Mining blocks...")

        self.nodes[0].generate(1)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 500)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 500)
        assert_equal(self.nodes[1].getbalance(), 500)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Check that only first and second nodes have UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        # Send 210 JAAG from 0 to 2 using sendtoaddress call.
        # Second transaction will be child of first, and will require a fee
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)

        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()

        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)

        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()

        # node0 should end up with 1000 JAAG in block rewards plus fees, but
        # minus the 210 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 1000-210)
        assert_equal(self.nodes[2].getbalance(), 210)

        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)

        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))

        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True, False, True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True, False, True)

        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 1000)
        assert_equal(self.nodes[2].getbalance("from1"), 1000-210)

        # Send 100 JAAG normal
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.00001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), Decimal('100'))

        # Send 100 JAAG with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('100')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Sendmany 100 JAAG
        txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
        self.nodes[2].generate(1)
        self.sync_all()
        node_0_bal += Decimal('100')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)

        # Sendmany 100 JAAG with subtract fee from amount
        txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('100')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))

        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes)

        self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3], redirect_stderr=True))
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)

        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)

        assert(txid1 in self.nodes[3].getrawmempool())

        # Exercise balance rpcs
        assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
        assert_equal(self.nodes[0].getunconfirmedbalance(), 1)

        #check if we can list zero value tx as available coins
        #1. create rawtx
        #2. hex-changed one output to 0.0
        #3. sign and send
        #4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}

        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid= decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])

        self.sync_all()
        self.nodes[1].generate(1) #mine a block
        self.sync_all()

        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)

        #do some -walletbroadcast tests
        stop_nodes(self.nodes)
        self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.sync_all()

        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        self.nodes[1].generate(1) #mine a block, tx should not be in there
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted

        #now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)

        #restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        sync_blocks(self.nodes)

        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2

        #tx should be added to balance because after restarting the nodes tx should be broadcastet
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        #send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))

        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        #check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        try:
            txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException as e:
            assert("Invalid amount" in e.error['message'])
        else:
            raise AssertionError("Must not parse invalid amounts")

        try:
            self.nodes[0].generate("2")
            raise AssertionError("Must not accept strings as numeric")
        except JSONRPCException as e:
            assert("not an integer" in e.error['message'])

        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all()

        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)

        # 3. Validate that the imported address is watch-only on node1
        assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])

        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})

        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)

        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})

        # Mine a block from node0 to an address from node1
        cbAddr = self.nodes[1].getnewaddress()
        blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
        cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
        self.sync_all()

        # Check that the txid and balance is found by node1
        self.nodes[1].gettransaction(cbTxId)

        # check if wallet or blockchain maintenance changes the balance
        self.sync_all()
        blocks = self.nodes[0].generate(2)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()

        # Check modes:
        # - True: unicode escaped as \u....
        # - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
            for s in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getaccountaddress(s)
                label = self.nodes[0].getaccount(addr)
                assert_equal(label, s)
                assert(s in self.nodes[0].listaccounts().keys())
        self.nodes[0].ensure_ascii = True # restore to default

        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
            # '-salvagewallet',
        ]
        chainlimit = 6
        for m in maintenance:
            print("check " + m)
            stop_nodes(self.nodes)
            # set lower ancestor limit for later
            self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # reindex will leave rpc warm up "early"; Wait for it to finish
                time.sleep(0.1)
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])

        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)

        # ==Check that wallet prefers to use coins that don't exceed mempool limits =====

        # Get all non-zero utxos together
        chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
        singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
        self.nodes[0].generate(1)
        node0_balance = self.nodes[0].getbalance()
        # Split into two chains
        rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)

        # Make a long chain of unconfirmed payments without hitting mempool limit
        # Each tx we make leaves only one output of change on a chain 1 longer
        # Since the amount to send is always much less than the outputs, we only ever need one output
        # So we should be able to generate exactly chainlimit txs for each original output
        sending_addr = self.nodes[1].getnewaddress()
        txid_list = []
        for i in range(chainlimit*2):
            txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
        assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
        assert_equal(len(txid_list), chainlimit*2)

        # Without walletrejectlongchains, we will still generate a txid
        # The tx will be stored in the wallet but not accepted to the mempool
        extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
        assert(extra_txid not in self.nodes[0].getrawmempool())
        assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
        self.nodes[0].abandontransaction(extra_txid)
        total_txs = len(self.nodes[0].listtransactions("*",99999))

        # Try with walletrejectlongchains
        # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
        stop_node(self.nodes[0],0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])

        # wait for loadmempool
        timeout = 10
        while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
            time.sleep(0.5)
            timeout -= 0.5
        assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)

        node0_balance = self.nodes[0].getbalance()
        # With walletrejectlongchains we will not create the tx and store it in our wallet.
        assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))

        # Verify nothing new in wallet
        assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
# Run the wallet functional test suite when executed as a script.
if __name__ == '__main__':
    WalletTest().main()
|
[
"dmitriy@Dmitriys-iMac.local"
] |
dmitriy@Dmitriys-iMac.local
|
078d8878c03008b44ffb9bcebc52d9ae1bf3d187
|
dbc08e2b8b1d257b4ad0a12eeefb5d8ac2168045
|
/ClassifyProducts.py
|
1377966c9d462c28a9a812de9fbcb8962c764702
|
[] |
no_license
|
lauraabend/NLP_ProductClassifier
|
885329bad7f0dd26688361b679cfa1e25f14be5e
|
0c659165a7c444ef07c34cbf2452ad57ad6510de
|
refs/heads/master
| 2021-01-18T12:46:00.842400
| 2016-07-01T21:17:08
| 2016-07-01T21:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
from nltk.corpus import wordnet
import numpy as np
import pandas as pd
from nltk.tokenize import TweetTokenizer
from nltk.tag import pos_tag
from nltk.stem.porter import *
from nltk.corpus import stopwords
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def assign_product_to_class(class_descriptions, description_of_product):
    """Rank candidate classes by WordNet similarity to a product description.

    Args:
        class_descriptions: dict mapping class name -> list of (stemmed)
            keyword strings describing that class.
        description_of_product: list of (stemmed) tokens from the product
            description; duplicates and English stopwords are dropped.

    Returns:
        List of [score, class_name] pairs sorted best-first. The score is
        the mean of the top-5 Wu-Palmer similarities between class keywords
        and description tokens, or 0.0 when no pair could be compared.
    """
    comparison_list = []
    # De-duplicate tokens and drop English stopwords before comparing.
    description_of_product = list(set(description_of_product))
    description_of_product = [word for word in description_of_product if word not in stopwords.words('english')]
    for className in class_descriptions.keys():
        comparison_per_class = []
        for word1 in class_descriptions[className]:
            word_from_list1 = wordnet.synsets(word1)
            for word2 in description_of_product:
                word_from_list2 = wordnet.synsets(word2)
                if word_from_list1 and word_from_list2:
                    # Wu-Palmer similarity of the first (most common) senses;
                    # may be None when the synsets share no common ancestor.
                    s = word_from_list1[0].wup_similarity(word_from_list2[0])
                    if s is not None:
                        comparison_per_class.append(s)
        list_of_similar_values = sorted(comparison_per_class, reverse=True)[:5]
        # BUG FIX: np.mean([]) is nan (with a RuntimeWarning), and nan scores
        # make the final sorted() ordering undefined. Score 0.0 instead when
        # no keyword/token pair was comparable.
        score = float(np.mean(list_of_similar_values)) if list_of_similar_values else 0.0
        comparison_list.append([score, className])
    return sorted(comparison_list, reverse=True)
stemmer = PorterStemmer()
tknzr = TweetTokenizer()

# Seed keywords per target class; stemmed below so they match stemmed tokens.
classDescriptions = {
    "Camera & Photo": ["lens", "camera", "photo", "camcorder", "photography", "image", "film", "digital", "monitor", "record"],
    "Bedding & Bath": ["bed", "bath", "sheet", "towel", "shower", "tube", "bathroom", "bedroom", "pillow", "mattress", "sleep"],
    "Exercise & Fitness": ["exercise", "fitness", "sport", "games", "weight", "train", "resistance", "soccer", "tennis", "golf", "yoga", "basketball", "fit"]
}
for class_name in classDescriptions:
    classDescriptions[class_name] = [stemmer.stem(word) for word in classDescriptions[class_name]]

# Load the labelled test set: (id, free-text description, gold category).
file = pd.read_csv("./test_set2.csv", delimiter=";", encoding='latin-1')
list_of_products = list(zip(file["Product_id"].tolist(), file["Description"], file["Category"]))
list_of_products_ready = [list(elem) for elem in list_of_products]

real_label = []
prediction = []
for record in list_of_products_ready:
    # Tokenize the raw description, then drop it from the record.
    tokens = tknzr.tokenize(record[1])
    record.pop(1)
    # Stem, POS-tag, and keep only common/proper nouns.
    stems = [stemmer.stem(token) for token in tokens]
    tagged = pos_tag(stems)
    nouns = [word for word, pos in tagged if pos in ('NN', 'NNP')]
    record.append(nouns)
    # Classify on the noun list; store the predicted class at index 2.
    ranked = assign_product_to_class(classDescriptions, record[2])
    record.insert(2, ranked[0][1])
    real_label.append(record[1])
    prediction.append(record[2])
    print(record)

# Summarize gold vs. predicted labels over the whole test set.
print(confusion_matrix(real_label, prediction))
print(classification_report(real_label, prediction, target_names=["Exercise & Fitness", "Camera & Photo", "Bedding & Bath"]))
|
[
"martin.maseda@gmail.com"
] |
martin.maseda@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.