Dataset schema (one record per source file):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |
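As a minimal sketch of how a dump with this schema is typically consumed (the dataset path below is a placeholder, not a reference to this exact dump), the rows can be streamed and filtered without materializing the multi-megabyte `content` strings:

```python
from datasets import load_dataset

# Placeholder path; any Hugging Face dataset with the schema above works the same way.
rows = load_dataset("bigcode/the-stack-dedup", split="train", streaming=True)

for row in rows:
    # Keep permissively licensed, human-written, non-vendored files.
    if row["license_type"] == "permissive" and not (row["is_vendor"] or row["is_generated"]):
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # first match only, for illustration
```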
55c78f9adc431c314924891a2846056a21118d3d
|
f5ddc6122e361e9a6508ced36a3ebfc3c0814356
|
/beanstalkd/south_migrations/0001_initial.py
|
0feafbae89dbb4b68ad52f1d1664693fbbc7e076
|
[
"Apache-2.0"
] |
permissive
|
baitcode/django-beanstalkd
|
84c920bd8860a0af4fbbf1f06137f73390594c43
|
27696832fd5bffcabe96f787d2608fb1fbb0ec5a
|
refs/heads/master
| 2021-01-24T06:04:53.263333
| 2014-10-30T11:41:03
| 2014-10-30T11:41:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tube'
db.create_table('beanstalk_tube', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('buried', self.gf('django.db.models.fields.IntegerField')(null=True)),
('delayed', self.gf('django.db.models.fields.IntegerField')(null=True)),
('ready', self.gf('django.db.models.fields.IntegerField')(null=True)),
('reserved', self.gf('django.db.models.fields.IntegerField')(null=True)),
('urgent', self.gf('django.db.models.fields.IntegerField')(null=True)),
))
db.send_create_signal('beanstalkd', ['Tube'])
# Adding model 'Job'
db.create_table('beanstalk_job', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('beanstalk_id', self.gf('django.db.models.fields.IntegerField')()),
('instance_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('instance_port', self.gf('django.db.models.fields.IntegerField')()),
('tube', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['beanstalk.Tube'], null=True)),
('tube_name', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')(default='{}')),
('state', self.gf('django.db.models.fields.SmallIntegerField')(default=1)),
))
db.send_create_signal('beanstalkd', ['Job'])
def backwards(self, orm):
# Deleting model 'Tube'
db.delete_table('beanstalk_tube')
# Deleting model 'Job'
db.delete_table('beanstalk_job')
models = {
'beanstalk.job': {
'Meta': {'object_name': 'Job'},
'beanstalk_id': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'instance_port': ('django.db.models.fields.IntegerField', [], {}),
'message': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'tube': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beanstalk.Tube']", 'null': 'True'}),
'tube_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'beanstalk.tube': {
'Meta': {'ordering': "['name']", 'object_name': 'Tube'},
'buried': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'delayed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'ready': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'urgent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
}
}
complete_apps = ['beanstalkd']
|
[
"ilya@ostrovok.ru"
] |
ilya@ostrovok.ru
|
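For orientation, the two tables that this South migration's forwards() creates correspond to roughly the following modern Django models. This is a sketch inferred from the field definitions above, not code from the baitcode/django-beanstalkd repo:

```python
from django.db import models

class Tube(models.Model):
    name = models.CharField(max_length=1000)
    buried = models.IntegerField(null=True)
    delayed = models.IntegerField(null=True)
    ready = models.IntegerField(null=True)
    reserved = models.IntegerField(null=True)
    urgent = models.IntegerField(null=True)

    class Meta:
        db_table = 'beanstalk_tube'

class Job(models.Model):
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    beanstalk_id = models.IntegerField()
    # IPAddressField was removed from Django; GenericIPAddressField replaces it.
    instance_ip = models.GenericIPAddressField()
    instance_port = models.IntegerField()
    tube = models.ForeignKey(Tube, null=True, on_delete=models.SET_NULL)
    tube_name = models.CharField(max_length=1000)
    message = models.TextField(default='{}')
    state = models.SmallIntegerField(default=1)

    class Meta:
        db_table = 'beanstalk_job'
```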
f2d8006fa9d4e809157de1688060502edc3218c4
|
2368972f5cd45704b5ab1b4877f6409fc38bf693
|
/app/app.py
|
15563cc268e2f61394e02d4b08bcdf53cec19708
|
[] |
no_license
|
sergiodias28/manobra
|
7d67498521aabb0d8c9a5d9ebce97d39099913cb
|
5f38eef2035547807ae8aaa095a76961cb372852
|
refs/heads/master
| 2021-01-19T00:24:57.666548
| 2016-08-11T23:12:26
| 2016-08-11T23:12:26
| 65,052,300
| 0
| 0
| null | 2016-08-11T23:12:27
| 2016-08-05T22:15:02
|
Python
|
ISO-8859-1
|
Python
| false
| false
| 2,234
|
py
|
# -*- coding: utf-8 -*-
"""
Autman
~~~~~~
    Maneuver automation system.
:copyright: (c) 2016 by Sergio Dias.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, jsonify, redirect, url_for, abort, \
render_template, flash
from time import gmtime, strftime
import paramiko
import time
# create our little application :)
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
#DATABASE=os.path.join(app.root_path, 'autman.db'),
DEBUG=True,
SECRET_KEY='bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=',
USERNAME='admin',
PASSWORD='default',
IP_SAGE='192.168.0.18',
USER_SAGE='sage',
PASS_SAGE='sage'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# Connect to the database
conn = sqlite3.connect('autman.db')
comandos = conn.execute('select c.codigo as equipamento, c.tipo as tipo, a.comando as comando, d.codigo as unidade, b.descricao AS Acao from roteiro_comando a inner join roteiro_manobra_item b on b.id=a.id_roteiro_manobra_item inner join equipamento c on c.id=a.id_equipamento inner join unidade d on d.id=b.id_unidade')
if comandos:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(app.config['IP_SAGE'], username=app.config['USER_SAGE'], password=app.config['PASS_SAGE'])
for item_comando in comandos:
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("sage_ctrl %s:%s:%d %d" % (item_comando[3], item_comando[0], item_comando[1], item_comando[2]))
print "sage_ctrl %s:%s:%d %d" % (item_comando[3], item_comando[0], item_comando[1], item_comando[2]), "%s" % (item_comando[4])
time.sleep(4)
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("sage_ctrl %s:%s:%d %d" % (item_comando['unidade'], item_comando['equipamento'],item_comando['tipo'], item_comando['comando']))
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sage_ctrl JCD:14C1:52 0')
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('ls')
#for line in ssh_stdout:
# print '... ' + line.strip('\n')
ssh.close()
|
[
"engsergiodias28@gmail.com"
] |
engsergiodias28@gmail.com
|
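The script's core pattern is: read the scripted command rows from SQLite, then replay each one over SSH with a fixed delay. A Python 3 sketch of the same pattern with explicit resource handling (not a drop-in replacement for the original) could look like:

```python
import sqlite3
import time

import paramiko

QUERY = (
    "select c.codigo, c.tipo, a.comando, d.codigo, b.descricao "
    "from roteiro_comando a "
    "inner join roteiro_manobra_item b on b.id = a.id_roteiro_manobra_item "
    "inner join equipamento c on c.id = a.id_equipamento "
    "inner join unidade d on d.id = b.id_unidade"
)

def run_commands(db_path='autman.db', host='192.168.0.18',
                 user='sage', password='sage', delay=4):
    """Replay every scripted switching command over SSH, paced by `delay`."""
    with sqlite3.connect(db_path) as conn:
        rows = conn.execute(QUERY).fetchall()
    if not rows:
        return
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=user, password=password)
    try:
        for equipamento, tipo, comando, unidade, acao in rows:
            cmd = "sage_ctrl %s:%s:%d %d" % (unidade, equipamento, tipo, comando)
            print(cmd, acao)
            client.exec_command(cmd)
            time.sleep(delay)  # pace the SCADA commands, as the original does
    finally:
        client.close()
```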
a49bed0ad6fb441cd8b332aad95442e6b04774ed
|
84bad7d10540d988e0a68051c0b9ff75a8a40b72
|
/agrigo/manage.py
|
329e08312ab6214c529e78643d9fb30594c0cd60
|
[
"BSD-3-Clause"
] |
permissive
|
cmdrspartacus/agrigo
|
cd22bac9c301c980259de65e71c60154c6a3f5f9
|
de6ae4980786ca29ff4ab743f64b1759016e1f57
|
refs/heads/master
| 2016-09-13T03:09:51.420817
| 2016-05-16T13:14:34
| 2016-05-16T13:14:34
| 58,933,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,463
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Management script."""
import os
from glob import glob
from subprocess import call
from flask_migrate import Migrate, MigrateCommand
from flask_script import Command, Manager, Option, Server, Shell
from flask_script.commands import Clean, ShowUrls
from agrigo.app import create_app
from agrigo.database import db
from agrigo.settings import DevConfig, ProdConfig
from agrigo.user.models import User
CONFIG = ProdConfig if os.environ.get('AGRIGO_ENV') == 'prod' else DevConfig
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
app = create_app(CONFIG)
manager = Manager(app)
migrate = Migrate(app, db)
def _make_context():
"""Return context dict for a shell session so you can access app, db, and the User model by default."""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
class Lint(Command):
"""Lint and check code style with flake8 and isort."""
def get_options(self):
"""Command line options."""
return (
Option('-f', '--fix-imports', action='store_true', dest='fix_imports', default=False,
help='Fix imports using isort, before linting'),
)
def run(self, fix_imports):
"""Run command."""
skip = ['requirements']
root_files = glob('*.py')
root_directories = [name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
print('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
            if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
manager.add_command('urls', ShowUrls())
manager.add_command('clean', Clean())
manager.add_command('lint', Lint())
if __name__ == '__main__':
manager.run()
|
[
"tobiasorlando@gmail.com"
] |
tobiasorlando@gmail.com
|
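With this script in place, the registered commands are invoked in the usual Flask-Script way: `python manage.py test` runs pytest against the `tests/` directory, `python manage.py lint` runs flake8 over the project root (add `-f`/`--fix-imports` to run isort first), and `python manage.py shell` opens a session preloaded with `app`, `db`, and `User` via `_make_context()`.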
b5131dfd5ec3d93cfb93ae353873b30d8f0cf541
|
5eb51cc393c35be33d8faca9548df0170c29bfc7
|
/base/getMeminfo.py
|
c728252269b1408de3315cb285df3750bb113edc
|
[] |
no_license
|
wxpokay/autogui
|
965f3dc1aa908ab62347f6872365f54c84eb9ea9
|
3f128b663bff61428f344579faab62668281a1a9
|
refs/heads/master
| 2021-01-22T17:22:38.663439
| 2016-03-14T08:46:38
| 2016-03-14T08:46:38
| 49,067,292
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-3-7
@author: Administrator
'''
import os
import time
import subprocess
script_dir = os.path.dirname(os.path.realpath(__file__))
print script_dir
result = "F:\\workspace\\autogui\\meminfo\\"
# Get the current system time
now = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
day = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# Define the report output directory (relative paths are supported)
tdresult = result + day
class getAndroidMem:
def def_file(self):
        ''' Define the file that stores the memory samples '''
        print 'Defining the file that stores the memory samples'
if os.path.exists(tdresult):
filename = tdresult + "\\" + now + "_mem.csv"
else:
os.mkdir(tdresult)
filename = tdresult + "\\" + now + "_mem.csv"
        if os.path.exists(filename):
            print "File already exists"
        else:
            print "File does not exist"
return filename
def getMemDump(self,file_path):
        ''' Grab a heap dump of the monitored process '''
        print 'Grabbing a heap dump of the monitored process'
subprocess.Popen('adb shell am dumpheap com.jhd.help>' +file_path)
def getMemPic(self,file_path):
        ''' Collect the memory trend data '''
        print 'Collecting the memory trend data'
#f = open(file_path, 'w')
cmd1 = 'adb shell dumpsys meminfo com.jhd.help |findstr Pss>>' + file_path
#cmd1 = 'adb shell logcat -v time |findstr jhd >>' + file_path
print cmd1
subprocess.Popen(cmd1,shell=True)
cmd2 = 'adb shell dumpsys meminfo com.jhd.help |findstr Total>>' + file_path
subprocess.Popen(cmd2,shell=True)
cmd3 = 'adb shell dumpsys meminfo com.jhd.help |findstr TOTAL>>' + file_path
        while True:
            subprocess.Popen(cmd3, shell=True)
            time.sleep(1)  # throttle the dumpsys polling loop
if __name__ == "__main__":
filename = getAndroidMem().def_file()
#print filename
    print 'Start collecting the memory trend'
getAndroidMem().getMemPic(filename)
|
[
"wuxiaoping1120@126.com"
] |
wuxiaoping1120@126.com
|
6c4853e5d42f5a96aabcd2f6bac19abb11fe102f
|
7d5738e9713ddae056138217238e39eb093574dd
|
/deeplearning/imagenet_labels.py
|
67f92e88da0d85045b9e0274a723db2b1d4c046c
|
[] |
no_license
|
tanle2694/deploy_deeplearning_model
|
1093fa7f7e7567540e8ac9863477906666332b97
|
7c5473feb4cc5a67b5f3b9626ddcbcf5091e5ecc
|
refs/heads/master
| 2022-10-12T20:13:42.699632
| 2020-06-10T08:04:24
| 2020-06-10T08:04:24
| 270,533,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,201
|
py
|
labels = ["tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead shark",
"electric ray",
"stingray",
"cock",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"American robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"American dipper",
"kite",
"bald eagle",
"vulture",
"great grey owl",
"fire salamander",
"smooth newt",
"newt",
"spotted salamander",
"axolotl",
"American bullfrog",
"tree frog",
"tailed frog",
"loggerhead sea turtle",
"leatherback sea turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"green iguana",
"Carolina anole",
"desert grassland whiptail lizard",
"agama",
"frilled-necked lizard",
"alligator lizard",
"Gila monster",
"European green lizard",
"chameleon",
"Komodo dragon",
"Nile crocodile",
"American alligator",
"triceratops",
"worm snake",
"ring-necked snake",
"eastern hog-nosed snake",
"smooth green snake",
"kingsnake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"African rock python",
"Indian cobra",
"green mamba",
"sea snake",
"Saharan horned viper",
"eastern diamondback rattlesnake",
"sidewinder",
"trilobite",
"harvestman",
"scorpion",
"yellow garden spider",
"barn spider",
"European garden spider",
"southern black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie grouse",
"peacock",
"quail",
"partridge",
"grey parrot",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"duck",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"red king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"great egret",
"bittern",
"crane (bird)",
"limpkin",
"common gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"dunlin",
"common redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese Chin",
"Maltese",
"Pekingese",
"Shih Tzu",
"King Charles Spaniel",
"Papillon",
"toy terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick Coonhound",
"Black and Tan Coonhound",
"Treeing Walker Coonhound",
"English foxhound",
"Redbone Coonhound",
"borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizan Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bull Terrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wire Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale Terrier",
"Cairn Terrier",
"Australian Terrier",
"Dandie Dinmont Terrier",
"Boston Terrier",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scottish Terrier",
"Tibetan Terrier",
"Australian Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa Apso",
"Flat-Coated Retriever",
"Curly-coated Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Shorthaired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany",
"Clumber Spaniel",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniels",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael",
"Malinois",
"Briard",
"Australian Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"collie",
"Border Collie",
"Bouvier des Flandres",
"Rottweiler",
"German Shepherd Dog",
"Dobermann",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller Sennenhund",
"Entlebucher Sennenhund",
"Boxer",
"Bullmastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"St. Bernard",
"husky",
"Alaskan Malamute",
"Siberian Husky",
"Dalmatian",
"Affenpinscher",
"Basenji",
"pug",
"Leonberger",
"Newfoundland",
"Pyrenean Mountain Dog",
"Samoyed",
"Pomeranian",
"Chow Chow",
"Keeshond",
"Griffon Bruxellois",
"Pembroke Welsh Corgi",
"Cardigan Welsh Corgi",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican hairless dog",
"grey wolf",
"Alaskan tundra wolf",
"red wolf",
"coyote",
"dingo",
"dhole",
"African wild dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian Mau",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"polar bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"longhorn beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket",
"stick insect",
"cockroach",
"mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"red admiral",
"ringlet",
"monarch butterfly",
"small white",
"sulphur butterfly",
"gossamer-winged butterfly",
"starfish",
"sea urchin",
"sea cucumber",
"cottontail rabbit",
"hare",
"Angora rabbit",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"common sorrel",
"zebra",
"pig",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram",
"bighorn sheep",
"Alpine ibex",
"hartebeest",
"impala",
"gazelle",
"dromedary",
"llama",
"weasel",
"mink",
"European polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas monkey",
"baboon",
"macaque",
"langur",
"black-and-white colobus",
"proboscis monkey",
"marmoset",
"white-headed capuchin",
"howler monkey",
"titi",
"Geoffroy's spider monkey",
"common squirrel monkey",
"ring-tailed lemur",
"indri",
"Asian elephant",
"African bush elephant",
"red panda",
"giant panda",
"snoek",
"eel",
"coho salmon",
"rock beauty",
"clownfish",
"sturgeon",
"garfish",
"lionfish",
"pufferfish",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibious vehicle",
"analog clock",
"apiary",
"apron",
"waste container",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint pen",
"Band-Aid",
"banjo",
"baluster",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"swimming cap",
"bath towel",
"bathtub",
"station wagon",
"lighthouse",
"beaker",
"military cap",
"beer bottle",
"beer glass",
"bell-cot",
"bib",
"tandem bicycle",
"bikini",
"ring binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsleigh",
"bolo tie",
"poke bonnet",
"bookcase",
"bookstore",
"bottle cap",
"bow",
"bow tie",
"brass",
"bra",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"high-speed train",
"butcher shop",
"taxicab",
"cauldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"tool kit",
"carton",
"car wheel",
"automated teller machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"mobile phone",
"chain",
"chain-link fence",
"chain mail",
"chainsaw",
"chest",
"chiffonier",
"chime",
"china cabinet",
"Christmas stocking",
"church",
"movie theater",
"cleaver",
"cliff dwelling",
"cloak",
"clogs",
"cocktail shaker",
"coffee mug",
"coffeemaker",
"coil",
"combination lock",
"computer keyboard",
"confectionery store",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"crane (machine)",
"crash helmet",
"crate",
"infant bed",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"rotary dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishcloth",
"dishwasher",
"disc brake",
"dock",
"dog sled",
"dome",
"doormat",
"drilling rig",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso machine",
"face powder",
"feather boa",
"filing cabinet",
"fireboat",
"fire engine",
"fire screen sheet",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster bed",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gas mask",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golf cart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"grille",
"grocery store",
"guillotine",
"barrette",
"hair spray",
"half-track",
"hammer",
"hamper",
"hair dryer",
"hand-held computer",
"handkerchief",
"hard disk drive",
"harmonica",
"harp",
"harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoop skirt",
"horizontal bar",
"horse-drawn vehicle",
"hourglass",
"iPod",
"clothes iron",
"jack-o'-lantern",
"jeans",
"jeep",
"T-shirt",
"jigsaw puzzle",
"pulled rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop computer",
"lawn mower",
"lens cap",
"paper knife",
"library",
"lifeboat",
"lighter",
"limousine",
"ocean liner",
"lipstick",
"slip-on shoe",
"lotion",
"speaker",
"loupe",
"sawmill",
"magnetic compass",
"mail bag",
"mailbox",
"tights",
"tank suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"match",
"maypole",
"maze",
"measuring cup",
"medicine chest",
"megalith",
"microphone",
"microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"square academic cap",
"mosque",
"mosquito net",
"scooter",
"mountain bike",
"tent",
"computer mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook computer",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"organ",
"oscilloscope",
"overskirt",
"bullock cart",
"oxygen mask",
"packet",
"paddle",
"paddle wheel",
"padlock",
"paintbrush",
"pajamas",
"palace",
"pan flute",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"passenger car",
"patio",
"payphone",
"pedestal",
"pencil case",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"plectrum",
"Pickelhaube",
"picket fence",
"pickup truck",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate ship",
"pitcher",
"hand plane",
"planetarium",
"plastic bag",
"plate rack",
"plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"billiard table",
"soda bottle",
"pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"projectile",
"projector",
"hockey puck",
"punching bag",
"purse",
"quill",
"quilt",
"race car",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"eraser",
"rugby ball",
"ruler",
"running shoe",
"safe",
"safety pin",
"salt shaker",
"sandal",
"sarong",
"saxophone",
"scabbard",
"weighing scale",
"school bus",
"schooner",
"scoreboard",
"CRT screen",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe store",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot machine",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar thermal collector",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"motorboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"through arch bridge",
"steel drum",
"stethoscope",
"scarf",
"stone wall",
"stopwatch",
"stove",
"strainer",
"tram",
"stretcher",
"couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglass",
"sunglasses",
"sunscreen",
"suspension bridge",
"mop",
"sweatshirt",
"swimsuit",
"swing",
"switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy bear",
"television",
"tennis ball",
"thatched roof",
"front curtain",
"thimble",
"threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toy store",
"tractor",
"semi-trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright piano",
"vacuum cleaner",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"military aircraft",
"sink",
"washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool",
"split-rail fence",
"shipwreck",
"yawl",
"yurt",
"website",
"comic book",
"crossword",
"traffic sign",
"traffic light",
"dust jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"ice pop",
"baguette",
"bagel",
"pretzel",
"cheeseburger",
"hot dog",
"mashed potato",
"cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate syrup",
"dough",
"meatloaf",
"pizza",
"pot pie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeshore",
"promontory",
"shoal",
"seashore",
"valley",
"volcano",
"baseball player",
"bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"rose hip",
"horse chestnut seed",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn mushroom",
"earth star",
"hen-of-the-woods",
"bolete",
"ear",
"toilet paper"]
|
[
"tanlm@datascience.com.vn"
] |
tanlm@datascience.com.vn
|
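Since the list is index-aligned with the ImageNet-1k class ids (index 0 is "tench", index 281 is "tabby cat"), serving code indexes it with the argmax of the model's 1000-way output. A minimal sketch, assuming the file is importable as `imagenet_labels`:

```python
from imagenet_labels import labels

def top1_label(logits):
    """Map a 1000-way score vector to its human-readable class name."""
    idx = max(range(len(logits)), key=lambda i: logits[i])
    return labels[idx]

# The list is index-aligned with the ImageNet-1k class ids:
assert labels[0] == "tench"
assert labels[281] == "tabby cat"
```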
fb1dc48dad15f690de8d830e797d4ab28dc0f404
|
e247ce1a6e98772ad1fd7593f01d21971da7e738
|
/AlgorithmTest/Test/MatplotlibTest/TestMatlab.py
|
5706795c7c76e2b650e1c9ce131fc42f51dcb704
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
A666666685A/Multi-constrainedQoSRouting
|
a394d82d3acd71032918a8ffd651e42934f49baa
|
8ea2bd2a8602ed51379c7a89ea1fdf370b8f1ca7
|
refs/heads/master
| 2023-08-11T19:57:59.550777
| 2021-09-19T04:01:41
| 2021-09-19T04:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
s=raw_input("Input your age:")
if s =="":
raise Exception("Input must no be empty.")
try:
i=int(s)
except ValueError:
print "Could not convert data to an integer."
except:
print "Unknown exception!"
else: # It is useful for code that must be executed if the try clause does not raise an exception
print "You are %d" % i," years old"
finally: # Clean up action
print "Goodbye!"
|
[
"yueludanfeng@gmail.com"
] |
yueludanfeng@gmail.com
|
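The snippet is Python 2 and exists to demonstrate try/except/else/finally flow; the same control flow in Python 3 (a sketch, with the bare `except:` narrowed):

```python
s = input("Input your age:")
if s == "":
    raise Exception("Input must not be empty.")
try:
    i = int(s)
except ValueError:
    print("Could not convert data to an integer.")
except Exception:
    print("Unknown exception!")
else:
    # Runs only when the try block raised nothing.
    print("You are %d years old" % i)
finally:
    # Clean-up action: runs whether or not an exception occurred.
    print("Goodbye!")
```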
f4f95db04d1c22ec1a024380dac59917668cfa2d
|
d39eabbd338b6ad565c411784c62ba6cacbd88cc
|
/alfred/server/CommandLineInterface.py
|
5acd514f5a166f9d39c669de08dfe1d5f0b48557
|
[
"MIT"
] |
permissive
|
nakul225/alfred
|
f4d8680c614146c87824b14aedb7901a21e97136
|
0dfc90b830ca06403102db2de27c3f70607e976c
|
refs/heads/master
| 2021-01-01T06:03:27.866914
| 2017-07-17T01:37:19
| 2017-07-17T01:37:19
| 97,346,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,611
|
py
|
'''
Created on Mar 20, 2017
@author: nakul
'''
import traceback
import sys
class CommandLineInterface:
# Implementation that provides cmd line input/response interaction
def __init__(self, providedLife):
self.life = providedLife
def _show_usage(self):
print "\n==========================================================="
print "Supported commands are:"
print "Put Goals: \n\t pg <lowercase_goal_name_without_spaces> <lowercase_description_without_spaces>"
print "Put Step: \n\t ps <goal_name> <name> <cost_in_hours>"
print "Get Goals: \n\t gg"
print "Mark Step Complete: \n\t msc <goal_name> <step_name>"
print "Mark Step Incomplete: \n\t msi <goal_name> <step_name>"
print "Get Progress Summary: \n\t gps"
print "Exit Program: \n\t exit"
print "===========================================================\n"
def _show_progress(self):
#Iterates through each goal/category and shows progress for each one
self._show_progress_for_goals()
self._show_progress_for_categories()
def _show_progress_for_goals(self):
#Iterates through each goal and shows progress for each one
for goal in self.life.get_goals():
print "Goal " + goal.name + " is " + str(goal.get_progress_percentage()) + "% complete"
    def _show_progress_for_categories(self):
        #Iterates through each category and shows progress for each one
        for category in self.life.get_categories():
            print "Category " + category.name + " has completed " + str(category.get_progress_percentage()) + "%"
def _process_command(self, command):
lowercase_command = command.lower()
operation = lowercase_command.split()[0]
continue_program = True
if operation == Operation.EXIT.value:
continue_program = False
elif operation == Operation.PUT_GOAL.value:
self.put_goal(lowercase_command)
elif operation == Operation.GET_GOALS.value:
self.get_goals(lowercase_command)
elif operation == Operation.PUT_STEP.value:
self.put_step(lowercase_command)
elif operation == Operation.GET_PROGRESS_SUMMARY.value:
self.show_progress_summary()
elif operation == Operation.MARK_STEP_COMPLETE.value:
self.mark_step_complete(lowercase_command)
elif operation == Operation.MARK_STEP_INCOMPLETE.value:
self.mark_step_incomplete(lowercase_command)
else:
print "Operation not recognized. Please see usage:"
self._show_usage()
return continue_program
def show_progress_summary(self):
self._show_progress()
def put_goal(self, command):
#PutGoal <lowercase_goal_name_without_spaces> <lowercase_description_without_spaces>
elements = command.split()
name = elements[1].lower()
description = elements[2].lower()
goal = Goal.build_new_goal(name, description)
self.life.put_goal(goal)
def get_goals(self, command):
print "You have following goals in the system: "
for goal in self.life.get_goals():
goal.print_details()
def put_step(self, command):
#PutStep <name> <description> <cost_in_hours> <name_of_goal>
elements = command.split()
goal_name = elements[1].lower()
name = elements[2].lower()
description = ""
cost = int(elements[3])
step = Step.build_new_step(name, description, cost)
# Find the goal in life and add this step to it.
success = False
for goal in self.life.get_goals():
if goal.name == goal_name:
goal.put_step(step)
success=True
if success == False:
print "Specified goal not found!"
def _show_usage_and_accept_user_input(self):
# Show usage and accept user input
self._show_usage()
continue_flag = self._read_input_and_process()
return continue_flag
def mark_step_complete(self, command):
elements = command.split()
goal_name = elements[1]
step_name = elements[2]
print "Marking step "+ step_name + " in goal " + goal_name + " as COMPLETE"
for goal in self.life.get_goals():
if goal.name == goal_name:
goal.mark_step_complete(step_name)
def mark_step_incomplete(self, command):
elements = command.split()
goal_name = elements[1]
step_name = elements[2]
print "Marking step "+ step_name + " in goal " + goal_name + " as INCOMPLETE"
for goal in self.life.get_goals():
if goal.name == goal_name:
goal.mark_step_incomplete(step_name)
def main_menu_loop(self):
        # Keeps the program running so that the user can interact
should_keep_loop_running = True
while(should_keep_loop_running):
try:
should_keep_loop_running = self._show_usage_and_accept_user_input()
except:
print "Exception raised\n"
traceback.print_exc(file=sys.stdout)
def _process_single_command(self):
        # Accepts a single command passed when the program is invoked. This is an alternative to a continuous loop of accepting commands and showing output.
try:
actual_command = " ".join(sys.argv[1:])
self._process_command(actual_command)
except:
print "Exception raised while dealing with input command"
self._show_usage()
|
[
"nakul225@gmail.com"
] |
nakul225@gmail.com
|
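The class compares commands against an `Operation` enum, and builds `Goal` and `Step` objects, none of which are imported in this file. A minimal `Operation` consistent with the command strings in `_show_usage()` might look like the following; this is an assumption for readability, not the repo's actual definition:

```python
from enum import Enum

class Operation(Enum):
    # Values match the command strings printed by _show_usage().
    PUT_GOAL = "pg"
    PUT_STEP = "ps"
    GET_GOALS = "gg"
    MARK_STEP_COMPLETE = "msc"
    MARK_STEP_INCOMPLETE = "msi"
    GET_PROGRESS_SUMMARY = "gps"
    EXIT = "exit"
```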
f8d2154649e59afa419b79b4777684cdda82eb5c
|
56b4d00870af18752b4414495b08e2ec3adf3ae4
|
/src/clims/api/endpoints/process_assignments.py
|
c5fd2f83c03d0928f0637275f0b82856ee822b26
|
[
"BSD-2-Clause"
] |
permissive
|
commonlims/commonlims
|
26c3f937eaa18e6935c5d3fcec823053ab7fefd9
|
36a02ed244c7b59ee1f2523e64e4749e404ab0f7
|
refs/heads/develop
| 2021-07-01T17:20:46.586630
| 2021-02-02T08:53:22
| 2021-02-02T08:53:22
| 185,200,241
| 4
| 1
|
NOASSERTION
| 2021-02-02T08:53:23
| 2019-05-06T13:16:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
from __future__ import absolute_import
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from sentry.api.base import Endpoint, SessionAuthentication
class ProcessAssignmentsEndpoint(Endpoint):
authentication_classes = (SessionAuthentication, )
permission_classes = (IsAuthenticated, )
def post(self, request, organization_slug):
"""
        Assign one or more items to a workflow. The items are assigned by global_id.
"""
# TODO-auth: Ensure that the user is only assigning samples that are under the organization
# Entities is a list of global ids (e.g. Substance-100)
entities = request.data["entities"]
definition = request.data["definitionId"]
variables = request.data["variables"]
assignments = list()
assignments += self.app.workflows.batch_assign(
entities, definition, request.user, variables)
return Response({"assignments": len(assignments)}, status=201)
|
[
"costeinar@gmail.com"
] |
costeinar@gmail.com
|
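Based on the keys the handler reads from `request.data`, a client call might look like the sketch below; the host, endpoint path, and ids are placeholders, not taken from the repo:

```python
import requests

resp = requests.post(
    "https://lims.example.com/api/0/organizations/my-org/process-assignments/",
    json={
        "entities": ["Substance-100", "Substance-101"],  # global ids
        "definitionId": "my_workflow_definition",
        "variables": {"priority": "high"},
    },
    # Authentication omitted: the endpoint uses Django session auth.
)
assert resp.status_code == 201
print(resp.json())  # e.g. {"assignments": 2}
```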
1186de1cba914cdcc904a0e0a09520080aa16289
|
46492cc7429c83fe362b0ed566fc54982e52c46e
|
/pitches/main/forms.py
|
bb9c5b6a6c3f20f413c47970a696323c03307838
|
[
"MIT"
] |
permissive
|
jakhax/pitches
|
15c8d87825c879b56cd931d26d398e736636134f
|
e56358d00089bd46addd54192220bcca0478e0da
|
refs/heads/master
| 2020-03-18T00:36:09.254870
| 2018-05-20T14:48:14
| 2018-05-20T14:48:14
| 134,102,974
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,399
|
py
|
from flask import current_app, session
from flask_babel import lazy_gettext
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField, IntegerField
from wtforms import ValidationError
from wtforms.validators import DataRequired, InputRequired, Length, Email, Regexp
from ..models import Role, User, TopicGroup
class FormHelpersMixIn(object):
@property
def submit_fields(self):
return [getattr(self, field) for field, field_type in self._fields.items()
if isinstance(field_type, SubmitField)]
@staticmethod
def is_has_data(*fields):
return any([field.data for field in fields])
def get_flashed_errors(self):
errors = session.pop('_form_errors') if '_form_errors' in session else {}
self.errors.update(errors)
for field, errors in errors.items():
if hasattr(self, field):
form_field = getattr(self, field)
if form_field.errors:
form_field.errors.extend(errors)
else:
form_field.errors = errors
class EditProfileForm(FlaskForm):
name = StringField(lazy_gettext('Real name'), validators=[Length(0, 64)])
homeland = StringField(lazy_gettext('Homeland'), validators=[Length(0, 64)])
about = TextAreaField(lazy_gettext('About me'))
avatar = StringField(lazy_gettext('Link to avatar'), validators=[Length(0, 256)])
submit = SubmitField(lazy_gettext('Save'))
class EditProfileAdminForm(FlaskForm):
email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
username = StringField(lazy_gettext('Username'), validators=[
DataRequired(), Length(1, 32), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, lazy_gettext(
'Usernames must have only letters, numbers, dots or underscores'))])
confirmed = BooleanField(lazy_gettext('Confirmed'))
role = SelectField(lazy_gettext('Role'), coerce=int)
name = StringField(lazy_gettext('Real name'), validators=[Length(0, 64)])
homeland = StringField(lazy_gettext('Homeland'), validators=[Length(0, 64)])
about = TextAreaField(lazy_gettext('About me'))
avatar = StringField(lazy_gettext('Link to avatar'), validators=[Length(0, 256)])
submit = SubmitField(lazy_gettext('Save'))
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if (field.data.lower() != self.user.email
and User.query.filter_by(email=field.data.lower()).first()):
raise ValidationError(lazy_gettext('Email already registered.'))
def validate_username(self, field):
if (field.data.lower() != self.user.username_normalized
and User.query.filter_by(username_normalized=field.data.lower()).first()):
raise ValidationError(lazy_gettext('Username already in use.'))
class TopicForm(FlaskForm):
title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 128)])
group_id = IntegerField(lazy_gettext('Topic group ID'), validators=[InputRequired()])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 20})
poll_question="Rank"
poll_answers="Upvote\n Downvote"
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
def remove_edit_fields(self):
del self.group_id
del self.delete
def validate_group_id(self, field):
if not TopicGroup.query.filter_by(id=field.data).first():
            raise ValidationError(lazy_gettext('Topic group with such ID does not exist.'))
class TopicWithPollForm(FlaskForm):
title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 128)])
group_id = IntegerField(lazy_gettext('Topic group ID'), validators=[InputRequired()])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 20})
poll_question = StringField(lazy_gettext('Poll question'), validators=[DataRequired(), Length(0, 256)])
poll_answers = TextAreaField(lazy_gettext('Poll answers'), validators=[DataRequired()], render_kw={'rows': 10})
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
def remove_edit_fields(self):
del self.group_id
del self.delete
def validate_group_id(self, field):
if not TopicGroup.query.filter_by(id=field.data).first():
            raise ValidationError(lazy_gettext('Topic group with such ID does not exist.'))
class TopicGroupForm(FlaskForm):
title = StringField(lazy_gettext('Title'), validators=[DataRequired(), Length(0, 64)])
group_id = IntegerField(lazy_gettext('Parent topic group ID'), validators=[InputRequired()])
priority = SelectField(lazy_gettext('Priority'), coerce=int)
protected = BooleanField(lazy_gettext('Moderators only'))
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
def __init__(self, *args, **kwargs):
super(TopicGroupForm, self).__init__(*args, **kwargs)
self.priority.choices = [(p, p) for p in current_app.config['TOPIC_GROUP_PRIORITY']]
def remove_edit_fields(self):
del self.group_id
del self.delete
def validate_group_id(self, field):
if not TopicGroup.query.filter_by(id=field.data).first():
            raise ValidationError(lazy_gettext('Topic group with such ID does not exist.'))
class CommentForm(FlaskForm):
body = TextAreaField(lazy_gettext('Leave your comment, {username}:'), validators=[DataRequired()],
render_kw={'rows': 4})
submit = SubmitField(lazy_gettext('Submit'))
def __init__(self, user, *args, **kwargs):
super(CommentForm, self).__init__(*args, **kwargs)
self.body.label.text = self.body.label.text.format(username=user.username)
class CommentEditForm(FlaskForm):
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 8})
submit = SubmitField(lazy_gettext('Save'))
cancel = SubmitField(lazy_gettext('Cancel'))
delete = SubmitField(lazy_gettext('Delete'))
class MessageReplyForm(FlaskForm):
title = StringField(lazy_gettext('Subject'), validators=[DataRequired(), Length(0, 128)])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 4})
send = SubmitField(lazy_gettext('Send'))
close = SubmitField(lazy_gettext('Close'))
delete = SubmitField(lazy_gettext('Delete'))
class MessageSendForm(FlaskForm):
title = StringField(lazy_gettext('Subject'), validators=[DataRequired(), Length(0, 128)])
body = TextAreaField(lazy_gettext('Text'), validators=[DataRequired()], render_kw={'rows': 4})
send = SubmitField(lazy_gettext('Send'))
cancel = SubmitField(lazy_gettext('Cancel'))
class SearchForm(FlaskForm):
text = StringField('', validators=[DataRequired(), Length(1, 64)])
search = SubmitField(lazy_gettext('Search'))
|
[
"jackogina60@gmail.com"
] |
jackogina60@gmail.com
|
db996257ef666016749abab744fca60cc7c79dc3
|
2d2fcc54af513a84bc624589dc7c6a0316848784
|
/microbe/lib/python3.6/hmac.py
|
3433dd988ff9ff0a1ec0203ba62a078b796dcc94
|
[] |
no_license
|
tatyana-perlova/microbe-x
|
9becf3a176e1277a3bb4ffcd96d4b25365038bb8
|
5b364c09dcf43c3ab237c8d9304a4eaa9ecff33f
|
refs/heads/master
| 2022-12-13T18:29:59.372327
| 2018-02-06T19:35:11
| 2018-02-06T19:35:11
| 120,374,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
/home/perlusha/anaconda3/lib/python3.6/hmac.py
|
[
"tatyana.perlova@gmail.com"
] |
tatyana.perlova@gmail.com
|
baae6fae01fff3f6aec29b4e4d2b1d0690ecc8d7
|
41c74240ef78070ee5ad19ece21672e629da6881
|
/elections/migrations/0001_initial.py
|
47ba1d9d4599cd1ce1a4b0c10cf6582b2cf65c5b
|
[] |
no_license
|
NamGungGeon/DjangoStudy
|
33d3f3d66bcc6a9dafa9cbeee10f55b705d1755f
|
7985d384f26538b78414148c485d4a126c199ad0
|
refs/heads/master
| 2021-01-23T20:07:23.033394
| 2017-09-08T11:03:46
| 2017-09-08T11:03:46
| 102,852,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-03 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Candidate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
('introduction', models.TextField()),
('area', models.CharField(max_length=15)),
('party_number', models.IntegerField(default=1)),
],
),
]
|
[
"rndrjs123@naver.com"
] |
rndrjs123@naver.com
|
eef750f84f81a27c35f5f451faf9e9a1b93c1cc4
|
4c117ea3617a576ddd07d8ea8aaab1a925fc402f
|
/bin/individualization/VennPlot.py
|
18f444e66a82a4f9f64427b83e18f591f031b0f6
|
[] |
no_license
|
452990729/Rep-seq
|
7be6058ba3284bea81282f2db7fd3bd7895173ba
|
e217b115791e0aba064b2426e4502a5c1b032a94
|
refs/heads/master
| 2021-12-11T14:27:46.912144
| 2019-06-04T03:49:40
| 2019-06-04T03:49:40
| 190,124,555
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
#!/usr/bin/env python
import os
import sys
import re
import matplotlib
matplotlib.use('Agg')
import venn
from matplotlib import pyplot as plt
def HandleFq(file_in):
base = '_'.join(re.split('_', os.path.basename(file_in))[:2])
list_tmp = []
m = 0
with open(file_in, 'r') as in1:
for line in in1:
m += 1
if m%4 == 2:
list_tmp.append(line.strip())
return set(list_tmp), base
def ReadTab(file_in):
list_tmp = []
label = '_'.join(re.split('_', os.path.basename(file_in))[:2])
with open(file_in, 'r') as in1:
for line in in1.readlines()[1:]:
list_tmp.append(re.split('\t', line.strip())[36])
return set(list_tmp), label
def main():
len_arg = len(sys.argv)
if sys.argv[1] == 'fastq':
func = HandleFq
elif sys.argv[1] == 'tab':
func = ReadTab
list_l = []
list_lb = []
for i in range(len_arg-2):
l, lb = func(sys.argv[i+2])
list_l.append(l)
list_lb.append(lb)
labels = venn.get_labels(list_l, fill=['number',])
if len_arg == 4:
fig, ax = venn.venn2(labels, names=list_lb)
elif len_arg == 5:
fig, ax = venn.venn3(labels, names=list_lb)
elif len_arg == 6:
fig, ax = venn.venn4(labels, names=list_lb)
elif len_arg == 7:
fig, ax = venn.venn5(labels, names=list_lb)
elif len_arg == 8:
fig, ax = venn.venn6(labels, names=list_lb)
plt.savefig('{}wayvenn.png'.format(str(len_arg-2)))
if __name__ == '__main__':
main()
|
[
"452990729@qq.com"
] |
452990729@qq.com
|
9a2b4bd952a3bd412a603232556bd9cad7508e62
|
9638fccea89ece61f7ba1f985f488bf3e8671155
|
/venv/bin/jp.py
|
3187218827e61b01a87d6828b56a3e2045077914
|
[] |
no_license
|
ked66/ResearchNotes
|
7ada6bc14a54dd9c86719f901e090265738642b9
|
c653e02f78bf195dc417394baf0342033a9984e4
|
refs/heads/master
| 2023-03-04T05:00:48.261084
| 2021-02-12T20:15:27
| 2021-02-12T20:15:27
| 316,547,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
#!/Users/katie/PycharmProjects/ResearchNotes/venv/bin/python
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
|
[
"ked66@cornell.edu"
] |
ked66@cornell.edu
|
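The script is essentially a thin CLI over the jmespath library; the same search can be done directly from Python:

```python
import jmespath

data = {"people": [{"name": "ada"}, {"name": "grace"}]}
print(jmespath.search("people[*].name", data))  # ['ada', 'grace']
```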
705c2db27a5d0906938b557caab4e18133150a24
|
19ac1d0131a14ba218fd2c55d585170222eb9400
|
/social_login/wsgi.py
|
9523f947cda705e24cea5e1c828e7fb9ee17044c
|
[] |
no_license
|
oereo/social-login
|
4ed27658c585dd0a24484e628e053070fe012518
|
41e67b889354189c986da45bcf03c20c1f1063e3
|
refs/heads/master
| 2023-01-15T22:38:06.667909
| 2020-11-22T12:12:08
| 2020-11-22T12:12:08
| 303,985,281
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for social_login project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'social_login.settings')
application = get_wsgi_application()
|
[
"dlstpgns0406@gmail.com"
] |
dlstpgns0406@gmail.com
|
230c93a04644bae6fca2f3d207a8e00cba3a24de
|
beae5a43e5bf3d3627d49531e5cc8365c204d15c
|
/contactnetwork/migrations/0002_auto_20180117_1457.py
|
7da9fdc770627bede76a26f59e0e2291f2f612df
|
[
"Apache-2.0"
] |
permissive
|
protwis/protwis
|
e8bbe928a571bc9d7186f62963d49afe1ed286bd
|
75993654db2b36e2a8f67fa38f9c9428ee4b4d90
|
refs/heads/master
| 2023-09-01T18:16:34.015041
| 2023-04-06T11:22:30
| 2023-04-06T11:22:30
| 50,017,823
| 31
| 92
|
Apache-2.0
| 2023-07-28T06:56:59
| 2016-01-20T09:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,796
|
py
|
# Generated by Django 2.0.1 on 2018-01-17 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contactnetwork', '0001_initial'),
('structure', '0001_initial'),
('residue', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='interactingresiduepair',
name='referenced_structure',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Structure'),
),
migrations.AddField(
model_name='interactingresiduepair',
name='res1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='residue1', to='residue.Residue'),
),
migrations.AddField(
model_name='interactingresiduepair',
name='res2',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='residue2', to='residue.Residue'),
),
migrations.CreateModel(
name='FaceToEdgeInteraction',
fields=[
('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
('res1_has_face', models.BooleanField()),
],
options={
'db_table': 'interaction_aromatic_face_edge',
},
bases=('contactnetwork.aromaticinteraction',),
),
migrations.CreateModel(
name='FaceToFaceInteraction',
fields=[
('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
],
options={
'db_table': 'interaction_aromatic_face_face',
},
bases=('contactnetwork.aromaticinteraction',),
),
migrations.CreateModel(
name='PiCationInteraction',
fields=[
('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
('res1_has_pi', models.BooleanField()),
],
options={
'db_table': 'interaction_aromatic_pi_cation',
},
bases=('contactnetwork.aromaticinteraction',),
),
migrations.CreateModel(
name='PolarBackboneSidechainInteraction',
fields=[
('polarinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.PolarInteraction')),
('res1_is_sidechain', models.BooleanField()),
],
options={
'db_table': 'interaction_polar_backbone_sidechain',
},
bases=('contactnetwork.polarinteraction',),
),
migrations.CreateModel(
name='PolarSidechainSidechainInteraction',
fields=[
('polarinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.PolarInteraction')),
],
options={
'db_table': 'interaction_polar_sidechain_sidechain',
},
bases=('contactnetwork.polarinteraction',),
),
]
|
[
"christian@munk.be"
] |
christian@munk.be
|
23206587aae4835dbc893edeaad63d67170d75c3
|
23e877d2e65cdc49cf9a456845470f97194674bc
|
/src/main/resources/http/http_request.py
|
e9a3e1cdc87380b5ff871b18466c069841a84cdd
|
[
"MIT"
] |
permissive
|
xebialabs-community/xld-github-dynamic-dictionaries-plugin
|
77da6a4fea1ca2b96207d77b0396011e088ac850
|
67c3a596f4a7f58f9d0a939bb57091d1f82c51ee
|
refs/heads/master
| 2021-07-13T17:15:15.222551
| 2020-11-02T12:49:14
| 2020-11-02T12:49:14
| 68,606,897
| 2
| 2
|
MIT
| 2021-03-26T22:14:23
| 2016-09-19T13:09:01
|
Python
|
UTF-8
|
Python
| false
| false
| 9,826
|
py
|
#
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import re
import urllib
from java.lang import String
from org.apache.commons.codec.binary import Base64
from org.apache.http import HttpHost
from org.apache.http.client.config import RequestConfig
from org.apache.http.client.methods import HttpGet, HttpPost, HttpPut, HttpDelete
from org.apache.http.util import EntityUtils
from org.apache.http.impl.client import HttpClients
from http.http_response import HttpResponse
class HttpRequest:
def __init__(self, params, username = None, password = None):
"""
Builds an HttpRequest
:param params: an HttpConnection
:param username: the username
(optional, it will override the credentials defined on the HttpConnection object)
        :param password: a password
(optional, it will override the credentials defined on the HttpConnection object)
"""
self.params = params
self.username = username
self.password = password
def do_request(self, **options):
"""
Performs an HTTP Request
:param options: A keyword arguments object with the following properties :
method: the HTTP method : 'GET', 'PUT', 'POST', 'DELETE'
(optional: GET will be used if empty)
context: the context url
(optional: the url on HttpConnection will be used if empty)
            entity: the body of the HTTP request for PUT & POST calls
(optional: an empty body will be used if empty)
contentType: the content type to use
(optional, no content type will be used if empty)
headers: a dictionary of headers key/values
(optional, no headers will be used if empty)
:return: an HttpResponse instance
"""
request = self.build_request(
options.get('method', 'GET'),
options.get('context', ''),
options.get('entity', ''),
options.get('contentType', None),
options.get('headers', None))
return self.execute_request(request)
def do_request_without_headers(self, **options):
"""
Performs an HTTP Request
:param options: A keyword arguments object with the following properties :
method: the HTTP method : 'GET', 'PUT', 'POST', 'DELETE'
(optional: GET will be used if empty)
context: the context url
(optional: the url on HttpConnection will be used if empty)
            entity: the body of the HTTP request for PUT & POST calls
(optional: an empty body will be used if empty)
contentType: the content type to use
(optional, no content type will be used if empty)
headers: a dictionary of headers key/values
(optional, no headers will be used if empty)
:return: an HttpResponse instance
"""
request = self.build_request_without_headers(
options.get('method', 'GET'),
options.get('context', ''),
options.get('entity', ''))
return self.execute_request(request)
def get(self, context, **options):
"""
Performs an Http GET Request
:param context: the context url
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'GET'
options['context'] = context
return self.do_request(**options)
def put(self, context, entity, **options):
"""
Performs an Http PUT Request
:param context: the context url
        :param entity: the body of the HTTP request
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'PUT'
options['context'] = context
options['entity'] = entity
return self.do_request(**options)
def post(self, context, entity, **options):
"""
Performs an Http POST Request
:param context: the context url
        :param entity: the body of the HTTP request
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'POST'
options['context'] = context
options['entity'] = entity
return self.do_request(**options)
def post_without_headers(self, context, entity, **options):
"""
Performs an Http POST Request
:param context: the context url
        :param entity: the body of the HTTP request
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'POST'
options['context'] = context
options['entity'] = entity
return self.do_request_without_headers(**options)
def delete(self, context, **options):
"""
Performs an Http DELETE Request
:param context: the context url
:param options: the options keyword argument described in do_request()
:return: an HttpResponse instance
"""
options['method'] = 'DELETE'
options['context'] = context
return self.do_request(**options)
def build_request(self, method, context, entity, contentType, headers):
url = self.quote(self.create_path(self.params.getUrl(), context))
method = method.upper()
if method == 'GET':
request = HttpGet(url)
elif method == 'POST':
request = HttpPost(url)
request.setEntity(entity)
elif method == 'PUT':
request = HttpPut(url)
request.setEntity(entity)
elif method == 'DELETE':
request = HttpDelete(url)
else:
raise Exception('Unsupported method: ' + method)
        if contentType:
            request.addHeader('Content-Type', contentType)
            request.addHeader('Accept', contentType)
self.set_credentials(request)
self.set_proxy(request)
self.setHeaders(request, headers)
return request
def build_request_without_headers(self, method, context, entity):
url = self.quote(self.create_path(self.params.getUrl(), context))
method = method.upper()
if method == 'GET':
request = HttpGet(url)
elif method == 'POST':
request = HttpPost(url)
request.setEntity(entity)
elif method == 'PUT':
request = HttpPut(url)
request.setEntity(entity)
elif method == 'DELETE':
request = HttpDelete(url)
else:
raise Exception('Unsupported method: ' + method)
self.set_credentials(request)
self.set_proxy(request)
return request
def create_path(self, url, context):
url = re.sub('/*$', '', url)
if context is None:
return url
elif context.startswith('/'):
return url + context
else:
return url + '/' + context
def quote(self, url):
return urllib.quote(url, ':/?&=%')
def set_credentials(self, request):
if self.username:
username = self.username
password = self.password
elif self.params.getUsername():
username = self.params.getUsername()
password = self.params.getPassword()
else:
return
encoding = Base64.encodeBase64String(String(username + ':' + password).getBytes())
request.addHeader('Authorization', 'Basic ' + encoding)
def set_proxy(self, request):
if not self.params.getProxyHost():
return
proxy = HttpHost(self.params.getProxyHost(), int(self.params.getProxyPort()))
config = RequestConfig.custom().setProxy(proxy).build()
request.setConfig(config)
def setHeaders(self, request, headers):
if headers:
for key in headers:
request.setHeader(key, headers[key])
def execute_request(self, request):
client = None
response = None
try:
client = HttpClients.createDefault()
response = client.execute(request)
status = response.getStatusLine().getStatusCode()
entity = response.getEntity()
result = EntityUtils.toString(entity, "UTF-8") if entity else None
headers = response.getAllHeaders()
EntityUtils.consume(entity)
return HttpResponse(status, result, headers)
finally:
if response:
response.close()
if client:
client.close()
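# Usage sketch (runs under Jython with the Apache HttpClient jars on the
# classpath; 'connection' is a stand-in for any HttpConnection-like object
# exposing getUrl()/getUsername()/getPassword()/getProxyHost()/getProxyPort()):
#
#   request = HttpRequest(connection)
#   response = request.get('/api/status', contentType='application/json')
#
# For PUT/POST, pass an org.apache.http HttpEntity (e.g. a StringEntity),
# since build_request() hands it straight to request.setEntity().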
|
[
"bmoussaud@xebialabs.com"
] |
bmoussaud@xebialabs.com
|
a29347fa5a55f754c48ba25f7b9c8c93f00f8db4
|
a45e45b5b3b706f369f586e7b03c5972eb21b926
|
/pythonsyntax/any7.py
|
61226e6cc4351b5b432ec75fe82928ac0cf3f5e7
|
[] |
no_license
|
khagerman/Python-Practice
|
44882bbcf876ab06536da0d4ec0e1a5d9b2bf10d
|
982dc7595691f32a6da6ef8fb918ec9dfdfdfd93
|
refs/heads/main
| 2023-03-27T15:27:51.889132
| 2021-03-31T21:38:08
| 2021-03-31T21:38:08
| 350,499,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def any7(nums):
"""Are any of these numbers a 7? (True/False)"""
# YOUR CODE HERE
for num in nums:
if num == 7:
return True
return False
print("should be true", any7([1, 2, 7, 4, 5]))
print("should be false", any7([1, 2, 4, 5]))
|
[
"71734063+khagerman@users.noreply.github.com"
] |
71734063+khagerman@users.noreply.github.com
|
c423950c678b966d72c428c4dadd7d1045308bbb
|
c536c764aab4170c64f3f8b78bd91593dcb161a3
|
/vigenereb62.py
|
037292215097560084e9451db9c5655b7c2fb996
|
[] |
no_license
|
numberly/vigenere-b62
|
63bbc95c1f9390e9623a5882a9c2a14d110851b4
|
3dea3394ee557ba2e589af014cbc4454ebbbc874
|
refs/heads/master
| 2023-02-16T02:13:31.254670
| 2021-01-11T15:24:58
| 2021-01-11T15:24:58
| 328,698,862
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
def iter_reverse_digits(number, base):
while number != 0:
digit = number % base
yield digit
number -= digit
number //= base
def encode(alphabets, seed, size=6):
if len(alphabets) < size:
raise ValueError("There should be an alphabet per character you want")
secret = "".join(
alphabets[i][digit]
for i, digit in enumerate(iter_reverse_digits(seed, len(alphabets[0])))
)
secret += "".join(alphabets[i][0] for i in range(len(secret), size))
return secret
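# Minimal usage sketch; the base-62 alphabet below is illustrative, not the
# project's real configuration.
if __name__ == "__main__":
    import string
    b62 = string.digits + string.ascii_letters  # 62 symbols per alphabet
    alphabets = [b62] * 6                       # one alphabet per output char
    print(encode(alphabets, 12345))             # deterministic 6-char secret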
|
[
"julien@thebault.co"
] |
julien@thebault.co
|
454eb93dccb38e7fc7963f055b5cbdc78b1c6663
|
95544e6ac0847dd7b21e6ec180d31a1bc5dedaed
|
/H4/TypeX/WatchingApp(TYPEX)-H4/env.py
|
37bb28626a2cdaf685df1d5954a74e8c1c6c963b
|
[] |
no_license
|
meanJustin/Real-Time-Trade-Watching-app
|
f965731da8e01bb81b8517c2080506246073c78c
|
867dde7a5a8ad2f67c2a19b46b15283720884678
|
refs/heads/main
| 2023-01-04T13:27:54.179141
| 2020-10-23T02:05:30
| 2020-10-23T02:05:30
| 306,504,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
#assign the source filepath
FILEPATH = "C:\\Users\\VMAK CAPITAL\\AppData\\Roaming\\MetaQuotes\\Terminal\\CEA95A93FC8D185DD2235895C53A5FFF\\MQL4\\Files\\"
#FILEPATH = ".\\Assets\\"
#assign the mastersheet filepath
MASTERFILEPATH = ".\\Assets\\"
#the unit is seconds
CYCLE_TIME = 3600 * 4
#the period of every index check time
CHECK_INDEX_DURATION_TIME = 3600
#the period of refresh or should I say update?
REFRESH_TIME = 5
#TYPEXMASTERSHEET.xlsx file name
TYPEX_MASTER = "MasterTypeXSheet.xlsx"
#output file names
#TYPEX PRINT
TYPEX_PRINT = "TypeX.csv"
|
[
"69616732+meanJustin@users.noreply.github.com"
] |
69616732+meanJustin@users.noreply.github.com
|
5cd5782af0c7af2c6f90c48001a91cd1e255da08
|
c9e95974e3f3320f2da36ba23403d46e00ac884d
|
/projects/mars/model_classes/MarsSurface.py
|
00b902223fb2354c3cade7b796303c0febaf41b3
|
[
"MIT"
] |
permissive
|
ModelFlow/modelflow
|
877ff8d80ab2987b0572bebcf3753ae0942a5ae2
|
c2b720b2da8bb17462baff5c00bbe942644474b0
|
refs/heads/master
| 2023-07-12T17:22:49.540043
| 2021-08-26T03:51:26
| 2021-08-26T03:51:26
| 280,748,869
| 8
| 0
|
MIT
| 2021-08-18T19:48:57
| 2020-07-18T22:17:52
|
Python
|
UTF-8
|
Python
| false
| false
| 729
|
py
|
class MarsSurface:
name = "Mars Surface"
params = []
states = [
{
"key": "atmospheric_co2",
"label": "Atmospheric CO2",
"units": "kg",
"private": False,
"value": 999999999,
"confidence": 0,
"notes": "",
"source": "fake"
},
{
"key": "temperature",
"label": "Temperature",
"units": "c",
"private": False,
"value": 0,
"confidence": 0,
"notes": "",
"source": "fake"
}
]
@staticmethod
def run_step(states, params, utils):
# TODO: Have temperature change with time
pass
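# A sketch of what the TODO above might become (the 0.1-degree drift per step
# is purely illustrative, not a ModelFlow convention or a physical value):
#
#   @staticmethod
#   def run_step(states, params, utils):
#       states["temperature"] += 0.1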
|
[
"1890491+adamraudonis@users.noreply.github.com"
] |
1890491+adamraudonis@users.noreply.github.com
|
8339c4b6670fe18b61771e18903739838373f58c
|
01ce2eec1fbad3fb2d98085ebfa9f27c7efb4152
|
/itertools/itertools-combinations.py
|
b32166fe2a76aece52bb636b0b8705a63f17c3ce
|
[
"MIT"
] |
permissive
|
anishLearnsToCode/hackerrank-python
|
4cfeaf85e33f05342df887896fa60dae5cc600a5
|
7d707c07af051e7b00471ebe547effd7e1d6d9d9
|
refs/heads/master
| 2023-01-01T23:39:01.143328
| 2020-11-01T07:31:15
| 2020-11-01T07:31:15
| 265,767,347
| 8
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# https://www.hackerrank.com/challenges/itertools-combinations/problem
import itertools
line = input().split()
word = sorted(line[0])
k = int(line[1])
for i in range(1, k + 1):
for j in itertools.combinations(word, i):
print(''.join(j))
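# Sample run: for input "HACK 2" the sorted word is "ACHK" and the loop
# prints one combination per line:
#   A, C, H, K              (size-1 combinations)
#   AC, AH, AK, CH, CK, HK  (size-2 combinations)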
|
[
"anish_bt2k16@dtu.ac.in"
] |
anish_bt2k16@dtu.ac.in
|
b561d9b1c21f08c5647bd599c14beb24eee2dc86
|
e3d06e2f11e5afc623ffbd59143fa8b3dbd8f1f7
|
/DCGAN_train.py
|
056a21a18c7d859cb7c96536dd32ba00f620ae1e
|
[] |
no_license
|
yangpeiwen/implementation
|
931f6f1d8d475affcb95b6fd0baacfc0ec1325f5
|
a2bf3e1de98a78173f73e003bd888de9cd4a77e9
|
refs/heads/master
| 2020-04-29T15:18:00.296864
| 2019-03-27T12:47:09
| 2019-03-27T12:47:09
| 176,223,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# The discriminator network uses LeakyReLU as its activation function
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from network_construction import DCGAN
# Load the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# The network's two inputs: random noise for the generator and real images for the discriminator
noise_input = tf.placeholder(tf.float32, shape=[None, DCGAN.noise_dim])
real_image_input = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
# The is_training flag required by batch normalization
# is_training is needed both here and by functions inside the DCGAN module, so reuse the placeholder created in DCGAN
is_training = DCGAN.is_training
# Instantiate the training graph; the returned gen_vars and disc_vars are not used yet
train_gen,train_disc,gen_loss,disc_loss,gen_vars,disc_vars = DCGAN.DCGAN_train(noise_input,real_image_input)
# Start training
init = tf.global_variables_initializer()
sess = tf.Session()
# The training and network parameters below also exist in the DCGAN module; e.g. use num_steps directly, or delete these and reference DCGAN.num_steps
# Training Params
num_steps = 10000
batch_size = 128
lr_generator = 0.002
lr_discriminator = 0.002
# Network Params
image_dim = 784 # 28*28 pixels * 1 channel
noise_dim = 100 # Noise data points
# Initialize variables and create a Saver object for checkpointing
sess.run(init)
saver = tf.train.Saver()
model_path = "/tmp/DCGAN_model.ckpt"
for i in range(1, DCGAN.num_steps+1):
batch_x, _ = mnist.train.next_batch(DCGAN.batch_size)
batch_x = np.reshape(batch_x, newshape=[-1, 28, 28, 1])
batch_x = batch_x * 2. - 1.
    # Train the discriminator
z = np.random.uniform(-1., 1., size=[DCGAN.batch_size, DCGAN.noise_dim])
_, dl = sess.run([train_disc, disc_loss], feed_dict={real_image_input: batch_x, noise_input: z, is_training:True})
    # Train the generator
z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
_, gl = sess.run([train_gen, gen_loss], feed_dict={noise_input: z, is_training:True})
if i % 500 == 0 or i == 1:
print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))
save_path = saver.save(sess,model_path)
print("Model saved in file: %s" % save_path)
|
[
"yangpeiwen"
] |
yangpeiwen
|
ac0eac50d356d658ba3b95fa27707c44039e1d5d
|
a96b98aaec11160c0b9c5f3cee3471c2f50e8c1d
|
/flask_backend/question-classification.py
|
120ba1ab70086e7ddb7908cd6d156d938cf2b7b6
|
[] |
no_license
|
duvsr01/NLP-based-QA-System-for-custom-KG
|
ea486c5cdede0ef6a4882b3490e15b9be6e4ce97
|
ae7af74b21079b1cc441676064e9aa387d8177a2
|
refs/heads/main
| 2023-04-30T21:52:52.736928
| 2021-05-15T00:26:40
| 2021-05-15T00:26:40
| 305,169,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
import pickle
# Training data
# X is the sample sentences
X = [
'How many courses are taught by Dan Harkey?',
'What is number of faculty in SJSU?',
'How many machine learning courses are on Coursera?',
'How many students are in the world?',
'What is the email of Ram Shyam?',
'What is the email address of Albert Einstein?',
'What is the deadline to pay Fall 2021 Tuition Fee?',
'What are office hours of Vinodh Gopinath?',
'How many courses are offered by University of Hogwarts?',
'How to pay tuition fees?',
'Phone number of Mr Sam Igloo?',
'How can I get a bus pass?'
]
# y is the intent class corresponding to sentences in X
y = [
'aggregation_question',
'aggregation_question',
'aggregation_question',
'aggregation_question',
'factoid_question',
'factoid_question',
'factoid_question',
'factoid_question',
'aggregation_question',
'factoid_question',
'factoid_question',
'factoid_question'
]
# Define the classifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
clf = Pipeline(
[
('tfidf', TfidfVectorizer()),
('sgd', SGDClassifier())
]
)
## Train the classifier
#clf.fit(X, y)
# Test your classifier
## New sentences (that weren't in X and your model never seen before)
new_sentences = [
'What is number of students that study in CMPE department?',
'How can I reach CMPE department?',
'How to apply for graduation?',
'How many faulty in CS department?',
'Number of students CS department?',
'What is the address of CS department?'
]
#predicted_intents = clf.predict(new_sentences)
filename = 'finalized_model.sav'
#pickle.dump(clf, open(filename, 'wb'))
loaded_model = pickle.load(open(filename, 'rb'))
predicted_intents = loaded_model.predict(new_sentences)
print(predicted_intents)
|
[
"vijendersingh.aswal@sjsu.edu"
] |
vijendersingh.aswal@sjsu.edu
|
aecd6191686bd841066715f69f2dbd3ae327fd10
|
6c55174a3ecfc0757ed04700ea4c549e6b9c45d2
|
/lib/koala/utils/mail.py
|
7b1dd4c0909e722872e3f53c3cf673b8b6b516a3
|
[] |
no_license
|
adefelicibus/koala-server
|
ce7cbc584b0775482b60e2eb72794104f2fe0cf3
|
defec28c30a9fc4df2b81efeb8df4fc727768540
|
refs/heads/master
| 2020-05-25T15:43:43.772302
| 2016-04-26T02:08:56
| 2016-04-26T02:08:56
| 38,928,778
| 2
| 3
| null | 2016-02-23T20:37:11
| 2015-07-11T14:38:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from email.Utils import formatdate
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import datetime
import os
from koala.utils import show_error_message
# TODO: take the smtp configuration from galaxy's config.ini file
# TODO: review exception rules
def get_message_email(tool_name):
try:
now = datetime.datetime.now()
tupla = now.timetuple()
data = str(
tupla[2]) + '/' + str(tupla[1]) + '/' + \
str(tupla[0]) + ' ' + str(tupla[3]) + ':' + str(tupla[4]) + ':' + str(tupla[5])
tool_name = tool_name.replace('_', ' ')
messageEmail = '''Hi,
    Your simulation has been concluded at ''' + data + '''.
You have to go to your History and download it.
Best Regards.
%s''' % tool_name
return messageEmail
except Exception, e:
show_error_message("Error while getMessageEmail email!\n%s" % e)
def send_email(de, para, assunto, mensagem, arquivos, servidor):
try:
        # Create the message object
msg = MIMEMultipart()
        # Set the header fields
msg['From'] = de
msg['To'] = para
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = assunto
        # Attach the message body
msg.attach(MIMEText(mensagem))
        # Attach the files
for arquivo in arquivos:
parte = MIMEBase('application', 'octet-stream')
parte.set_payload(open(arquivo, 'rb').read())
encoders.encode_base64(parte)
parte.add_header(
'Content-Disposition', 'attachment; filename="%s"' % os.path.basename(arquivo)
)
msg.attach(parte)
        # Connect to the SMTP server
smtp = smtplib.SMTP(servidor, 587)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
        # Log in to the server
smtp.login('adefelicibus@gmail.com', 'mami1752@')
try:
            # Send the e-mail
smtp.sendmail(de, para, msg.as_string())
finally:
            # Disconnect from the server
smtp.close()
except Exception, e:
show_error_message("Error when SendEmail:\n%s" % e)
|
[
"adefelicibus@gmail.com"
] |
adefelicibus@gmail.com
|
1bff440e67a7189665b42fe0833a0c9b007950e7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_defenders.py
|
bb7548df4efbbe4fec4aeb39f3eec118e52a2ba7
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.nouns._defender import _DEFENDER
#class header
class _DEFENDERS(_DEFENDER, ):
def __init__(self,):
_DEFENDER.__init__(self)
self.name = "DEFENDERS"
self.specie = 'nouns'
self.basic = "defender"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
15d614e5ec83637c824c55ec0c2d7c4291482954
|
55877a854a6325b0ba8265645b94184f56839480
|
/spider/settings.py
|
e54a1a60ae1f076b59f6850ee210e7d072d32e79
|
[] |
no_license
|
xiaowuwuwuwuwu/scrapy_pager_frame
|
cc48cee4daaa655d78be336678ed18aa6e9037ca
|
bc3d9bd26b842fe66dba98ca3982ffd2fa1b8d39
|
refs/heads/master
| 2020-09-22T15:44:49.016852
| 2019-12-02T02:17:38
| 2019-12-02T02:17:38
| 225,263,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,803
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'spider'
SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
# Do not obey robots.txt when crawling
ROBOTSTXT_OBEY = False
# Redis
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
#SCHEDULER_IDLE_BEFORE_CLOSE = 10
#REDIS_HOST = 'localhost'
#REDIS_PORT = 6379
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# Number of concurrent requests
#CONCURRENT_REQUESTS = 100
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = True
TELNETCONSOLE_HOST = '127.0.0.1'
TELNETCONSOLE_PORT = '6023'
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Middlewares
# KEY = middleware class; VALUE = middleware order
SPIDER_MIDDLEWARES = {
#'spider.middlewares.TutorialSpiderMiddleware': 543,
#'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 531,
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': 500,
# 'tutorial.openextension.SpiderOpenCloseLogging': 501
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Execution priority
# KEY = pipeline class; VALUE = pipeline order
ITEM_PIPELINES = {
'spider.pipelines.SpiderPipeline': 300,
#'scrapy_redis.pipelines.RedisPipeline': 301
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# Enable throttled crawl intervals
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
# Initial delay of 3 seconds
#AUTOTHROTTLE_START_DELAY = 3
# The maximum download delay to be set in case of high latencies
# Maximum delay of 20 seconds when a request gets no response
#AUTOTHROTTLE_MAX_DELAY = 20
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# Debug info while crawling
#AUTOTHROTTLE_DEBUG = True
DOWNLOAD_DELAY = 5
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Test extension
#MYEXT_ENABLED = True
################ Logging ################
# Enable logging
#LOG_ENABLED = True
# Log file location
#LOG_FILE = "path/to/logfile"
# Log encoding
#LOG_ENCODING = "utf-8"
# Log level
#LOG_LEVEL = "DEBUG"
# Log standard output
#LOG_STDOUT = False
# Enable cookie tracking
COOKIES_ENABLED = True
COOKIES_DEBUG = True
##################Web###############
# Enable the web service
WEBSERVICE_ENABLED = True
# Log file location
WEBSERVICE_LOGFILE = "path/to/logfile"
# Port
WEBSERVICE_PORT = [6080, 7030]
# Host
WEBSERVICE_HOST = '127.0.0.1'
################# AutoThrottle ###############
# Enable the AutoThrottle extension
#AUTOTHROTTLE_ENABLED = True
# Initial download delay (in seconds)
#AUTOTHROTTLE_START_DELAY = 1.0
# Maximum download delay under high latency (in seconds)
#AUTOTHROTTLE_MAX_DELAY = 60.0
# Enable AutoThrottle debug mode to show every received response
#AUTOTHROTTLE_DEBUG = True
#DOWNLOAD_DELAY = 1.0
|
[
"1059174412@qq.com"
] |
1059174412@qq.com
|
83a5e8277279567beb43b9117f28f6b87142acf6
|
9f1a165798a13b4fd24b94d23eb137a6763a1bed
|
/tickets/migrations/0001_initial_squashed_0006_auto_20200610_1403.py
|
36b5098cc5ffd8d4476caa47cb08b33bf448b406
|
[
"MIT"
] |
permissive
|
AdamCottrill/ticket_tracker
|
42455ed9e4b0439df08694b0f73713163aace68a
|
72fad3cf9c0e7f44ca62650a2338a5ac7696bcbf
|
refs/heads/master
| 2023-03-04T11:15:55.097923
| 2022-08-25T20:10:46
| 2022-08-25T20:10:46
| 198,422,697
| 1
| 3
|
MIT
| 2023-02-15T18:25:54
| 2019-07-23T12:07:00
|
Python
|
UTF-8
|
Python
| false
| false
| 9,882
|
py
|
# Generated by Django 2.2.13 on 2020-06-10 18:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import taggit.managers
class Migration(migrations.Migration):
replaces = [
("tickets", "0001_initial"),
("tickets", "0002_auto_20190209_2214"),
("tickets", "0003_auto_20190210_1052"),
("tickets", "0004_auto_20190210_1942"),
("tickets", "0005_auto_20190723_1134"),
("tickets", "0006_auto_20200610_1403"),
]
dependencies = [
("taggit", "0003_taggeditem_add_unique_index"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Application",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("application", models.CharField(max_length=20)),
("slug", models.SlugField(editable=False, unique=True)),
],
),
migrations.CreateModel(
name="Ticket",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("active", models.BooleanField(default=True)),
(
"status",
models.CharField(
choices=[
("new", "New"),
("accepted", "Accepted"),
("assigned", "Assigned"),
("re-opened", "Re-Opened"),
("closed", "Closed"),
("duplicate", "Closed - Duplicate"),
("split", "Closed - Split"),
],
db_index=True,
default=True,
max_length=20,
),
),
(
"ticket_type",
models.CharField(
choices=[
("feature", "Feature Request"),
("bug", "Bug Report"),
("task", "Task"),
],
db_index=True,
default=True,
max_length=10,
),
),
("title", models.CharField(max_length=80)),
("description", models.TextField()),
("description_html", models.TextField(blank=True, editable=False)),
(
"priority",
models.IntegerField(
choices=[
(1, "Critical"),
(2, "High"),
(3, "Normal"),
(4, "Low"),
(5, "Very Low"),
],
db_index=True,
),
),
(
"created_on",
models.DateTimeField(
auto_now_add=True, verbose_name="date created"
),
),
(
"updated_on",
models.DateTimeField(auto_now=True, verbose_name="date updated"),
),
("votes", models.IntegerField(default=0)),
(
"application",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="tickets.Application",
),
),
(
"assigned_to",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="assigned_tickets",
to=settings.AUTH_USER_MODEL,
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="tickets.Ticket",
),
),
(
"submitted_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="submitted_tickets",
to=settings.AUTH_USER_MODEL,
),
),
(
"tags",
taggit.managers.TaggableManager(
blank=True,
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
],
options={
"ordering": ["-created_on"],
},
managers=[
("all_tickets", django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name="TicketDuplicate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"original",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="original",
to="tickets.Ticket",
),
),
(
"ticket",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="duplicate",
to="tickets.Ticket",
),
),
],
),
migrations.CreateModel(
name="UserVoteLog",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"ticket",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket"
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="FollowUp",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_on",
models.DateTimeField(
auto_now_add=True, verbose_name="date created"
),
),
("comment", models.TextField()),
("comment_html", models.TextField(blank=True, editable=False)),
(
"action",
models.CharField(
choices=[
("no_action", "No Action"),
("closed", "Closed"),
("re-opened", "Re-Opened"),
("split", "Split"),
],
db_index=True,
default="no_action",
max_length=20,
),
),
("private", models.BooleanField(default=False)),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="tickets.FollowUp",
),
),
(
"submitted_by",
models.ForeignKey(
default=1,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"ticket",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket"
),
),
],
),
]
|
[
"adam.cottrill@ontario.ca"
] |
adam.cottrill@ontario.ca
|
e4603076015ad9b338c87de21b02807faa509853
|
91948d5be26636f1f2b941cb933701ea626a695b
|
/amazon_longest_substring_with_no_repeat.py
|
30208e55e14fb6ba9b3eabe03ddda30851bc6a3b
|
[
"MIT"
] |
permissive
|
loghmanb/daily-coding-problem
|
4ae7dd201fde5ee1601e0acae9e9fc468dcd75c9
|
b2055dded4276611e0e7f1eb088e0027f603aa7b
|
refs/heads/master
| 2023-08-14T05:53:12.678760
| 2023-08-05T18:12:38
| 2023-08-05T18:12:38
| 212,894,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
'''
Longest Substring Without Repeat
Asked in: Amazon
https://www.interviewbit.com/problems/longest-substring-without-repeat/
Given a string,
find the length of the longest substring without repeating characters.
Example:
The longest substring without repeating letters for "abcabcbb" is "abc", whose length is 3.
For "bbbbb" the longest substring is "b", with the length of 1.
'''
# @param A : string
# @return an integer
def lengthOfLongestSubstring(A):
if not A: return 0
result = 0
    letters = set()  # characters inside the current sliding window A[i:j]
    N = len(A)
    i = j = 0
    # Classic sliding window: advance j while characters are unique and
    # shrink from i whenever A[j] is already in the window.
    while i<N and j<N:
if A[j] in letters:
letters.remove(A[i])
i += 1
else:
letters.add(A[j])
j += 1
result = max(result, j-i)
return result
if __name__ == "__main__":
data = [
['abcabcbb', 3],
['Wnb9z9dMc7E8v1RTUaZPoDNIAXRlzkqLaa97KMWLzbitaCkRpiE4J4hJWhRcGnC8H6mwasgDfZ76VKdXhvEYmYrZY4Cfmf4HoSlchYWFEb1xllGKyEEmZOLPh1V6RuM7Mxd7xK72aNrWS4MEaUmgEn7L4rW3o14Nq9l2EN4HH6uJWljI8a5irvuODHY7A7ku4PJY2anSWnfJJE1w8p12Ks3oZRxAF3atqGBlzVQ0gltOwYmeynttUmQ4QBDLDrS4zn4VRZLosOITo4JlIqPD6t4NjhHThOjJxpMp9fICkrgJeGiDAwsb8a3I7Txz5BBKV9bEfMsKNhCuY3W0ZHqY0MhBfz1CbYCzwZZdM4p65ppP9s5QJcfjadmMMi26JKz0TVVwvNA8LP5Vi1QsxId4SI19jfcUH97wmZu0pbw1zFtyJ8GAp5yjjQTzFIboC1iRzklnOJzJld9TMaxqvBNBJKIyDjWrdfLOY8FGMOcPhfJ97Dph35zfxYyUf4DIqFi94lm9J0skYqGz9JT0kiAABQZDazZcNi80dSSdveSl6h3dJjHmlK8qHIlDsqFd5FMhlEirax8WA0v3NDPT8vPhwKpxcnVeu14Gcxr3h1wAXXV0y7Xy9qqB2NQ5HQLJ7cyXAckEYHsLCPSy28xcdNJatx1KLWohOQado4WywJbGvsFR17rKmvOPABweXnFD3odrbSMD4Na4nuBBswvMmFRTUOcf7jZi4z5JnJqXz6hitaPnaEtjoSEBq82a52nvqYy7hhldBoxen2et2OMadVEHeTYLL7GLsIhTP6UizHIuzcJMljo4lFgW5AyrfUlIBPAlhwaSiJtTvcbVZynDSM6RO1PqFKWKg2MHIgNhjuzENg2oFCfW7z5KJvEL9qWqKzZNc0o3BMRjS04NCHFvhtsteQoQRgz84XZBHBJRdekCdcVVXu9c01gYRAz7oIAxN3zKZb64EFKssfQ4HW971jv3H7x5E9dAszA0HrKTONyZDGYtHWt4QLhNsIs8mo4AIN7ecFKewyvGECAnaJpDn1MTTS4yTgZnm6N6qnmfjVt6ZU51F9BxH0jVG0kovTGSjTUkmb1mRTLQE5mTlVHcEz3yBOh4WiFFJjKJdi1HBIBaDL4r45HzaBvmYJPlWIomkqKEmQ4rLAbYG7C5rFfpMu8rHvjU7hP0JVvteGtaGn7mqeKsn7CgrJX1tb8t0ldaS3iUy8SEKAo5IZHNKOfEaij3nI4oRVzeVOZsH91pMsA4jRYgEohubPW8ciXwVrFi1qEWjvB8gfalyP60n1fHyjsiLW0T5uY1JzQWHKCbLVh7QFoJFAEV0L516XmzIo556yRH1vhPnceOCjebqgsmO78AQ8Ir2d4pHFFHAGB9lESn3OtJye1Lcyq9D6X93UakA3JKVKEt6JZDLVBMp4msOefkPKSw59Uix9d9kOQm8WCepJTangdNSOKaxblZDNJ5eHvEroYacBhd9UdafEitdF3nfStF7AhkSfQVC61YWWkKTNdx96OoJGTnxuqt4oFZNFtO7aMuN3IJAkw3m3kgZFRGyd3D3wweagNL9XlYtvZwejbjpkDOZz33C0jbEWaMEaUPw6BG49XqyQoUwtriguO0yvWyaJqD4ye3o0E46huKYAsdKAq6MLWMxF6tfyPVaoqOGd0eOBHbAF89XXmDd4AIkoFPXkAOW8hln5nXnIWP6RBbfEkPPbxoToMbV', 27]
]
for d in data:
print('input', d[0], lengthOfLongestSubstring(d[0]))
|
[
"loghmanb@gmail.com"
] |
loghmanb@gmail.com
|
22dbc2be582ff1eae04ea4b6343fb46b0511f014
|
20552c79d92593ab8c574a61ac0dcbd25aa09e2e
|
/Account/models.py
|
825a6a29f4baa7fb9b2f27a207b867a72a95be82
|
[] |
no_license
|
junaidgirkar/Unicode_REST-API
|
85580f2c85148c1b11ee2fffaae8d8b40aa91def
|
d9f812f867aabec7df9458511dfb03e7794d7de7
|
refs/heads/master
| 2023-01-04T11:27:17.964846
| 2020-10-29T13:21:04
| 2020-10-29T13:21:04
| 297,365,250
| 1
| 2
| null | 2020-10-29T13:21:06
| 2020-09-21T14:29:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils.translation import ugettext_lazy as _
from .managers import UserManager, StudentManager, TeacherManager
# Create your models here.
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email address'), unique = True)
first_name = models.CharField(_('first_name'), max_length = 40)
last_name = models.CharField(_('last name'), max_length = 40)
date_joined = models.DateTimeField(_('date joined'), auto_now_add = True)
is_active = models.BooleanField(_('active'), default = True)
is_staff = models.BooleanField(_('staff status'), default=False)
is_superuser = models.BooleanField(_('is superuser'), default = False)
is_admin = models.BooleanField(_('is admin'), default=False)
is_student = models.BooleanField(_('is student'), default = False)
is_teacher = models.BooleanField(_('is teacher'), default = False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_short_name(self):
return self.first_name
def get_full_name(self):
return self.first_name + "_" + self.last_name
def save(self, *args, **kwargs):
self.username = self.email
super(User, self).save(*args, **kwargs)
def __str__(self):
return self.email
class Student(User):
    user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)
    # NB: these class-level assignments modify the field object, not saved
    # rows; the is_student/is_teacher flags still need to be set per instance.
    user.is_student = True
    user.is_teacher = False
branch = models.CharField(max_length=40)
sap_id = models.CharField(max_length=12, default=0, blank=True)
objects = StudentManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return self.user.email
class Teacher(User):
user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)
user.is_student = False
user.is_teacher = True
subject = models.CharField(max_length=40)
objects = TeacherManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return self.user.email
|
[
"60307291+junaidgirkar@users.noreply.github.com"
] |
60307291+junaidgirkar@users.noreply.github.com
|
4dbac7a2a1cb6e13f4d8d326dca4790eaae5658c
|
2715a573e2faf4d52af2578c40e4fd3cbac80c05
|
/analysis/spectrum.py
|
9134c26ebdcf3d665cf13ef2876cc2d3e022a42b
|
[] |
no_license
|
legend-exp/CAGE
|
9a67d945727831c3b084e177db3a2ff28e4599b1
|
71dfd9f27b6125853e2d3e09d07db7836bf10348
|
refs/heads/master
| 2023-08-03T21:45:57.955025
| 2023-08-03T20:18:33
| 2023-08-03T20:18:33
| 198,919,238
| 0
| 15
| null | 2022-07-01T17:00:15
| 2019-07-26T00:35:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
import sys, h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pygama.io.io_base as io
def main():
filename = '/Users/gothman/Data/CAGE/pygama_dsp/dsp_run42.lh5'
plot_spectrum(filename)
def plot_spectrum(filename):
lh5 = io.LH5Store()
df = lh5.read_object('data', filename).get_dataframe()
df['trapE'].plot.hist(bins=1000)
plt.show()
if __name__ == '__main__':
main()
|
[
"gulden.othman@gmail.com"
] |
gulden.othman@gmail.com
|
5722c5bd79ba59802f5e4174de590823f9b31f54
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631989306621952_1/Python/Hotshot8325/Q2.py
|
c61b1a46284a8ff8a0e7daff7477923bbd7b7f0f
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
#CodeJam pancake problem
import csv
import string
#import data from test file in the form [[[],[]],[[],[]].... with [[],[]] being one test case
with open('a-large.in') as csvfile:
testCase = csv.reader(csvfile, delimiter = ' ', quotechar='|')
rowNum = 0
inputText = []
#swapCount = []
for row in testCase:
#row = [str(i) for i in row]
if rowNum == 0:
numTestCases = int(row[0])
else:
inputText.append(row)
rowNum = rowNum + 1
for i in range(0,numTestCases):
letterInput = inputText[i][0]
lastWord = letterInput[0]
for j in range(1,len(letterInput)):
if string.uppercase.index(letterInput[j])>=string.uppercase.index(lastWord[0]):
lastWord = letterInput[j]+lastWord
else:
lastWord = lastWord +letterInput[j]
print "Case #"+str(i+1)+": "+lastWord
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
5f6965f66911a55288b83b23515ceb2fe17157db
|
9303cc8be6a467be84ff03a1e476c299d7001077
|
/main.py
|
9e2c4a87e3f5afc2f68c3148a7bf9ada1678b59f
|
[
"MIT"
] |
permissive
|
AuthFailed/nCoV-tgbot
|
8c5908983c7f299ae17f134756d87306e5c5acf4
|
d2ecea97b76b6d733d38573cce1a72b8c4a9868e
|
refs/heads/master
| 2022-09-04T23:04:32.702175
| 2022-08-25T00:40:00
| 2022-08-25T00:40:00
| 237,244,007
| 0
| 0
|
MIT
| 2020-01-30T17:14:49
| 2020-01-30T15:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,474
|
py
|
from aiogram import executor
from aiogram.types import *
import info_handler
import keyboard as kb
from config import dp, bot
@dp.message_handler(commands=["start"])
async def start_message(msg: Message):
await msg.reply(
text="Привет! Я отслеживаю статистику заражения 2019-nCoV.\n"
"Используйте /menu чтобы получить всю информацию.")
@dp.message_handler(commands=["menu"])
async def menu_message(msg: Message):
await msg.reply(
text="Используйте *кнопки ниже*:",
reply_markup=kb.main_menu(),
)
@dp.callback_query_handler(lambda _call: True)
async def handle_callbacks(call: CallbackQuery):
"""Отлавливаем кэллбэки телеграма."""
if call.data == "current_stats":
info = info_handler.get_main_info()
await call.message.edit_text(
f"*Статистика 2019-nCoV*:\n\n"
f"Зараженных ☣️: *{info['Infected']}*\n\n"
f"На подозрении ❓: *{info['Possible']}*\n\n"
f"На карантине ☢️: *{info['Quarantine']} ({info['Quarantined_Cities']} городов)\n\n*"
f"Вылечившихся 💊: *{info['Recovered']}*\n\n"
f"Смерти ☠️: *{info['Deaths']}*\n\n"
f"_Смертность составляет {info['Death_Rate']}%_\n"
f"Последнее обновление: *{info['Date']} MSK*",
reply_markup=kb.main_menu(),
)
await call.answer()
elif call.data == "quarantined_cities":
table = info_handler.get_table_cities()
answer_message = "*Города на карантине*\n(Город\t\t|\t\t дата закрытия\t\t|\t\tНаселение)__\n\n"
for i in range(len(table) - 1):
answer_message += f"{table[i][0]} - {table[i][1]} - {table[i][2]}\n"
await call.message.edit_text(
answer_message + "__",
reply_markup=kb.main_menu())
await call.answer()
elif call.data == "disease_forecast":
table = info_handler.disease_forecast()
answer_message = "*Прогноз заражения по Китаю на ближайшие 5 дней:*\n\n" \
"*Дата* |\t\t\t*Кол-во инфицированных*\n"
for i in range(len(table)):
answer_message += f"{table[i][0]}\t\t\t|\t\t\t{table[i][1]}\n"
answer_message = answer_message.replace("(Прогноз)", "`(Прогноз)`")
await call.message.edit_text(answer_message +
"\n\n_На основании данных статистики за последние 5 дней по Китаю (текущий день не учитывается)"
"\nСтатистика актуальна при среднем модификаторе заражения в 1.304180_",
reply_markup=kb.main_menu())
await call.answer()
elif call.data == "back_to_home":
await call.message.edit_text("Используйте *кнопки ниже*:",
reply_markup=kb.main_menu())
await call.answer()
@dp.inline_handler()
async def inline_stats(inline_query: InlineQuery):
info = info_handler.get_main_info()
text = (f"*Статистика 2019-nCoV*:\n\n"
f"Зараженных ☣️: *{info['Infected']}*\n\n"
f"На подозрении ❓: *{info['Possible']}*\n\n"
f"На карантине ☢️: *{info['Quarantine']} ({info['Quarantined_Cities']} городов)\n\n*"
f"Вылечившихся 💊: *{info['Recovered']}*\n\n"
f"Смерти ☠️: *{info['Deaths']}*\n\n"
f"_Смертность составляет {info['Death_Rate']}%_\n"
f"Последнее обновление: *{info['Date']} MSK*")
input_content = InputTextMessageContent(text)
item = InlineQueryResultArticle(
id="1", title="2019-nCoV stats", input_message_content=input_content
)
await bot.answer_inline_query(inline_query.id, results=[item], cache_time=1)
# @dp.errors_handler()
# async def error_handler():
if __name__ == "__main__":
executor.start_polling(dp, skip_updates=True)
|
[
"lenz1e973nyro"
] |
lenz1e973nyro
|
f50d553f88129bfc29a4c1bc98e9a6ddfe0af18b
|
090bceb6c9418b39056f8aa0204051da621eef01
|
/app/views.py
|
b24b5d9864c45018044b7a0e75b6974701d0c3e8
|
[] |
no_license
|
panasevychol/beetroot-test
|
627a1bb7b2935d908ed9b4da530ee77d21ae21fa
|
102b4dc1616f83038c5851da3f2c9dd83b8b2723
|
refs/heads/master
| 2021-05-01T22:42:43.723675
| 2016-12-30T13:13:51
| 2016-12-30T13:13:51
| 77,614,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from flask import render_template, request
from . import app
from .utils import find_games
@app.route('/')
def index():
keywords = request.args.get('keywords', '')
return render_template('index.html', games=find_games(keywords))
|
[
"panasevychol@gmail.com"
] |
panasevychol@gmail.com
|
566302b568f0103bd3c6c2d54e6988ac6dd06f4b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/JD9vSKZGrxQhLbA9r_11.py
|
8153c6d8cc99992256ea1d82f8771cd6328f44f3
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
def pile_of_cubes(m):
    # 1**3 + 2**3 + ... + n**3 == (n*(n+1)//2)**2, so m must be a perfect
    # square whose root is a triangular number T_n; the answer is then n.
    if m >= 10252519345963644753026:
        return None  # guard kept from the original: float sqrt is not exact for m this large
    x = m**0.5
    if x % 1 == 0:
        # Peel 1, 2, 3, ... off the root; if the remainder lands exactly on
        # the counter, the root was triangular and the counter is n.
        c = 1
        while x != c and x > 0:
            x = x - c
            c = c + 1
        if x == c:
            return c
    return None
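# Quick self-check: 1**3 + 2**3 + ... + 45**3 == (45*46//2)**2 == 1071225.
print(pile_of_cubes(1071225))  # expected: 45
print(pile_of_cubes(16))       # 16 is a square but 4 is not triangular -> None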
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a7f1b70b6ba4951bee8aab80789e69f1581b33d1
|
c6bf1b52dce9eff35a91f261aa3c33f83c887d3a
|
/bai 4.15.py
|
63f32d74a7b6c67b8ee870d15e735a7cfe4a8ca7
|
[] |
no_license
|
bachdinhthang59ktdh/b-i-t-p-ktlt-tr-n-l-p
|
bfc88fe8a97a0524680d1063daa8d5283a38f8e1
|
7500173e45d0ac032d8657c82e53742de43f1b15
|
refs/heads/master
| 2022-08-31T22:55:29.845869
| 2020-05-25T06:22:38
| 2020-05-25T06:22:38
| 262,918,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
s=input('nhap chuoi s').split()
s.sort()
for h in s:
print(h)
|
[
"noreply@github.com"
] |
noreply@github.com
|
4082075c20005fab8b339bf42d30021fa63be367
|
efdc94781d5be9e018c84d5ac5d1b988c2806c68
|
/images_dialog.py
|
0d5eaf1776a5d2229eca96f68c63264926d00079
|
[] |
no_license
|
vadimmpog/PyCalib
|
bf0a8d46a086feef4bca5d33d7222578c1e98ff0
|
0508dd1745ef341f86f5d9b7977f05d7dc3c031b
|
refs/heads/main
| 2023-08-20T03:07:56.594670
| 2021-10-27T06:43:37
| 2021-10-27T06:43:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,255
|
py
|
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QDialog
from PyQt5 import QtWidgets
import imutils
class ImagesDialog(QDialog):
def __init__(self, frames, show=False):
super().__init__()
self.current_image = 0
self.frames = frames
self.frames_num = len(frames)
        self.show = show  # NB: shadows QDialog.show(); display this dialog with exec_()
if not show:
self.selected_frames = [False for _ in range(self.frames_num)]
self.setWindowTitle("Добавление")
self.setFixedSize(724, 519)
self.buttonBox = QtWidgets.QDialogButtonBox(self)
self.buttonBox.setGeometry(QtCore.QRect(480, 470, 211, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.label = QtWidgets.QLabel(self)
self.label.setMargin(30)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayoutWidget = QtWidgets.QWidget(self)
self.gridLayoutWidget.setGeometry(QtCore.QRect(260, 430, 195, 80))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.gridLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 1, 1, 1, 1)
self.pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
if not show:
self.checkBox = QtWidgets.QCheckBox(self.gridLayoutWidget)
self.checkBox.setObjectName("checkBox")
self.gridLayout.addWidget(self.checkBox, 0, 1, 1, 1)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
QtCore.QMetaObject.connectSlotsByName(self)
self.set_logic()
def set_logic(self):
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle("Просмотр фреймов")
self.label.setText("Пустой кадр")
self.pushButton_2.setText(">>")
self.pushButton_2.clicked.connect(self.next_image)
self.pushButton.setText("<<")
self.pushButton.clicked.connect(self.previous_image)
if not self.show:
self.checkBox.setText("выбрать")
self.checkBox.clicked.connect(self.select_frame)
self.choose_frames()
def choose_frames(self, i=0):
self.label_2.setText(f"{i + 1}/{self.frames_num}")
image = imutils.resize(self.frames[i], width=550)
height, width, channel = image.shape
bytesPerLine = 3 * width
qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format_RGB888)
pix = QPixmap.fromImage(qImg)
self.label.setPixmap(pix)
def next_image(self):
if self.current_image < self.frames_num-1:
self.current_image += 1
self.choose_frames(i=self.current_image)
if not self.show:
self.checkBox.setChecked(self.selected_frames[self.current_image])
def previous_image(self):
if self.current_image > 0:
self.current_image -= 1
self.choose_frames(i=self.current_image)
if not self.show:
self.checkBox.setChecked(self.selected_frames[self.current_image])
def select_frame(self):
self.selected_frames[self.current_image] = not self.selected_frames[self.current_image]
def reject(self):
super().reject()
if not self.show:
self.selected_frames = None
|
[
"vadimmm120@yandex.ru"
] |
vadimmm120@yandex.ru
|
b3c4bd9dc92f583c4160e397ad5aca581ce33ed0
|
a14e3faea802cbe20e0c65995bf67b84c41bf0f4
|
/tests/test_car_generator.py
|
7f6bf58e5bcc413c4cd1624b849d2bdd5335d003
|
[
"MIT"
] |
permissive
|
DrimTim32/py_proj_lights
|
aafdc4b1a0d8de8926c56f92682a9058b3b92db7
|
a056e7292b0b81db95316d5d0f517c69a0d473e8
|
refs/heads/master
| 2020-07-29T00:37:29.021483
| 2017-02-07T15:51:09
| 2017-02-07T15:51:09
| 73,689,047
| 0
| 0
|
MIT
| 2020-07-14T19:00:44
| 2016-11-14T09:35:24
|
Python
|
UTF-8
|
Python
| false
| false
| 769
|
py
|
"""This file contains tests for car generator"""
import sys
from simulation import Directions, TurnDirection
from simulation.generators import CarProperGenerator
if "core" not in sys.path[0]:
sys.path.insert(0, 'core')
def test_lights_generator():
prob = {Directions.TOP: [[0, 0, 0]],
Directions.BOTTOM: [[0, 0, 0], [1, 0, 0]],
Directions.RIGHT: [[0, 1, 0]],
Directions.LEFT: [[0, 0, 1]]}
lg = CarProperGenerator(prob)
assert lg.generate(Directions.TOP, 0) is None
assert lg.generate(Directions.BOTTOM, 1).turn_direction == TurnDirection.RIGHT
assert lg.generate(Directions.RIGHT, 0).turn_direction == TurnDirection.STRAIGHT
assert lg.generate(Directions.LEFT, 0).turn_direction == TurnDirection.LEFT
|
[
"barteks95@gmail.com"
] |
barteks95@gmail.com
|
564f8f9e85d4c8a6057469a98f58669f1dfe7534
|
ae22eebfadfdeb33f5c972702a92be266248c5f7
|
/Project2_Flask/main_functions.py
|
a1d535862e0995209a154d4e27cb6ac53a887988
|
[] |
no_license
|
ecaru003/COP4813_Project2
|
964831ad9a50634dbaf0b2a397a18b3a76316b63
|
2cfc87d373340c36de11bb7c856addf4dcb905bc
|
refs/heads/master
| 2023-07-24T12:58:28.901004
| 2021-09-01T21:33:38
| 2021-09-01T21:33:38
| 315,692,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import json
def read_from_file(file_name):
with open(file_name,"r") as read_file:
data=json.load(read_file)
print("You successfully read from {}.".format(file_name))
return data
def save_to_file(data,file_name):
with open(file_name,"w") as write_file:
json.dump(data,write_file,indent=2)
print("You successfully saved to {}.".format(file_name))
|
[
"ecaru003@fiu.edu"
] |
ecaru003@fiu.edu
|
65069c192bdcfc8bf792f8d1e63112e0837c7ea7
|
708e17ad98f3143abaf811357883e680991d711f
|
/python3/happyNum.py
|
26195bfb19651e99a7333f4f60b484243ba43fcc
|
[] |
no_license
|
yichuanma95/leetcode-solns
|
a363cc8e85f2e8cdd5d2cde6e976cd76d4c4ea93
|
6812253b90bdd5a35c6bfba8eac54da9be26d56c
|
refs/heads/master
| 2021-05-24T18:05:02.588481
| 2020-10-08T00:39:58
| 2020-10-08T00:39:58
| 253,690,413
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,132
|
py
|
'''
Problem 202: Happy Number
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with any positive
integer, replace the number by the sum of the squares of its digits, and repeat the process
until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does
not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example:
Input: 19
Output: true
Explanation:
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
Solution runtime: 24ms, faster than 99.77% of Python3 submissions
Solution memory usage: 12.7 MB, less than 100% of Python3 submissions
'''
class Solution:
def isHappy(self, n: int) -> bool:
''' (Solution, int) -> bool
Returns True iff n is a "happy" number, which is a number that results in a 1 after
a repetitive process of replacing the original number by the sum of digit squares.
>>> soln = Solution()
>>> soln.isHappy(19)
True
'''
# This set will store all the unique sum of digit squares generated while
# determining if n is "happy".
unique_digit_square_sums = set()
# Keep calculating the sum of digit squares until it's equal to 1, in this case
# return True, or it already is in the set, in this case return False.
while n not in unique_digit_square_sums:
unique_digit_square_sums.add(n)
n = self.sum_of_digit_squares(n)
if n == 1:
return True
return False
def sum_of_digit_squares(self, n):
''' (Solution, int) -> int
Calculates and returns the sum of squares of n's digits.
>>> soln = Solution()
>>> soln.sum_of_digit_squares(19)
82
>>> soln.sum_of_digit_squares(82)
68
'''
digit_square_sum = 0
while n > 0:
digit_square_sum += ((n % 10) ** 2)
n //= 10
return digit_square_sum
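if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above; a small self-check,
    # not part of the original LeetCode submission.
    import doctest
    doctest.testmod()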
|
[
"ma.yich@husky.neu.edu"
] |
ma.yich@husky.neu.edu
|
7da8e44c7b81b0928a7aa944b72042d967acb70c
|
34f3d3c01a29b05e58d7dccca2ac5776e2324d0f
|
/files/zipModule.py
|
c6eb11ca46c953549e13a40ae56467d84e0acd7d
|
[] |
no_license
|
nethirangasai/pythonpgms
|
d50c485c7f13ba0bdd78b79508d4792caf5e7a20
|
c0bfddfea95b22e32cfa53ee8b531b6535b1df42
|
refs/heads/master
| 2020-05-27T09:55:00.094520
| 2019-05-26T07:56:01
| 2019-05-26T07:56:01
| 188,574,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from zipfile import ZipFile,ZIP_DEFLATED
f=ZipFile('files.zip','w',ZIP_DEFLATED)
f.write('csvReading.py')
f.write('csvWriting.py')
f.write('students.csv')
f.close()
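# Reading the archive back, as a quick sanity check (not part of the original
# script):
#
#   from zipfile import ZipFile
#   with ZipFile('files.zip') as z:
#       print(z.namelist())  # ['csvReading.py', 'csvWriting.py', 'students.csv']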
|
[
"rangasai.nethi@gmail.com"
] |
rangasai.nethi@gmail.com
|
9eb23f2fb0bdb9407531c0cc21444f0cba5aaead
|
aa1b98be1dabf14752750999b35aec8d819122fe
|
/utils.py
|
382c10d19fc66a245748c89531951d5c14186ced
|
[] |
no_license
|
tevonsb/a5
|
8fe8df7461c8515b649e3d3b601befc968c694d1
|
8d183228ed280582c45dba589f413405a49a49c4
|
refs/heads/master
| 2020-04-25T00:42:54.714606
| 2019-02-24T20:33:58
| 2019-02-24T20:33:58
| 172,386,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,328
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 5
nmt.py: NMT Model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
"""
import math
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def pad_sents_char(sents, char_pad_token):
""" Pad list of sentences according to the longest sentence in the batch and max_word_length.
@param sents (list[list[list[int]]]): list of sentences, result of `words2charindices()`
from `vocab.py`
@param char_pad_token (int): index of the character-padding token
@returns sents_padded (list[list[list[int]]]): list of sentences where sentences/words shorter
than the max length sentence/word are padded out with the appropriate pad token, such that
each sentence in the batch now has same number of words and each word has an equal
number of characters
Output shape: (batch_size, max_sentence_length, max_word_length)
"""
# Words longer than 21 characters should be truncated
max_word_length = 21
### YOUR CODE HERE for part 1f
### TODO:
### Perform necessary padding to the sentences in the batch similar to the pad_sents()
### method below using the padding character from the arguments. You should ensure all
### sentences have the same number of words and each word has the same number of
### characters.
### Set padding words to a `max_word_length` sized vector of padding characters.
###
### You should NOT use the method `pad_sents()` below because of the way it handles
### padding and unknown words.
    max_sentence_length = max(len(sent) for sent in sents)
    pad_word = [char_pad_token] * max_word_length
    # Truncate words longer than max_word_length, then pad shorter ones out.
    sents_padded = [[word[:max_word_length] + [char_pad_token] * (max_word_length - len(word))
                     for word in sent] for sent in sents]
    # Pad each sentence to the longest sentence with all-padding words.
    sents_padded = [sent + [pad_word] * (max_sentence_length - len(sent)) for sent in sents_padded]
### END YOUR CODE
return sents_padded
def pad_sents(sents, pad_token):
""" Pad list of sentences according to the longest sentence in the batch.
@param sents (list[list[int]]): list of sentences, where each sentence
is represented as a list of words
@param pad_token (int): padding token
@returns sents_padded (list[list[int]]): list of sentences where sentences shorter
than the max length sentence are padded out with the pad_token, such that
each sentences in the batch now has equal length.
Output shape: (batch_size, max_sentence_length)
"""
sents_padded = []
max_len = max(len(s) for s in sents)
batch_size = len(sents)
for s in sents:
padded = [pad_token] * max_len
padded[:len(s)] = s
sents_padded.append(padded)
return sents_padded
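# Example (toy token ids, not real vocabulary entries):
#   pad_sents([[1, 2, 3], [4]], pad_token=0) -> [[1, 2, 3], [4, 0, 0]]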
def read_corpus(file_path, source):
""" Read file, where each sentence is dilineated by a `\n`.
@param file_path (str): path to file containing corpus
@param source (str): "tgt" or "src" indicating whether text
is of the source language or target language
"""
data = []
for line in open(file_path):
sent = line.strip().split(' ')
# only append <s> and </s> to the target sentence
if source == 'tgt':
sent = ['<s>'] + sent + ['</s>']
data.append(sent)
return data
def batch_iter(data, batch_size, shuffle=False):
""" Yield batches of source and target sentences reverse sorted by length (largest to smallest).
@param data (list of (src_sent, tgt_sent)): list of tuples containing source and target sentence
@param batch_size (int): batch size
@param shuffle (boolean): whether to randomly shuffle the dataset
"""
batch_num = math.ceil(len(data) / batch_size)
index_array = list(range(len(data)))
if shuffle:
np.random.shuffle(index_array)
for i in range(batch_num):
indices = index_array[i * batch_size: (i + 1) * batch_size]
examples = [data[idx] for idx in indices]
examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)
src_sents = [e[0] for e in examples]
tgt_sents = [e[1] for e in examples]
yield src_sents, tgt_sents
|
[
"tevon.strandbrown@gmail.com"
] |
tevon.strandbrown@gmail.com
|
bb49d8dd28b9c93d2856e8511907f5a8c6efa6fb
|
ade3b5a88b2129d2e305d7be1a36dcda283a4c59
|
/Lab3/utils.py
|
8d718c9de473a5d7800d0b6f86c650cb1ac74dc5
|
[] |
no_license
|
jelenab98/DL_FER
|
0e299003d1a41a7b502853b0643cb9e0bf8138a9
|
258eba86c708b53f96e92f2c2f5e9cb458e093ef
|
refs/heads/master
| 2023-08-21T21:51:47.256101
| 2021-10-26T14:44:54
| 2021-10-26T14:44:54
| 347,167,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,674
|
py
|
<<<<<<< HEAD
from sklearn.metrics import confusion_matrix as conf_matrix
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import Dataset
from torch.nn import Embedding
from pathlib import Path
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
PADDING_TOKEN = "<PAD>" # 0
UNKNOWN_TOKEN = "<UNK>" # 1
class Instance:
def __init__(self, input_text: [str], target: str):
self.text = input_text
self.label = target
class Vocab:
def __init__(self, frequencies: dict, max_size: int = -1, min_freq: int = 0, is_target: bool = False):
if is_target:
self.stoi = dict()
self.itos = dict()
else:
self.stoi = {PADDING_TOKEN: 0, UNKNOWN_TOKEN: 1}
self.itos = {0: PADDING_TOKEN, 1: UNKNOWN_TOKEN}
self.is_target = is_target
self.max_size = max_size
self.min_freq = min_freq
i = len(self.itos)
for key, value in sorted(frequencies.items(), key=lambda x: x[1], reverse=True):
if (self.max_size != -1) and (len(self.itos) >= self.max_size):
break
if value >= self.min_freq:
self.stoi[key] = i
self.itos[i] = key
i += 1
else:
break
def __len__(self):
return len(self.itos)
def encode(self, inputs: [str]):
numericalized_inputs = []
for token in inputs:
if token in self.stoi:
numericalized_inputs.append(self.stoi[token])
else:
numericalized_inputs.append(self.stoi[UNKNOWN_TOKEN])
return torch.tensor(numericalized_inputs)
def reverse_numericalize(self, inputs: list):
tokens = []
for numericalized_item in inputs:
if numericalized_item in self.itos:
tokens.append(self.itos[numericalized_item])
else:
tokens.append(UNKNOWN_TOKEN)
return tokens
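# Small numericalization sketch (toy frequencies, not dataset statistics):
#   v = Vocab({"the": 5, "cat": 2})
#   v.encode(["the", "dog"])  # -> tensor([2, 1]); "dog" maps to <UNK>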
class NLPDataset(Dataset):
def __init__(self, text_vocab: Vocab, target_vocab: Vocab, path: Path):
self.vocab_input_text = text_vocab
self.vocab_targets = target_vocab
self.instances = []
data = pd.read_csv(path, header=None)
for i in range(len(data)):
text = data[0][i]
label = data[1][i]
self.instances.append(Instance(space_tokenizer(text), label.strip()))
def __len__(self):
return len(self.instances)
def __getitem__(self, item):
instance_item = self.instances[item]
text = instance_item.text
label = [instance_item.label]
return self.vocab_input_text.encode(text), self.vocab_targets.encode(label)
def space_tokenizer(raw_text: str):
return raw_text.strip("\n").strip("\r").split(" ")
def get_embedding_matrix(vocab: Vocab, dim: int = 300, freeze: bool = True, path: Path = None):
matrix = torch.normal(mean=0, std=1, size=(len(vocab), dim))
matrix[0] = torch.zeros(size=[dim])
if path is not None:
data = pd.read_csv(path, header=None, delimiter=" ")
for i in range(len(data)):
row = data.loc[i]
token = row.loc[0]
if token in vocab.stoi:
tmp_array = []
for j in range(1, len(row)):
tmp_array.append(row[j])
matrix[vocab.stoi[token]] = torch.tensor(tmp_array)
return Embedding.from_pretrained(matrix, padding_idx=0, freeze=freeze)
def pad_collate_fn(batch, pad_index=0):
texts, labels = zip(*batch)
lengths = torch.tensor([len(text) for text in texts])
return pad_sequence(texts, batch_first=True, padding_value=pad_index), torch.tensor(labels), lengths
def get_frequencies(path, is_target=False):
frequencies = {}
data = pd.read_csv(path, header=None)
idx = 1 if is_target else 0
for i in range(len(data)):
inputs = data[idx][i].strip().split(" ")
for token in inputs:
if token in frequencies:
frequencies[token] += 1
else:
frequencies[token] = 1
return frequencies
def train_valid(model, train_data, valid_data, optimizer, criterion, train_logger,
valid_logger, save_path: Path = None, epochs=100, gradient_clip=0.25):
best_f1 = -1
for epoch in range(epochs):
model.train()
confusion_matrix = np.zeros(shape=(2, 2))
losses = []
for idx, batch in tqdm(enumerate(train_data), total=len(train_data)):
model.zero_grad()
x, y, lengths = batch
x = x.to(device)
y = y.to(device)
output = model(x).reshape(y.shape)
loss = criterion(output, y.float())
loss.backward()
clip_grad_norm_(model.parameters(), max_norm=gradient_clip)
optimizer.step()
predictions = torch.sigmoid(output).round().int().detach().cpu().numpy()
            confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions, labels=[0, 1])  # pin the 2x2 shape even for single-class batches
losses.append(loss.item())
acc, p, r, f1 = calculate_stats(confusion_matrix)
train_stats = f"Loss: {np.average(losses):.4f}, Acc: {100 * acc:.2f}%, F1: {100 * f1:.2f}%"
train_stats2 = f"{np.average(losses)}, {acc}, {f1}"
print("[TRAIN STATS:] " + train_stats)
train_logger.update(train_stats2)
acc_v, p_v, r_v, f1_v, loss_v = evaluate(model, valid_data, criterion)
valid_stats = f"Loss: {np.average(loss_v):.4f}, Acc: {100 * acc_v:.2f}%, F1: {100 * f1_v:.2f}%"
valid_stats2 = f"{np.average(loss_v)}, {acc_v}, {f1_v}"
print("[VALID STATS:] " + valid_stats)
valid_logger.update(valid_stats2)
        if f1_v > best_f1:
            best_f1 = f1_v
            torch.save(model, save_path / "best_model.pth")
            print(f"Best model saved at epoch {epoch}.")
def calculate_stats(confusion_matrix):
acc = np.sum(confusion_matrix.diagonal()) / np.sum(confusion_matrix)
p = confusion_matrix[0, 0] / np.sum(confusion_matrix[0, :])
r = confusion_matrix[0, 0] / np.sum(confusion_matrix[:, 0])
f1 = 2 * p * r / (p + r)
return acc, p, r, f1
def evaluate(model, data, criterion):
confusion_matrix = np.zeros(shape=(2, 2))
losses = list()
model.eval()
with torch.no_grad():
for idx, batch in tqdm(enumerate(data), total=len(data)):
x, y, lengths = batch
x = x.to(device)
y = y.to(device)
            output = model(x).reshape(y.shape)
loss = criterion(output, y.float())
losses.append(loss.item())
predictions = torch.sigmoid(output).round().int().detach().cpu().numpy()
            confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions, labels=[0, 1])  # pin the 2x2 shape even for single-class batches
acc, p, r, f1 = calculate_stats(confusion_matrix)
loss = np.average(losses)
return acc, p, r, f1, loss
class Logger:
def __init__(self, path: Path, start_message: str):
with path.open(mode="w") as f:
f.write(f"{start_message}\n")
self.path = path
def update(self, message):
with self.path.open(mode="a") as f:
f.write(f"{message}\n")
|
[
"jelena.bratulic@gmail.hr"
] |
jelena.bratulic@gmail.hr
|
32dab3a9805a876cadd1c98c55ad23f5d16cff81
|
2a58920968814b87ee93decf2b887747dbb56c12
|
/helpers/create_module/find_path.py
|
d81d21182ecc2c75dade8d606fbc7d80fa6d75c5
|
[] |
no_license
|
chrysa/gae-toolbox-2
|
5e52b2c2ce66358feb82bdd078d6b9ab9f08da2e
|
b666567359888ff29d2c3dddb0453b762a65d75a
|
refs/heads/master
| 2020-03-29T20:53:11.495776
| 2014-04-16T12:10:17
| 2014-04-16T12:10:17
| 15,781,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# disco-toolbox-2.helpers.find_path -- generates the relative path of the currently running script
import os
def find_path(module_type, mod_folder):
"""fonction de génération du chemin relatif du script en cours d'éxécution
:param module_type: type de module
:param mod_folder: nom du dossier contenant les modules admin et front
:returns: renvoi le chemin relatif du script
:rtype: string
"""
    test = os.getcwd()[len(os.getcwd()) - 7:len(os.getcwd())]  # isolate the last 7 characters to tell whether the script was called from the install (or helpers) folder
if module_type == 1:
if test == 'install' or test == 'helpers':
path_folder = os.getcwd()[0:len(os.getcwd()) - 8] + os.sep + 'src' + os.sep + mod_folder + os.sep + 'admin'
else:
path_folder = os.getcwd() + os.sep + 'src' + os.sep + mod_folder + os.sep + 'admin'
else:
if test == 'install' or test == 'helpers':
path_folder = os.getcwd()[0:len(os.getcwd()) - 8] + os.sep + 'src' + os.sep + mod_folder + os.sep + 'front'
else:
path_folder = os.getcwd() + os.sep + 'src' + os.sep + mod_folder + os.sep + 'front'
return path_folder
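# Example (hypothetical folder name): called from the project root,
#   find_path(1, "mymodules")  ->  "<cwd>/src/mymodules/admin"
#   find_path(2, "mymodules")  ->  "<cwd>/src/mymodules/front"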
|
[
"agreau@student.42.fr"
] |
agreau@student.42.fr
|
029e8d41228f8d09c6e0cb103693dbf48021707d
|
eb008a137a8da49d48985240bea8c29e0966293a
|
/tools/config.py
|
5d5a6fcb30acfb04f0bf90925fd32b94d98ea154
|
[] |
no_license
|
Kukushenok/GunParty
|
4a5f7de407b68061c46cc645658b11cba3edd2d8
|
acac4ea8bd80ec9101a8a2f64a08f594f0edf31c
|
refs/heads/master
| 2021-05-12T11:09:17.202229
| 2018-02-24T18:32:15
| 2018-02-24T18:32:15
| 117,379,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
import configparser
import os
import pygame
class Config():
def __init__(self,defaultPath = None,path = None):
self.defaultPath = defaultPath
self.config = configparser.ConfigParser()
self.defaultConfig = configparser.ConfigParser()
if path:
self.config.read(os.path.join(path,'config.ini'))
if self.defaultPath:self.defaultConfig.read(os.path.join(self.defaultPath, 'default_config.ini'))
else:
self.config.read('config.ini')
if self.defaultPath:self.defaultConfig.read(os.path.join(self.defaultPath, 'default_config.ini'))
def get(self,item):
try:
return self.config["SETTINGS"][item]
except KeyError:
return self.defaultConfig["SETTINGS"][item]
def getAsDict(self,item):
toDict = ""
try:
toDict = self.config["SETTINGS"][item]
except KeyError:
toDict = self.defaultConfig["SETTINGS"][item]
dictPairs = toDict.split(",")
resDict = {}
for e in dictPairs:
splittedE = e.split(":")
exec("resDict["+splittedE[0]+"] = "+splittedE[1])
return resDict
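# Example (hypothetical config.ini entry):
#   [SETTINGS]
#   sizes = "small":1,"large":2
# config.getAsDict("sizes")  ->  {'small': 1, 'large': 2}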
|
[
"mrcoolmoder@gmail.com"
] |
mrcoolmoder@gmail.com
|
d292bf9b5228884b9307bbd114fbf6aae0eda93e
|
19b2856c718dab5380d381053c0f1d664faeab53
|
/Login/migrations/0001_initial.py
|
56a3b97852967f2bab2bfc9395c58579d0fbc9da
|
[] |
no_license
|
ywl1584/ywl1584.GraduationProject.io
|
7f62c50c939274039f304ccee378345fd083a2bf
|
18d6b1d199d3ba56ebee8de1c5551e01c7ab5bd5
|
refs/heads/master
| 2020-04-25T02:22:49.021355
| 2019-02-25T04:59:27
| 2019-02-25T04:59:27
| 172,437,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
# Generated by Django 2.1.2 on 2018-10-28 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('password', models.CharField(max_length=256)),
('email', models.EmailField(max_length=254, unique=True)),
                ('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='male', max_length=32)),
('c_time', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
'ordering': ['c_time'],
},
),
]
|
[
"your email address1270834936@qq.com"
] |
your email address1270834936@qq.com
|
57d8840f3ae45365005e9730310b3b9956021a54
|
eace995a65e1029cfb88c9a2764a831717b7b4cb
|
/rpn.py
|
30dcecb9a78d6373463dd42462d47f1d69f267b8
|
[
"MIT"
] |
permissive
|
HoangTuan110/rpn-calc
|
010115637c80417aefa088db04532c602ad0810e
|
8418999cd039cb0f63b828844e34b291e768533b
|
refs/heads/main
| 2023-04-08T13:06:35.181722
| 2021-04-11T11:27:06
| 2021-04-11T11:27:06
| 356,848,524
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
"""
This is a simple RPN (Reverse Polish Notation) calculator written in Python.
It may be quite slow, but I don't care lol.
"""
def calc(code):
# Variables
tokens = code.split(" ")
stack = []
ops = "+-*/"
result = ""
# Helper functions
push = lambda n: stack.append(n)
pop = lambda: stack.pop()
is_number = lambda ipt: ipt.isnumeric()
is_hex = lambda ipt: "x" in ipt
is_binary = lambda ipt: "b" in ipt
# Main part
    for token in tokens:
        if is_number(token) or is_hex(token) or is_binary(token):
            push(eval(token))
        # This is to avoid the case that the user put extra spaces at the end
        # or the start of the input
        elif token == "":
            continue
        elif token in ops:
            # The top of the stack is the *second* operand and the value
            # under it is the first, so pop them in reverse order.
            op2, op1 = pop(), pop()
            push(eval(f"{op1} {token} {op2}"))
        else:
            print(f"Illegal character: {token}")
            break
    if stack:
        print(stack[-1])
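# Example: calc("3 4 + 2 *") prints 14, i.e. (3 + 4) * 2.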
def repl():
while True:
calc(input(">> "))
repl()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b48dcc67a5875823dc15b6cb4f7142b0cdc08af1
|
64cea21dc4834cc876b6788f4cb8572982d2f60a
|
/product_pricelist_report_qweb/tests/common.py
|
34ce1ca81ed41750f6ce505f83679205947fda18
|
[] |
no_license
|
yelizariev/addons-vauxoo
|
708463f847a75898d99fd8c2045d20ab9083b703
|
511dc410b4eba1f8ea939c6af02a5adea5122c92
|
refs/heads/8.0
| 2020-12-11T09:04:04.912471
| 2016-03-17T06:00:36
| 2016-03-17T06:00:36
| 53,125,976
| 3
| 2
| null | 2016-03-04T10:01:48
| 2016-03-04T10:01:48
| null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# coding: utf-8
# ##########################################################################
# Module Writen to ODOO, Open Source Management Solution
#
# Copyright (c) 2015 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
# ###########################################################################
# Coded by: Luis Torres (luis_t@vauxoo.com)
# ###########################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# #############################################################################
from openerp.tests import common
import logging
_logger = logging.getLogger(__name__)
class TestXLSProductCommon(common.TransactionCase):
def setUp(self):
super(TestXLSProductCommon, self).setUp()
self.product_price_obj = self.env['product.price_list']
self.attachment_obj = self.env['ir.attachment']
self.price_list_id = self.ref('product.list0')
self.product = self.env.ref('product.product_product_7').copy()
|
[
"hbto@vauxoo.com"
] |
hbto@vauxoo.com
|
01056432f916ec5052c06f42038020cc0f7a42d4
|
27b2cee1701a2e3073ecf020065f697c5b145de0
|
/txboto/auth_handler.py
|
86da5f1288ca2ecd9647ca8feb619d35631317b4
|
[
"ADSL",
"BSD-3-Clause"
] |
permissive
|
2mf/txboto
|
25209b2d5c465ca093581dda281ae65e3e17103e
|
3ecc5c5e86b650edc6c3b42064a07d42faa210e4
|
refs/heads/master
| 2020-04-25T08:40:47.640350
| 2017-02-22T11:38:21
| 2017-02-22T11:38:21
| 45,603,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,618
|
py
|
# Copyright 2010 Google Inc.
# Copyright (c) 2015 Silver Egg Technology, Co., Ltd.
# Copyright (c) 2015 Michael Franke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Defines an interface which all Auth handlers need to implement.
"""
from txboto.plugin import Plugin
class NotReadyToAuthenticate(Exception):
pass
class AuthHandler(Plugin):
capability = []
def __init__(self, host, config, provider):
"""Constructs the handlers.
:type host: string
:param host: The host to which the request is being sent.
:type config: txboto.pyami.Config
:param config: TxBoto configuration.
:type provider: txboto.provider.Provider
:param provider: Provider details.
Raises:
NotReadyToAuthenticate: if this handler is not willing to
authenticate for the given provider and config.
"""
pass
def add_auth(self, http_request):
"""Invoked to add authentication details to request.
:type http_request: txboto.connection.HTTPRequest
:param http_request: HTTP request that needs to be authenticated.
"""
pass
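# A minimal sketch of how a concrete handler might subclass this interface
# (hypothetical; not an actual txboto handler):
#
# class NoAuthHandler(AuthHandler):
#     capability = ['noauth']
#
#     def add_auth(self, http_request):
#         # attach headers / a signature to http_request here
#         pass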
|
[
"mf33456@gmail.com"
] |
mf33456@gmail.com
|
338f0fba5917e4ae0b096d9a4b4b41e5389d4123
|
05e2452e154806455d2d829466055f0ac8a11f92
|
/Name/wsgi.py
|
64efb28a6a226a33c4fe67a9c9bcc6ede1cd3dee
|
[] |
no_license
|
WesamAlmasri/Translator
|
35a295ca8aa2ded1ccc315e19494201475491cf4
|
875a324a4cb7a75c7b80f51ba420c3efc2306092
|
refs/heads/main
| 2023-04-03T03:48:56.830044
| 2021-04-03T13:20:30
| 2021-04-03T13:20:30
| 353,406,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for Name project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Name.settings')
application = get_wsgi_application()
|
[
"mr0virus@gmail.com"
] |
mr0virus@gmail.com
|
b9862bab92c4aa791fbc0851e03b13c965d9dff8
|
8ee86008310da9954e3c200dd4711d295d449329
|
/blog/urls.py
|
0e0724f937f23bacaf9bde8904f4b9f53f37edd2
|
[] |
no_license
|
madp3e/Blog
|
846fef127330b9f600c7b0c15a080efb5de4a148
|
1379041c68c6e4045d25a5f1bf9ff325457788e7
|
refs/heads/master
| 2022-11-26T22:39:36.391205
| 2019-12-12T11:47:25
| 2019-12-12T11:47:25
| 227,589,494
| 0
| 0
| null | 2022-11-22T04:37:45
| 2019-12-12T11:19:41
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
from django.urls import path
from . import views
from .views import (PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView)
urlpatterns = [
path("", PostListView.as_view(), name="blog-home"),
path("post/<int:pk>/", PostDetailView.as_view(), name="post-detail"),
path("post/<int:pk>/update", PostUpdateView.as_view(), name="post-update"),
path("post/<int:pk>/delete", PostDeleteView.as_view(), name="post-delete"),
path("post/new/", PostCreateView.as_view(), name="post-create"),
path("about/", views.about, name="blog-about"),
path("user/<str:username>/", UserPostListView.as_view(), name="user-posts")
]
|
[
"ahmadfaizuddin17@gmail.com"
] |
ahmadfaizuddin17@gmail.com
|
c9062fbe8e75b4749ea59e439897d1de93808c00
|
a88ac040aa274d94ac8decbbf43a585af56cf825
|
/src/perftest.py
|
d8bb9fedffb0ea4c58157bde543ff3c510c1343f
|
[] |
no_license
|
s7evinkelevra/Agent-Model
|
6dd0544326502c00572db2c2f4cf9785092e9ef3
|
f25cde7190736778dbf0d0a5a45fa3a3f3f1efc3
|
refs/heads/master
| 2023-08-13T02:26:04.232434
| 2021-09-30T12:24:50
| 2021-09-30T12:24:50
| 403,736,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,984
|
py
|
from Bio.Seq import Seq
from matplotlib.ticker import LinearLocator
from matplotlib import cm
import random
from pprint import pprint
import itertools
from collections import deque
import uuid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
rng = np.random.default_rng()
# Random base sequence of length
def randomDNAseq(length):
return ''.join(random.choice('GCAT') for _ in range(length))
# Random proteinogenic amino acids sequence of length
def randomASseq(length):
return ''.join(random.choice('ACDEFGHIKLMNOPQRSTUVWY') for _ in range(length))
# Random bitstring
def randomBitseq(length):
return ''.join(random.choice('01') for _ in range(length))
# Generate allele with unique id and random position in peptide space
def randomPSallele(peptide_space_length):
return {
"x": rng.integers(low=1, high=peptide_space_length),
"y": rng.integers(low=1, high=peptide_space_length),
"id": uuid.uuid4()
}
def sliding_window_iter(seq, width):
it = iter(seq)
result = tuple(itertools.islice(it, width))
if len(result) == width:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
# Sliding window iterator over sequence seq and of window width of n
def window(seq, n=2):
it = iter(seq)
win = deque((next(it, None) for _ in range(n)), maxlen=n)
yield win
append = win.append
for e in it:
append(e)
yield win
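# Example: [tuple(w) for w in window("abcd", 2)] -> [('a', 'b'), ('b', 'c'), ('c', 'd')]
# Note that the same deque object is yielded each time, so copy it
# (e.g. with tuple(w)) before storing it.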
start_time = time.perf_counter_ns()
peptide_space_length = 1000
host_n = 10000
host_allele_initial_n = 150
host_allele_length = 9
host_fitness_initial = 1
host_fitness_increment = 0.2
host_species_n = 1
pathogen_n = 100000
pathogen_haplotype_initial_n = 400
pathogen_haplotype_length = 100
pathogen_fitness_initial = 1
pathogen_fitness_increment = 1
pathogen_species_n = 1
host_allele_pool = [[randomPSallele(peptide_space_length) for _ in range(
host_allele_initial_n)] for _ in range(host_species_n)]
def generateHost():
species = random.choice(range(host_species_n))
allele_1_data = random.choice(host_allele_pool[species])
allele_2_data = random.choice(host_allele_pool[species])
return {
"species": species,
"fitness": host_fitness_initial,
"allele_1_id": allele_1_data["id"],
"allele_1_x": allele_1_data["x"],
"allele_1_y": allele_1_data["y"],
"allele_2_id": allele_2_data["id"],
"allele_2_x": allele_2_data["x"],
"allele_2_y": allele_2_data["y"]
}
host_data = [generateHost() for _ in range(host_n)]
hosts = pd.DataFrame(host_data)
pathogen_haplotype_pool = [[randomPSallele(peptide_space_length) for _ in range(
pathogen_haplotype_initial_n)] for _ in range(pathogen_species_n)]
def generatePathogen():
species = random.choice(range(pathogen_species_n))
haplotype = random.choice(pathogen_haplotype_pool[species])
return {
"species": species,
"fitness": pathogen_fitness_initial,
"haplotype_id": haplotype["id"],
"haplotype_x": haplotype["x"],
"haplotype_y": haplotype["y"]
}
pathogen_data = [generatePathogen() for _ in range(pathogen_n)]
pathogens = pd.DataFrame(pathogen_data)
print(f'host count - {len(hosts)}')
print(f'host allele count (unique) - {len(hosts.allele_1_id.unique())}')
print(f'pathogen count - {len(pathogens)}')
print(
f'pathogen haplotype count (unique) - {len(pathogens.haplotype_id.unique())}')
sim_gen_n = 10000
sim_logging_interval = 50
sim_allele_subsample_n = 100
def uniqueAlleleCount():
print("yeee")
"""
print(hosts[['allele_1_id', 'allele_2_id']].value_counts())
print(hosts[['allele_1_id', 'allele_2_id']].values.ravel('K'))
print(len(pd.unique(hosts[['allele_1_id', 'allele_2_id']].values.ravel('K'))))
host_allele_all = hosts[['allele_1_id', 'allele_2_id']].values.ravel('K')
unique, counts = np.unique(host_allele_all, return_counts=True)
# print(np.asarray((unique,counts)).T)
print(counts)
plt.bar([str(i)[10:15] for i in unique], counts)
"""
def eucDist(x0, y0, x1, y1):
dX = x1 - x0
dY = y1 - y0
return np.sqrt(dX*dX + dY * dY)
def infect(host):
infecting_pathogen = pathogens.sample()
dist1 = eucDist(host["allele_1_x"], host["allele_1_y"],
infecting_pathogen["haplotype_x"], infecting_pathogen["haplotype_y"])
dist2 = eucDist(host["allele_2_x"], host["allele_2_y"],
infecting_pathogen["haplotype_x"], infecting_pathogen["haplotype_y"])
min_dist = np.min([dist1, dist2])
    if min_dist < 200:
return host["fitness"] - host_fitness_increment
else:
return host["fitness"]
"""
for i in range(sim_gen_n):
# log every sim_logging_interval'th generation
if(i % sim_logging_interval == 0):
print("logging data")
    # infection regime
## each host is infected between 1 and n times
infecting_pathogen_species = 0
hosts["fitness"] = hosts.apply(infect, axis=1)
print(hosts)
break
"""
end_time = time.perf_counter_ns()
print((end_time-start_time) / 1000)
|
[
"kelevra.1337@gmail.com"
] |
kelevra.1337@gmail.com
|
cd75f26df497e0e47746786f0197f8dc9b218f06
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/FCU_Return_Air_Temperature_Sensor.py
|
d4ac39c9698a57051d03037b2f79dc41b5511c4b
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Return_Air_Temperature_Sensor import Return_Air_Temperature_Sensor
class FCU_Return_Air_Temperature_Sensor(Return_Air_Temperature_Sensor):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').FCU_Return_Air_Temperature_Sensor
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
b52563bc708de755093f4abaf4427720c8741e1c
|
654acf62f757435f11afe3edb784c19ba9a996b5
|
/Cmimid/src/generalizetokens.py
|
b8d1ba37156b9a633f7d37b92cf041e669f90ced
|
[] |
no_license
|
anonymous-scientist/anonymous-scientist.github.io
|
92337f97ed48f68f2b8de0f2a23de31fac6ee702
|
b699788fc0c44d03e4d3e172428202f52a57fd08
|
refs/heads/master
| 2020-07-05T21:10:15.055470
| 2020-03-11T10:22:38
| 2020-03-11T10:22:38
| 202,777,252
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,277
|
py
|
#!/usr/bin/env python
import sys
import pudb
import grammartools
# ulimit -s 100000
sys.setrecursionlimit(99000)
import random
import string
import util
import copy
import json
import re
import fuzz as F
import subprocess
b = pudb.set_trace
def is_nt(token):
return token.startswith('<') and token.endswith('>')
def generalize_tokens(grammar):
g_ = {}
for k in grammar:
new_rules = []
for rule in grammar[k]:
new_rule = []
for token in rule:
if not is_nt(token):
new_rule.extend(list(token))
else:
new_rule.append(token)
new_rules.append(new_rule)
g_[k] = new_rules
return g_
def get_list_of_single_chars(grammar):
lst = []
for p,key in enumerate(grammar):
for q,rule in enumerate(grammar[key]):
for r,token in enumerate(rule):
if is_nt(token): continue
if len(token) == 1:
lst.append((key, q, r, token))
return lst
def remove_recursion(d):
new_d = {}
for k in d:
new_rs = []
for t in d[k]:
if t != k:
new_rs.append(t)
new_d[k] = new_rs
return new_d
def replaceable_with_kind(stree, orig, parent, gk, command):
my_node = None
def fill_tree(node):
nonlocal my_node
name, children = node
if name == gk:
my_node = [name, [[parent, []]]]
return my_node
elif not children:
if name in ASCII_MAP:
return (random.choice(ASCII_MAP[name]), [])
return (name, [])
else:
return (name, [fill_tree(c) for c in children])
tree0 = fill_tree(stree)
sval = util.tree_to_str(tree0)
assert my_node is not None
a1 = my_node, '', tree0
if parent == orig:
aX = ((gk, [[orig, []]]), '', tree0)
val = util.is_a_replaceable_with_b(a1, aX, command)
if val:
return True
else:
return False
else:
for pval in ASCII_MAP[parent]:
aX = ((gk, [[pval, []]]), '', tree0)
val = util.is_a_replaceable_with_b(a1, aX, command)
if val:
continue
else:
return False
return True
# string.ascii_letters The concatenation of the ascii_lowercase and ascii_uppercase constants described below. This value is not locale-dependent.
# string.ascii_lowercase The lowercase letters 'abcdefghijklmnopqrstuvwxyz'. This value is not locale-dependent and will not change.
# string.ascii_uppercase The uppercase letters 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'. This value is not locale-dependent and will not change.
# string.digits The string '0123456789'.
# string.hexdigits The string '0123456789abcdefABCDEF'.
# string.octdigits The string '01234567'.
# string.punctuation String of ASCII characters which are considered punctuation characters in the C locale: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~.
# string.printable String of ASCII characters which are considered printable. This is a combination of digits, ascii_letters, punctuation, and whitespace.
# string.whitespace A string containing all ASCII characters that are considered whitespace. This includes the characters space, tab, linefeed, return, formfeed, and vertical tab.
def parent_map():
parent = {}
for sp in string.whitespace:
parent[sp] = '[__WHITESPACE__]'
for digit in string.digits:
parent[digit] = '[__DIGIT__]'
for ll in string.ascii_lowercase:
parent[ll] = '[__ASCII_LOWER__]'
for ul in string.ascii_uppercase:
parent[ul] = '[__ASCII_UPPER__]'
for p in string.punctuation:
parent[p] = '[__ASCII_PUNCT__]'
parent['[__WHITESPACE__]'] = '[__ASCII_PRINTABLE__]'
parent['[__DIGIT__]'] = '[__ASCII_ALPHANUM__]'
parent['[__ASCII_LOWER__]'] = '[__ASCII_LETTER__]'
parent['[__ASCII_UPPER__]'] = '[__ASCII_LETTER__]'
parent['[__ASCII_LETTER__]'] = '[__ASCII_ALPHANUM__]'
parent['[__ASCII_ALPHANUM__]'] = '[__ASCII_PRINTABLE__]'
    parent['[__ASCII_PUNCT__]'] = '[__ASCII_PRINTABLE__]'  # key must match the '[__ASCII_PUNCT__]' entries above
return parent
ASCII_MAP = {
'[__WHITESPACE__]': string.whitespace,
'[__DIGIT__]': string.digits,
'[__ASCII_LOWER__]': string.ascii_lowercase,
'[__ASCII_UPPER__]': string.ascii_uppercase,
'[__ASCII_PUNCT__]': string.punctuation,
'[__ASCII_LETTER__]': string.ascii_letters,
'[__ASCII_ALPHANUM__]': string.ascii_letters + string.digits,
'[__ASCII_PRINTABLE__]': string.printable
}
PARENT_MAP = parent_map()
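# Example generalization chain encoded by PARENT_MAP above:
# '7' -> '[__DIGIT__]' -> '[__ASCII_ALPHANUM__]' -> '[__ASCII_PRINTABLE__]'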
def find_max_generalized(tree, kind, gk, command):
if kind not in PARENT_MAP: return kind
parent = PARENT_MAP[kind]
if replaceable_with_kind(tree, kind, parent, gk, command):
return find_max_generalized(tree, parent, gk, command)
else:
return kind
def do_n(tree, kind, gk, command, n):
ret = []
for i in range(n):
pval = random.choice(ASCII_MAP[kind])
ret.append([pval, []])
return (gk, ret)
def find_max_widened(tree, kind, gk, command):
my_node = None
def fill_tree(node):
nonlocal my_node
name, children = node
if name == gk:
my_node = [name, [[kind, []]]]
return my_node
elif not children:
if name in ASCII_MAP:
return (random.choice(ASCII_MAP[name]), [])
return (name, [])
else:
return (name, [fill_tree(c) for c in children])
tree0 = fill_tree(tree)
sval = util.tree_to_str(tree0)
assert my_node is not None
a1 = my_node, '', tree0
# this is a single character. Now, try 2, 4 etc.
pvals = do_n(tree, kind, gk, command, 2)
aX = (pvals, '', tree0)
val = util.is_a_replaceable_with_b(a1, aX, command)
if not val: return kind
pvals = do_n(tree, kind, gk, command, 4)
aX = (pvals, '', tree0)
val = util.is_a_replaceable_with_b(a1, aX, command)
if not val: return kind
return kind + '+'
GK = '<__GENERALIZE__>'
MAX_CHECKS = 1000
def generalize_single_token(grammar, start, k, q, r, command, blacklist):
# first we replace the token with a temporary key
gk = GK
# was there a previous widened char? and if ther wase,
# do we belong to it?
char = grammar[k][q][r]
if r > 0 and grammar[k][q][r-1][-1] == '+':
# remove the +
last_char = grammar[k][q][r-1][0:-1]
if last_char in ASCII_MAP and char in ASCII_MAP[last_char]:
#we are part of the last.
grammar[k][q][r] = last_char + '+'
return grammar
g_ = copy.deepcopy(grammar)
g_[k][q][r] = gk
g_[gk] = [[char]]
#reachable_keys = grammartools.reachable_dict(g_)
# now, we need a path to reach this.
fg = grammartools.get_focused_grammar(g_, (gk, []))
fuzzer = F.LimitFuzzer(fg)
#skel_tree = find_path_key(g_, start, gk, reachable_keys, fuzzer)
tree = None
check = 0
while tree is None:
#tree = flush_tree(skel_tree, fuzzer, gk, char)
#tree = fuzzer.gen_key(grammartools.focused_key(start), depth=0, max_depth=1)
tree = fuzzer.iter_gen_key(grammartools.focused_key(start), max_depth=1)
val = util.check(char, char, '<__CHECK__(%d/%d)>' % (check, MAX_CHECKS), tree, command, char, char)
check += 1
if not val:
tree = None
if check > MAX_CHECKS:
print("Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char), file=sys.stderr)
blacklist.append((k, q, r, char))
#raise "Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char)
return grammar
# now we need to make sure that this works.
gen_token = find_max_generalized(tree, char, gk, command)
if gen_token != char:
# try widening
gen_token = find_max_widened(tree, gen_token, gk, command)
del g_[gk]
g_[k][q][r] = gen_token
# preserve the order
grammar[k][q][r] = gen_token
return grammar
def remove_duplicate_repetitions(g):
new_g = {}
for k in g:
new_rules = []
for rule in g[k]:
#srule = ''.join(rule)
new_rule = []
last = -1
for i,t in enumerate(rule):
if last >= 0 and len(t) > 0 and t[-1] == '+' and t == rule[last]:
continue
else:
last = i
new_rule.append(t)
#snrule = ''.join(new_rule)
#if srule != snrule:
# print("change:",file=sys.stderr)
# print(" ", srule, file=sys.stderr)
# print(" ", snrule, file=sys.stderr)
new_rules.append(new_rule)
new_g[k] = new_rules
return new_g
def main(args):
gfname = args[0]
with open(gfname) as f:
gf = json.load(fp=f)
grammar = gf['[grammar]']
start = gf['[start]']
command = gf['[command]']
# now, what we want to do is first regularize the grammar by splitting each
# multi-character tokens into single characters.
generalized_grammar = generalize_tokens(grammar)
# next, we want to get the list of all such instances
list_of_things_to_generalize = get_list_of_single_chars(generalized_grammar)
#print(len(list_of_things_to_generalize), file=sys.stderr)
# next, we want to generalie each in turn
# finally, we want to generalize the length.
#reachable_keys = reachable_dict(grammar)
g_ = generalized_grammar
blacklist = []
for k, q, r, t in list_of_things_to_generalize:
assert g_[k][q][r] == t
bl = []
g_ = generalize_single_token(g_, start, k, q, r, command, bl)
if bl:
print("Blacllisted:", bl, file=sys.stderr)
blacklist.extend(bl)
g = remove_duplicate_repetitions(g_)
g = grammartools.remove_duplicate_rules_in_a_key(g)
# finally, we want to generalize the length.
#g = generalize_size(g_)
print(json.dumps({'[start]': start, '[grammar]':g, '[command]': command, '[blacklist]': blacklist}, indent=4))
if __name__ == '__main__':
main(sys.argv[1:])
|
[
"anonymous@anonymous.net"
] |
anonymous@anonymous.net
|
fa76acace0c4cd47c3cdb6b96aa8b5eed60ae7bf
|
8a41ef3e60355b867116754444d3b844721b7ff9
|
/how2pizza/pizza/admin.py
|
ea329c4b7c6a2a8d549786f75503dd73fe4627be
|
[
"MIT"
] |
permissive
|
ianonavy/how2pizza
|
12cc99b1f8adc6aa5513d396cb67ecb62039554e
|
ebac7b0cd2ea3be851eddb3fe221c11d1a2a426a
|
refs/heads/master
| 2021-01-23T22:07:46.822089
| 2015-05-28T05:55:59
| 2015-05-28T05:55:59
| 31,512,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from django.contrib import admin
from pizza.models import PizzaOrder, PizzaOrderUserChoice, PizzaType
@admin.register(PizzaOrder)
class PizzaOrderAdmin(admin.ModelAdmin):
list_display = ('id', 'created_at')
@admin.register(PizzaOrderUserChoice)
class PizzaOrderUserChoiceAdmin(admin.ModelAdmin):
pass
@admin.register(PizzaType)
class PizzaTypeAdmin(admin.ModelAdmin):
pass
|
[
"ianonavy@gmail.com"
] |
ianonavy@gmail.com
|
3a829b2c788daa3d8a5b5cdfa4c5b6ccd3daabd7
|
fefa88dd63533ed36ec4f86c029b5d9a00a3ad82
|
/monapi/serializers.py
|
987d80eaf5e66ca2d97f70f100a17e9b7334545c
|
[] |
no_license
|
jeremyguiller/Api-mairie
|
f8fee21610acfb2ec20fdb761d5cb854a82480e5
|
7f1a6173e5ef0c25f2971f8a7e41adf8e88b8d8c
|
refs/heads/master
| 2023-04-06T15:33:48.750300
| 2021-04-26T11:44:03
| 2021-04-26T11:44:03
| 361,729,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
from rest_framework import serializers, fields
from .models import Location, Texte, Image, Administrateur
class Locationserializer(serializers.HyperlinkedModelSerializer):
date = serializers.DateTimeField()
class Meta:
model = Location
fields = ('date','name','confirmer')
class AdministrateurSerializers(serializers.HyperlinkedModelSerializer):
class Meta:
model = Administrateur
fields = ('name','email','mdp')
class TexteSerializers(serializers.HyperlinkedModelSerializer):
class Meta:
model = Texte
fields = ('intitule','texte')
class ImageSerializers(serializers.HyperlinkedModelSerializer):
class Meta:
model = Image
fields = ('description','image')
|
[
"guillerjeremy@gmail.com"
] |
guillerjeremy@gmail.com
|
782612e4635027ea04a2431e6dc0a11bcc45d1ee
|
e82ba9e19c415e5eeff4a48f52dbd7efc4ae4d6b
|
/9.sort/BubbleSort2.py
|
a43b5016f4aa1cde02f156f1bd522421ff774c94
|
[] |
no_license
|
GoldK11/dataSKKU
|
8a4dbbd5adb6b766a28cdfaba3b9a744992d4e41
|
24b5e82e5456daf3c07db271e1b6932661c967a3
|
refs/heads/master
| 2021-08-23T01:33:16.984279
| 2017-12-02T05:39:34
| 2017-12-02T05:39:34
| 112,315,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# from 0 (a bit strange)
def bubbleSort(l):
count =0
for i in range(len(l)):
for j in range(i+1,len(l)):
count+=1
if l[i]>l[j]:
(l[i],l[j])=(l[j],l[i])
return count
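# The nested loops always make n*(n-1)/2 comparisons regardless of input
# order, e.g. a 300-element list needs 300 * 299 / 2 = 44850.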
l = [53, 112, 174, 200, 258, 123, 184, 254, 232, 136, 198, 3, 286, 6, 62, 57, 110, 10, 17, 189, 291, 2, 245, 118, 226, 154, 33, 211, 285, 191, 289, 161, 56, 74, 241, 297, 249, 9, 208, 251, 63, 214, 145, 97, 75, 149, 158, 59, 275, 68, 95, 124, 32, 99, 167, 224, 197, 79, 296, 152, 171, 98, 30, 148, 26, 50, 266, 93, 293, 182, 181, 153, 88, 66, 210, 100, 127, 94, 247, 277, 44, 262, 77, 121, 138, 71, 82, 119, 37, 140, 233, 206, 237, 212, 231, 11, 248, 209, 271, 234, 255, 51, 25, 243, 163, 146, 172, 142, 238, 263, 114, 104, 253, 236, 4, 273, 54, 151, 73, 250, 204, 227, 107, 18, 92, 60, 187, 120, 102, 64, 128, 173, 281, 279, 282, 144, 219, 244, 269, 40, 180, 283, 126, 288, 45, 143, 91, 178, 157, 96, 70, 129, 109, 85, 147, 35, 90, 195, 261, 19, 22, 55, 267, 280, 299, 15, 199, 168, 108, 235, 105, 196, 135, 58, 155,
162, 101, 218, 24, 246, 207, 89, 132, 192, 14, 290, 1, 295, 188, 270, 201, 78, 229, 39, 274, 49, 13, 28, 65, 72, 52, 81, 217, 252, 220, 34, 31, 216, 139, 256, 169, 166, 27, 160, 12, 284, 111, 228, 0, 159, 8, 298, 122, 87, 41, 205, 215, 193, 165, 203, 221, 84, 7, 176, 80, 20, 125, 179, 141, 29, 134, 5, 257, 16, 268, 194, 202, 225, 23, 185, 36, 21, 117, 48, 76, 260, 186, 156, 170, 47, 223, 265, 287, 103, 42, 113, 38, 239, 115, 278, 230, 259, 61, 150, 69, 130, 133, 116, 164, 242, 213, 183, 67, 175, 131, 240, 264, 46, 276, 43, 86, 83, 106, 294, 177, 137, 292, 190, 222, 272]
print(bubbleSort(l))
print(l)
|
[
"ssori113@gmail.com"
] |
ssori113@gmail.com
|
b66bdcf6efc1e3d36d06876d5a98947743683ff5
|
95a05bee4ef9a16da7185e7651685d7df71d55af
|
/metadata.py
|
4daf5b1243803c996a12e9c057b935b032fb26d4
|
[
"Unlicense"
] |
permissive
|
ArniDagur/auto-rental
|
0f0b342c1a0d320100f4bcaba4a881f78358b76e
|
8b7fcf724c7501c0414454771addbd36be185b26
|
refs/heads/master
| 2020-04-10T16:44:39.510794
| 2018-12-10T10:06:25
| 2018-12-10T10:06:25
| 161,154,249
| 0
| 0
|
Unlicense
| 2018-12-10T10:01:58
| 2018-12-10T10:01:58
| null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
import os
from appdirs import user_data_dir
# Information for humans:
# -----------------------------------------------------------------------------
APPNAME = 'Auto-Rental'
AUTHOR = 'hopur-32'
# Information for computers:
# -----------------------------------------------------------------------------
DATA_DIR = user_data_dir(APPNAME, AUTHOR) # OS specific directory to store data
if not os.path.isdir(DATA_DIR):
os.makedirs(DATA_DIR)
|
[
"arnidg@protonmail.ch"
] |
arnidg@protonmail.ch
|
7421e6059aeff1e3016934fea7f9e2910344351e
|
83648babb83497ff162ccfa6104c1f09029bcb37
|
/local_global.py
|
aa1da55f52fde40d694f0c3e6e2fb5b0626ebf22
|
[] |
no_license
|
seeni-eldho/pythonProgram
|
aeeb5ec559049feb4d331b3a40e09f21f9b799b2
|
3361c4673d85e0bfb0df93414c573bdd3a4944b0
|
refs/heads/master
| 2023-08-07T17:54:24.405327
| 2021-09-22T09:40:04
| 2021-09-22T09:40:04
| 402,530,682
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
x=5
def foo():
global y
y=7
    print('local', y)
foo()
print('local',y)
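# Note: y exists at module level only because foo() declared it global and
# was called first; print(y) before calling foo() would raise a NameError.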
|
[
"seenieldho85@gmail.com"
] |
seenieldho85@gmail.com
|
c8e453ae1f4aa67ae58b7f6d6dd39e2b6c2afb3d
|
0367d2c25de1584fd064522e9b9efc8fa52d1478
|
/odd_eve_list.py
|
c97aec023602a1865225836fc042828587cb288f
|
[] |
no_license
|
sk013/Python_Basic_Programs
|
9d69698f28246f6787c695e20d5b2b4a45417019
|
c44ed384e8185261ef4fd715694362269837d6c8
|
refs/heads/main
| 2023-05-03T12:04:20.144301
| 2021-05-26T17:43:22
| 2021-05-26T17:43:22
| 371,121,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
def odd_eve(l):
odd = []
eve = []
for i in l:
        if i % 2 == 0:
            eve.append(i)
        else:
            odd.append(i)
output = [eve,odd]
return output
numbers = [1,2,4,3,5,6,54,2,36,43,31]
print(odd_eve(numbers))
|
[
"noreply@github.com"
] |
noreply@github.com
|
1158acb79cf822c0ded1ea29f10b77727305c073
|
cd142a4e15d3576546fcb44841417039f0b8fb00
|
/build/double/catkin_generated/pkg.installspace.context.pc.py
|
9b014836f2e3e476722b6c40aa901294660dad37
|
[] |
no_license
|
mgou123/rplidar
|
4389819eb1998d404d1066c7b4a983972d236ce7
|
608c1f6da2d3e5a8bac06e8d55d8569af828a40b
|
refs/heads/master
| 2022-11-10T05:51:56.403293
| 2020-06-29T04:16:14
| 2020-06-29T04:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "double"
PROJECT_SPACE_DIR = "/home/xu/dogkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"492798337@qq.com"
] |
492798337@qq.com
|
488243e5d4538da2bac8bd00083dfb737797e000
|
4dcee7dff58a6f0364283787aa7ad1dff16721e1
|
/pre_pred_bert.py
|
85a565945bd985a6dbd43cc220760b93320738a5
|
[] |
no_license
|
karthikpuranik11/Masked-LM
|
ead8bcb5bcaedb8b62b627cc6dab2ce3c5fefcbe
|
bb049e493bc9968e3c50cac1fe88ebe7c436523f
|
refs/heads/main
| 2023-03-18T21:51:27.842906
| 2021-03-07T17:37:54
| 2021-03-07T17:37:54
| 342,780,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from nltk import pos_tag  # pos_tag is used below but was never imported

# predict_masked_sent is assumed to be defined elsewhere in this repo;
# it is not part of this file.
a = predict_masked_sent('The animals came to the meeting.', top_k=5)
for j in range(len(a)):
x=0
a[j]=a[j].split()
#print(a[j])
tok = pos_tag(a[j])
for k in range(len(tok)):
if tok[k][0]=='[MASK]':
break
elif tok[k][1]=='IN' or tok[k][1]=='TO':
pred=' '.join(a[j])
print(pred)
x=1
break
if x==1:
break
|
[
"noreply@github.com"
] |
noreply@github.com
|
7ff7ebba377cd3e6d83e88368536f529b763202f
|
e966ac971af90faff55fce232620f3d0ad7f7fb8
|
/com/swj/OOP/Fundamental.py
|
a0241ea4e38f9bd58018cf65265a64b4d8590778
|
[] |
no_license
|
shouguouo/PythonDemo
|
f987b9849e01806ccb6c370bbd4d4ba9675629ec
|
d9011506e3474054e2f5b1246f8e014facea7961
|
refs/heads/master
| 2021-09-21T23:55:53.258819
| 2018-09-03T15:47:27
| 2018-09-03T15:47:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,441
|
py
|
# -*- coding: utf-8 -*-
# class Student(object):  # inherits from the object class
# pass
# bart = Student()
# print(bart)
# print(Student)
# bart.name = 'swj'
# print(bart.name)
# class Student(object):
# def __init__(self, name, score):  # the first parameter is always self, the instance being created; it isn't passed explicitly, the remaining parameters are
# self.name = name
# self.score = score
# def print_score(self):
# print('%s:%s'%(self.name, self.score))
# def get_grade(self):
# if self.score >= 90:
# return 'A'
# elif self.score >= 60:
# return 'B'
# else:
# return 'C'
# bart = Student('swj', 99)
# print(bart.get_grade())
#
# # data encapsulation
# bart.print_score()
# Access control: an instance variable whose name starts with __ becomes a private variable
# class Student(object):
# def __init__(self, name, score):
# self.__name = name
# self.__score = score
# def print_score(self):
# print('%s:%s'%(self.__name, self.__score))
# def get_name(self):
# return self.__name
# def get_score(self):
# return self.__score
# def set_score(self, score):
# if 0 <= score <= 100:
# self.__score = score
# else:
# raise ValueError('bad score')
#
# s = Student('swj', 99)
# # print(s.__name)  # not accessible
# print(s.get_name())
# s.print_score()
#
# # Names like __xx__ (starting and ending with double underscores) are special variables; they can be accessed directly, but don't name your own variables __name__ or __score__
# # Variables starting with a single underscore can be accessed from outside, but by convention they say "I can be accessed, but please treat me as private and don't touch me casually"
# print(s._Student__name)  # private variables can still be reached, but this is strongly discouraged; internally the name is mangled to _Student__name
# s._Student__name = 'xhy'
# print(s.get_name())
# Inheritance and polymorphism
class Animal(object):
def run(self):
print('animal is running...')
class Dog(Animal):
def run(self):
print('dog is running...')
def eat(self):
print('dog is eating...')
class Cat(Animal):
def run(self):
print('cat is running...')
def eat(self):
print('cat is running...')
def run_twice(animal):
animal.run()
animal.run()
Dog().run()
Cat().run()
run_twice(Animal())
run_twice(Dog())  # polymorphism
# Open/closed principle: open for extension (new Animal subclasses are allowed), closed for modification (functions like run_twice() that accept an Animal need no changes)
# Static vs dynamic languages: a static language requires an Animal (or a subclass), otherwise run() cannot be called; a dynamic language only requires that the object passed in has a run() method ---- duck typing (cf. file-like object)
# Getting information about an object
type(123) # int
type('str') # str
type(None) # NoneType
type(abs) # builtin_function_or_method
type(Animal()) # __main__.Animal
# more types are defined in the types module
# type() is awkward for checking class inheritance; use the isinstance() function instead, and prefer isinstance()
# the dir() function returns all attributes and methods of an object as a list of str
dir('dir')
# len('ABC') is the same as 'ABC'.__len__(); you can also define your own __len__ method
class MyDog(Dog):
def __len__(self):
return 100
print(len(MyDog()))
# getattr(), setattr() and hasattr() let you inspect and modify an object's state directly
# Class attributes vs instance attributes: never give an instance attribute the same name as a class attribute, or it will shadow the class attribute
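# For example (continuing the classes above):
# d = MyDog()
# hasattr(d, 'run')      # True
# setattr(d, 'age', 3)
# getattr(d, 'age', 0)   # 3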
|
[
"1132331056@qq.com"
] |
1132331056@qq.com
|
6a2758f58f6ef665dec7ea80ebf419557651d695
|
1443c180718ea74cb0862d112a7c08d6ec5d1828
|
/flaskfundamental/DojoSurvey2/DojoSurvey.py
|
2061cf3d5200337887342ecb7b0ebbf99da85a33
|
[] |
no_license
|
Dragonlizard1/Python_Project
|
7ca7e7f4245f1d1394542127c107fe5f79e0cafe
|
be83d84dddc6b1c30fd231a0e15f60da5a5bceb2
|
refs/heads/master
| 2020-03-12T23:41:16.856306
| 2018-04-24T16:39:50
| 2018-04-24T16:39:50
| 130,871,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
from flask import Flask, render_template, request, session, redirect, flash
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
@app.route("/")
def index():
return render_template("form.html")
@app.route("/result", methods = ["POST"])
def infoprocess():
name = request.form["name"]
location = request.form["location"]
language = request.form["language"]
comment = request.form["comment"]
if name == "":
flash("The name field is empty.")
if comment == "":
flash("Please add comment in.")
elif len(comment) > 120:
flash("Please put in less than 120 characters.")
return redirect ("/")
if comment == "":
flash("Please add comment in.")
return redirect ("/")
elif len(comment) > 120:
flash("Please put in less than 120 characters.")
return redirect ("/")
#print name
return render_template("result.html", name1 = name, location1 = location, language1 = language, comment1 = comment)
app.run(debug=True)
|
[
"bobbyimaging@gmail.com"
] |
bobbyimaging@gmail.com
|
691231d66568dfb3947334005eca7c99975d2ce9
|
32d4e716d6291b95716541e55e166e9b8fc87ef4
|
/parser.py
|
7650fbaadac29f60240aa6bd7799aca1dd83e175
|
[] |
no_license
|
ShamilyanOksana/Parser
|
d0555e8e27679fb3c9876e1b2eab5503e032013a
|
05dcf604ff14d8ff60f4c8cdb619bd9c540dfa3c
|
refs/heads/master
| 2021-09-01T12:08:17.433032
| 2017-12-26T22:25:25
| 2017-12-26T22:25:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,514
|
py
|
import requests
from bs4 import BeautifulSoup
class Phone:
pass
def get_html(url):
    r = requests.get(url)
    return r.text
def get_total_pages(html):
soup = BeautifulSoup(html, 'lxml')
pages = soup.find('div', class_='pagination-pages').find_all('a', class_='pagination-page')[-1].get('href')
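    # the href looks like ".../telefony/samsung?p=<N>&q=sumsung";
    # the split below pulls out the page number <N>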
total_pages = pages.split('=')[1].split('&')[0]
return int(total_pages)
def print_information(all_info):
all_info.sort(key=lambda phone: phone.price)
for info in all_info:
print(info.title)
print(info.url)
print(info.price)
print(info.currency)
def get_page_data(html):
    soup = BeautifulSoup(html, 'lxml')
    ads = soup.find('div', class_='catalog-list').find_all('div', class_='description')
    all_info = []  # was referenced without ever being defined (NameError)
    count = 0
for ad in ads:
all_info.append(Phone())
all_info[count].title = get_title(ad)
all_info[count].url = get_link(ad)
pre_price = get_price(ad)
all_info[count].price = pre_price[0]
all_info[count].currency = pre_price[1]
count+=1
return all_info
def get_title(current_ads):
try:
title = current_ads.find('a', class_='item-description-title-link').get('title')
return title
except Exception:
pass
def get_link(current_ads):
try:
link = "https://www.avito.ru" + current_ads.find('a', class_='item-description-title-link').get('href')
return link
except Exception:
pass
def get_price(current_ads):
try:
price = current_ads.find('div', class_='about').text.split(' ')[2:]
if price[0].isdigit() and price[1].isdigit():
currency = price[2]
price = int(price[0])*1000 + int(price[1])
else:
currency = price[1]
price = int(price[0])
return [price, currency]
except Exception:
pass
def main():
url = "https://www.avito.ru/taganrog/telefony/samsung?q=sumsung&p=1"
base_url = "https://www.avito.ru/taganrog/telefony/samsung?"
page_part = "p="
query_part = "&q=sumsung"
html = get_html(url)
total_pages = get_total_pages(html)
# for i in range(1, total_pages+1):
for i in range(1, 2):
url_gen = base_url + page_part + str(i) + query_part
html = get_html(url_gen)
all_info = get_page_data(html)
        print_information(all_info)
if __name__ == "__main__":
main()
|
[
"shamilyanoksana@gmail.com"
] |
shamilyanoksana@gmail.com
|
2847baf0977045d715c296153c4a804ffd01798a
|
0592c83ef8bed931d310c1233a0e329a21876cbe
|
/tests/test_dataset.py
|
d4f1a84980c48f6536ff0ed15f5ab4dc09a3e1f3
|
[] |
no_license
|
datastory-org/frame2package
|
81d12439715f42dce8cdbd80853c16bba481da28
|
bea7e7d45ced2e9792078088b1e6271360bc86f8
|
refs/heads/master
| 2020-04-17T04:50:40.112731
| 2019-06-18T10:58:18
| 2019-06-18T10:58:18
| 166,248,711
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,867
|
py
|
import unittest
import pandas as pd
from frame2package import Dataset, Concept
class DatasetTestCase(unittest.TestCase):
def setUp(self):
data = [
{
'country': 'Sweden',
'capital': 'Stockholm',
'year': 2000,
'population': 9_000_000
},
{
'country': 'Sweden',
'capital': 'Stockholm',
'year': 2019,
'population': 10_000_000
},
{
'country': 'Norway',
'capital': 'Oslo',
'year': 2000,
'population': 5_000_000
},
{
'country': 'Norway',
'capital': 'Oslo',
'year': 2019,
'population': 6_000_000
},
]
concepts = [
{
'concept': 'country',
'concept_type': 'entity_domain'
},
{
'concept': 'capital',
'concept_type': 'string'
},
{
'concept': 'population',
'concept_type': 'measure'
},
{
'concept': 'year',
'concept_type': 'time'
}
]
self.data = data
self.concepts = concepts
self.dataset = Dataset(pd.DataFrame(data), concepts)
def test_has_concepts(self):
self.assertTrue(hasattr(self.dataset, 'concepts'))
def test_has_entities(self):
self.assertTrue(hasattr(self.dataset, 'entities'))
def test_has_tables(self):
self.assertTrue(hasattr(self.dataset, 'tables'))
def test_has_data(self):
self.assertTrue(hasattr(self.dataset, 'data'))
def test_data_is_frame(self):
self.assertTrue(type(self.dataset.data) is pd.DataFrame)
def test_concept_type(self):
self.assertTrue(all([type(x) is Concept
for x in self.dataset.concepts]))
def test_has_correct_number_of_entities(self):
self.assertEqual(len(self.dataset.entities), 1)
def test_fails_if_missing_concepts(self):
data = pd.DataFrame(self.data)
def create_dataset_with_missing_concepts():
return Dataset(data, self.concepts[:-1])
self.assertRaises(ValueError, create_dataset_with_missing_concepts)
def test_creates_correct_table_name(self):
table_name = self.dataset.tables[0][0]
expected = 'ddf--datapoints--population--by--country--year.csv'
self.assertEqual(table_name, expected)
def test_creates_correct_table_size(self):
self.assertEqual(self.dataset.tables[0][1].shape, (4, 3))
def test_records_extra_string_concepts(self):
self.assertIn('capital', self.dataset.concepts)
|
[
"robin.linderborg@gmail.com"
] |
robin.linderborg@gmail.com
|
46f9807e15556efa7d2439bee101b14f588ee791
|
44413721791e00e5e0d728d2063cce9d072680bc
|
/env/bin/jupyter-nbextension
|
f272010110d6ad96be430c20709bbda7f2ea6cb7
|
[] |
no_license
|
andriyka/term-extraction-and-ontology-learning
|
5174ba52db93bc3dd22b75a41c998c5e23a3bcd5
|
2fa478f1f6f28949d461331f6e8348f86bd344e1
|
refs/heads/master
| 2020-03-21T19:05:07.755413
| 2018-07-09T16:46:58
| 2018-07-09T16:46:58
| 138,929,875
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
#!/home/ankus/Documents/ucu/terms/ate/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from notebook.nbextensions import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ankus@ciklum.com"
] |
ankus@ciklum.com
|
|
05f1c23936d977e70fdef1e44fc27ab9f069cadf
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/common/Lib/encodings/gbk.py
|
4b4a46dcbfdea9c2f98724c76a52405e54febf9c
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,136
|
py
|
# 2016.11.19 19:58:56 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/encodings/gbk.py
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gbk')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(name='gbk', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\encodings\gbk.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:58:56 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
d371052b610c7808f4397cc46872d84018712958
|
78224a508b75e7958cec6a2759b8ba4c46cb4bfc
|
/exchange/okex/HttpMD5Util.py
|
a15b45816241c7353aa08cd55590e4fe1a805b91
|
[] |
no_license
|
80000v/CryptoArb
|
34e731b11c3b29a3643c1aa79b921e0ef879b4d9
|
5b9d3e05af99a70a09481f1370bc863f7ca84d66
|
refs/heads/master
| 2021-04-20T10:24:07.959747
| 2019-04-04T14:17:46
| 2019-04-04T14:17:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Utility module for making HTTP requests and generating MD5 signatures
import requests
import hashlib
# Initialize apikey, secretkey and base URL
apikey = '1cd704d7-d549-436b-a5ee-df7e401843d3'
secretkey = '1AE1EE7238F5485D35E128194B821181'
okcoinRESTURL = 'https://www.okcoin.cn'
BaseUrl = "/v2/auth/login"
DEFAULT_POST_HEADERS = {
# "Authorization":"eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiI5NjQ5MGI4Ni0zOWExLTQyMWEtYmEzYi03YTAxNTkwYTg1N2MiLCJhdWRpZW5jZSI6IndlYiIsImNyZWF0ZWQiOjE1MDE1NTkzMzE0MzEsImV4cCI6MTUwMjE2NDEzMX0.crVupk8Tc4ki_TIT-tLmTpBxEjdOt4Ww3b3GoP0TJebCUT_TIxvBjzeTFRnnchbGwUHvrSoqp0cVofVaENkA6Q"
"Authorization":None,
'Content-Type': 'application/json',
"User-Agent": "Chrome/39.0.2171.71",
"Accept": "application/json",
"authRequest":"authRequest"
}
def buildMySign(params,secretKey):
sign = ''
for key in sorted(params.keys()):
sign += key + '=' + str(params[key]) +'&'
data = sign+'secret_key='+secretKey
return hashlib.md5(data.encode("utf8")).hexdigest().upper()
def httpGet(url,resource,params=''):
# conn = http.client.HTTPSConnection(url, timeout=10)
# conn.request("GET",resource + '?' + params)
# response = conn.getresponse()
# data = response.read().decode('utf-8')
# return json.loads(data)
    try:
        # Mirror httpPost: append the resource path when one is given
        if resource:
            url = url + resource
        response = requests.get(url, params, timeout=5)
if response.status_code == 200:
return response.json()
else:
return {"result":"fail"}
except Exception as e:
print("httpGet failed, detail is:%s" % e)
return
def httpPost(url,resource,params):
headers = {
"Content-type" : "application/x-www-form-urlencoded",
}
# conn = http.client.HTTPSConnection(url, timeout=10)
# temp_params = urllib.parse.urlencode(params)
# conn.request("POST", resource, temp_params, headers)
# response = conn.getresponse()
# data = response.read().decode('utf-8')
# params.clear()
# conn.close()
# return data
try:
if resource:
url = url + resource
response = requests.post(url, params, headers=headers, timeout=5)
if response.status_code == 200:
return response.json()
else:
return
except Exception as e:
print("httpPost failed, detail is:%s" % e)
return
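# A minimal usage sketch; the endpoint path and parameter names below are
# illustrative assumptions, not a documented API call:
if __name__ == '__main__':
    demo_params = {'api_key': apikey, 'symbol': 'btc_cny'}
    demo_params['sign'] = buildMySign(demo_params, secretkey)
    print(httpGet(okcoinRESTURL, '/api/v1/ticker.do', demo_params))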
|
[
"huang.xinyu@wanlitechnologies.com"
] |
huang.xinyu@wanlitechnologies.com
|
b74ebd69ba2428966df06b67ec9e088623bd0bc7
|
b7b2728bcfeda781ef79540dc46577f4a772e471
|
/django_hbase/models/exceptions.py
|
6cfcf27b10ce6d62f13764fc2cea8fbbda7e7c11
|
[] |
no_license
|
Veronica1026/django-twitter
|
7dd8e0efe84d50654bc92f83bf6ac0bb0c6b432e
|
e28e8fe5443db48b761cd2e4e6a43e0d0c3590ff
|
refs/heads/main
| 2023-08-25T16:15:36.092192
| 2021-10-23T10:41:00
| 2021-10-23T10:41:00
| 364,218,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
class BadRowKeyError(Exception):
pass
class EmptyColumnError(Exception):
pass
|
[
"543587590@qq.com"
] |
543587590@qq.com
|
0d3f672dc0e572c955fb17809d11692cbcc434be
|
c01e107f3b781df76f83ca470c22c32cacf7ddb3
|
/src/qsimulator.py
|
1d6549fd4668f7e4d098970cfbe3b45a9491cc92
|
[] |
no_license
|
UB-Quantic/EG-VQClass
|
593d24d10da3295532fa2064d098b59de433e91e
|
ff3ae612d666c80d6dbc38d461ecae79e3c82208
|
refs/heads/master
| 2020-04-27T17:28:11.308759
| 2019-03-26T20:28:21
| 2019-03-26T20:28:21
| 174,520,998
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,336
|
py
|
import numpy as np
import math
import cmath
Pi = math.pi
class QC(object):
def __init__(self, qubits):
self.size = qubits
"""
The quantum state is initialized with all qubits at 0.
"""
self.state = [0]*2**self.size
self.state[0] = 1.
def initialize(self):
"""Brings the state vector back to its initial state.
"""
self.state = [0]*2**self.size
self.state[0] = 1.
###############################
# 1-Qubit Gates
###############################
def h(self, m):
"""Apply the Hadamard Gate on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
"""
s = 1/np.sqrt(2)
if m>=self.size: raise ValueError('Qubit does not exist.')
for i in range(2**(self.size-1)):
I = 2*i-i%(2**m)
J = I+2**m
a = s*self.state[I] + s*self.state[J]
b = s*self.state[I] - s*self.state[J]
self.state[I] = a
self.state[J] = b
def x(self, m):
"""Apply the X Pauli Gate on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
for i in range(2**(self.size-1)):
I = 2*i-i%(2**m)
J = I+2**m
a = self.state[I]
self.state[I] = self.state[J]
self.state[J] = a
def y(self, m):
"""Apply the Y Pauli Gate on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
for i in range(2**(self.size-1)):
I = 2*i -i%(2**m)
J = I+2**m
a = -1.j * self.state[I]
self.state[I] = 1.j*self.state[J]
self.state[J] = a
def z(self, m):
"""Apply the Z Pauli Gate on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
for i in range(2**(self.size-1)):
J = 2*i - i%(2**m) + 2**m
self.state[J] *= -1
def s(self, m):
"""Apply the Phase Gate on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
for i in range(2**(self.size-1)):
J = 2*i - i%(2**m) + 2**m
self.state[J] *= 1.j
def t(self, m):
"""Apply the pi/8 Gate on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
aux = cmath.exp(0.25j*math.pi)
for i in range(2**(self.size-1)):
J = 2*i - i%(2**m) + 2**m
self.state[J] *= aux
def rx(self, m, th):
"""Apply a x-rotation on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
th (float): angle we rotate.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
th2 = 0.5*th
c = math.cos(th2)
s = -1.j * math.sin(th2) # beware of conventions
for i in range(2**(self.size-1)):
I = 2*i - i%2**m
J = I + 2**m
a = c*self.state[I] + s*self.state[J]
b = s*self.state[I] + c*self.state[J]
self.state[I] = a
self.state[J] = b
def ry(self, m, th):
"""Apply a y-rotation on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
th (float): angle we rotate.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
th2 = 0.5*th
c = math.cos(th2)
s = math.sin(th2) # beware of conventions
for i in range(2**(self.size-1)):
I = 2*i - i%2**m
J = I + 2**m
a = c*self.state[I] - s*self.state[J]
b = s*self.state[I] + c*self.state[J]
self.state[I] = a
self.state[J] = b
def rz(self, m, th):
"""Apply a z-rotation on the m'th qubit.
Args.
m (int): the qubit we apply our gate on.
th (float): angle we rotate.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
aux1 = cmath.exp(0.5j*th)
aux2 = cmath.exp(-0.5j*th)
for i in range(2**(self.size-1)):
I = 2*i - i%2**m
J = I + 2**m
self.state[I] *= aux1
self.state[J] *= aux2
#######################################
# 2-Qubit Gates, Entanglement
#######################################
def cnot(self, c, t):
"""Apply a Controlled-NOT gate.
Args.
c (int): control qubit.
t (int): target qubit.
"""
if c>=self.size: raise ValueError('Control does not exist.')
if t>=self.size: raise ValueError('Target does not exist.')
if c==t: raise ValueError('Control and Target cannot be the same.')
for i in range(2**(self.size-2)):
I = (2**c + i%2**c + ((i-i%2**c)*2)%2**t + 2*((i-i%2**c)*2 -
((2*(i-i%2**c))%2**t)))
J = I + 2**t
self.state[I], self.state[J] = self.state[J], self.state[I]
def cz(self, c, t):
"""Apply a Controlled-Z gate.
Args.
c (int): control qubit.
t (int): target qubit.
"""
if c>=self.size: raise ValueError('Control does not exist.')
if t>=self.size: raise ValueError('Target does not exist.')
if c==t: raise ValueError('Control and Target cannot be the same.')
if t<c: t,c = c,t
for i in range(2**(self.size-2)):
I = (2**c + i%2**c + ((i-i%2**c)*2)%2**t + 2*((i-i%2**c)*2 -
((2*(i-i%2**c))%2**t)) + 2**t)
self.state[I] *= -1
def swap(self, m, n):
"""Apply a SWAP gate.
Args.
m (int): first qubit.
n (int): second qubit.
"""
if m>=self.size: raise ValueError('First Qubit does not exist.')
if n>=self.size: raise ValueError('Second Qubit does not exist.')
if m==n: raise ValueError('Both Qubits cannot be the same.')
for i in range(2**(self.size-2)):
I = (i%2**m + ((i-i%2**m)*2)%2**n + 2*((i-i%2**m)*2 -
((2*(i-i%2**m))%2**n)) + 2**n)
J = I + 2**m - 2**n
self.state[I], self.state[J] = self.state[J], self.state[I]
############################################
# Circuits
############################################
# The following were created for classification using 4-qubits
def encode(self, point):
"""Creates the encoding layer.
Args.
point (dim=2 float): coordinates of one input point.
"""
for i in range(self.size):
self.h(i)
self.rz(i, point[i%2])
def blocka(self, angles, qubits=[0,1,2,3]):
"""Adds a block of type a.
Args.
angles (dim=8 float): rotation angles for each gate .
qubits (dim=4 int): qubits the block acts on.
"""
for i in range(4):
self.rx(qubits[i], angles[i])
self.cz(qubits[0], qubits[1])
self.cz(qubits[2], qubits[3])
for i in range(4):
self.ry(qubits[i], angles[4+i])
self.cz(qubits[1], qubits[2])
self.cz(qubits[0], qubits[3])
def blockb(self, angles, qubits=[0,1,2,3]):
"""Adds a block of type b.
Args.
angles (dim=8 float): rotation angles for each gate.
qubits (dim=4 int): qubits the block acts on.
"""
for i in range(4):
self.ry(qubits[i], angles[i])
self.cz(qubits[0], qubits[1])
self.cz(qubits[2], qubits[3])
for i in range(4):
self.rx(qubits[i], angles[4+i])
self.cz(qubits[1], qubits[2])
self.cz(qubits[0], qubits[3])
def blockc(self, angles, qubits=[0,1,2,3]):
"""Adds a block of type c.
Args.
angles (dim=8 float): rotation angles for each gate.
qubits (dim=4 int): qubits the block acts on.
"""
self.rx(qubits[0], angles[0])
self.ry(qubits[1], angles[1])
self.rx(qubits[2], angles[2])
self.ry(qubits[3], angles[3])
self.cz(qubits[0], qubits[1])
self.cz(qubits[2], qubits[3])
self.ry(qubits[0], angles[4])
self.rx(qubits[1], angles[5])
self.ry(qubits[2], angles[6])
self.rx(qubits[3], angles[7])
self.cz(qubits[1], qubits[2])
self.cz(qubits[0], qubits[3])
def blockd(self, angles, qubits=[0,1,2,3]):
"""Adds a block of type d.
Args.
angles (dim=8 float): rotation angles for each gate.
qubits (dim=4 int): qubits the block acts on.
"""
self.rx(qubits[0], angles[0])
self.ry(qubits[1], angles[1])
self.rx(qubits[2], angles[2])
self.ry(qubits[3], angles[3])
self.cz(qubits[0], qubits[1])
self.cz(qubits[2], qubits[3])
self.rx(qubits[0], angles[4])
self.ry(qubits[1], angles[5])
self.rx(qubits[2], angles[6])
self.ry(qubits[3], angles[7])
self.cz(qubits[1], qubits[2])
self.cz(qubits[0], qubits[3])
def blockx(self, angles, qubits=[0,1,2,3]):
"""Adds a block of type x.
Args.
angles (dim=8 float): rotation angles for each gate.
qubits (dim=4 int): qubits the block acts on.
"""
for i in range(4):
self.rx(qubits[i], angles[i])
self.cz(qubits[0], qubits[1])
self.cz(qubits[2], qubits[3])
for i in range(4):
self.rx(qubits[i], angles[4+i])
self.cz(qubits[1], qubits[2])
self.cz(qubits[0], qubits[3])
def blocky(self, angles, qubits=[0,1,2,3]):
"""Adds a block of type y.
Args.
angles(dim=8 float): rotation angles for each gate.
qubits (dim=4 int): qubits the block acts on.
"""
for i in range(4):
self.ry(qubits[i], angles[i])
self.cz(qubits[0], qubits[1])
self.cz(qubits[2], qubits[3])
for i in range(4):
self.ry(qubits[i], angles[4+i])
self.cz(qubits[1], qubits[2])
self.cz(qubits[0], qubits[3])
def add(self, typ, angles, qubits=[0,1,2,3]):
"""Adds a block of a certain type in a given position.
Args.
typ (char): type of circuit 'a', 'b', 'c' or 'd'.
angles (dim=8 float): rotation angles for each gate.
qubits (dim=4 int): which qubits the block acts on.
Rets.
success (int): indicates whether some error flag was raised.
"""
        if typ not in 'abcdxy':
            print("Wrong key for type.")
            return 1
        # Map to the bound methods and call only the selected block; the
        # original dict of call results applied every block type eagerly.
        block = {
            'a': self.blocka,
            'b': self.blockb,
            'c': self.blockc,
            'd': self.blockd,
            'x': self.blockx,
            'y': self.blocky,
        }[typ]
        block(angles, qubits)
        return 0
# The following are intended to be used with 1-qubit circuits.
def unitary(self, m, theta, phi, lamb):
"""Apply an arbitrary unitary gate on the m'th qubit.
Every unitary gate is characterized by three angles.
Args.
m (int): qubit the gate is applied on.
            theta (float): first angle.
            phi (float): second angle.
            lamb (float): third angle.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
        c = math.cos(0.5*theta)
        s = math.sin(0.5*theta)
ephi = cmath.exp(1j*phi)
elamb = cmath.exp(1j*lamb)
for i in range(2**(self.size-1)):
I = 2*i -i%(2**m)
J = I+2**m
a = c*self.state[I] - s*elamb*self.state[J]
b = s*ephi*self.state[I] + c*ephi*elamb*self.state[J]
self.state[I] = a
self.state[J] = b
def block(self, m, point, angles, style=0):
"""Apply a learning block on the m'th qubit.
Args.
m (int): qubit the block is applied on.
point (dim=2 float): coordinates of input.
angles (dim=3 float): angles that determine a unitary gate.
style (int): customizes the block.
"""
if m>=self.size: raise ValueError('Qubit does not exist.')
if style:
self.unitary(m, point[0]+angles[0], point[1]+angles[1], angles[2])
else:
self.ry(m, point[0]*0.5*Pi)
self.rz(m, (1+point[1])*Pi)
self.unitary(m, angles[0], angles[1], angles[2])
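# A minimal usage sketch (the input point and angles are arbitrary
# illustrative values, not from any experiment):
if __name__ == '__main__':
    qc = QC(4)
    qc.encode([0.3, 0.7])                     # encoding layer for a 2-D input
    qc.add('a', [0.1 * k for k in range(8)])  # one variational block of type a
    print(qc.state[:4])                       # first few amplitudes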
|
[
"emgilfuster@gmail.com"
] |
emgilfuster@gmail.com
|
237743cb29e83580cbade37977253888764a05b4
|
f4f54015298eedfbbdfcaaf5e2a9603112f803a5
|
/sachin/gocept.filestore-0.3/gocept.filestore-0.3/src/gocept/filestore/tests.py
|
39487c46c2cf44f18a2df60610d46b4e1e9848c4
|
[] |
no_license
|
raviramawat8/Old_Python_Codes
|
f61e19bff46856fda230a096aa789c7e54bd97ca
|
f940aed0611b0636e1a1b6826fa009ceb2473c2b
|
refs/heads/master
| 2020-03-22T22:54:50.964816
| 2018-06-16T01:39:43
| 2018-06-16T01:39:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# Copyright (c) 2007 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id: tests.py 5111 2007-08-30 11:27:23Z zagy $
import unittest
from zope.testing import doctest
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocFileSuite(
'README.txt',
optionflags=doctest.ELLIPSIS))
return suite
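# Conventional entry point (an addition; assumes the stdlib runner):
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')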
|
[
"sachinyadav3496@gmail.com"
] |
sachinyadav3496@gmail.com
|
672931fd1ee8dae6d584fb3ff8d812002ab628cc
|
e62a8c1ee3ac295f8028164d6ba4993c189fd774
|
/btpython/testbikieu.py
|
4397d561992f473bb43fea830ed8f408eaa117f4
|
[] |
no_license
|
thanhthai3457/Linux
|
8ac32919a59189ff35e9c2c3883303893bd245f7
|
55fd16be99922a1c6c9958ae3c1f0af40879b5a7
|
refs/heads/master
| 2020-03-11T18:43:05.248945
| 2018-06-13T15:45:50
| 2018-06-13T15:45:50
| 130,185,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from bikeu import thai
sv1 = thai()
sv1.set_ten()
sv1.set_sdt()
print ("Thông tin")
sv1.In()
|
[
"thanh@example.com"
] |
thanh@example.com
|
9805ffe4daef50c8bdfe737999913fe9357c8479
|
e4da82e4beb9b1af7694fd5b49824a1c53ee59ff
|
/AutoWorkup/SEMTools/registration/averagebraingenerator.py
|
b206faa7d7b842adead8675771f35338e6d91db4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ipekoguz/BRAINSTools
|
c8732a9206525adb5779eb0c2ed97f448e2df47f
|
dc32fa0820a0d0b3bd882fa744e79194c9c137bc
|
refs/heads/master
| 2021-01-18T08:37:03.883250
| 2013-05-14T21:08:33
| 2013-05-14T21:08:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class AverageBrainGeneratorInputSpec(CommandLineInputSpec):
inputDirectory = File(desc="Image To Warp", exists=True, argstr="--inputDirectory %s")
templateVolume = File(desc="Reference image defining the output space", exists=True, argstr="--templateVolume %s")
resolusion = traits.Str(desc="The resolusion.", argstr="--resolusion %s")
iteration = traits.Str(desc="The iteration.", argstr="--iteration %s")
pixelType = traits.Enum("uchar", "short", "ushort", "int", "uint", "float", desc="Specifies the pixel type for the input/output images", argstr="--pixelType %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Resulting deformed image", argstr="--outputVolume %s")
class AverageBrainGeneratorOutputSpec(TraitedSpec):
outputVolume = File(desc="Resulting deformed image", exists=True)
class AverageBrainGenerator(SEMLikeCommandLine):
"""title: Average Brain Generator
category: Registration
description:
This programs creates synthesized average brain.
version: 0.1
documentation-url: http:://mri.radiology.uiowa.edu/mriwiki
license: NEED TO ADD
contributor: This tool was developed by Yongqiang Zhao.
"""
input_spec = AverageBrainGeneratorInputSpec
output_spec = AverageBrainGeneratorOutputSpec
_cmd = " AverageBrainGenerator "
_outputs_filenames = {'outputVolume':'outputVolume'}
|
[
"hans-johnson@uiowa.edu"
] |
hans-johnson@uiowa.edu
|
6af4d1ec5bd8fce9532cd1238fb58d598e8ad97f
|
ad7dd3db001cbf322d0944c120b42e78b9fe00b9
|
/champakraja/ramu.py
|
febc73ee66c76c06e50b7ac645f3a8c690a56002
|
[
"MIT"
] |
permissive
|
jeldikk/champakraja
|
ebfd4ff04a0a1e48b2d6f31c4695e4ddae532e64
|
1462be4c8458b5bc2816b9aa69c1845482e702e1
|
refs/heads/master
| 2022-12-03T03:04:33.217318
| 2020-08-22T16:53:09
| 2020-08-22T16:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
from .base import character
class ramu(character):
def __init__(self,name):
self._name = name
@property
def name(self):
return self._name
def books(self):
return ('chandamama', 'swathi', 'ramayanam', 'Mahabharatham',)
def hobbies(self):
return ('respecting', 'worship god',)
def activities(self):
return ('job', 'early namaskara', 'orthodox rituals')
def hairstyle(self):
return ('long hair with a pony tail',)
def nature(self):
return ('cowardice', 'responsible',)
|
[
"jeldi.kamal2011@gmail.com"
] |
jeldi.kamal2011@gmail.com
|
76755ff963dbd261a204a635342afde89fe3cf1b
|
f12ca610566e7249c892811bafc37594abe7895a
|
/orangecontrib/text/country_codes.py
|
17a5b1ff2687507b4e62449ea0e34095ab18856a
|
[
"BSD-2-Clause"
] |
permissive
|
nagyistoce/orange3-text
|
d04e6dfa68a7e86a4947c08bc2a078b4c0e772f5
|
fbdc3320b00a88c62ba866a671f28694958f6921
|
refs/heads/master
| 2021-01-21T09:43:25.598139
| 2015-06-27T14:32:09
| 2015-06-27T14:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,422
|
py
|
"""Country names to ISO3166_alpha2 codes mapping
Roughly generated by the following bash script on GNU/Linux:
while read cc name; do
[ ! "$cc" ] &&
continue
out=$(isoquery $cc | cut -f3 --complement);
[ ! "$out" ] &&
out="$cc"
[ "$(echo $out | cut -f3)" = "$name" ] &&
name=''
echo -e "$out\t$name" |
sed -r 's/\s+$//' |
sed -r "s/\t/': ['/" |
sed -r "s/\t/', '/g" |
sed -r "s/^/'/" |
sed -r 's/$/'"'"',],/'
done < input/cc.list # cc.list from jVectorMap; format: lines start with ISO3166_alpha2_code else copied as is
Certain details updated by hand.
"""
CC_EUROPE = {
'_0': ['Kosovo', 'Kosovo, Republic of'],
'-99': ['N. Cyprus', 'North Cyprus'],
'AD': ['AND', 'Andorra'],
'AL': ['ALB', 'Albania'],
'AT': ['AUT', 'Austria'],
'AX': ['ALA', 'Åland Islands', 'Aland'],
'BA': ['BIH', 'Bosnia and Herzegovina', 'Bosnia and Herz.'],
'BE': ['BEL', 'Belgium'],
'BG': ['BGR', 'Bulgaria'],
'BY': ['BLR', 'Belarus'],
'CH': ['CHE', 'Switzerland'],
'CY': ['CYP', 'Cyprus'],
'CZ': ['CZE', 'Czech Republic', 'Czech Rep.'],
'DE': ['DEU', 'Germany'],
'DK': ['DNK', 'Denmark'],
'DZ': ['DZA', 'Algeria'],
'EE': ['EST', 'Estonia'],
'EG': ['EGY', 'Egypt'],
'ES': ['ESP', 'Spain'],
'FI': ['FIN', 'Finland'],
'FO': ['FRO', 'Faroe Islands', 'Faeroe Is.'],
'FR': ['FRA', 'France'],
'GB': ['GBR', 'United Kingdom'],
'GE': ['GEO', 'Georgia'],
'GG': ['GGY', 'Guernsey'],
'GR': ['GRC', 'Greece'],
'HR': ['HRV', 'Croatia'],
'HU': ['HUN', 'Hungary'],
'IE': ['IRL', 'Ireland'],
'IL': ['ISR', 'Israel'],
'IM': ['IMN', 'Isle of Man'],
'IQ': ['IRQ', 'Iraq'],
'IS': ['ISL', 'Iceland'],
'IT': ['ITA', 'Italy'],
'JE': ['JEY', 'Jersey'],
'JO': ['JOR', 'Jordan'],
'LB': ['LBN', 'Lebanon'],
'LI': ['LIE', 'Liechtenstein'],
'LT': ['LTU', 'Lithuania'],
'LU': ['LUX', 'Luxembourg'],
'LV': ['LVA', 'Latvia'],
'LY': ['LBY', 'Libya'],
'MA': ['MAR', 'Morocco'],
'MD': ['MDA', 'Moldova, Republic of', 'Moldova'],
'ME': ['MNE', 'Montenegro'],
'MK': ['MKD', 'Macedonia, Republic of', 'Macedonia'],
'MT': ['MLT', 'Malta'],
'NL': ['NLD', 'Netherlands'],
'NO': ['NOR', 'Norway'],
'PL': ['POL', 'Poland'],
'PS': ['PSE', 'Palestine, State of', 'Palestine'],
'PT': ['PRT', 'Portugal'],
'RO': ['ROU', 'Romania'],
'RS': ['SRB', 'Serbia'],
'RU': ['RUS', 'Russian Federation', 'Russia'],
'SA': ['SAU', 'Saudi Arabia'],
'SE': ['SWE', 'Sweden'],
'SI': ['SVN', 'Slovenia'],
'SK': ['SVK', 'Slovakia'],
'SM': ['SMR', 'San Marino'],
'SY': ['SYR', 'Syrian Arab Republic', 'Syria'],
'TN': ['TUN', 'Tunisia'],
'TR': ['TUR', 'Turkey'],
'UA': ['UKR', 'Ukraine'],
}
CC_WORLD = {
# Does NOT include CC_EUROPE
'_1': ['Somaliland',],
'AE': ['ARE', 'United Arab Emirates'],
'AF': ['AFG', 'Afghanistan'],
'AM': ['ARM', 'Armenia'],
'AO': ['AGO', 'Angola'],
'AR': ['ARG', 'Argentina'],
'AU': ['AUS', 'Australia'],
'AZ': ['AZE', 'Azerbaijan'],
'BD': ['BGD', 'Bangladesh'],
'BF': ['BFA', 'Burkina Faso'],
'BI': ['BDI', 'Burundi'],
'BJ': ['BEN', 'Benin'],
'BN': ['BRN', 'Brunei Darussalam', 'Brunei'],
'BO': ['BOL', 'Bolivia, Plurinational State of', 'Bolivia'],
'BR': ['BRA', 'Brazil'],
'BS': ['BHS', 'Bahamas'],
'BT': ['BTN', 'Bhutan'],
'BW': ['BWA', 'Botswana'],
'BZ': ['BLZ', 'Belize'],
'CA': ['CAN', 'Canada'],
'CD': ['COD', 'Congo, The Democratic Republic of the', 'Dem. Rep. Congo'],
'CF': ['CAF', 'Central African Republic', 'Central African Rep.'],
'CG': ['COG', 'Congo'],
'CI': ['CIV', "Côte d'Ivoire"],
'CL': ['CHL', 'Chile'],
'CM': ['CMR', 'Cameroon'],
'CN': ['CHN', 'China'],
'CO': ['COL', 'Colombia'],
'CR': ['CRI', 'Costa Rica'],
'CU': ['CUB', 'Cuba'],
'DJ': ['DJI', 'Djibouti'],
'DO': ['DOM', 'Dominican Republic', 'Dominican Rep.'],
'EC': ['ECU', 'Ecuador'],
'EH': ['ESH', 'Western Sahara', 'W. Sahara'],
'ER': ['ERI', 'Eritrea'],
'ET': ['ETH', 'Ethiopia'],
'FJ': ['FJI', 'Fiji'],
'FK': ['FLK', 'Falkland Islands [Malvinas]', 'Falkland Is.'],
'GA': ['GAB', 'Gabon'],
'GH': ['GHA', 'Ghana'],
'GL': ['GRL', 'Greenland'],
'GM': ['GMB', 'Gambia'],
'GN': ['GIN', 'Guinea'],
'GQ': ['GNQ', 'Equatorial Guinea', 'Eq. Guinea'],
'GT': ['GTM', 'Guatemala'],
'GW': ['GNB', 'Guinea-Bissau'],
'GY': ['GUY', 'Guyana'],
'HN': ['HND', 'Honduras'],
'HT': ['HTI', 'Haiti'],
'ID': ['IDN', 'Indonesia'],
'IN': ['IND', 'India'],
'IR': ['IRN', 'Iran, Islamic Republic of', 'Iran'],
'JM': ['JAM', 'Jamaica'],
'JP': ['JPN', 'Japan'],
'KE': ['KEN', 'Kenya'],
'KG': ['KGZ', 'Kyrgyzstan'],
'KH': ['KHM', 'Cambodia'],
'KP': ['PRK', "Korea, Democratic People's Republic of", 'Dem. Rep. Korea', 'North Korea'],
'KR': ['KOR', 'Korea, Republic of', 'Korea', 'South Korea'],
'KW': ['KWT', 'Kuwait'],
'KZ': ['KAZ', 'Kazakhstan'],
'LA': ['LAO', "Lao People's Democratic Republic", 'Lao PDR'],
'LK': ['LKA', 'Sri Lanka'],
'LR': ['LBR', 'Liberia'],
'LS': ['LSO', 'Lesotho'],
'MG': ['MDG', 'Madagascar'],
'ML': ['MLI', 'Mali'],
'MM': ['MMR', 'Myanmar'],
'MN': ['MNG', 'Mongolia'],
'MR': ['MRT', 'Mauritania'],
'MW': ['MWI', 'Malawi'],
'MX': ['MEX', 'Mexico'],
'MY': ['MYS', 'Malaysia'],
'MZ': ['MOZ', 'Mozambique'],
'NA': ['NAM', 'Namibia'],
'NC': ['NCL', 'New Caledonia'],
'NE': ['NER', 'Niger'],
'NG': ['NGA', 'Nigeria'],
'NI': ['NIC', 'Nicaragua'],
'NP': ['NPL', 'Nepal'],
'NZ': ['NZL', 'New Zealand'],
'OM': ['OMN', 'Oman'],
'PA': ['PAN', 'Panama'],
'PE': ['PER', 'Peru'],
'PG': ['PNG', 'Papua New Guinea'],
'PH': ['PHL', 'Philippines'],
'PK': ['PAK', 'Pakistan'],
'PR': ['PRI', 'Puerto Rico'],
'PY': ['PRY', 'Paraguay'],
'QA': ['QAT', 'Qatar'],
'RW': ['RWA', 'Rwanda'],
'SB': ['SLB', 'Solomon Islands', 'Solomon Is.'],
'SD': ['SDN', 'Sudan'],
'SL': ['SLE', 'Sierra Leone'],
'SN': ['SEN', 'Senegal'],
'SO': ['SOM', 'Somalia'],
'SR': ['SUR', 'Suriname'],
'SS': ['SSD', 'South Sudan', 'S. Sudan'],
'SV': ['SLV', 'El Salvador'],
'SZ': ['SWZ', 'Swaziland'],
'TD': ['TCD', 'Chad'],
'TF': ['ATF', 'French Southern Territories', 'Fr. S. Antarctic Lands'],
'TG': ['TGO', 'Togo'],
'TH': ['THA', 'Thailand'],
'TJ': ['TJK', 'Tajikistan'],
'TL': ['TLS', 'Timor-Leste'],
'TM': ['TKM', 'Turkmenistan'],
'TT': ['TTO', 'Trinidad and Tobago'],
'TW': ['TWN', 'Taiwan, Province of China', 'Taiwan'],
'TZ': ['TZA', 'Tanzania, United Republic of', 'Tanzania'],
'UG': ['UGA', 'Uganda'],
'US': ['USA', 'United States', 'United States of America'],
'UY': ['URY', 'Uruguay'],
'UZ': ['UZB', 'Uzbekistan'],
'VE': ['VEN', 'Venezuela, Bolivarian Republic of', 'Venezuela'],
'VN': ['VNM', 'Viet Nam', 'Vietnam'],
'VU': ['VUT', 'Vanuatu'],
'YE': ['YEM', 'Yemen'],
'ZA': ['ZAF', 'South Africa'],
'ZM': ['ZMB', 'Zambia'],
'ZW': ['ZWE', 'Zimbabwe'],
}
CC_WORLD.update(CC_EUROPE)
CC_USA = {
'US-AK': ['AK', 'Alaska'],
'US-AL': ['AL', 'Alabama'],
'US-AR': ['AR', 'Arkansas'],
'US-AZ': ['AZ', 'Arizona'],
'US-CA': ['CA', 'California'],
'US-CO': ['CO', 'Colorado'],
'US-CT': ['CT', 'Connecticut'],
'US-DC': ['DC', 'District of Columbia'],
'US-DE': ['DE', 'Delaware'],
'US-FL': ['FL', 'Florida'],
'US-GA': ['GA', 'Georgia'],
'US-HI': ['HI', 'Hawaii'],
'US-IA': ['IA', 'Iowa'],
'US-ID': ['ID', 'Idaho'],
'US-IL': ['IL', 'Illinois'],
'US-IN': ['IN', 'Indiana'],
'US-KS': ['KS', 'Kansas'],
'US-KY': ['KY', 'Kentucky'],
'US-LA': ['LA', 'Louisiana'],
'US-MA': ['MA', 'Massachusetts'],
'US-MD': ['MD', 'Maryland'],
'US-ME': ['ME', 'Maine'],
'US-MI': ['MI', 'Michigan'],
'US-MN': ['MN', 'Minnesota'],
'US-MO': ['MO', 'Missouri'],
'US-MS': ['MS', 'Mississippi'],
'US-MT': ['MT', 'Montana'],
'US-NC': ['NC', 'North Carolina'],
'US-ND': ['ND', 'North Dakota'],
'US-NE': ['NE', 'Nebraska'],
'US-NH': ['NH', 'New Hampshire'],
'US-NJ': ['NJ', 'New Jersey'],
'US-NM': ['NM', 'New Mexico'],
'US-NV': ['NV', 'Nevada'],
'US-NY': ['NY', 'New York'],
'US-OH': ['OH', 'Ohio'],
'US-OK': ['OK', 'Oklahoma'],
'US-OR': ['OR', 'Oregon'],
'US-PA': ['PA', 'Pennsylvania'],
'US-RI': ['RI', 'Rhode Island'],
'US-SC': ['SC', 'South Carolina'],
'US-SD': ['SD', 'South Dakota'],
'US-TN': ['TN', 'Tennessee'],
'US-TX': ['TX', 'Texas'],
'US-UT': ['UT', 'Utah'],
'US-VA': ['VA', 'Virginia'],
'US-VT': ['VT', 'Vermont'],
'US-WA': ['WA', 'Washington'],
'US-WI': ['WI', 'Wisconsin'],
'US-WV': ['WV', 'West Virginia'],
'US-WY': ['WY', 'Wyoming'],
}
def _invert_mapping(mapping):
    return {v: k for k in mapping for v in mapping[k]}
INV_CC_EUROPE = _invert_mapping(CC_EUROPE)
INV_CC_WORLD = _invert_mapping(CC_WORLD)
INV_CC_USA = _invert_mapping(CC_USA)
SET_CC_EUROPE = set(INV_CC_EUROPE.keys()) | set(INV_CC_EUROPE.values())
SET_CC_USA = set(INV_CC_USA.keys()) | set(INV_CC_USA.values())
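# A minimal lookup sketch (illustrative names only):
if __name__ == '__main__':
    print(INV_CC_WORLD['Sweden'])    # -> 'SE'
    print(INV_CC_USA['California'])  # -> 'US-CA'
    print('Texas' in SET_CC_USA)     # -> True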
|
[
"kerncece@gmail.com"
] |
kerncece@gmail.com
|
1b6117c360304db090e45da73264909875f05ed9
|
5beb2410b95be9d26cfca2094a446ec2be16ce50
|
/ma/01.py
|
4d605bc065cc51c0c1723ede396f4b18f370e22a
|
[] |
no_license
|
1361217049/python
|
abbde08f88125aa21e6f24aa5183798972c02af3
|
ae92c33437e617203b28aaf6c644c26a0c17fb69
|
refs/heads/master
| 2020-04-01T06:42:34.757234
| 2018-10-14T10:00:33
| 2018-10-14T10:00:33
| 152,960,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Define a class
class Student():
    def out(self):
        print("I love ++++")
han=Student()
han.out()
print(1)
|
[
"1361217049@qq.com"
] |
1361217049@qq.com
|
d733e8db920ee09bf0f15babc827291aeda2b2a9
|
af6e9d54859eaa36742bd670da15ea5542793ca8
|
/5task/send.py
|
8833b1047975b74b572e5f9ffd283c0979f66fbe
|
[] |
no_license
|
gavritenkov/vezdecode
|
e5c068addfa56d0c5a277b861766330ad0c725e0
|
79c9dda1044dd69cbebb0cdf1e08030188251b4b
|
refs/heads/master
| 2023-04-11T09:31:14.123289
| 2021-04-24T18:07:59
| 2021-04-24T18:07:59
| 361,219,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
import string
import smtplib
import random
from cryptography.fernet import Fernet
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
# Key generator
def key_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
msg=str(input('Enter the message: '))
mail = str(input('Enter the recipient email: '))
password_provided = key_generator()
password = password_provided.encode()
salt = b'salt_'
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend()
)
key = base64.urlsafe_b64encode(kdf.derive(password))
msg=msg.encode()
f = Fernet(key)
msg=f.encrypt(msg)
msg=str(msg)
print("\nВаш зашифрованный текст: "+msg)
#SMTP
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# Mailbox created for VezdeKod; messages are sent from this address
server.login("ExampleVezdehod@gmail.com", "VezdehodTula71")
# Send the message
server.sendmail("ExampleVezdehod@gmail.com", mail, msg)
print("\nСообщение было отправлено!\nПолучателю необходим ключ для расшифровки: " +password_provided)
input("")
|
[
"kgavritenkov@gmail.com"
] |
kgavritenkov@gmail.com
|
85daa9a73cfbe7b2a17557ab40ced26375f501d9
|
911fc2c6bc552d83fb0d2481d556e0979cd20101
|
/mdb.py
|
1756c62bf6bbe864e38ce14e929d13411a37b47c
|
[] |
no_license
|
riyasleo10/AM_filter_bot
|
75ed5b67632efa8c3d18911b6fdeb5437ad190c5
|
4193148a0cc4e5e2eaf7caf94943d2a44f4cb3f6
|
refs/heads/main
| 2023-03-22T11:31:36.098569
| 2021-03-18T17:42:54
| 2021-03-18T17:42:54
| 348,805,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,001
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
import re
import pymongo
from pymongo.errors import DuplicateKeyError
from marshmallow.exceptions import ValidationError
from config import DATABASE_URI, DATABASE_NAME
myclient = pymongo.MongoClient(DATABASE_URI)
mydb = myclient[DATABASE_NAME]
async def savefiles(docs, group_id):
mycol = mydb[str(group_id)]
try:
mycol.insert_many(docs, ordered=False)
except Exception:
pass
async def channelgroup(channel_id, channel_name, group_id, group_name):
mycol = mydb["ALL DETAILS"]
channel_details = {
"channel_id" : channel_id,
"channel_name" : channel_name
}
data = {
'_id': group_id,
'group_name' : group_name,
'channel_details' : [channel_details],
}
if mycol.count_documents( {"_id": group_id} ) == 0:
try:
mycol.insert_one(data)
except:
            print('Some error occurred!')
else:
print(f"files in '{channel_name}' linked to '{group_name}' ")
else:
try:
mycol.update_one({'_id': group_id}, {"$push": {"channel_details": channel_details}})
except:
            print('Some error occurred!')
else:
print(f"files in '{channel_name}' linked to '{group_name}' ")
async def ifexists(channel_id, group_id):
mycol = mydb["ALL DETAILS"]
query = mycol.count_documents( {"_id": group_id} )
if query == 0:
return False
else:
ids = mycol.find( {'_id': group_id} )
channelids = []
for id in ids:
for chid in id['channel_details']:
channelids.append(chid['channel_id'])
if channel_id in channelids:
return True
else:
return False
async def deletefiles(channel_id, channel_name, group_id, group_name):
mycol1 = mydb["ALL DETAILS"]
try:
mycol1.update_one(
{"_id": group_id},
{"$pull" : { "channel_details" : {"channel_id":channel_id} } }
)
except:
pass
mycol2 = mydb[str(group_id)]
query2 = {'channel_id' : channel_id}
try:
mycol2.delete_many(query2)
except:
print("Couldn't delete channel")
return False
else:
print(f"filters from '{channel_name}' deleted in '{group_name}'")
return True
async def deletealldetails(group_id):
mycol = mydb["ALL DETAILS"]
query = { "_id": group_id }
try:
mycol.delete_one(query)
except:
pass
async def deletegroupcol(group_id):
mycol = mydb[str(group_id)]
    if mycol.count_documents({}) == 0:  # Collection.count() was removed in newer pymongo
return 1
try:
mycol.drop()
except Exception as e:
print(f"delall group col drop error - {str(e)}")
return 2
else:
return 0
async def channeldetails(group_id):
mycol = mydb["ALL DETAILS"]
query = mycol.count_documents( {"_id": group_id} )
if query == 0:
return False
else:
ids = mycol.find( {'_id': group_id} )
chdetails = []
for id in ids:
for chid in id['channel_details']:
chdetails.append(
str(chid['channel_name']) + " ( <code>" + str(chid['channel_id']) + "</code> )"
)
return chdetails
async def countfilters(group_id):
mycol = mydb[str(group_id)]
    query = mycol.count_documents({})
if query == 0:
return False
else:
return query
async def findgroupid(channel_id):
mycol = mydb["ALL DETAILS"]
ids = mycol.find()
groupids = []
for id in ids:
for chid in id['channel_details']:
if channel_id == chid['channel_id']:
groupids.append(id['_id'])
return groupids
async def searchquery(group_id, name):
mycol = mydb[str(group_id)]
filenames = []
filelinks = []
# looking for a better regex :(
    pattern = name.lower().strip().replace(' ', '.*')
|
[
"noreply@github.com"
] |
noreply@github.com
|
014cbf61158fb280b11d2f149b026f48d5234c0e
|
2e2a54e30f8c8018fe0d163a5fd4b0d854ef165d
|
/src/gluonts/torch/model/deep_npts/_network.py
|
c29d1935c3d32e884ec124b33fde866e0b55aa92
|
[
"Apache-2.0"
] |
permissive
|
kashif/gluon-ts
|
b742021ca0292ca2885b3b079150f24cdf3e6dec
|
a818f69dc049c1c1d57e09d2ccb8b5f7a0cff656
|
refs/heads/master
| 2023-09-05T00:00:22.861992
| 2023-08-09T15:47:28
| 2023-08-09T15:47:28
| 222,552,468
| 5
| 0
| null | 2019-11-18T21:56:52
| 2019-11-18T21:56:52
| null |
UTF-8
|
Python
| false
| false
| 14,377
|
py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from functools import partial
from typing import Optional, Callable, List, Union
import torch
from torch import nn
from torch.distributions import (
Categorical,
MixtureSameFamily,
Normal,
)
from gluonts.core.component import validated
from gluonts.torch.distributions import DiscreteDistribution
from .scaling import (
min_max_scaling,
standard_normal_scaling,
)
INPUT_SCALING_MAP = {
"min_max_scaling": partial(min_max_scaling, dim=1, keepdim=True),
"standard_normal_scaling": partial(
standard_normal_scaling, dim=1, keepdim=True
),
}
def init_weights(module: nn.Module, scale: float = 1.0):
if type(module) == nn.Linear:
nn.init.uniform_(module.weight, -scale, scale)
nn.init.zeros_(module.bias)
class FeatureEmbedder(nn.Module):
"""Creates a feature embedding for the static categorical features."""
@validated()
def __init__(
self,
cardinalities: List[int],
embedding_dimensions: List[int],
):
super().__init__()
assert (
len(cardinalities) > 0
), "Length of `cardinalities` list must be greater than zero"
        assert len(cardinalities) == len(
            embedding_dimensions
        ), "Lengths of `cardinalities` and `embedding_dimensions` should match"
assert all(
[c > 0 for c in cardinalities]
), "Elements of `cardinalities` should be > 0"
assert all(
[d > 0 for d in embedding_dimensions]
), "Elements of `embedding_dims` should be > 0"
        # nn.ModuleList registers the embedding parameters with the module;
        # a plain Python list would hide them from .parameters()/state_dict().
        self.embedders = nn.ModuleList(
            [
                torch.nn.Embedding(num_embeddings=card, embedding_dim=dim)
                for card, dim in zip(cardinalities, embedding_dimensions)
            ]
        )
for embedder in self.embedders:
embedder.apply(init_weights)
def forward(self, features: torch.Tensor):
"""
Parameters
----------
features
Input features to the model, shape: (-1, num_features).
Returns
-------
torch.Tensor
Embedding, shape: (-1, sum(self.embedding_dimensions)).
"""
embedded_features = torch.cat(
[
embedder(features[:, i].long())
for i, embedder in enumerate(self.embedders)
],
dim=-1,
)
return embedded_features
class DeepNPTSNetwork(nn.Module):
"""Base class implementing a simple feed-forward neural network that takes
in static and dynamic features and produces `num_hidden_nodes` independent
outputs. These outputs are then used by derived classes to construct the
forecast distribution for a single time step.
Note that the dynamic features are just treated as independent features
without considering their temporal nature.
"""
@validated()
def __init__(
self,
context_length: int,
num_hidden_nodes: List[int],
cardinality: List[int],
embedding_dimension: List[int],
num_time_features: int,
batch_norm: bool = False,
input_scaling: Optional[Union[Callable, str]] = None,
dropout_rate: float = 0.0,
):
super().__init__()
self.context_length = context_length
self.num_hidden_nodes = num_hidden_nodes
self.batch_norm = batch_norm
self.input_scaling = (
INPUT_SCALING_MAP[input_scaling]
if isinstance(input_scaling, str)
else input_scaling
)
self.dropout_rate = dropout_rate
# Embedding for categorical features
self.embedder = FeatureEmbedder(
cardinalities=cardinality, embedding_dimensions=embedding_dimension
)
total_embedding_dim = sum(embedding_dimension)
# We have two target related features: past_target and observed value
# indicator each of length `context_length`.
# Also, +1 for the static real feature.
dimensions = [
context_length * (num_time_features + 2) + total_embedding_dim + 1
] + num_hidden_nodes
modules: List[nn.Module] = []
for in_features, out_features in zip(dimensions[:-1], dimensions[1:]):
modules += [nn.Linear(in_features, out_features), nn.ReLU()]
if self.batch_norm:
modules.append(nn.BatchNorm1d(out_features))
if self.dropout_rate > 0:
modules.append(nn.Dropout(self.dropout_rate))
self.model = nn.Sequential(*modules)
self.model.apply(partial(init_weights, scale=0.07))
# TODO: Handle missing values using the observed value indicator.
def forward(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
past_target: torch.Tensor,
past_observed_values: torch.Tensor,
past_time_feat: torch.Tensor,
):
"""
Parameters
----------
feat_static_cat
Shape (-1, num_features).
feat_static_real
Shape (-1, num_features).
past_target
Shape (-1, context_length).
past_observed_values
Shape (-1, context_length).
past_time_feat
Shape (-1, context_length, self.num_time_features).
"""
x = past_target
if self.input_scaling:
loc, scale = self.input_scaling(x)
x_scaled = (x - loc) / scale
else:
x_scaled = x
embedded_cat = self.embedder(feat_static_cat)
        # feat_static_real is already a tensor; re-wrapping it in
        # torch.tensor() would copy it and emit a warning.
        static_feat = torch.cat(
            (embedded_cat, feat_static_real),
            dim=1,
        )
time_features = torch.cat(
[
x_scaled.unsqueeze(dim=-1),
past_observed_values.unsqueeze(dim=-1),
past_time_feat,
],
dim=-1,
)
features = torch.cat(
[
time_features.reshape(time_features.shape[0], -1),
static_feat,
],
dim=-1,
)
return self.model(features)
class DeepNPTSNetworkDiscrete(DeepNPTSNetwork):
"""
    Extends `DeepNPTSNetwork` by implementing the output layer which
converts the outputs from the base network into probabilities of length
`context_length`. These probabilities together with the past values in the
context window constitute the one-step-ahead forecast distribution.
Specifically, the forecast is always one of the values observed in the
context window with the corresponding predicted probability.
Parameters
----------
*args
Arguments to ``DeepNPTSNetwork``.
use_softmax
Flag indicating whether to use softmax or normalization for
converting the outputs of the base network to probabilities.
kwargs
Keyword arguments to ``DeepNPTSNetwork``.
"""
@validated()
def __init__(self, *args, use_softmax: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self.use_softmax = use_softmax
        modules: List[nn.Module] = (
            [nn.Dropout(self.dropout_rate)] if self.dropout_rate > 0 else []
        )
modules.append(
nn.Linear(self.num_hidden_nodes[-1], self.context_length)
)
self.output_layer = nn.Sequential(*modules)
self.output_layer.apply(init_weights)
def forward(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
past_target: torch.Tensor,
past_observed_values: torch.Tensor,
past_time_feat: torch.Tensor,
) -> DiscreteDistribution:
h = super().forward(
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_target=past_target,
past_observed_values=past_observed_values,
past_time_feat=past_time_feat,
)
outputs = self.output_layer(h)
probs = (
nn.functional.softmax(outputs, dim=1)
if self.use_softmax
else nn.functional.normalize(
nn.functional.softplus(outputs), p=1, dim=1
)
)
return DiscreteDistribution(values=past_target, probs=probs)
class DeepNPTSNetworkSmooth(DeepNPTSNetwork):
"""
    Extends `DeepNPTSNetwork` by implementing the output layer which
converts the outputs from the base network into a smoothed mixture
distribution. The components of the mixture are Gaussians centered around
the observations in the context window. The mixing probabilities as well as
the width of the Gaussians are predicted by the network.
This mixture distribution represents the one-step-ahead forecast
distribution. Note that the forecast can contain values not observed in the
context window.
"""
@validated()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        modules = (
            [nn.Dropout(self.dropout_rate)] if self.dropout_rate > 0 else []
        )
modules += [
nn.Linear(self.num_hidden_nodes[-1], self.context_length + 1),
nn.Softplus(),
]
self.output_layer = nn.Sequential(*modules)
self.output_layer.apply(init_weights)
def forward(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
past_target: torch.Tensor,
past_observed_values: torch.Tensor,
past_time_feat: torch.Tensor,
) -> MixtureSameFamily:
h = super().forward(
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_target=past_target,
past_observed_values=past_observed_values,
past_time_feat=past_time_feat,
)
outputs = self.output_layer(h)
probs = outputs[:, :-1]
kernel_width = outputs[:, -1:]
mix = Categorical(probs)
components = Normal(loc=past_target, scale=kernel_width)
return MixtureSameFamily(
mixture_distribution=mix, component_distribution=components
)
class DeepNPTSMultiStepNetwork(nn.Module):
"""
Implements multi-step prediction given a trained `DeepNPTSNetwork` model
that outputs one-step-ahead forecast distribution.
"""
@validated()
def __init__(
self,
net: DeepNPTSNetwork,
prediction_length: int,
num_parallel_samples: int = 100,
):
super().__init__()
self.net = net
self.prediction_length = prediction_length
self.num_parallel_samples = num_parallel_samples
def forward(
self,
feat_static_cat: torch.Tensor,
feat_static_real: torch.Tensor,
past_target: torch.Tensor,
past_observed_values: torch.Tensor,
past_time_feat: torch.Tensor,
future_time_feat: torch.Tensor,
):
"""Generates samples from the forecast distribution.
Parameters
----------
feat_static_cat
Shape (-1, num_features).
feat_static_real
Shape (-1, num_features).
past_target
Shape (-1, context_length).
past_observed_values
Shape (-1, context_length).
past_time_feat
Shape (-1, context_length, self.num_time_features).
future_time_feat
Shape (-1, prediction_length, self.num_time_features).
Returns
-------
torch.Tensor
Tensor containing samples from the predicted distribution.
Shape is (-1, self.num_parallel_samples, self.prediction_length).
"""
# Blow up the initial `x` by the number of parallel samples required.
# (batch_size * num_parallel_samples, context_length)
past_target = past_target.repeat_interleave(
self.num_parallel_samples, dim=0
)
# Note that gluonts returns empty future_observed_values.
future_observed_values = torch.ones(
(past_observed_values.shape[0], self.prediction_length)
)
observed_values = torch.cat(
[past_observed_values, future_observed_values], dim=1
)
observed_values = observed_values.repeat_interleave(
self.num_parallel_samples, dim=0
)
time_feat = torch.cat([past_time_feat, future_time_feat], dim=1)
time_feat = time_feat.repeat_interleave(
self.num_parallel_samples, dim=0
)
feat_static_cat = feat_static_cat.repeat_interleave(
self.num_parallel_samples, dim=0
)
feat_static_real = feat_static_real.repeat_interleave(
self.num_parallel_samples, dim=0
)
future_samples = []
for t in range(self.prediction_length):
distr = self.net(
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_target=past_target,
past_observed_values=observed_values[
:, t : -self.prediction_length + t
],
past_time_feat=time_feat[
:, t : -self.prediction_length + t, :
],
)
samples = distr.sample()
if past_target.dim() != samples.dim():
samples = samples.unsqueeze(dim=-1)
future_samples.append(samples)
past_target = torch.cat([past_target[:, 1:], samples], dim=1)
# (batch_size * num_parallel_samples, prediction_length)
samples_out = torch.stack(future_samples, dim=1)
# (batch_size, num_parallel_samples, prediction_length)
return samples_out.reshape(
-1, self.num_parallel_samples, self.prediction_length
)
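# A small illustration (not part of the model) of the repeat_interleave
# pattern used above to blow up a batch for parallel sampling:
#
#     batch = torch.tensor([[1.0, 2.0], [3.0, 4.0]])  # (batch=2, context=2)
#     batch.repeat_interleave(3, dim=0)
#     # -> tensor([[1., 2.], [1., 2.], [1., 2.], [3., 4.], [3., 4.], [3., 4.]])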
|
[
"noreply@github.com"
] |
noreply@github.com
|
625ed010dc1eb9f52ce77596a5a4e7dfeafa600d
|
6226e852484e3ceaf27389a021b3215a6ee02e3d
|
/Entrega 1/balistica.py
|
884537fba547bed4c42d61deec34e11d28f84cec
|
[] |
no_license
|
DiegoAparicio/MCOC2020-P1
|
d7e7dd2cd1a66694c914d4f552bf0ba5e76f44d2
|
84e2b7c7a1d3dfdd9eddb3f8f3e6ff4a111240ff
|
refs/heads/master
| 2022-12-23T08:21:15.692241
| 2020-09-12T01:11:16
| 2020-09-12T01:11:16
| 289,975,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 09:48:20 2020
@author: 56977
"""
import numpy as sp  # numpy supplies pi/zeros/linspace etc.; recent scipy no longer re-exports them
from scipy.integrate import odeint
# parameters:
p = 1.225 #kg/m3
cd = 0.47
cm = 0.01
inch = 2.54*cm
D = 8.5*inch
r = D/2
A = sp.pi*r**2
CD = 0.5*p*cd*A
g = 9.81 #m/s2
m = 15
Vs = [0,10.,20.]
#V = 20
# function to integrate:
for V in Vs:
def bala(z,t):
zp = sp.zeros(4)
zp[0] = z[2]
zp[1] = z[3]
v = z[2:4]
        v[0] = v[0]-V # velocity minus wind
vnorm = sp.sqrt(sp.dot(v,v))
FD = -CD*sp.dot(v,v)*(v/vnorm)
zp[2] = FD[0]/m
zp[3] = FD[1]/m -g
return zp
    # time vector
t = sp.linspace(0,30,1001)
    # starts at the origin with vx = vy = vi (100 km/h converted to m/s)
vi = 100*1000/3600
z0 = sp.array([0,0,vi,vi])
sol = odeint(bala,z0,t)
import matplotlib.pylab as plt
x = sol[:,0]
y = sol[:,1]
plt.figure(1)
    plt.title("Trajectory for different wind speeds")
plt.grid()
plt.axis([0,150,0,50])
plt.plot(x,y,label =f"V = {V} m/s")
plt.ylabel("Y (m)")
plt.xlabel("X (m)")
plt.legend(loc="upper right")
    plt.savefig("trayectoria.png") # writes the figure as a PNG
    #plt.show() # omitted because the assignment said not to open a display window
|
[
"noreply@github.com"
] |
noreply@github.com
|
1eb7d4b356ecdfbafd7359821f946512d7724998
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/generated_clients/apis/artifactregistry/v1beta2/resources.py
|
1c5440583e39b379a1c8a68cde0b2d6841f35146
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""
import enum
BASE_URL = 'https://artifactregistry.googleapis.com/v1beta2/'
DOCS_URL = 'https://cloud.google.com/artifacts/docs/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
['projectsId'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_OPERATIONS = (
'projects.locations.operations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/operations/'
'{operationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES = (
'projects.locations.repositories',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_FILES = (
'projects.locations.repositories.files',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/files/{filesId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES = (
'projects.locations.repositories.packages',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/packages/{packagesId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_TAGS = (
'projects.locations.repositories.packages.tags',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/packages/{packagesId}/tags/{tagsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_VERSIONS = (
'projects.locations.repositories.packages.versions',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/repositories/'
'{repositoriesId}/packages/{packagesId}/versions/{versionsId}',
},
['name'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
eddff0d30d84daa619346f62be32cd51bd14262c
|
702c8a229ec80537e9864959220c75aaabb28548
|
/taobao.py
|
01f8d6dd7e92faac959d6bc370761b440d2e7af4
|
[] |
no_license
|
17181370591/wode
|
67de606298da7daf9e73dae8822a03ade9065ddc
|
4c574ec33f17c2b65f1fec7eb0adfb6dd05f141e
|
refs/heads/master
| 2021-06-23T18:21:18.796955
| 2019-06-13T09:56:48
| 2019-06-13T09:56:48
| 114,825,512
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
p=urlopen('https://buyertrade.taobao.com/trade/itemlist/list_bought_items.htm?spm=a3204.7139825.a2226mz.9.I5133L&t=20110530')
print(p.read())
|
[
"noreply@github.com"
] |
noreply@github.com
|
e6dfd9cb391b1dc09795b1911c78d7980a0ff1ee
|
b7f45072d056b80ed49e6bcde91877d8576e970d
|
/ImageJ/py/Wayne-blob-example.py
|
610a35e6e5ddb80455ce608015ed6b1efdfc7ff2
|
[] |
no_license
|
jrminter/tips
|
128a18ee55655a13085c174d532c77bcea412754
|
f48f8b202f8bf9e36cb6d487a23208371c79718e
|
refs/heads/master
| 2022-06-14T08:46:28.972743
| 2022-05-30T19:29:28
| 2022-05-30T19:29:28
| 11,463,325
| 5
| 8
| null | 2019-12-18T16:24:02
| 2013-07-17T00:16:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 321
|
py
|
from org.python.core import codecs
codecs.setDefaultEncoding('utf-8')
import os
from ij import IJ, WindowManager
IJ.run("Close All")
img = IJ.openImage("http://wsr.imagej.net/images/blobs.gif")
IJ.setAutoThreshold(img, "Default")
IJ.run(img, "Analyze Particles...", " show=[Bare Outlines] include in_situ")
img.show()
|
[
"jrminter@gmail.com"
] |
jrminter@gmail.com
|
8927c9bfdeb3e5161e03c5bbfb20291758317781
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2367/60791/254956.py
|
3891197249694bfc95edf61b7fdb4f59e0c7209d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
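# Find the length of the smallest repunit (1, 11, 111, ...) divisible by k;
# no repunit is divisible by a multiple of 2 or 5, hence the -1.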
k = int(input())
n = '1'
if(k%2==0 or k%5==0):
print(-1)
else:
while(int(n)%k != 0):
n += '1'
print(len(n))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
2ff9dcabc42e8fe5f217ef5bf6abf5b015fb7183
|
4f5513932010a81b0330917d2aa2f4fde39a04d6
|
/wall_app/models.py
|
948459eec11f5d2f4b83d54fc36e7e806e8f502e
|
[] |
no_license
|
pfuentea/the_wall
|
c58067f0219040900b4240ec71f50afcbb4ceff2
|
550f59945720d8b148aed12b7856cbc443dd8c60
|
refs/heads/main
| 2023-07-22T07:18:13.667222
| 2021-09-08T03:15:57
| 2021-09-08T03:15:57
| 402,581,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
from django.db import models
# Create your models here.
class UserManager(models.Manager):
def basic_validator(self, postData):
errors={}
if postData['password_confirm']!=postData['password']:
errors["password"] = "Las contraseñas deben coincidir"
return errors
class User(models.Model):
name = models.CharField(max_length=255)
email= models.EmailField(unique=True)
password=models.CharField(max_length=255)
allowed= models.BooleanField(default =True)
avatar = models.URLField(
default=""
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
def __repr__(self) -> str:
return f'{self.id}:{self.name}'
def __str__(self) -> str:
return f'{self.id}:{self.name}'
class Mensaje(models.Model):
texto= models.TextField()
escritor= models.ForeignKey(User, related_name="mensajes", on_delete = models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __repr__(self) -> str:
return f'({self.id}){self.escritor.id} {self.escritor.name}:{self.texto}'
def __str__(self) -> str:
return f'({self.id}){self.escritor.id}:{self.texto}'
class Comentario(models.Model):
texto= models.TextField()
escritor= models.ForeignKey(User, related_name="comentarios", on_delete = models.CASCADE)
mensaje= models.ForeignKey(Mensaje, related_name="comentarios", on_delete = models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __repr__(self) -> str:
return f'({self.id}){self.escritor.id}/{self.mensaje.id}:{self.texto}'
def __str__(self) -> str:
return f'({self.id}){self.escritor.id}{self.escritor.name}/{self.mensaje.id}:{self.texto}'
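# A minimal sketch (hypothetical postData) of how the custom manager's
# validator is typically used in a view:
#
#     errors = User.objects.basic_validator(
#         {'password': 'secret', 'password_confirm': 'secrets'})
#     if errors:
#         ...  # add the messages to the session and redirect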
|
[
"patricio.fuentealba.feliu@gmail.com"
] |
patricio.fuentealba.feliu@gmail.com
|
c7ffbc120879b204d210b4e4d5cc28f0f5f98edd
|
086722e5e0a7a88654ad78c696d5e22e6b700e1a
|
/pythonwithcsv.py
|
be793bbffb4556da3531ea3dc7e4b33b3c8fe6d2
|
[] |
no_license
|
84karandeepsingh/datavizwithcsv
|
5541840d3e7e3b126c1017fd28c6b0865f164415
|
7cb82d8063802aebdf59a647bfc19f9b32063850
|
refs/heads/master
| 2020-04-06T19:51:51.419261
| 2018-11-15T21:01:34
| 2018-11-15T21:01:34
| 157,752,241
| 0
| 0
| null | 2018-11-15T21:01:35
| 2018-11-15T18:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
import csv
import numpy as np
import matplotlib.pyplot as plt
# figure out what data we want to use
categories = [] # these are the column headers in the CSV file
installs = [] # this is the installs row
ratings = [] # this is the ratings row
with open('data/googeplaystore.csv') as csvfile:
reader = csv.reader(csvfile)
line_count = 0
for row in reader:
        # move the column-header row out of the actual data to get a clean dataset
        if line_count == 0:  # this row is text (headers), not data
print('pushing categories into a separate array')
categories.append(row) # push the text into this array
line_count += 1 # increment the line count for the next loop
else:
# grab the ratings and push them into the ratings array
            ratingsData = row[2]
            ratingsData = ratingsData.replace("NaN", "0")
            ratings.append(float(ratingsData))  # float() turns the string into a number
# print('pushing ratings data into the ratings array')
installData = row[5]
installData = installData.replace(",", "") # get rid of the commas
# get rid of the trailing "+"
installs.append(np.char.strip(installData, "+"))
line_count += 1
# get some values we can work with
# how many ratings are 4+?
# how many are below 2?
# how many are the middle?
np_ratings = np.array(ratings) # turn a plain Python list into a Numpy array
popular_apps = np_ratings > 4
print("popular apps:", len(np_ratings[popular_apps]))
percent_popular = int(len(np_ratings[popular_apps]) / len(np_ratings) * 100)
print(percent_popular)
unpopular_apps = np_ratings < 4
print("unpopular apps:", len(np_ratings[unpopular_apps]))
percent_unpopular = int(len(np_ratings[unpopular_apps]) / len(np_ratings) * 100)
print(percent_unpopular)
kinda_popular = int(100 - (percent_popular + percent_unpopular))
print(kinda_popular)
# do a visualization with our shiny new data
labels = "Sucks", "Meh", "Love it!"
sizes = [percent_unpopular, kinda_popular, percent_popular]
colors = ['yellowgreen', 'lightgreen', 'lightskyblue']
explode = (0.1, 0.1, 0.15)
plt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.legend(labels, loc=1)
plt.title("Do we love us some apps?")
plt.xlabel("User Ratings - App Install (10,000+ apps")
plt.show()
print(categories)
print('first row of data:', installs[0])
print('last row of data:', installs[-1])
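# Hedged aside (not in the original script): the boolean-mask counts above can
# also be taken directly, without materialising filtered arrays, e.g.
#   np.count_nonzero(np_ratings > 4)   # number of popular apps
#   int((np_ratings < 4).sum())        # number of unpopular apps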
|
[
"k_thind92494@tss-hr420-dm29.fc.ca"
] |
k_thind92494@tss-hr420-dm29.fc.ca
|
078d8878c03008b44ffb9bcebc52d9ae1bf3d187
|
dbc08e2b8b1d257b4ad0a12eeefb5d8ac2168045
|
/ClassifyProducts.py
|
1377966c9d462c28a9a812de9fbcb8962c764702
|
[] |
no_license
|
lauraabend/NLP_ProductClassifier
|
885329bad7f0dd26688361b679cfa1e25f14be5e
|
0c659165a7c444ef07c34cbf2452ad57ad6510de
|
refs/heads/master
| 2021-01-18T12:46:00.842400
| 2016-07-01T21:17:08
| 2016-07-01T21:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
from nltk.corpus import wordnet
import numpy as np
import pandas as pd
from nltk.tokenize import TweetTokenizer
from nltk.tag import pos_tag
from nltk.stem.porter import *
from nltk.corpus import stopwords
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def assign_product_to_class(class_descriptions, description_of_product):
comparison_list = []
description_of_product = list(set(description_of_product))
description_of_product = [word for word in description_of_product if word not in stopwords.words('english')]
for className in class_descriptions.keys():
comparison_per_class = []
for word1 in class_descriptions[className]:
word_from_list1 = wordnet.synsets(word1)
for word2 in description_of_product:
word_from_list2 = wordnet.synsets(word2)
if word_from_list1 and word_from_list2:
s = word_from_list1[0].wup_similarity(word_from_list2[0])
comparison_per_class.append(s)
        comparison_per_class = [item for item in comparison_per_class if item is not None]
list_of_similar_values = sorted(comparison_per_class, reverse=True)[:5]
comparison_list.append([np.mean(list_of_similar_values), className])
return sorted(comparison_list, reverse=True)
stemmer = PorterStemmer()
tknzr = TweetTokenizer()
classDescriptions = {
"Camera & Photo": ["lens", "camera", "photo", "camcorder", "photography", "image", "film", "digital", "monitor", "record"],
"Bedding & Bath": ["bed", "bath", "sheet", "towel", "shower", "tube", "bathroom", "bedroom", "pillow", "mattress", "sleep"],
"Exercise & Fitness": ["exercise", "fitness", "sport", "games", "weight", "train", "resistance", "soccer", "tennis", "golf", "yoga", "basketball", "fit"]
}
for i in classDescriptions.keys():
classDescriptions[i] = [stemmer.stem(word) for word in classDescriptions[i]]
file = pd.read_csv("./test_set2.csv", delimiter=";", encoding='latin-1')
list_of_products = list(zip(file["Product_id"].tolist(), file["Description"], file["Category"]))
list_of_products_ready = [list(elem) for elem in list_of_products]
real_label = []
prediction = []
for i in range(len(list_of_products_ready)):
# Tokenize the sentence
tokenized_words = tknzr.tokenize(list_of_products_ready[i][1])
list_of_products_ready[i].pop(1)
# Stem the words
stemed_words = [stemmer.stem(plural) for plural in tokenized_words]
# Tag the morphology of the word
tagged_words = pos_tag(stemed_words)
# Only select the NN and NNP
only_nouns = [word for word, pos in tagged_words if pos == 'NN' or pos == 'NNP']
# Append the resulting words
list_of_products_ready[i].append(only_nouns)
# Start classification
    similarity_to_classes = assign_product_to_class(classDescriptions, list_of_products_ready[i][2])
    list_of_products_ready[i].insert(2, similarity_to_classes[0][1])
real_label.append(list_of_products_ready[i][1])
prediction.append(list_of_products_ready[i][2])
print(list_of_products_ready[i])
print(confusion_matrix(real_label, prediction))
print(classification_report(real_label, prediction, target_names=["Exercise & Fitness", "Camera & Photo", "Bedding & Bath"]))
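# Hedged illustration (not part of the original pipeline): what a single
# Wu-Palmer comparison looks like, assuming the WordNet corpus has been
# downloaded via nltk.download('wordnet').
#
#   >>> from nltk.corpus import wordnet
#   >>> cam = wordnet.synsets('camera')[0]
#   >>> lens = wordnet.synsets('lens')[0]
#   >>> cam.wup_similarity(lens)   # a score in (0, 1]; higher means the nouns
#   ...                            # sit closer in the hypernym hierarchy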
|
[
"martin.maseda@gmail.com"
] |
martin.maseda@gmail.com
|
4ff8a625e52e7a2fc0f40fd40fdb70a36086c6e2
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/sunData/SType/ST_facets/ST_facets00201m/ST_facets00201m9_p.py
|
6b09bb1b8dd9512268b76bbd79e2c658e0d3fc7d
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 139
|
py
|
from output.models.sun_data.stype.st_facets.st_facets00201m.st_facets00201m9_xsd.st_facets00201m9 import Test
obj = Test(
value=10
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
4d9f3c3aaa1eb99f9250a21ad48e579ff04e13ed
|
211092990562ac699369246c59dff2bee9192a49
|
/hw2/T2_P3.py
|
233ca33a51ad90369b3a0ad7bccce2b706851567
|
[] |
no_license
|
haritoshpatel1997/Harvard_Course_CS181_2021
|
337b00211b6f34586d9c1fd7950bbeee56dae9eb
|
3bc223f1f022bd4e224298b6d299b42c45672100
|
refs/heads/main
| 2023-04-06T10:35:31.811861
| 2021-04-23T15:16:05
| 2021-04-23T15:16:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,703
|
py
|
# Don't change these imports. Note that the last two are the
# class implementations that you will implement in
# T2_P3_LogisticRegression.py and T2_P3_GaussianGenerativeModel.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as c
import matplotlib.patches as mpatches
from T2_P3_LogisticRegression import LogisticRegression
from T2_P3_GaussianGenerativeModel import GaussianGenerativeModel
from T2_P3_KNNModel import KNNModel
# These are the hyperparameters to the classifiers. You may need to
# adjust these as you try to find the best fit for each classifier.
# Logistic Regression hyperparameters
eta = 0.1 # Learning rate
lam = 0.1 # Lambda for regularization
# Whether or not you want the plots to be displayed
show_charts = True
# DO NOT CHANGE ANYTHING BELOW THIS LINE!
# -----------------------------------------------------------------
# Visualize the decision boundary that a model produces
def visualize_boundary(model, X, y, title, width=2):
# Create a grid of points
x_min, x_max = min(X[:, 0] - width), max(X[:, 0] + width)
y_min, y_max = min(X[:, 1] - width), max(X[:, 1] + width)
xx, yy = np.meshgrid(
np.arange(x_min, x_max, 0.05),
np.arange(y_min, y_max, 0.05)
)
# Flatten the grid so the values match spec for self.predict
xx_flat = xx.flatten()
yy_flat = yy.flatten()
X_pred = np.vstack((xx_flat, yy_flat)).T
# Get the class predictions
Y_hat = model.predict(X_pred)
Y_hat = Y_hat.reshape((xx.shape[0], xx.shape[1]))
# Visualize them.
cmap = c.ListedColormap(['r', 'b', 'g'])
plt.figure()
plt.title(title)
plt.xlabel('Magnitude')
plt.ylabel('Temperature')
plt.pcolormesh(xx, yy, Y_hat, cmap=cmap, alpha=0.3)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, linewidths=1,
edgecolors='black')
# Adding a legend and a title
red = mpatches.Patch(color='red', label='Dwarf')
blue = mpatches.Patch(color='blue', label='Giant')
green = mpatches.Patch(color='green', label='Supergiant')
plt.legend(handles=[red, blue, green])
# Saving the image to a file, and showing it as well
plt.savefig(title + '.png')
if show_charts:
plt.show()
# A mapping from string name to id
star_labels = {
'Dwarf': 0, # also corresponds to 'red' in the graphs
'Giant': 1, # also corresponds to 'blue' in the graphs
'Supergiant': 2 # also corresponds to 'green' in the graphs
}
# Read from file and extract X and y
df = pd.read_csv('data/hr.csv')
X = df[['Magnitude', 'Temperature']].values
y = np.array([star_labels[x] for x in df['Type']])
# Setting up and evaluating a number of different classification models
nb1 = GaussianGenerativeModel(is_shared_covariance=False)
nb1.fit(X, y)
visualize_boundary(nb1, X, y, 'generative_result_separate_covariances')
print('Separate Covariance negative log-likelihood: {}\n'
.format(nb1.negative_log_likelihood(X, y)))
nb2 = GaussianGenerativeModel(is_shared_covariance=True)
nb2.fit(X, y)
visualize_boundary(nb2, X, y, 'generative_result_shared_covariances')
print('Shared Covariance negative log-likelihood: {}\n'
.format(nb2.negative_log_likelihood(X, y)))
lr = LogisticRegression(eta=eta, lam=lam)
lr.fit(X, y)
lr.visualize_loss('logistic_regression_loss', show_charts=show_charts)
visualize_boundary(lr, X, y, 'logistic_regression_result')
knn1 = KNNModel(k=1)
knn1.fit(X, y)
visualize_boundary(knn1, X, y, 'knn1_result')
knn3 = KNNModel(k=3)
knn3.fit(X, y)
visualize_boundary(knn3, X, y, 'knn3_result')
knn5 = KNNModel(k=5)
knn5.fit(X, y)
visualize_boundary(knn5, X, y, 'knn5_result')
# Setting up some sample data
X_test = np.array([[6, 2]])
y_nb1 = nb1.predict(X_test)
y_nb2 = nb2.predict(X_test)
y_lr = lr.predict(X_test)
y_knn1 = knn1.predict(X_test)
y_knn3 = knn3.predict(X_test)
y_knn5 = knn5.predict(X_test)
# Predicting an unseen example
print('Test star type predictions for Separate Covariance Gaussian Model:')
print('magnitude 6 and temperature 2: {}\n'.format(y_nb1[0]))
print('Test star type predictions for Shared Covariance Gaussian Model:')
print('magnitude 6 and temperature 2: {}\n'.format(y_nb2[0]))
print('Test star type predictions for Logistic Regression:')
print('magnitude 6 and temperature 2: {}'.format(y_lr[0]))
print('Test star type predictions for KNN Model with k=1:')
print('magnitude 6 and temperature 2: {}'.format(y_knn1[0]))
print('Test star type predictions for KNN Model with k=3:')
print('magnitude 6 and temperature 2: {}'.format(y_knn3[0]))
print('Test star type predictions for KNN Model with k=5:')
print('magnitude 6 and temperature 2: {}'.format(y_knn5[0]))
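# Hedged aside (not part of the assignment driver): the flatten/predict/reshape
# round-trip inside visualize_boundary works for any model exposing
# predict(N x 2); a toy stand-in makes the shapes explicit.
#
#   >>> xx, yy = np.meshgrid(np.arange(0, 1, 0.5), np.arange(0, 1, 0.5))
#   >>> X_pred = np.vstack((xx.flatten(), yy.flatten())).T   # shape (4, 2)
#   >>> X_pred.sum(axis=1).reshape(xx.shape).shape           # back to (2, 2)
#   (2, 2)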
|
[
"jonathanchu33@gmail.com"
] |
jonathanchu33@gmail.com
|
6d89a520c12d1cf396cf97386448d6f6738ff2d8
|
2597f120ba197ec63497263b75b003f17dd41d37
|
/manage.py
|
54c3bd9664497fe87eee913f00da595bf9bbee72
|
[] |
no_license
|
vir-mir/reactjs-test
|
e86be29b939d77b9d5be5ea5c7ffe47dd7a54293
|
8e2da61fb7c13ae85a2ce835fb84893ad855d547
|
refs/heads/master
| 2021-01-02T09:26:14.815846
| 2014-07-30T04:11:26
| 2014-07-30T04:11:26
| 29,864,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "virmir.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"virmir49@gmail.com"
] |
virmir49@gmail.com
|
3477ca40e3be9c089491a0edef84de178170c43e
|
533c1ccd1eb1c4c735c6473381c64770d8103246
|
/lbpi/wrappers/adt_ens.py
|
29a1a2ef5f96d665989172968bf34b8e0216c90f
|
[
"MIT"
] |
permissive
|
nairvinayv/random_scripts
|
fc9278ce4be4908368311993918854de8330e032
|
6e1cc8d82cf61ae245108a69ffa1359f636f37f7
|
refs/heads/master
| 2022-02-14T09:01:20.866580
| 2022-02-02T21:33:05
| 2022-02-02T21:33:05
| 39,131,991
| 0
| 0
|
MIT
| 2021-09-27T04:50:05
| 2015-07-15T10:58:07
|
Shell
|
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 09:14:50 2017
This script makes sure two uploaded receptor conformations belong to the same
ensemble, which is then used to calculate the average SNR over multiple
conformations.
@author: nabina
"""
#import re
import sys,os
import subprocess as sb
import shutil
arg1 = sys.argv[1] # flag arguments for defining the modules to be operated
arg2 = sys.argv[2] # wrapper files
arg3 = sys.argv[3] # working folder
def work_files():
"""
    This function finds the folders present in the ensembles directory, prepares
    the receptor in each folder, and copies the ligand into each folder. Finally,
    depending on the flag supplied, it runs AutoDock or LIBSA.
"""
pdb_path = '{}protein.pdb'.format(arg3)
ligand_path = '{}ligand.pdbqt'.format(arg3)
chnatm_path = '{}chnatm.dat'.format(arg3)
protein_path = []
protein_pdbqt = []
receptor_path = []
    aname = {}
    a_count = list(range(len(os.listdir('{}ensembles'.format(arg3)))))
    for i in range(len(a_count)):
        aname[i] = '{}ensembles/folder{}/'.format(arg3, a_count[i])
dirr = list(aname.values())
for i in range(len(dirr)):
protein_path.append('{}protein.pdb'.format(dirr[i]))
protein_pdbqt.append('{}protein.pdbqt'.format(dirr[i]))
receptor_path.append('{}receptor.pdb'.format(dirr[i]))
for i in range(len(dirr)):
if arg1 == 'adt':
space = sys.argv[4]
points = sys.argv[5]
evals = sys.argv[6]
gens = sys.argv[7]
run = sys.argv[8]
sb.call(['python', '{}pdb_prepare.py'.format(arg2), pdb_path, chnatm_path, protein_path[i], protein_pdbqt[i], receptor_path[i]])
shutil.copy(ligand_path, dirr[i]) # copying of ligand_pdbqt to ensemble folders
sb.call(['python', '{}adt.py'.format(arg2), dirr[i], space, points, evals, gens, run])
if arg1 == 'libsa_none':
LIBSA = sys.argv[4]
sb.call(['python', '{}libsa_none.py'.format(arg2), dirr[i], LIBSA, 'none', '0.05','1','0.4','4'])
sb.call(['python', '{}libsa_none.py'.format(arg2), dirr[i], LIBSA, 'affinity_only', '0.05','1','0.4','4'])
if arg1 == 'libsa':
LIBSA = sys.argv[4]
arg5 = sys.argv[5]
arg6 = sys.argv[6]
arg7 = sys.argv[7]
arg8 = sys.argv[8]
arg9 = sys.argv[9]
sb.call(['python', '{}libsa.py'.format(arg2), dirr[i], LIBSA, arg5,arg6, arg7, arg8, arg9])
"""
Subprocess call for views file to automate adt and LIBSA for ensembles
sb.call(['python', '{}adt_ens.py'.format(wrappers), 'adt', wrappers, current_dir, space, points, evals, gens, run])
sb.call(['python', '{}adt_ens.py'.format(wrappers), 'libsa_none', wrappers, current_dir,LIBSA])
sb.call(['python', '{}adt_ens.py'.format(wrappers), 'libsa', wrappers, current_dir, LIBSA, libsa_flags, energy_steps, percentchange, aux_peak, cutoff])
"""
if __name__ == '__main__':
work_files()
|
[
"noreply@github.com"
] |
noreply@github.com
|
500aef746d79ed087cee6c69260b5b7ab0ba585d
|
12c15a95f6105f58cce4595db4541e2967abc86a
|
/PyFury/CodeMonk V2/XOR.py
|
dec64e446ce09fcec64e31f94dcc7013cd2395c2
|
[] |
no_license
|
avinash28196/PyFury-V1.0
|
da5c85fd561ee7edc01f7ece9f4657191ae0f015
|
84ed41c13e2fdd96fc1556915709f0c87655af56
|
refs/heads/master
| 2021-07-05T08:42:10.291793
| 2019-02-25T17:06:46
| 2019-02-25T17:06:46
| 125,209,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
Test = int(input())
for i in range(Test):
N = int(input())
count = 0
    for i in range(1, N + 1):  # note: shadows the outer test-case counter, which is otherwise unused
        for j in range(i + 1, N + 1):
            # print(i, j)
            xor = i ^ j
            if xor <= N:  # 'xor == N' was redundant; it is already covered by <=
                count += 1
print(count)
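# Hedged reference sketch (not in the original submission): a one-liner
# equivalent of the nested loops above, handy for sanity-checking.
def count_pairs(N):
    return sum(1 for a in range(1, N + 1) for b in range(a + 1, N + 1) if a ^ b <= N)
# e.g. count_pairs(4) == 3: (1,2)->3, (1,3)->2, (2,3)->1 qualify, while
# (1,4)->5, (2,4)->6 and (3,4)->7 exceed N.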
|
[
"nextbitgeek@Avinashs-MacBook-Pro.local"
] |
nextbitgeek@Avinashs-MacBook-Pro.local
|
f45604bd7b04946b6c72a23f38771783457a5ae7
|
9998ff1d80a5442970ffdc0b2dd343e3cab30ee8
|
/fiaqm.py
|
8d1e36ec5d2bdf3d45320c614cf86650b2282341
|
[
"MIT"
] |
permissive
|
cgomezsu/FIAQM
|
9baac1a9410a6ad19e67fff024a9ff15f24df70c
|
da44e370f40e573233a148414229359e7782ad0c
|
refs/heads/main
| 2023-05-13T02:17:19.833979
| 2021-05-24T01:25:17
| 2021-05-24T01:25:17
| 301,475,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,902
|
py
|
"""
v.e.s.
Federated Congestion Predictor + Intelligent AQM
MIT License
Copyright (c) 2020 Cesar A. Gomez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.log import info, setLogLevel
from mininet.cli import CLI
import numpy as np
import pandas as pd
import time, random, os
import substring as ss
import tuner as tnr
random.seed(7) # For reproducibility
class CreateTopo(Topo):
def build(self, n):
bra = self.addSwitch('bra1') # Border Router A
brb = self.addSwitch('brb1') # Border Router B
ixp = self.addSwitch('ixp1') # IXP switch
vs = self.addSwitch('vs1') # Virtual switch to emulate a tunnel connection between Learning Orchestrator and Local Learners
self.addLink(bra, ixp, bw=1000, delay='2ms') # Link between bra-eth1 and ixp-eth1
self.addLink(ixp, brb, bw=1000, delay='2ms') # Link between ixp-eth2 and brb-eth1
# Creation of hosts connected to Border Routers in each domain
for j in range(n):
BW = random.randint(250,500)
d = str(random.randint(2,10))+'ms' # Each host has a random propagation delay (between 2 and 10 ms) on its link connected to the corresponding router
ha = self.addHost('a%s' % (j+1), ip='10.10.0.%s' % (j+1))
self.addLink(ha, bra, bw=BW, delay=d)
BW = random.randint(250,500) # Random BW to limit rate on each interface
d = str(random.randint(2,10))+'ms'
hb = self.addHost('b%s' % (j+1), ip='10.11.0.%s' % (j+1))
self.addLink(hb, brb, bw=BW, delay=d)
hlla = self.addHost('lla1', ip='10.10.11.11') # Host acting as the Local Learner A
self.addLink(hlla, vs, bw=100)#, delay='2ms')
hllb = self.addHost('llb1', ip='10.10.11.12') # Host acting as the Local Learner B
self.addLink(hllb, vs, bw=100)#, delay='2ms')
hlo = self.addHost('lo1', ip='10.10.10.10') # Host acting as the Learning Orchestrator
self.addLink(hlo, vs, bw=100)#, delay='2ms')
ma = self.addHost('ma1', ip='10.0.0.10') # There are two monitor hosts for probing
        self.addLink(ma, bra, bw=1000) # The BW of the monitor hosts is the same as that of the inter-domain links
mb = self.addHost('mb1', ip='10.0.0.11')
self.addLink(mb, brb, bw=1000)
setLogLevel('info') # To show info messages
n = 20 # Number of network elements connected per border router
topo = CreateTopo(n)
net = Mininet(topo, link=TCLink, autoSetMacs=True) # We use Traffic Control links
info('\n*** Starting network\n')
net.start()
# Creating network devices from topology
lo1 = net['lo1']
lla1 = net['lla1']
llb1 = net['llb1']
bra1 = net['bra1']
ixp1 = net['ixp1']
brb1 = net['brb1']
ma1 = net['ma1']
mb1 = net['mb1']
# AQM configuration for link between IXP switch and Border Router A
ixp1.cmd('tc qdisc del dev ixp1-eth1 root') # Clear current qdisc
ixp1.cmd('tc qdisc add dev ixp1-eth1 root handle 1:0 htb default 1') # Set the name of the root as 1:, for future references. The default class is 1
ixp1.cmd('tc class add dev ixp1-eth1 classid 1:1 htb rate 1000mbit') # Create class 1:1 as direct descendant of root (the parent is 1:)
ixp1.cmd('tc qdisc add dev ixp1-eth1 parent 1:1 handle 10:1 fq_codel limit 1000 target 50ms interval 1000ms noecn') # Create qdisc with ID (handle) 10 of class 1. Its parent class is 1:1. Queue size limited to 1000 pkts
# AQM configuration for link between Border Router B and IXP switch. This will be the IAQM
aqm_target = 5000
aqm_interval = 100 # Initial parameters
brb1.cmd('tc qdisc del dev brb1-eth1 root')
brb1.cmd('tc qdisc add dev brb1-eth1 root handle 1:0 htb default 1')
brb1.cmd('tc class add dev brb1-eth1 classid 1:1 htb rate 1000mbit')
brb1.cmd('tc qdisc add dev brb1-eth1 parent 1:1 handle 10:1 fq_codel limit 1000 target {}us interval {}ms noecn'.format(aqm_target,aqm_interval)) # Both target and interval of this queue will be set dynamically as the emulation runs
info('\n*** Setting up AQM for intra-domain link buffers at Border Router\n')
a = [0 for j in range(n)] # List initialization for hosts A
b = [0 for j in range(n)] # List initialization for hosts B
for j in range(n):
# Changing the queue discipline on interfaces connected to Border Router A
BW = random.randint(250,500) # Random BW to limit rate on each interface
bra1.cmd('tc qdisc del dev bra1-eth{} root'.format(j+2))
bra1.cmd('tc qdisc add dev bra1-eth{} root handle 1:0 htb default 1'.format(j+2))
bra1.cmd('tc class add dev bra1-eth{} classid 1:1 htb rate {}mbit'.format(j+2,BW))
bra1.cmd('tc qdisc add dev bra1-eth{} parent 1:1 handle 10:1 fq_codel limit 1000 target 2000us interval 40ms noecn'.format(j+2))
time.sleep(3) # Wait a moment while the AQM is configured
a[j] = net['a%s' % (j+1)] # Creating net devices from topology
b[j] = net['b%s' % (j+1)]
time.sleep(5)
info('\n*** Testing connectivity...\n')
for j in range(n):
net.ping(hosts=[a[j],b[j]])
net.ping(hosts=[lla1,lo1])
net.ping(hosts=[llb1,lo1])
net.ping(hosts=[ma1,mb1])
info('\n*** Starting AQM stat captures...\n')
bra1.cmd('bash ~/stat_bra.sh') # Bash script to capture AQM stats at Border Router A
ixp1.cmd('bash ~/stat_ixp.sh &') # Bash script to capture AQM stats at the IXP switch
brb1.cmd('bash ~/stat_brb.sh &') # Bash script to capture AQM stats at Border Router B
info('\n*** Starting RRUL traffic between pairs...\n')
for j in range(n):
a[j].cmd('netserver &') # Start server on domain-A hosts for RRUL tests
l = random.randint(300,900) # Random length for each RRUL test
b[j].cmd('bash rrul.sh 10.10.0.{} {} &'.format(j+1,l)) # Start RRUL tests on domain-B hosts
time.sleep(20) # Waiting time while all RRUL tests start
info('\n*** Capturing AQM stats for initial training...\n')
time.sleep(150) # Waiting time for getting the initial training samples
# If the emulation gets stuck in the first period, it's probably because there are
# not enough traffic samples to train the model. Leave a longer time
info('\n*** Starting Federated Congestion Predictor process...\n')
lo1.cmd('mkdir ~/LO') # Directory to store parameter files of Learning Orchestrator
lo1.cmd('mkdir ~/LO/Predictions') # Directory to store predictions of Learning Orchestrator
lla1.cmd('mkdir ~/LLA') # Directory to store files of Local Learner A
llb1.cmd('mkdir ~/LLB') # Directory to store files of Local Learner B
# Start SCP server on Learning Orchestrator
lo1.cmd('/usr/sbin/sshd -p 54321')
# Start learning process on Learning Orchestrator
lo1.cmd('source PyTorch/bin/activate') # Activate virtual environment where PyTorch is installed
lo1.cmd('python lo_train.py &')
lo1.cmd('python lo_predict.py &')
# Start learning process on Local Learner A
lla1.cmd('source PyTorch/bin/activate')
lla1.cmd('python ll_train.py &')
# Receive predictions
periods = 300 # Number of periods (of approx. 2 s each) to run the emulation
factor = 1000
S = 100 # Number of states: discrete levels of congestion [0, 100] in a period of 2 s
A = np.arange(1100, 11100, 100) # Set of actions: set value of target parameter in us
epsilon = 0.5
s_curr = random.randint(0, S - 1) # Random initialization of the first observed state for tuning (valid states are 0..S-1)
ind_action = len(A)-1 # Initial action for tuning: max FQ-CoDel target considered (11 ms)
cong_pred_max = 1e-6
hist_pred = np.zeros(periods)
hist_local = np.zeros(periods)
hist_r = np.zeros(periods)
hist_rtt = np.ones(periods)*8
hist_tput = np.ones(periods)*100
ma1.cmd('iperf -s &') # Iperf server on Monitor A to measure throughput
t0 = time.time() # To take time of all periods
preds_file = '/home/ubuntu/LLB/dr_est.npy'
for i in range(periods):
# Measure RTT and throughput in 1 sec
mb1.cmd('iperf -c 10.0.0.10 -i 0.1 -t 1 | tail -1 > ~/LLB/tput.out &')
mb1.cmd('ping 10.0.0.10 -i 0.1 -w 1 -q | tail -1 > ~/LLB/ping.out')
print("*** Period",i)
print("+++ Configured target and interval parameters:",aqm_target,"us",aqm_interval,"ms")
# Load received file with predictions, sent by the Learning Orchestrator
while not os.path.isfile(preds_file):
time.sleep(0)
while not os.path.getsize(preds_file) >= 928: # To make sure that the file is not empty and still being transferred (file size with 100 predictions)
time.sleep(0)
dr_est = np.load(preds_file)
# Some signal rearrangements of the predictions
dr_est = dr_est*factor
dr_est = dr_est-dr_est.mean()
dr_est = np.abs(dr_est)
dr_est[dr_est > 1] = 1 # Avoid any possible outlier after rearranging
hist_pred[i] = dr_est.mean()
# Discretize values of predictions
if hist_pred[i] > cong_pred_max:
cong_pred_max = hist_pred[i] # Stores the max value of predicted congestion
s_next = int((hist_pred[i]/cong_pred_max)*S-1)
print("+++ Predicted level of congestion ahead:",s_next+1)
time.sleep(1) # The duration of each period is this waiting time + ping time
statinfo = os.stat('/home/ubuntu/LLB/ping.out')
if statinfo.st_size < 10:
mRTT = hist_rtt.max() # If no ping response was gotten, take the maximum in the records (worst case)
print ('>>> No mRTT response. Taking maximum known: %.3f' % mRTT, 'ms')
else:
din = open('/home/ubuntu/LLB/ping.out').readlines()
slice = ss.substringByInd(din[0],26,39)
text = (slice.split('/'))
mRTT = float(text[1])
print ('>>> mRTT: %.3f' % mRTT, 'ms')
hist_rtt[i] = mRTT
statinfo = os.stat('/home/ubuntu/LLB/tput.out')
if statinfo.st_size < 10:
tput = hist_tput.min() # If no tput response was gotten, take the minimum in the records (worst case)
print ('>>> No Tput response. Taking minimum known: %.3f' % tput, 'Mbps')
else:
din = open('/home/ubuntu/LLB/tput.out').readlines()
tput = float(ss.substringByInd(din[0],34,37))
unit = ss.substringByInd(din[0],39,39)
if unit == 'K':
tput = tput*0.001
print ('>>> Tput: %.3f' % tput, 'Mbps')
hist_tput[i] = tput
hist_r[i] = tput/mRTT
R = hist_r[i] # Reward is based on power function
print ('>>> Power: %.2f' % R)
if i >= 75: # Start the AQM tuning process after this period
# Update Q-values
Q = tnr.update(s_curr, ind_action, R, s_next)
s_curr = s_next
# Select action for next iteration
ind_action = tnr.action(s_curr, epsilon)
aqm_target = A[ind_action] # Select a FQ-CoDel target
        aqm_interval = int(aqm_target/(0.05*1000)) # Select a FQ-CoDel interval. Typically, target is 5% of the interval
brb1.cmd('tc qdisc change dev brb1-eth1 parent 1:1 handle 10:1 fq_codel limit 1000 target {}us interval {}ms noecn'.format(aqm_target,aqm_interval)) # Change the AQM parameters at Border Router B
print("*** Total wall time: ", time.time() - t0, " seconds")
np.save('/home/ubuntu/hist_pred.npy',hist_pred)
np.save('/home/ubuntu/hist_power.npy',hist_r)
np.save('/home/ubuntu/q-table.npy',Q)
#CLI(net) # Uncomment this line to explore the temporary files before exiting Mininet
info('*** Deleting temporary files...\n')
lo1.cmd('rm -r ~/LO')
lla1.cmd('rm -r ~/LLA')
llb1.cmd('rm -r ~/LLB')
info('*** Experiment finished!\n')
net.stop()
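# Hedged sketch (the real 'tuner' module is not shown in this file): a minimal
# epsilon-greedy Q-learning backend compatible with the tnr.update / tnr.action
# calls above. Table shape, alpha and gamma below are illustrative assumptions.
#
#   import numpy as np
#   import random
#
#   S_N, A_N = 100, 100                 # states x actions; must match the caller
#   Q = np.zeros((S_N, A_N))
#   alpha, gamma = 0.1, 0.9             # learning rate and discount (assumed)
#
#   def update(s, a, r, s_next):
#       Q[s, a] += alpha * (r + gamma * Q[s_next].max() - Q[s, a])
#       return Q                        # the caller saves this table at the end
#
#   def action(s, epsilon):
#       if random.random() < epsilon:
#           return random.randrange(A_N)    # explore
#       return int(Q[s].argmax())           # exploit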
|
[
"noreply@github.com"
] |
noreply@github.com
|
e18cb7dd81804a2ba328dc66b22ac4a5eb10f3e6
|
92d79bbe1e94e192e9d4a728f99f6aecea500645
|
/attack/df_attack_2_tab_next.py
|
33f57d3a58533a0b661b278d2c17db343feda980
|
[] |
no_license
|
westzyan/attackWFP
|
e6f61dc7a6636640a298162941c5b7c882c1fc80
|
5e2227308b3ab7be5b607c4d8dddb4871ed56fc4
|
refs/heads/master
| 2023-03-02T19:16:14.846449
| 2021-02-09T18:04:42
| 2021-02-09T18:04:42
| 325,488,478
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,558
|
py
|
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, classification_report
from Model_DF import DFNet
import random
from keras.utils import np_utils
from keras.optimizers import Adamax
import numpy as np
import os
import tensorflow as tf
import keras
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
# Load data for non-defended dataset for CW setting
def LoadDataNoDefCW(second):
print("Loading defended dataset for closed-world scenario")
# Point to the directory storing data
# dataset_dir = '../dataset/ClosedWorld/NoDef/'
# dataset_dir = "/media/zyan/软件/张岩备份/PPT/DeepFingerprinting/df-master/dataset/ClosedWorld/NoDef/"
dataset_dir = "/home/thinkst/zyan/real_specified_split/round/second{}/".format(second)
# X represents a sequence of traffic directions
# y represents a sequence of corresponding label (website's label)
data = np.loadtxt(dataset_dir + "df_tcp_95000_10000_head_math_order.csv", delimiter=",")
print(data)
np.random.shuffle(data)
print(data)
print(len(data))
train_length = int(0.8 * len(data))
valid_length = int(0.1 * len(data))
test_length = len(data) - train_length - valid_length
train = data[:train_length, :]
valid = data[train_length: train_length + valid_length, :]
test = data[train_length + valid_length:, :]
X_train = train[:, :-1]
y_train = train[:, -1]
X_valid = valid[:, :-1]
y_valid = valid[:, -1]
X_test = test[:, :-1]
y_test = test[:, -1]
print("X: Training data's shape : ", X_train.shape)
print("y: Training data's shape : ", y_train.shape)
print("X: Validation data's shape : ", X_valid.shape)
print("y: Validation data's shape : ", y_valid.shape)
print("X: Testing data's shape : ", X_test.shape)
print("y: Testing data's shape : ", y_test.shape)
#
return X_train, y_train, X_valid, y_valid, X_test, y_test
if __name__ == '__main__':
for second in range(2, 9):
random.seed(0)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
description = "Training and evaluating DF model for closed-world scenario on non-defended dataset"
print(description)
# Training the DF model
NB_EPOCH = 20 # Number of training epoch
print("Number of Epoch: ", NB_EPOCH)
BATCH_SIZE = 128 # Batch size
VERBOSE = 2 # Output display mode
LENGTH = 10000 # Packet sequence length
OPTIMIZER = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # Optimizer
NB_CLASSES = 95 # number of outputs = number of classes
INPUT_SHAPE = (LENGTH, 1)
# Data: shuffled and split between train and test sets
print("Loading and preparing data for training, and evaluating the model")
X_train, y_train, X_valid, y_valid, X_test, y_test = LoadDataNoDefCW(second)
# Please refer to the dataset format in readme
# K.set_image_dim_ordering("tf") # tf is tensorflow
# Convert data as float32 type
X_train = X_train.astype('float32')
X_valid = X_valid.astype('float32')
X_test = X_test.astype('float32')
y_train = y_train.astype('float32')
y_valid = y_valid.astype('float32')
y_test = y_test.astype('float32')
# we need a [Length x 1] x n shape as input to the DF CNN (Tensorflow)
X_train = X_train[:, :, np.newaxis]
X_valid = X_valid[:, :, np.newaxis]
X_test = X_test[:, :, np.newaxis]
print(X_train.shape[0], 'train samples')
print(X_valid.shape[0], 'validation samples')
print(X_test.shape[0], 'test samples')
# Convert class vectors to categorical classes matrices
y_train = np_utils.to_categorical(y_train, NB_CLASSES)
y_valid = np_utils.to_categorical(y_valid, NB_CLASSES)
y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# Building and training model
print("Building and training DF model")
model = DFNet.build(input_shape=INPUT_SHAPE, classes=NB_CLASSES)
model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"])
print("Model compiled")
# Start training
history = model.fit(X_train, y_train,
batch_size=BATCH_SIZE, epochs=NB_EPOCH,
verbose=VERBOSE, validation_data=(X_valid, y_valid))
# model.save('my_model_undef_tcp_10000_round2.h5')
# Start evaluating model with testing data
score_test = model.evaluate(X_test, y_test, verbose=VERBOSE)
print("Testing accuracy:", score_test[1])
y_pre = model.predict(X_test)
index_test = np.argmax(y_test, axis=1)
index_pre = np.argmax(y_pre, axis=1)
print(precision_recall_fscore_support(index_test, index_pre, average='macro'))
# Macro-P,Macro-R,Macro-F1
print(precision_recall_fscore_support(index_test, index_pre, average='micro'))
# Micro-P,Micro-R,Micro-F1
print(classification_report(index_test, index_pre))
score = classification_report(index_test, index_pre)
        # Confusion matrix and visualization
        confmat = confusion_matrix(y_true=index_test, y_pred=index_pre)  # compute the confusion matrix
print(confmat)
with open("./overlap_second.txt", 'a') as f:
f.write("second:{} acc:{}\n".format(second, score_test[1]))
f.write(score)
f.close()
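# Hedged aside (not in the original script): np_utils.to_categorical one-hot
# encodes integer labels, which is what categorical_crossentropy expects, e.g.
#   >>> np_utils.to_categorical([0, 2], 3)
#   array([[1., 0., 0.],
#          [0., 0., 1.]])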
|
[
"15639067131@163.com"
] |
15639067131@163.com
|
172576681e45df0d4e6966c9a2513b6ebdfbff4e
|
846cbb8cc97c667c1f2969fca12b835c3843f170
|
/magpy/lib/format_sfs.py
|
a36994e4972d2df583ae37a95c7050c0af4825eb
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
geomagpy/magpy
|
f33a4a7ae95f95d2e5e3d09b571d2fa6f2905174
|
79f3420c4526c735869715e8c358848d790e982b
|
refs/heads/master
| 2023-08-17T08:39:48.757501
| 2023-07-19T11:25:00
| 2023-07-19T11:25:00
| 47,394,862
| 40
| 20
|
BSD-3-Clause
| 2021-01-26T12:29:02
| 2015-12-04T09:38:09
|
Python
|
UTF-8
|
Python
| false
| false
| 5,996
|
py
|
"""
MagPy
Auxiliary input filter - WIC/WIK
Written by Roman Leonhardt June 2012
- contains test and read functions; to do: add a write function
"""
from magpy.stream import *
def isSFDMI(filename):
"""
    Checks whether a file is Spanish DMI format.
Time is in seconds relative to one day
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
    if len(temp) > 9:  # temp[9] below needs at least 10 characters
if temp[9] in ['o','+','-']: # Prevent errors with GFZ kp
return False
sp = temp.split()
if not len(sp) == 6:
return False
if not isNumber(sp[0]):
return False
#logging.info(" Found SFS file")
return True
def isSFGSM(filename):
"""
    Checks whether a file is Spanish GSM format.
Time is in seconds relative to one day
"""
try:
fh = open(filename, 'rt')
temp = fh.readline()
except:
return False
sp = temp.split()
if len(sp) != 2:
return False
if not isNumber(sp[0]):
return False
try:
if not 20000 < float(sp[1]) < 80000:
return False
except:
return False
return True
def readSFDMI(filename, headonly=False, **kwargs):
"""
Reading SF DMI format data.
Looks like:
0.03 99.11 -29.76 26.14 22.05 30.31
5.04 98.76 -29.78 26.20 22.04 30.31
10.01 98.85 -29.76 26.04 22.04 30.31
15.15 98.63 -29.79 26.20 22.04 30.31
20.12 98.85 -29.78 26.11 22.04 30.31
first column are seconds of day
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
getfile = True
fh = open(filename, 'rt')
# read file and split text into channels
stream = DataStream()
if stream.header is None:
headers = {}
else:
headers = stream.header
data = []
key = None
# get day from filename (platform independent)
splitpath = os.path.split(filename)
daystring = splitpath[1].split('.')
try:
day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"),"%Y-%m-%d")
except:
logging.warning("Wrong dateformat in Filename %s" % daystring[0])
fh.close()
return DataStream([], headers)
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if getfile:
for line in fh:
if line.isspace():
# blank line
continue
else:
row = LineStruct()
elem = line.split()
if (len(elem) == 6):
row.time=date2num(datetime.strptime(day,"%Y-%m-%d"))+ float(elem[0])/86400
xval = float(elem[1])
yval = float(elem[2])
zval = float(elem[3])
row.x = xval
row.y = yval
row.z = zval
row.t1 = float(elem[4])
row.t2 = float(elem[5])
stream.add(row)
stream.header['col-x'] = 'x'
stream.header['col-y'] = 'y'
stream.header['col-z'] = 'z'
stream.header['col-t1'] = 'T1'
stream.header['col-t2'] = 'T2'
stream.header['unit-col-x'] = 'nT'
stream.header['unit-col-y'] = 'nT'
stream.header['unit-col-z'] = 'nT'
stream.header['unit-col-t1'] = 'deg C'
stream.header['unit-col-t2'] = 'deg C'
else:
headers = stream.header
stream =[]
fh.close()
return DataStream(stream, headers)
def readSFGSM(filename, headonly=False, **kwargs):
"""
Reading SF GSM format data.
Looks like:
22 42982.35
52 42982.43
82 42982.47
first column are seconds of day
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
getfile = True
fh = open(filename, 'rt')
# read file and split text into channels
stream = DataStream()
if stream.header is None:
headers = {}
else:
headers = stream.header
data = []
key = None
# get day from filename (platform independent)
splitpath = os.path.split(filename)
daystring = splitpath[1].split('.')
try:
day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"),"%Y-%m-%d")
except:
logging.warning("Wrong dateformat in Filename %s" % daystring[0])
return []
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if getfile:
for line in fh:
if line.isspace():
# blank line
continue
else:
row = LineStruct()
elem = line.split()
if (len(elem) == 2):
row.time=date2num(datetime.strptime(day,"%Y-%m-%d"))+ float(elem[0])/86400
row.f = float(elem[1])
stream.add(row)
stream.header['col-f'] = 'f'
stream.header['unit-col-f'] = 'nT'
else:
headers = stream.header
stream =[]
fh.close()
return DataStream(stream, headers)
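# Hedged worked example (not part of the module): both readers map the first
# column, seconds of day, onto day-fraction time stamps via
#     row.time = date2num(parsed_day) + seconds / 86400
# so e.g. 43200 s (noon) adds exactly 0.5 to the day number of the file's date.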
|
[
"roman.leonhardt@zamg.ac.at"
] |
roman.leonhardt@zamg.ac.at
|
50363bbf710a2b67812e488531ed086fe0b32138
|
d40fbefbd5db39f1c3fb97f17ed54cb7b6f230e0
|
/datadog_checks_dev/datadog_checks/dev/tooling/config.py
|
7d63ecb7890e8d4df068f1419c36389ea8bb11bc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
slightilusion/integrations-core
|
47a170d791e809f3a69c34e2426436a6c944c322
|
8f89e7ba35e6d27c9c1b36b9784b7454d845ba01
|
refs/heads/master
| 2020-05-20T18:34:41.716618
| 2019-05-08T21:51:17
| 2019-05-08T21:51:17
| 185,708,851
| 2
| 0
|
BSD-3-Clause
| 2019-05-09T02:05:19
| 2019-05-09T02:05:18
| null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from collections import OrderedDict, deque
from copy import deepcopy
import toml
from appdirs import user_data_dir
from atomicwrites import atomic_write
from six import string_types
from ..compat import FileNotFoundError
from ..utils import ensure_parent_dir_exists, file_exists, read_file
APP_DIR = user_data_dir('dd-checks-dev', '')
CONFIG_FILE = os.path.join(APP_DIR, 'config.toml')
SECRET_KEYS = {'dd_api_key', 'github.token', 'pypi.pass', 'trello.key', 'trello.token'}
DEFAULT_CONFIG = OrderedDict(
[
('core', os.path.join('~', 'dd', 'integrations-core')),
('extras', os.path.join('~', 'dd', 'integrations-extras')),
('agent', os.path.join('~', 'dd', 'datadog-agent')),
('repo', 'core'),
('agent6', OrderedDict((('docker', 'datadog/agent-dev:master'), ('local', 'latest')))),
('agent5', OrderedDict((('docker', 'datadog/dev-dd-agent:master'), ('local', 'latest')))),
('dd_api_key', os.getenv('DD_API_KEY')),
('github', OrderedDict((('user', ''), ('token', '')))),
('pypi', OrderedDict((('user', ''), ('pass', '')))),
('trello', OrderedDict((('key', ''), ('token', '')))),
]
)
def config_file_exists():
return file_exists(CONFIG_FILE)
def copy_default_config():
return deepcopy(DEFAULT_CONFIG)
def save_config(config):
ensure_parent_dir_exists(CONFIG_FILE)
with atomic_write(CONFIG_FILE, mode='wb', overwrite=True) as f:
f.write(toml.dumps(config).encode('utf-8'))
def load_config():
config = copy_default_config()
try:
config.update(toml.loads(read_config_file(), OrderedDict))
except FileNotFoundError:
pass
return config
def read_config_file():
return read_file(CONFIG_FILE)
def read_config_file_scrubbed():
return toml.dumps(scrub_secrets(load_config()))
def restore_config():
config = copy_default_config()
save_config(config)
return config
def update_config():
config = copy_default_config()
config.update(load_config())
# Support legacy config where agent5 and agent6 were strings
if isinstance(config['agent6'], string_types):
config['agent6'] = OrderedDict((('docker', config['agent6']), ('local', 'latest')))
if isinstance(config['agent5'], string_types):
config['agent5'] = OrderedDict((('docker', config['agent5']), ('local', 'latest')))
save_config(config)
return config
def scrub_secrets(config):
for secret_key in SECRET_KEYS:
branch = config
paths = deque(secret_key.split('.'))
while paths:
path = paths.popleft()
if not hasattr(branch, 'get'):
break
if path in branch:
if not paths:
old_value = branch[path]
if isinstance(old_value, string_types):
branch[path] = '*' * len(old_value)
else:
branch = branch[path]
else:
break
return config
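# Hedged worked example (not in the original module): scrub_secrets walks
# dotted keys such as 'github.token' into nested mappings and masks each leaf
# with asterisks of the same length.
#
#   >>> cfg = {'dd_api_key': 'abcd1234', 'github': {'user': 'me', 'token': 'tok3n'}}
#   >>> scrubbed = scrub_secrets(cfg)
#   >>> scrubbed['dd_api_key'], scrubbed['github']['token']
#   ('********', '*****')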
|
[
"noreply@github.com"
] |
noreply@github.com
|
235950d4728104e2a077b449c15418d2a6e7154c
|
057554afbdfec2f8689a999a15ba0848c620ab4f
|
/find_available_room.py
|
d3ca3e14301c511f7ba7b3c8037a7f21148694ab
|
[
"Apache-2.0"
] |
permissive
|
cnaert/roomfinder2
|
fe40c2728d19c92688ef4b86699db660f36fefb8
|
75040a2842058334fb5dfa9d12491e321bc88b43
|
refs/heads/master
| 2021-01-19T21:59:25.368371
| 2017-04-19T11:13:33
| 2017-04-19T11:13:33
| 88,688,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,600
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import subprocess
import getpass
from string import Template
import xml.etree.ElementTree as ET
import csv, codecs
import argparse
import datetime
now = datetime.datetime.now().replace(microsecond=0)
starttime_default = now.isoformat()
end_time_default = None
parser = argparse.ArgumentParser()
parser.add_argument("-url","--url", help="url for exhange server, e.g. 'https://mail.domain.com/ews/exchange.asmx'.",required=True)
parser.add_argument("-u","--user", help="user name for exchange/outlook",required=True)
parser.add_argument("-p","--password", help="password for exchange/outlook", required=True)
parser.add_argument("-start","--starttime", help="Starttime e.g. 2014-07-02T11:00:00 (default = now)", default=starttime_default)
parser.add_argument("-end","--endtime", help="Endtime e.g. 2014-07-02T12:00:00 (default = now+1h)", default=end_time_default)
#parser.add_argument("-n","--now", help="Will set starttime to now and endtime to now+1h", action="store_true")
parser.add_argument("-f","--file", help="csv filename with rooms to check (default=favorites.csv). Format: Name,email",default="favorites.csv")
args=parser.parse_args()
url = args.url
reader = csv.reader(codecs.open(args.file, 'r', encoding='utf-8'))
start_time = args.starttime
if not args.endtime:
start = datetime.datetime.strptime( start_time, "%Y-%m-%dT%H:%M:%S" )
end_time = (start + datetime.timedelta(hours=1)).isoformat()
else:
end_time = args.endtime
user = args.user
password = args.password
print "Searching for a room from " + start_time + " to " + end_time + ":"
print "{0:10s} {1:25s} {2:40s} {3:10s} {4:10s} {5:10s} {6:50s}".format("Status", "Room", "Email", "Level", "Zone", "Seats", "Description")
xml_template = open("getavailibility_template.xml", "r").read()
xml = Template(xml_template)
for room in reader:
data = unicode(xml.substitute(email=room[1],starttime=start_time,endtime=end_time))
header = "\"content-type: text/xml;charset=utf-8\""
command = "curl --silent --header " + header +" --data '" + data + "' --ntlm -u "+ user+":"+password+" "+ url
response = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()[0]
tree = ET.fromstring(response)
status = "Free"
# arrgh, namespaces!!
elems=tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}BusyType")
for elem in elems:
status=elem.text
print "{0:10s} {1:25s} {2:40s} {3:10s} {4:10s} {5:10s} {6:50s}".format(status, room[0], room[1], room[2], room[3], room[4], room[5] )
|
[
"cyrille.naert@dimensiondata.com"
] |
cyrille.naert@dimensiondata.com
|
a0a6e2b478307867d176521ffe24feb3a9ea24cb
|
382c3368b5a8a13d57bcff7951334e57f919d964
|
/remote-scripts/samples/APC40_20/SpecialMixerComponent.py
|
c157f77a22c018cdeaf7228a4fc43b005f301133
|
[
"Apache-2.0"
] |
permissive
|
jim-cooley/abletonremotescripts
|
c60a22956773253584ffce9bc210c0804bb153e1
|
a652c1cbe496548f16a79bb7f81ce3ea3545649c
|
refs/heads/master
| 2021-01-22T02:48:04.820586
| 2017-04-06T09:58:58
| 2017-04-06T09:58:58
| 28,599,515
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
# emacs-mode: -*- python-*-
# -*- coding: utf-8 -*-
from _Framework.MixerComponent import MixerComponent
from SpecialChannelStripComponent import SpecialChannelStripComponent
from _Framework.ButtonElement import ButtonElement #added
from _Framework.EncoderElement import EncoderElement #added
class SpecialMixerComponent(MixerComponent):
' Special mixer class that uses return tracks alongside midi and audio tracks, and only maps prehear when shifted '
__module__ = __name__
def __init__(self, num_tracks):
MixerComponent.__init__(self, num_tracks)
self._shift_button = None #added
self._shift_pressed = False #added
def set_shift_button(self, button): #added
assert ((button == None) or (isinstance(button, ButtonElement) and button.is_momentary()))
if (self._shift_button != button):
if (self._shift_button != None):
self._shift_button.remove_value_listener(self._shift_value)
self._shift_button = button
if (self._shift_button != None):
self._shift_button.add_value_listener(self._shift_value)
self.update()
def _shift_value(self, value): #added
assert (self._shift_button != None)
assert (value in range(128))
self._shift_pressed = (value != 0)
self.update()
def update(self): #added override
if self._allow_updates:
master_track = self.song().master_track
if self.is_enabled():
if (self._prehear_volume_control != None):
if self._shift_pressed: #added
self._prehear_volume_control.connect_to(master_track.mixer_device.cue_volume)
else:
self._prehear_volume_control.release_parameter() #added
if (self._crossfader_control != None):
self._crossfader_control.connect_to(master_track.mixer_device.crossfader)
else:
if (self._prehear_volume_control != None):
self._prehear_volume_control.release_parameter()
if (self._crossfader_control != None):
self._crossfader_control.release_parameter()
if (self._bank_up_button != None):
self._bank_up_button.turn_off()
if (self._bank_down_button != None):
self._bank_down_button.turn_off()
if (self._next_track_button != None):
self._next_track_button.turn_off()
if (self._prev_track_button != None):
self._prev_track_button.turn_off()
self._rebuild_callback()
else:
self._update_requests += 1
def tracks_to_use(self):
return (self.song().visible_tracks + self.song().return_tracks)
def _create_strip(self):
return SpecialChannelStripComponent()
def disconnect(self): #added
MixerComponent.disconnect(self)
if (self._shift_button != None):
self._shift_button.remove_value_listener(self._shift_value)
self._shift_button = None
# local variables:
# tab-width: 4
|
[
"jim@ubixlabs.com"
] |
jim@ubixlabs.com
|
e6e7dda1c960f07e3ef950b406a97d1171f4fa8d
|
fe0edb968d9d20c8dcdd994e293db418c451ce53
|
/amazon/LCAOfBinaryTree/solution.py
|
5d3bd6f8a9f7c77189b09b2403dbaeac851e2ceb
|
[] |
no_license
|
childxr/lintleetcode
|
d079b1a01fb623f2cb093b0fe665c21a18ec1b6a
|
e8d472ab237d61fed923df25c91823371c63445b
|
refs/heads/master
| 2020-03-20T08:39:10.473582
| 2018-08-04T22:56:55
| 2018-08-04T22:56:55
| 137,315,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
    @param: root: The root of the binary tree.
    @param: A: A TreeNode in the binary tree.
    @param: B: A TreeNode in the binary tree.
    @return: Return the least common ancestor (LCA) of the two nodes.
"""
def findNodes(self, root, A, B):
if root is None:
return root
if root == A or root == B:
return root
left = self.findNodes(root.left, A, B)
right = self.findNodes(root.right, A, B)
if left is not None and right is not None:
return root
return left if left is not None else right
def lowestCommonAncestor(self, root, A, B):
# write your code here
return self.findNodes(root, A, B)
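# Hedged usage sketch (not part of the submitted file): the judge normally
# supplies TreeNode, so a local stand-in is defined here only for the demo.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val):
            self.val = val
            self.left, self.right = None, None

    root, a, b = TreeNode(1), TreeNode(2), TreeNode(3)
    root.left, root.right = a, b
    print(Solution().lowestCommonAncestor(root, a, b).val)  # -> 1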
|
[
"rxie@juniper.net"
] |
rxie@juniper.net
|
1864abd09c45d30c777b5127b78b028f192c006a
|
ce3f2b03f38076b75544ab901662e6aeda35d97a
|
/manage.py
|
0f521b7d0f5bf1caf2215a08b13f9ff161682059
|
[] |
no_license
|
DivingCats/reflask
|
98799b7f693101a211152701cace06ef627233f3
|
1be5c61f3cf48b4e6e6a15fee56930f8166d3cd6
|
refs/heads/master
| 2022-12-09T22:13:58.644735
| 2020-02-08T09:56:14
| 2020-02-08T09:56:14
| 230,551,305
| 0
| 0
| null | 2022-12-08T03:34:28
| 2019-12-28T03:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/2/5 20:55
# @Author : DivingKitten
# @File : manage.py
# @Software: PyCharm
# @Desc : startup script
from app import create_app
from flask_script import Manager, Shell
app = create_app('default')
manager = Manager(app)
def make_shell_context():
return dict(app=app)
manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
def test():
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
# app.run()
manager.run()
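# Hedged usage note (not in the original file): with Flask-Script, the commands
# registered above are invoked from a shell, e.g.
#   python manage.py shell   # interactive shell with 'app' preloaded
#   python manage.py test    # runs unittest discovery over the 'tests' package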
|
[
"Unility@163.com"
] |
Unility@163.com
|