Dataset schema (column, dtype, observed range of values):

column                 dtype          stats
blob_id                string         lengths 40 to 40
directory_id           string         lengths 40 to 40
path                   string         lengths 2 to 616
content_id             string         lengths 40 to 40
detected_licenses      list           lengths 0 to 69
license_type           string         2 classes
repo_name              string         lengths 5 to 118
snapshot_id            string         lengths 40 to 40
revision_id            string         lengths 40 to 40
branch_name            string         lengths 4 to 63
visit_date             timestamp[us]
revision_date          timestamp[us]
committer_date         timestamp[us]
github_id              int64          2.91k to 686M
star_events_count      int64          0 to 209k
fork_events_count      int64          0 to 110k
gha_license_id         string         23 classes
gha_event_created_at   timestamp[us]
gha_created_at         timestamp[us]
gha_language           string         220 classes
src_encoding           string         30 classes
language               string         1 class
is_vendor              bool           2 classes
is_generated           bool           2 classes
length_bytes           int64          2 to 10.3M
extension              string         257 classes
content                string         lengths 2 to 10.3M
authors                list           lengths 1 to 1
author_id              string         lengths 0 to 212
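The records below follow this schema, one flattened file entry per record. As a minimal sketch of how a dataset with these columns could be loaded and filtered with the Hugging Face `datasets` library (the dataset path here is a hypothetical placeholder, since the actual source of this dump is not named):

from datasets import load_dataset

# Hypothetical dataset path; the real source of these rows is not stated.
ds = load_dataset("org/python-code-dump", split="train")

# Keep permissively licensed Python files that are neither vendored nor
# machine-generated, using the license_type / is_vendor / is_generated columns.
keep = ds.filter(
    lambda r: r["license_type"] == "permissive"
    and not r["is_vendor"]
    and not r["is_generated"]
)

# Inspect a few rows: repository, file path, and size in bytes.
for row in keep.select(range(3)):
    print(row["repo_name"], row["path"], row["length_bytes"])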
9a2b4bd952a3bd412a603232556bd9cad7508e62
9638fccea89ece61f7ba1f985f488bf3e8671155
/venv/bin/jp.py
3187218827e61b01a87d6828b56a3e2045077914
[]
no_license
ked66/ResearchNotes
7ada6bc14a54dd9c86719f901e090265738642b9
c653e02f78bf195dc417394baf0342033a9984e4
refs/heads/master
2023-03-04T05:00:48.261084
2021-02-12T20:15:27
2021-02-12T20:15:27
316,547,408
0
0
null
null
null
null
UTF-8
Python
false
false
1,717
py
#!/Users/katie/PycharmProjects/ResearchNotes/venv/bin/python

import sys
import json
import argparse
from pprint import pformat

import jmespath
from jmespath import exceptions


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('expression')
    parser.add_argument('-f', '--filename',
                        help=('The filename containing the input data. '
                              'If a filename is not given then data is '
                              'read from stdin.'))
    parser.add_argument('--ast', action='store_true',
                        help=('Pretty print the AST, do not search the data.'))
    args = parser.parse_args()
    expression = args.expression
    if args.ast:
        # Only print the AST
        expression = jmespath.compile(args.expression)
        sys.stdout.write(pformat(expression.parsed))
        sys.stdout.write('\n')
        return 0
    if args.filename:
        with open(args.filename, 'r') as f:
            data = json.load(f)
    else:
        data = sys.stdin.read()
        data = json.loads(data)
    try:
        sys.stdout.write(json.dumps(
            jmespath.search(expression, data), indent=4))
        sys.stdout.write('\n')
    except exceptions.ArityError as e:
        sys.stderr.write("invalid-arity: %s\n" % e)
        return 1
    except exceptions.JMESPathTypeError as e:
        sys.stderr.write("invalid-type: %s\n" % e)
        return 1
    except exceptions.UnknownFunctionError as e:
        sys.stderr.write("unknown-function: %s\n" % e)
        return 1
    except exceptions.ParseError as e:
        sys.stderr.write("syntax-error: %s\n" % e)
        return 1


if __name__ == '__main__':
    sys.exit(main())
[ "ked66@cornell.edu" ]
ked66@cornell.edu
f2d8006fa9d4e809157de1688060502edc3218c4
2368972f5cd45704b5ab1b4877f6409fc38bf693
/app/app.py
15563cc268e2f61394e02d4b08bcdf53cec19708
[]
no_license
sergiodias28/manobra
7d67498521aabb0d8c9a5d9ebce97d39099913cb
5f38eef2035547807ae8aaa095a76961cb372852
refs/heads/master
2021-01-19T00:24:57.666548
2016-08-11T23:12:26
2016-08-11T23:12:26
65,052,300
0
0
null
2016-08-11T23:12:27
2016-08-05T22:15:02
Python
ISO-8859-1
Python
false
false
2,234
py
# -*- coding: utf-8 -*-
"""
    Autman
    ~~~~~~

    Maneuver automation system.

    :copyright: (c) 2016 by Sergio Dias.
    :license: BSD, see LICENSE for more details.
"""

import os
import sys
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, jsonify, redirect, url_for, abort, \
    render_template, flash
from time import gmtime, strftime
import paramiko
import time

# create our little application :)
app = Flask(__name__)

# Load default config and override config from an environment variable
app.config.update(dict(
    #DATABASE=os.path.join(app.root_path, 'autman.db'),
    DEBUG=True,
    SECRET_KEY='bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=',
    USERNAME='admin',
    PASSWORD='default',
    IP_SAGE='192.168.0.18',
    USER_SAGE='sage',
    PASS_SAGE='sage'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)

# Connect to the database
conn = sqlite3.connect('autman.db')
comandos = conn.execute('select c.codigo as equipamento, c.tipo as tipo, a.comando as comando, d.codigo as unidade, b.descricao AS Acao from roteiro_comando a inner join roteiro_manobra_item b on b.id=a.id_roteiro_manobra_item inner join equipamento c on c.id=a.id_equipamento inner join unidade d on d.id=b.id_unidade')

if comandos:
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(app.config['IP_SAGE'], username=app.config['USER_SAGE'], password=app.config['PASS_SAGE'])
    for item_comando in comandos:
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("sage_ctrl %s:%s:%d %d" % (item_comando[3], item_comando[0], item_comando[1], item_comando[2]))
        print "sage_ctrl %s:%s:%d %d" % (item_comando[3], item_comando[0], item_comando[1], item_comando[2]), "%s" % (item_comando[4])
        time.sleep(4)
        #ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("sage_ctrl %s:%s:%d %d" % (item_comando['unidade'], item_comando['equipamento'], item_comando['tipo'], item_comando['comando']))
        #ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sage_ctrl JCD:14C1:52 0')
        #ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('ls')
        #for line in ssh_stdout:
        #    print '... ' + line.strip('\n')
    ssh.close()
[ "engsergiodias28@gmail.com" ]
engsergiodias28@gmail.com
baae6fae01fff3f6aec29b4e4d2b1d0690ecc8d7
41c74240ef78070ee5ad19ece21672e629da6881
/elections/migrations/0001_initial.py
47ba1d9d4599cd1ce1a4b0c10cf6582b2cf65c5b
[]
no_license
NamGungGeon/DjangoStudy
33d3f3d66bcc6a9dafa9cbeee10f55b705d1755f
7985d384f26538b78414148c485d4a126c199ad0
refs/heads/master
2021-01-23T20:07:23.033394
2017-09-08T11:03:46
2017-09-08T11:03:46
102,852,405
0
0
null
null
null
null
UTF-8
Python
false
false
732
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-03 14:08
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Candidate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
                ('introduction', models.TextField()),
                ('area', models.CharField(max_length=15)),
                ('party_number', models.IntegerField(default=1)),
            ],
        ),
    ]
[ "rndrjs123@naver.com" ]
rndrjs123@naver.com
23206587aae4835dbc893edeaad63d67170d75c3
23e877d2e65cdc49cf9a456845470f97194674bc
/src/main/resources/http/http_request.py
e9a3e1cdc87380b5ff871b18466c069841a84cdd
[ "MIT" ]
permissive
xebialabs-community/xld-github-dynamic-dictionaries-plugin
77da6a4fea1ca2b96207d77b0396011e088ac850
67c3a596f4a7f58f9d0a939bb57091d1f82c51ee
refs/heads/master
2021-07-13T17:15:15.222551
2020-11-02T12:49:14
2020-11-02T12:49:14
68,606,897
2
2
MIT
2021-03-26T22:14:23
2016-09-19T13:09:01
Python
UTF-8
Python
false
false
9,826
py
#
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

import re
import urllib

from java.lang import String

from org.apache.commons.codec.binary import Base64
from org.apache.http import HttpHost
from org.apache.http.client.config import RequestConfig
from org.apache.http.client.methods import HttpGet, HttpPost, HttpPut, HttpDelete
from org.apache.http.util import EntityUtils
from org.apache.http.impl.client import HttpClients

from http.http_response import HttpResponse


class HttpRequest:
    def __init__(self, params, username=None, password=None):
        """
        Builds an HttpRequest

        :param params: an HttpConnection
        :param username: the username (optional, it will override the credentials defined on the HttpConnection object)
        :param password: an password (optional, it will override the credentials defined on the HttpConnection object)
        """
        self.params = params
        self.username = username
        self.password = password

    def do_request(self, **options):
        """
        Performs an HTTP Request

        :param options: A keyword arguments object with the following properties :
            method: the HTTP method : 'GET', 'PUT', 'POST', 'DELETE' (optional: GET will be used if empty)
            context: the context url (optional: the url on HttpConnection will be used if empty)
            body: the body of the HTTP request for PUT & POST calls (optional: an empty body will be used if empty)
            contentType: the content type to use (optional, no content type will be used if empty)
            headers: a dictionary of headers key/values (optional, no headers will be used if empty)
        :return: an HttpResponse instance
        """
        request = self.build_request(
            options.get('method', 'GET'),
            options.get('context', ''),
            options.get('entity', ''),
            options.get('contentType', None),
            options.get('headers', None))
        return self.execute_request(request)

    def do_request_without_headers(self, **options):
        """
        Performs an HTTP Request

        :param options: A keyword arguments object with the following properties :
            method: the HTTP method : 'GET', 'PUT', 'POST', 'DELETE' (optional: GET will be used if empty)
            context: the context url (optional: the url on HttpConnection will be used if empty)
            body: the body of the HTTP request for PUT & POST calls (optional: an empty body will be used if empty)
            contentType: the content type to use (optional, no content type will be used if empty)
            headers: a dictionary of headers key/values (optional, no headers will be used if empty)
        :return: an HttpResponse instance
        """
        request = self.build_request_without_headers(
            options.get('method', 'GET'),
            options.get('context', ''),
            options.get('entity', ''))
        return self.execute_request(request)

    def get(self, context, **options):
        """
        Performs an Http GET Request

        :param context: the context url
        :param options: the options keyword argument described in do_request()
        :return: an HttpResponse instance
        """
        options['method'] = 'GET'
        options['context'] = context
        return self.do_request(**options)

    def put(self, context, entity, **options):
        """
        Performs an Http PUT Request

        :param context: the context url
        :param body: the body of the HTTP request
        :param options: the options keyword argument described in do_request()
        :return: an HttpResponse instance
        """
        options['method'] = 'PUT'
        options['context'] = context
        options['entity'] = entity
        return self.do_request(**options)

    def post(self, context, entity, **options):
        """
        Performs an Http POST Request

        :param context: the context url
        :param body: the body of the HTTP request
        :param options: the options keyword argument described in do_request()
        :return: an HttpResponse instance
        """
        options['method'] = 'POST'
        options['context'] = context
        options['entity'] = entity
        return self.do_request(**options)

    def post_without_headers(self, context, entity, **options):
        """
        Performs an Http POST Request

        :param context: the context url
        :param body: the body of the HTTP request
        :param options: the options keyword argument described in do_request()
        :return: an HttpResponse instance
        """
        options['method'] = 'POST'
        options['context'] = context
        options['entity'] = entity
        return self.do_request_without_headers(**options)

    def delete(self, context, **options):
        """
        Performs an Http DELETE Request

        :param context: the context url
        :param options: the options keyword argument described in do_request()
        :return: an HttpResponse instance
        """
        options['method'] = 'DELETE'
        options['context'] = context
        return self.do_request(**options)

    def build_request(self, method, context, entity, contentType, headers):
        url = self.quote(self.create_path(self.params.getUrl(), context))
        method = method.upper()
        if method == 'GET':
            request = HttpGet(url)
        elif method == 'POST':
            request = HttpPost(url)
            request.setEntity(entity)
        elif method == 'PUT':
            request = HttpPut(url)
            request.setEntity(entity)
        elif method == 'DELETE':
            request = HttpDelete(url)
        else:
            raise Exception('Unsupported method: ' + method)

        request.addHeader('Content-Type', contentType)
        request.addHeader('Accept', contentType)
        self.set_credentials(request)
        self.set_proxy(request)
        self.setHeaders(request, headers)
        return request

    def build_request_without_headers(self, method, context, entity):
        url = self.quote(self.create_path(self.params.getUrl(), context))
        method = method.upper()
        if method == 'GET':
            request = HttpGet(url)
        elif method == 'POST':
            request = HttpPost(url)
            request.setEntity(entity)
        elif method == 'PUT':
            request = HttpPut(url)
            request.setEntity(entity)
        elif method == 'DELETE':
            request = HttpDelete(url)
        else:
            raise Exception('Unsupported method: ' + method)

        self.set_credentials(request)
        self.set_proxy(request)
        return request

    def create_path(self, url, context):
        url = re.sub('/*$', '', url)
        if context is None:
            return url
        elif context.startswith('/'):
            return url + context
        else:
            return url + '/' + context

    def quote(self, url):
        return urllib.quote(url, ':/?&=%')

    def set_credentials(self, request):
        if self.username:
            username = self.username
            password = self.password
        elif self.params.getUsername():
            username = self.params.getUsername()
            password = self.params.getPassword()
        else:
            return
        encoding = Base64.encodeBase64String(String(username + ':' + password).getBytes())
        request.addHeader('Authorization', 'Basic ' + encoding)

    def set_proxy(self, request):
        if not self.params.getProxyHost():
            return
        proxy = HttpHost(self.params.getProxyHost(), int(self.params.getProxyPort()))
        config = RequestConfig.custom().setProxy(proxy).build()
        request.setConfig(config)

    def setHeaders(self, request, headers):
        if headers:
            for key in headers:
                request.setHeader(key, headers[key])

    def execute_request(self, request):
        client = None
        response = None
        try:
            client = HttpClients.createDefault()
            response = client.execute(request)
            status = response.getStatusLine().getStatusCode()
            entity = response.getEntity()
            result = EntityUtils.toString(entity, "UTF-8") if entity else None
            headers = response.getAllHeaders()
            EntityUtils.consume(entity)
            return HttpResponse(status, result, headers)
        finally:
            if response:
                response.close()
            if client:
                client.close()
[ "bmoussaud@xebialabs.com" ]
bmoussaud@xebialabs.com
a399756b98d4f56f840f7c66e636e9b73a4a9272
86a1aab74d3c1c991c8effa835ce8f9dfcf980b3
/utils/src/arguments.py
36b2e4e521b8431242ca3521587776f357f90e0d
[ "MIT" ]
permissive
CBIIT/nci-hitif
a97e93c005c079d7650b2ef73bcd77e518a30fcd
2f825cbcba92ff2fdffac60de56604578f31e937
refs/heads/master
2023-04-15T04:55:05.703576
2021-04-21T20:11:06
2021-04-21T20:11:06
97,508,922
1
5
MIT
2023-03-24T23:35:20
2017-07-17T18:25:44
Jupyter Notebook
UTF-8
Python
false
false
2,106
py
import argparse


def get_unet_parser():
    parser = argparse.ArgumentParser(description="spot learner")

    parser.add_argument('images',
                        help="The 2d numpy array image stack or 128 * 128")
    parser.add_argument('masks',
                        help="The 2d numpy array mask (16bits) stack or 128 * 128")
    parser.add_argument('--nlayers', default=4, type=int, dest='nlayers',
                        help="The number of layer in the forward path ")
    parser.add_argument('--num_filters', default=32, type=int, dest='num_filters',
                        help="The number of convolution filters in the first layer")
    # default given as a number so the default matches the declared int type
    parser.add_argument('--conv_size', default=3, type=int, dest='conv_size',
                        help="The convolution filter size.")
    parser.add_argument('--dropout', default=None, type=float, dest='dropout',
                        help="Include a dropout layer with a specific dropout value.")
    parser.add_argument('--activation', default='relu', dest='activation',
                        help="Activation function.")
    parser.add_argument('--augmentation', default=1, type=float, dest='augmentation',
                        help="Augmentation factor for the training set.")
    parser.add_argument('--initialize', default=None, dest='initialize',
                        help="Numpy array for weights initialization.")
    parser.add_argument('--normalize_mask', action='store_true', dest='normalize_mask',
                        help="Normalize the mask in case of uint8 to 0-1 by dividing by 255.")
    parser.add_argument('--predict', action='store_true', dest='predict',
                        help="Use the model passed in initialize to perform segmentation")
    parser.add_argument('--loss_func', default='dice', dest='loss_func',
                        help="Keras supported loss function, or 'dice'. ")
    parser.add_argument('--last_act', default='sigmoid', dest='last_act',
                        help="The activation function for the last layer.")
    parser.add_argument('--batch_norm', default=False, action="store_true", dest='batch_norm',
                        help="Enable batch normalization")
    # default given as a number so the default matches the declared float type
    parser.add_argument('--lr', default=1e-5, type=float, dest='lr',
                        help="Initial learning rate for the optimizer")
    parser.add_argument('--rotate', default=False, action="store_true", dest='rotate',
                        help="")

    return parser
[ "george.zaki@nih.gov" ]
george.zaki@nih.gov
db996257ef666016749abab744fca60cc7c79dc3
2d2fcc54af513a84bc624589dc7c6a0316848784
/microbe/lib/python3.6/hmac.py
3433dd988ff9ff0a1ec0203ba62a078b796dcc94
[]
no_license
tatyana-perlova/microbe-x
9becf3a176e1277a3bb4ffcd96d4b25365038bb8
5b364c09dcf43c3ab237c8d9304a4eaa9ecff33f
refs/heads/master
2022-12-13T18:29:59.372327
2018-02-06T19:35:11
2018-02-06T19:35:11
120,374,885
0
0
null
null
null
null
UTF-8
Python
false
false
46
py
/home/perlusha/anaconda3/lib/python3.6/hmac.py
[ "tatyana.perlova@gmail.com" ]
tatyana.perlova@gmail.com
230c93a04644bae6fca2f3d207a8e00cba3a24de
beae5a43e5bf3d3627d49531e5cc8365c204d15c
/contactnetwork/migrations/0002_auto_20180117_1457.py
7da9fdc770627bede76a26f59e0e2291f2f612df
[ "Apache-2.0" ]
permissive
protwis/protwis
e8bbe928a571bc9d7186f62963d49afe1ed286bd
75993654db2b36e2a8f67fa38f9c9428ee4b4d90
refs/heads/master
2023-09-01T18:16:34.015041
2023-04-06T11:22:30
2023-04-06T11:22:30
50,017,823
31
92
Apache-2.0
2023-07-28T06:56:59
2016-01-20T09:02:48
Python
UTF-8
Python
false
false
3,796
py
# Generated by Django 2.0.1 on 2018-01-17 13:57

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('contactnetwork', '0001_initial'),
        ('structure', '0001_initial'),
        ('residue', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='interactingresiduepair',
            name='referenced_structure',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Structure'),
        ),
        migrations.AddField(
            model_name='interactingresiduepair',
            name='res1',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='residue1', to='residue.Residue'),
        ),
        migrations.AddField(
            model_name='interactingresiduepair',
            name='res2',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='residue2', to='residue.Residue'),
        ),
        migrations.CreateModel(
            name='FaceToEdgeInteraction',
            fields=[
                ('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
                ('res1_has_face', models.BooleanField()),
            ],
            options={
                'db_table': 'interaction_aromatic_face_edge',
            },
            bases=('contactnetwork.aromaticinteraction',),
        ),
        migrations.CreateModel(
            name='FaceToFaceInteraction',
            fields=[
                ('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
            ],
            options={
                'db_table': 'interaction_aromatic_face_face',
            },
            bases=('contactnetwork.aromaticinteraction',),
        ),
        migrations.CreateModel(
            name='PiCationInteraction',
            fields=[
                ('aromaticinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.AromaticInteraction')),
                ('res1_has_pi', models.BooleanField()),
            ],
            options={
                'db_table': 'interaction_aromatic_pi_cation',
            },
            bases=('contactnetwork.aromaticinteraction',),
        ),
        migrations.CreateModel(
            name='PolarBackboneSidechainInteraction',
            fields=[
                ('polarinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.PolarInteraction')),
                ('res1_is_sidechain', models.BooleanField()),
            ],
            options={
                'db_table': 'interaction_polar_backbone_sidechain',
            },
            bases=('contactnetwork.polarinteraction',),
        ),
        migrations.CreateModel(
            name='PolarSidechainSidechainInteraction',
            fields=[
                ('polarinteraction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='contactnetwork.PolarInteraction')),
            ],
            options={
                'db_table': 'interaction_polar_sidechain_sidechain',
            },
            bases=('contactnetwork.polarinteraction',),
        ),
    ]
[ "christian@munk.be" ]
christian@munk.be
b561d9b1c21f08c5647bd599c14beb24eee2dc86
e3d06e2f11e5afc623ffbd59143fa8b3dbd8f1f7
/DCGAN_train.py
056a21a18c7d859cb7c96536dd32ba00f620ae1e
[]
no_license
yangpeiwen/implementation
931f6f1d8d475affcb95b6fd0baacfc0ec1325f5
a2bf3e1de98a78173f73e003bd888de9cd4a77e9
refs/heads/master
2020-04-29T15:18:00.296864
2019-03-27T12:47:09
2019-03-27T12:47:09
176,223,518
0
0
null
null
null
null
UTF-8
Python
false
false
2,365
py
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

# The D network uses LeakyReLU as its activation function
from __future__ import division, print_function, absolute_import

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from network_construction import DCGAN

# Load the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# The two network inputs: random noise for the generator and real images for the discriminator
noise_input = tf.placeholder(tf.float32, shape=[None, DCGAN.noise_dim])
real_image_input = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

# is_training placeholder needed by batch normalization
# It is needed both here and by the functions in the DCGAN file, so reuse the placeholder created there
is_training = DCGAN.is_training

# Instantiate the training network; the returned gen_vars and disc_vars are unused for now
train_gen, train_disc, gen_loss, disc_loss, gen_vars, disc_vars = DCGAN.DCGAN_train(noise_input, real_image_input)

# Start training
init = tf.global_variables_initializer()
sess = tf.Session()

# The training and network parameters below also exist in the DCGAN file;
# e.g. num_steps could be dropped here and DCGAN.num_steps used directly
# Training Params
num_steps = 10000
batch_size = 128
lr_generator = 0.002
lr_discriminator = 0.002

# Network Params
image_dim = 784  # 28*28 pixels * 1 channel
noise_dim = 100  # Noise data points

# Initialize variables and create a saver object for checkpointing
sess.run(init)
saver = tf.train.Saver()
model_path = "/tmp/DCGAN_model.ckpt"

for i in range(1, DCGAN.num_steps + 1):
    batch_x, _ = mnist.train.next_batch(DCGAN.batch_size)
    batch_x = np.reshape(batch_x, newshape=[-1, 28, 28, 1])
    batch_x = batch_x * 2. - 1.

    # Train the discriminator
    z = np.random.uniform(-1., 1., size=[DCGAN.batch_size, DCGAN.noise_dim])
    _, dl = sess.run([train_disc, disc_loss],
                     feed_dict={real_image_input: batch_x, noise_input: z, is_training: True})

    # Train the generator
    z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
    _, gl = sess.run([train_gen, gen_loss],
                     feed_dict={noise_input: z, is_training: True})

    if i % 500 == 0 or i == 1:
        print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))

save_path = saver.save(sess, model_path)
print("Model saved in file: %s" % save_path)
[ "yangpeiwen" ]
yangpeiwen
15d614e5ec83637c824c55ec0c2d7c4291482954
55877a854a6325b0ba8265645b94184f56839480
/spider/settings.py
e54a1a60ae1f076b59f6850ee210e7d072d32e79
[]
no_license
xiaowuwuwuwuwu/scrapy_pager_frame
cc48cee4daaa655d78be336678ed18aa6e9037ca
bc3d9bd26b842fe66dba98ca3982ffd2fa1b8d39
refs/heads/master
2020-09-22T15:44:49.016852
2019-12-02T02:17:38
2019-12-02T02:17:38
225,263,455
0
0
null
null
null
null
UTF-8
Python
false
false
4,803
py
# -*- coding: utf-8 -*-

# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'spider'

SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'

# Obey robots.txt rules
# Do not respect robots.txt when crawling
ROBOTSTXT_OBEY = False

# Redis
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
#SCHEDULER_IDLE_BEFORE_CLOSE = 10
#REDIS_HOST = 'localhost'
#REDIS_PORT = 6379

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# Number of concurrent requests
#CONCURRENT_REQUESTS = 100

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = True
TELNETCONSOLE_HOST = '127.0.0.1'
TELNETCONSOLE_PORT = '6023'

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Middlewares
# KEY = middleware; VALUE = middleware order
SPIDER_MIDDLEWARES = {
    #'spider.middlewares.TutorialSpiderMiddleware': 543,
    #'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 531,
    'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': 500,
#    'tutorial.openextension.SpiderOpenCloseLogging': 501
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Execution priority
# KEY = pipeline file; VALUE = pipeline order
ITEM_PIPELINES = {
    'spider.pipelines.SpiderPipeline': 300,
    #'scrapy_redis.pipelines.RedisPipeline': 301
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# Enable crawl-speed throttling
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
# Initial delay of 3 seconds
#AUTOTHROTTLE_START_DELAY = 3
# The maximum download delay to be set in case of high latencies
# Maximum delay of 20 seconds when requests go unanswered
#AUTOTHROTTLE_MAX_DELAY = 20
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# Debug info while crawling
#AUTOTHROTTLE_DEBUG = True

DOWNLOAD_DELAY = 5

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Test extension
#MYEXT_ENABLED = True

################ Logging ################
# Enable logging
#LOG_ENABLED = True
# Log file location
#LOG_FILE = "日志路径"
# Log encoding
#LOG_ENCODING = "utf-8"
# Log level
#LOG_LEVEL = "DEBUG"
# Standard output
#LOG_STDOUT = False

# Enable cookie tracking
COOKIES_ENABLED = True
COOKIES_DEBUG = True

################## Web ###############
# Enable the web service
WEBSERVICE_ENABLED = True
# Log file location
WEBSERVICE_LOGFILE = "日志路径"
# Ports
WEBSERVICE_PORT = [6080, 7030]
# Host
WEBSERVICE_HOST = '127.0.0.1'

################# Auto-throttle ###############
# Enable the AutoThrottle extension
#AUTOTHROTTLE_ENABLED = True
# Initial download delay (in seconds)
#AUTOTHROTTLE_START_DELAY = 1.0
# Maximum download delay under high latency (in seconds)
#AUTOTHROTTLE_MAX_DELAY = 60.0
# Enable AutoThrottle debug mode, showing every response received
#AUTOTHROTTLE_DEBUG = True
#DOWNLOAD_DELAY = 1.0
[ "1059174412@qq.com" ]
1059174412@qq.com
c423950c678b966d72c428c4dadd7d1045308bbb
c536c764aab4170c64f3f8b78bd91593dcb161a3
/vigenereb62.py
037292215097560084e9451db9c5655b7c2fb996
[]
no_license
numberly/vigenere-b62
63bbc95c1f9390e9623a5882a9c2a14d110851b4
3dea3394ee557ba2e589af014cbc4454ebbbc874
refs/heads/master
2023-02-16T02:13:31.254670
2021-01-11T15:24:58
2021-01-11T15:24:58
328,698,862
4
1
null
null
null
null
UTF-8
Python
false
false
535
py
def iter_reverse_digits(number, base):
    while number != 0:
        digit = number % base
        yield digit
        number -= digit
        number //= base


def encode(alphabets, seed, size=6):
    if len(alphabets) < size:
        raise ValueError("There should be an alphabet per character you want")
    secret = "".join(
        alphabets[i][digit]
        for i, digit in enumerate(iter_reverse_digits(seed, len(alphabets[0])))
    )
    secret += "".join(alphabets[i][0] for i in range(len(secret), size))
    return secret
[ "julien@thebault.co" ]
julien@thebault.co
a29347fa5a55f754c48ba25f7b9c8c93f00f8db4
a45e45b5b3b706f369f586e7b03c5972eb21b926
/pythonsyntax/any7.py
61226e6cc4351b5b432ec75fe82928ac0cf3f5e7
[]
no_license
khagerman/Python-Practice
44882bbcf876ab06536da0d4ec0e1a5d9b2bf10d
982dc7595691f32a6da6ef8fb918ec9dfdfdfd93
refs/heads/main
2023-03-27T15:27:51.889132
2021-03-31T21:38:08
2021-03-31T21:38:08
350,499,257
0
0
null
null
null
null
UTF-8
Python
false
false
265
py
def any7(nums):
    """Are any of these numbers a 7? (True/False)"""
    # YOUR CODE HERE
    for num in nums:
        if num == 7:
            return True
    return False


print("should be true", any7([1, 2, 7, 4, 5]))
print("should be false", any7([1, 2, 4, 5]))
[ "71734063+khagerman@users.noreply.github.com" ]
71734063+khagerman@users.noreply.github.com
705c2db27a5d0906938b557caab4e18133150a24
19ac1d0131a14ba218fd2c55d585170222eb9400
/social_login/wsgi.py
9523f947cda705e24cea5e1c828e7fb9ee17044c
[]
no_license
oereo/social-login
4ed27658c585dd0a24484e628e053070fe012518
41e67b889354189c986da45bcf03c20c1f1063e3
refs/heads/master
2023-01-15T22:38:06.667909
2020-11-22T12:12:08
2020-11-22T12:12:08
303,985,281
1
0
null
null
null
null
UTF-8
Python
false
false
401
py
""" WSGI config for social_login project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'social_login.settings') application = get_wsgi_application()
[ "dlstpgns0406@gmail.com" ]
dlstpgns0406@gmail.com
1bff440e67a7189665b42fe0833a0c9b007950e7
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_defenders.py
bb7548df4efbbe4fec4aeb39f3eec118e52a2ba7
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
252
py
from xai.brain.wordbase.nouns._defender import _DEFENDER


# class header
class _DEFENDERS(_DEFENDER, ):

    def __init__(self,):
        _DEFENDER.__init__(self)
        self.name = "DEFENDERS"
        self.specie = 'nouns'
        self.basic = "defender"
        self.jsondata = {}
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
8339c4b6670fe18b61771e18903739838373f58c
01ce2eec1fbad3fb2d98085ebfa9f27c7efb4152
/itertools/itertools-combinations.py
b32166fe2a76aece52bb636b0b8705a63f17c3ce
[ "MIT" ]
permissive
anishLearnsToCode/hackerrank-python
4cfeaf85e33f05342df887896fa60dae5cc600a5
7d707c07af051e7b00471ebe547effd7e1d6d9d9
refs/heads/master
2023-01-01T23:39:01.143328
2020-11-01T07:31:15
2020-11-01T07:31:15
265,767,347
8
4
null
null
null
null
UTF-8
Python
false
false
252
py
# https://www.hackerrank.com/challenges/itertools-combinations/problem

import itertools

line = input().split()
word = sorted(line[0])
k = int(line[1])

for i in range(1, k + 1):
    for j in itertools.combinations(word, i):
        print(''.join(j))
[ "anish_bt2k16@dtu.ac.in" ]
anish_bt2k16@dtu.ac.in
e4603076015ad9b338c87de21b02807faa509853
91948d5be26636f1f2b941cb933701ea626a695b
/amazon_longest_substring_with_no_repeat.py
30208e55e14fb6ba9b3eabe03ddda30851bc6a3b
[ "MIT" ]
permissive
loghmanb/daily-coding-problem
4ae7dd201fde5ee1601e0acae9e9fc468dcd75c9
b2055dded4276611e0e7f1eb088e0027f603aa7b
refs/heads/master
2023-08-14T05:53:12.678760
2023-08-05T18:12:38
2023-08-05T18:12:38
212,894,228
1
0
null
null
null
null
UTF-8
Python
false
false
2,709
py
'''
Longest Substring Without Repeat
Asked in: Amazon
https://www.interviewbit.com/problems/longest-substring-without-repeat/

Given a string, find the length of the longest substring without repeating characters.

Example:

The longest substring without repeating letters for "abcabcbb" is "abc", which the length is 3.
For "bbbbb" the longest substring is "b", with the length of 1.
'''


# @param A : string
# @return an integer
def lengthOfLongestSubstring(A):
    if not A:
        return 0
    result = 0
    letters = set()
    N = len(A)
    i = j = 0
    while i < N and j < N:
        if A[j] in letters:
            letters.remove(A[i])
            i += 1
        else:
            letters.add(A[j])
            j += 1
            result = max(result, j - i)
    return result


if __name__ == "__main__":
    data = [
        ['abcabcbb', 3],
        ['Wnb9z9dMc7E8v1RTUaZPoDNIAXRlzkqLaa97KMWLzbitaCkRpiE4J4hJWhRcGnC8H6mwasgDfZ76VKdXhvEYmYrZY4Cfmf4HoSlchYWFEb1xllGKyEEmZOLPh1V6RuM7Mxd7xK72aNrWS4MEaUmgEn7L4rW3o14Nq9l2EN4HH6uJWljI8a5irvuODHY7A7ku4PJY2anSWnfJJE1w8p12Ks3oZRxAF3atqGBlzVQ0gltOwYmeynttUmQ4QBDLDrS4zn4VRZLosOITo4JlIqPD6t4NjhHThOjJxpMp9fICkrgJeGiDAwsb8a3I7Txz5BBKV9bEfMsKNhCuY3W0ZHqY0MhBfz1CbYCzwZZdM4p65ppP9s5QJcfjadmMMi26JKz0TVVwvNA8LP5Vi1QsxId4SI19jfcUH97wmZu0pbw1zFtyJ8GAp5yjjQTzFIboC1iRzklnOJzJld9TMaxqvBNBJKIyDjWrdfLOY8FGMOcPhfJ97Dph35zfxYyUf4DIqFi94lm9J0skYqGz9JT0kiAABQZDazZcNi80dSSdveSl6h3dJjHmlK8qHIlDsqFd5FMhlEirax8WA0v3NDPT8vPhwKpxcnVeu14Gcxr3h1wAXXV0y7Xy9qqB2NQ5HQLJ7cyXAckEYHsLCPSy28xcdNJatx1KLWohOQado4WywJbGvsFR17rKmvOPABweXnFD3odrbSMD4Na4nuBBswvMmFRTUOcf7jZi4z5JnJqXz6hitaPnaEtjoSEBq82a52nvqYy7hhldBoxen2et2OMadVEHeTYLL7GLsIhTP6UizHIuzcJMljo4lFgW5AyrfUlIBPAlhwaSiJtTvcbVZynDSM6RO1PqFKWKg2MHIgNhjuzENg2oFCfW7z5KJvEL9qWqKzZNc0o3BMRjS04NCHFvhtsteQoQRgz84XZBHBJRdekCdcVVXu9c01gYRAz7oIAxN3zKZb64EFKssfQ4HW971jv3H7x5E9dAszA0HrKTONyZDGYtHWt4QLhNsIs8mo4AIN7ecFKewyvGECAnaJpDn1MTTS4yTgZnm6N6qnmfjVt6ZU51F9BxH0jVG0kovTGSjTUkmb1mRTLQE5mTlVHcEz3yBOh4WiFFJjKJdi1HBIBaDL4r45HzaBvmYJPlWIomkqKEmQ4rLAbYG7C5rFfpMu8rHvjU7hP0JVvteGtaGn7mqeKsn7CgrJX1tb8t0ldaS3iUy8SEKAo5IZHNKOfEaij3nI4oRVzeVOZsH91pMsA4jRYgEohubPW8ciXwVrFi1qEWjvB8gfalyP60n1fHyjsiLW0T5uY1JzQWHKCbLVh7QFoJFAEV0L516XmzIo556yRH1vhPnceOCjebqgsmO78AQ8Ir2d4pHFFHAGB9lESn3OtJye1Lcyq9D6X93UakA3JKVKEt6JZDLVBMp4msOefkPKSw59Uix9d9kOQm8WCepJTangdNSOKaxblZDNJ5eHvEroYacBhd9UdafEitdF3nfStF7AhkSfQVC61YWWkKTNdx96OoJGTnxuqt4oFZNFtO7aMuN3IJAkw3m3kgZFRGyd3D3wweagNL9XlYtvZwejbjpkDOZz33C0jbEWaMEaUPw6BG49XqyQoUwtriguO0yvWyaJqD4ye3o0E46huKYAsdKAq6MLWMxF6tfyPVaoqOGd0eOBHbAF89XXmDd4AIkoFPXkAOW8hln5nXnIWP6RBbfEkPPbxoToMbV', 27]
    ]
    for d in data:
        print('input', d[0], lengthOfLongestSubstring(d[0]))
[ "loghmanb@gmail.com" ]
loghmanb@gmail.com
aecd6191686bd841066715f69f2dbd3ae327fd10
6c55174a3ecfc0757ed04700ea4c549e6b9c45d2
/lib/koala/utils/mail.py
7b1dd4c0909e722872e3f53c3cf673b8b6b516a3
[]
no_license
adefelicibus/koala-server
ce7cbc584b0775482b60e2eb72794104f2fe0cf3
defec28c30a9fc4df2b81efeb8df4fc727768540
refs/heads/master
2020-05-25T15:43:43.772302
2016-04-26T02:08:56
2016-04-26T02:08:56
38,928,778
2
3
null
2016-02-23T20:37:11
2015-07-11T14:38:48
Python
UTF-8
Python
false
false
2,478
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from email.Utils import formatdate
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import datetime
import os

from koala.utils import show_error_message

# TODO: take the smtp configuration from galaxy's config.ini file
# TODO: review exception rules


def get_message_email(tool_name):
    try:
        now = datetime.datetime.now()
        tupla = now.timetuple()
        data = str(tupla[2]) + '/' + str(tupla[1]) + '/' + \
            str(tupla[0]) + ' ' + str(tupla[3]) + ':' + str(tupla[4]) + ':' + str(tupla[5])

        tool_name = tool_name.replace('_', ' ')

        messageEmail = '''Hi, Your simulation has been conclued at ''' + data + '''.
You have to go to your History and download it.
Best Regards.

%s''' % tool_name

        return messageEmail
    except Exception, e:
        show_error_message("Error while getMessageEmail email!\n%s" % e)


def send_email(de, para, assunto, mensagem, arquivos, servidor):
    try:
        # Create the message object
        msg = MIMEMultipart()

        # Set the headers
        msg['From'] = de
        msg['To'] = para
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = assunto

        # Attach the message text
        msg.attach(MIMEText(mensagem))

        # Attach the files
        for arquivo in arquivos:
            parte = MIMEBase('application', 'octet-stream')
            parte.set_payload(open(arquivo, 'rb').read())
            encoders.encode_base64(parte)
            parte.add_header(
                'Content-Disposition',
                'attachment; filename="%s"' % os.path.basename(arquivo)
            )
            msg.attach(parte)

        # Connect to the SMTP server
        smtp = smtplib.SMTP(servidor, 587)
        smtp.ehlo()
        smtp.starttls()
        smtp.ehlo()

        # Log in to the server
        smtp.login('adefelicibus@gmail.com', 'mami1752@')

        try:
            # Send the e-mail
            smtp.sendmail(de, para, msg.as_string())
        finally:
            # Disconnect from the server
            smtp.close()
    except Exception, e:
        show_error_message("Error when SendEmail:\n%s" % e)
[ "adefelicibus@gmail.com" ]
adefelicibus@gmail.com
ac0eac50d356d658ba3b95fa27707c44039e1d5d
a96b98aaec11160c0b9c5f3cee3471c2f50e8c1d
/flask_backend/question-classification.py
120ba1ab70086e7ddb7908cd6d156d938cf2b7b6
[]
no_license
duvsr01/NLP-based-QA-System-for-custom-KG
ea486c5cdede0ef6a4882b3490e15b9be6e4ce97
ae7af74b21079b1cc441676064e9aa387d8177a2
refs/heads/main
2023-04-30T21:52:52.736928
2021-05-15T00:26:40
2021-05-15T00:26:40
305,169,636
0
0
null
null
null
null
UTF-8
Python
false
false
1,947
py
import pickle

# Training data

# X is the sample sentences
X = [
    'How many courses are taught by Dan Harkey?',
    'What is number of faculty in SJSU?',
    'How many machine learning courses are on Coursera?',
    'How many students are in the world?',
    'What is the email of Ram Shyam?',
    'What is the email address of Albert Einstein?',
    'What is the deadline to pay Fall 2021 Tuition Fee?',
    'What are office hours of Vinodh Gopinath?',
    'How many courses are offered by University of Hogwarts?',
    'How to pay tuition fees?',
    'Phone number of Mr Sam Igloo?',
    'How can I get a bus pass?'
]

# y is the intent class corresponding to sentences in X
y = [
    'aggregation_question',
    'aggregation_question',
    'aggregation_question',
    'aggregation_question',
    'factoid_question',
    'factoid_question',
    'factoid_question',
    'factoid_question',
    'aggregation_question',
    'factoid_question',
    'factoid_question',
    'factoid_question'
]

# Define the classifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline

clf = Pipeline(
    [
        ('tfidf', TfidfVectorizer()),
        ('sgd', SGDClassifier())
    ]
)

## Train the classifier
#clf.fit(X, y)

# Test your classifier
## New sentences (that weren't in X and your model never seen before)
new_sentences = [
    'What is number of students that study in CMPE department?',
    'How can I reach CMPE department?',
    'How to apply for graduation?',
    'How many faulty in CS department?',
    'Number of students CS department?',
    'What is the address of CS department?'
]

#predicted_intents = clf.predict(new_sentences)

filename = 'finalized_model.sav'
#pickle.dump(clf, open(filename, 'wb'))

loaded_model = pickle.load(open(filename, 'rb'))
predicted_intents = loaded_model.predict(new_sentences)
print(predicted_intents)
[ "vijendersingh.aswal@sjsu.edu" ]
vijendersingh.aswal@sjsu.edu
4dbac7a2a1cb6e13f4d8d326dca4790eaae5658c
2715a573e2faf4d52af2578c40e4fd3cbac80c05
/analysis/spectrum.py
9134c26ebdcf3d665cf13ef2876cc2d3e022a42b
[]
no_license
legend-exp/CAGE
9a67d945727831c3b084e177db3a2ff28e4599b1
71dfd9f27b6125853e2d3e09d07db7836bf10348
refs/heads/master
2023-08-03T21:45:57.955025
2023-08-03T20:18:33
2023-08-03T20:18:33
198,919,238
0
15
null
2022-07-01T17:00:15
2019-07-26T00:35:47
Jupyter Notebook
UTF-8
Python
false
false
439
py
import sys, h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pygama.io.io_base as io


def main():
    filename = '/Users/gothman/Data/CAGE/pygama_dsp/dsp_run42.lh5'
    plot_spectrum(filename)


def plot_spectrum(filename):
    lh5 = io.LH5Store()
    df = lh5.read_object('data', filename).get_dataframe()
    df['trapE'].plot.hist(bins=1000)
    plt.show()


if __name__ == '__main__':
    main()
[ "gulden.othman@gmail.com" ]
gulden.othman@gmail.com
4082075c20005fab8b339bf42d30021fa63be367
efdc94781d5be9e018c84d5ac5d1b988c2806c68
/images_dialog.py
0d5eaf1776a5d2229eca96f68c63264926d00079
[]
no_license
vadimmpog/PyCalib
bf0a8d46a086feef4bca5d33d7222578c1e98ff0
0508dd1745ef341f86f5d9b7977f05d7dc3c031b
refs/heads/main
2023-08-20T03:07:56.594670
2021-10-27T06:43:37
2021-10-27T06:43:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,255
py
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QDialog
from PyQt5 import QtWidgets
import imutils


class ImagesDialog(QDialog):
    def __init__(self, frames, show=False):
        super().__init__()
        self.current_image = 0
        self.frames = frames
        self.frames_num = len(frames)
        self.show = show
        if not show:
            self.selected_frames = [False for _ in range(self.frames_num)]

        self.setWindowTitle("Добавление")
        self.setFixedSize(724, 519)

        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setGeometry(QtCore.QRect(480, 470, 211, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")

        self.label = QtWidgets.QLabel(self)
        self.label.setMargin(30)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")

        self.gridLayoutWidget = QtWidgets.QWidget(self)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(260, 430, 195, 80))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")

        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")

        self.pushButton_2 = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_2.setObjectName("pushButton_2")
        self.gridLayout.addWidget(self.pushButton_2, 1, 1, 1, 1)

        self.pushButton = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton.setObjectName("pushButton")
        self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)

        self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_2.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)

        if not show:
            self.checkBox = QtWidgets.QCheckBox(self.gridLayoutWidget)
            self.checkBox.setObjectName("checkBox")
            self.gridLayout.addWidget(self.checkBox, 0, 1, 1, 1)

        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        QtCore.QMetaObject.connectSlotsByName(self)
        self.set_logic()

    def set_logic(self):
        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle("Просмотр фреймов")
        self.label.setText("Пустой кадр")
        self.pushButton_2.setText(">>")
        self.pushButton_2.clicked.connect(self.next_image)
        self.pushButton.setText("<<")
        self.pushButton.clicked.connect(self.previous_image)
        if not self.show:
            self.checkBox.setText("выбрать")
            self.checkBox.clicked.connect(self.select_frame)
        self.choose_frames()

    def choose_frames(self, i=0):
        self.label_2.setText(f"{i + 1}/{self.frames_num}")
        image = imutils.resize(self.frames[i], width=550)
        height, width, channel = image.shape
        bytesPerLine = 3 * width
        qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format_RGB888)
        pix = QPixmap.fromImage(qImg)
        self.label.setPixmap(pix)

    def next_image(self):
        if self.current_image < self.frames_num - 1:
            self.current_image += 1
            self.choose_frames(i=self.current_image)
            if not self.show:
                self.checkBox.setChecked(self.selected_frames[self.current_image])

    def previous_image(self):
        if self.current_image > 0:
            self.current_image -= 1
            self.choose_frames(i=self.current_image)
            if not self.show:
                self.checkBox.setChecked(self.selected_frames[self.current_image])

    def select_frame(self):
        self.selected_frames[self.current_image] = not self.selected_frames[self.current_image]

    def reject(self):
        super().reject()
        if not self.show:
            self.selected_frames = None
[ "vadimmm120@yandex.ru" ]
vadimmm120@yandex.ru
5722c5bd79ba59802f5e4174de590823f9b31f54
6b2a8dd202fdce77c971c412717e305e1caaac51
/solutions_5631989306621952_1/Python/Hotshot8325/Q2.py
c61b1a46284a8ff8a0e7daff7477923bbd7b7f0f
[]
no_license
alexandraback/datacollection
0bc67a9ace00abbc843f4912562f3a064992e0e9
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
refs/heads/master
2021-01-24T18:27:24.417992
2017-05-23T09:23:38
2017-05-23T09:23:38
84,313,442
2
4
null
null
null
null
UTF-8
Python
false
false
968
py
#CodeJam pancake problem
import csv
import string

#import data from test file in the form [[[],[]],[[],[]].... with [[],[]] being one test case
with open('a-large.in') as csvfile:
    testCase = csv.reader(csvfile, delimiter=' ', quotechar='|')
    rowNum = 0
    inputText = []
    #swapCount = []
    for row in testCase:
        #row = [str(i) for i in row]
        if rowNum == 0:
            numTestCases = int(row[0])
        else:
            inputText.append(row)
        rowNum = rowNum + 1

for i in range(0, numTestCases):
    letterInput = inputText[i][0]
    lastWord = letterInput[0]
    for j in range(1, len(letterInput)):
        if string.uppercase.index(letterInput[j]) >= string.uppercase.index(lastWord[0]):
            lastWord = letterInput[j] + lastWord
        else:
            lastWord = lastWord + letterInput[j]
    print "Case #" + str(i+1) + ": " + lastWord
[ "alexandra1.back@gmail.com" ]
alexandra1.back@gmail.com
22dbc2be582ff1eae04ea4b6343fb46b0511f014
20552c79d92593ab8c574a61ac0dcbd25aa09e2e
/Account/models.py
825a6a29f4baa7fb9b2f27a207b867a72a95be82
[]
no_license
junaidgirkar/Unicode_REST-API
85580f2c85148c1b11ee2fffaae8d8b40aa91def
d9f812f867aabec7df9458511dfb03e7794d7de7
refs/heads/master
2023-01-04T11:27:17.964846
2020-10-29T13:21:04
2020-10-29T13:21:04
297,365,250
1
2
null
2020-10-29T13:21:06
2020-09-21T14:29:20
Python
UTF-8
Python
false
false
2,331
py
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.utils.translation import ugettext_lazy as _

from .managers import UserManager, StudentManager, TeacherManager

# Create your models here.


class User(AbstractBaseUser, PermissionsMixin):
    email = models.EmailField(_('email address'), unique=True)
    first_name = models.CharField(_('first_name'), max_length=40)
    last_name = models.CharField(_('last name'), max_length=40)
    date_joined = models.DateTimeField(_('date joined'), auto_now_add=True)
    is_active = models.BooleanField(_('active'), default=True)
    is_staff = models.BooleanField(_('staff status'), default=False)
    is_superuser = models.BooleanField(_('is superuser'), default=False)
    is_admin = models.BooleanField(_('is admin'), default=False)
    is_student = models.BooleanField(_('is student'), default=False)
    is_teacher = models.BooleanField(_('is teacher'), default=False)

    objects = UserManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_short_name(self):
        return self.first_name

    def get_full_name(self):
        return self.first_name + "_" + self.last_name

    def save(self, *args, **kwargs):
        self.username = self.email
        super(User, self).save(*args, **kwargs)

    def __str__(self):
        return self.email


class Student(User):
    user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)
    user.is_student = True
    user.is_teacher = False
    branch = models.CharField(max_length=40)
    sap_id = models.CharField(max_length=12, default=0, blank=True)

    objects = StudentManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    def __str__(self):
        return self.user.email


class Teacher(User):
    user = models.OneToOneField(User, on_delete=models.CASCADE, parent_link=True)
    user.is_student = False
    user.is_teacher = True
    subject = models.CharField(max_length=40)

    objects = TeacherManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    def __str__(self):
        return self.user.email
[ "60307291+junaidgirkar@users.noreply.github.com" ]
60307291+junaidgirkar@users.noreply.github.com
f50d553f88129bfc29a4c1bc98e9a6ddfe0af18b
090bceb6c9418b39056f8aa0204051da621eef01
/app/views.py
b24b5d9864c45018044b7a0e75b6974701d0c3e8
[]
no_license
panasevychol/beetroot-test
627a1bb7b2935d908ed9b4da530ee77d21ae21fa
102b4dc1616f83038c5851da3f2c9dd83b8b2723
refs/heads/master
2021-05-01T22:42:43.723675
2016-12-30T13:13:51
2016-12-30T13:13:51
77,614,588
0
0
null
null
null
null
UTF-8
Python
false
false
275
py
import json
import sys
import time

from flask import render_template, request

from . import app
from .utils import find_games


@app.route('/')
def index():
    keywords = request.args.get('keywords', '')
    return render_template('index.html', games=find_games(keywords))
[ "panasevychol@gmail.com" ]
panasevychol@gmail.com
65069c192bdcfc8bf792f8d1e63112e0837c7ea7
708e17ad98f3143abaf811357883e680991d711f
/python3/happyNum.py
26195bfb19651e99a7333f4f60b484243ba43fcc
[]
no_license
yichuanma95/leetcode-solns
a363cc8e85f2e8cdd5d2cde6e976cd76d4c4ea93
6812253b90bdd5a35c6bfba8eac54da9be26d56c
refs/heads/master
2021-05-24T18:05:02.588481
2020-10-08T00:39:58
2020-10-08T00:39:58
253,690,413
2
0
null
null
null
null
UTF-8
Python
false
false
2,132
py
'''
Problem 202: Happy Number

Write an algorithm to determine if a number is "happy".

A happy number is a number defined by the following process: Starting with any
positive integer, replace the number by the sum of the squares of its digits,
and repeat the process until the number equals 1 (where it will stay), or it
loops endlessly in a cycle which does not include 1. Those numbers for which
this process ends in 1 are happy numbers.

Example:

Input: 19
Output: true
Explanation:
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1

Solution runtime: 24ms, faster than 99.77% of Python3 submissions
Solution memory usage: 12.7 MB, less than 100% of Python3 submissions
'''


class Solution:
    def isHappy(self, n: int) -> bool:
        '''
        (Solution, int) -> bool

        Returns True iff n is a "happy" number, which is a number that results
        in a 1 after a repetitive process of replacing the original number by
        the sum of digit squares.

        >>> soln = Solution()
        >>> soln.isHappy(19)
        True
        '''
        # This set will store all the unique sum of digit squares generated while
        # determining if n is "happy".
        unique_digit_square_sums = set()

        # Keep calculating the sum of digit squares until it's equal to 1, in this case
        # return True, or it already is in the set, in this case return False.
        while n not in unique_digit_square_sums:
            unique_digit_square_sums.add(n)
            n = self.sum_of_digit_squares(n)
            if n == 1:
                return True
        return False

    def sum_of_digit_squares(self, n):
        '''
        (Solution, int) -> int

        Calculates and returns the sum of squares of n's digits.

        >>> soln = Solution()
        >>> soln.sum_of_digit_squares(19)
        82
        >>> soln.sum_of_digit_squares(82)
        68
        '''
        digit_square_sum = 0
        while n > 0:
            digit_square_sum += ((n % 10) ** 2)
            n //= 10
        return digit_square_sum
[ "ma.yich@husky.neu.edu" ]
ma.yich@husky.neu.edu
5f6965f66911a55288b83b23515ceb2fe17157db
9303cc8be6a467be84ff03a1e476c299d7001077
/main.py
9e2c4a87e3f5afc2f68c3148a7bf9ada1678b59f
[ "MIT" ]
permissive
AuthFailed/nCoV-tgbot
8c5908983c7f299ae17f134756d87306e5c5acf4
d2ecea97b76b6d733d38573cce1a72b8c4a9868e
refs/heads/master
2022-09-04T23:04:32.702175
2022-08-25T00:40:00
2022-08-25T00:40:00
237,244,007
0
0
MIT
2020-01-30T17:14:49
2020-01-30T15:33:42
Python
UTF-8
Python
false
false
4,474
py
from aiogram import executor
from aiogram.types import *

import info_handler
import keyboard as kb
from config import dp, bot


@dp.message_handler(commands=["start"])
async def start_message(msg: Message):
    await msg.reply(
        text="Привет! Я отслеживаю статистику заражения 2019-nCoV.\n"
             "Используйте /menu чтобы получить всю информацию.")


@dp.message_handler(commands=["menu"])
async def menu_message(msg: Message):
    await msg.reply(
        text="Используйте *кнопки ниже*:",
        reply_markup=kb.main_menu(),
    )


@dp.callback_query_handler(lambda _call: True)
async def handle_callbacks(call: CallbackQuery):
    """Catch Telegram callbacks."""
    if call.data == "current_stats":
        info = info_handler.get_main_info()
        await call.message.edit_text(
            f"*Статистика 2019-nCoV*:\n\n"
            f"Зараженных ☣️: *{info['Infected']}*\n\n"
            f"На подозрении ❓: *{info['Possible']}*\n\n"
            f"На карантине ☢️: *{info['Quarantine']} ({info['Quarantined_Cities']} городов)\n\n*"
            f"Вылечившихся 💊: *{info['Recovered']}*\n\n"
            f"Смерти ☠️: *{info['Deaths']}*\n\n"
            f"_Смертность составляет {info['Death_Rate']}%_\n"
            f"Последнее обновление: *{info['Date']} MSK*",
            reply_markup=kb.main_menu(),
        )
        await call.answer()
    elif call.data == "quarantined_cities":
        table = info_handler.get_table_cities()
        answer_message = "*Города на карантине*\n(Город\t\t|\t\t дата закрытия\t\t|\t\tНаселение)__\n\n"
        for i in range(len(table) - 1):
            answer_message += f"{table[i][0]} - {table[i][1]} - {table[i][2]}\n"
        await call.message.edit_text(
            answer_message + "__",
            reply_markup=kb.main_menu())
        await call.answer()
    elif call.data == "disease_forecast":
        table = info_handler.disease_forecast()
        answer_message = "*Прогноз заражения по Китаю на ближайшие 5 дней:*\n\n" \
                         "*Дата* |\t\t\t*Кол-во инфицированных*\n"
        for i in range(len(table)):
            answer_message += f"{table[i][0]}\t\t\t|\t\t\t{table[i][1]}\n"
        answer_message = answer_message.replace("(Прогноз)", "`(Прогноз)`")
        await call.message.edit_text(
            answer_message + "\n\n_На основании данных статистики за последние 5 дней по Китаю (текущий день не учитывается)"
                             "\nСтатистика актуальна при среднем модификаторе заражения в 1.304180_",
            reply_markup=kb.main_menu())
        await call.answer()
    elif call.data == "back_to_home":
        await call.message.edit_text("Используйте *кнопки ниже*:",
                                     reply_markup=kb.main_menu())
        await call.answer()


@dp.inline_handler()
async def inline_stats(inline_query: InlineQuery):
    info = info_handler.get_main_info()
    text = (f"*Статистика 2019-nCoV*:\n\n"
            f"Зараженных ☣️: *{info['Infected']}*\n\n"
            f"На подозрении ❓: *{info['Possible']}*\n\n"
            f"На карантине ☢️: *{info['Quarantine']} ({info['Quarantined_Cities']} городов)\n\n*"
            f"Вылечившихся 💊: *{info['Recovered']}*\n\n"
            f"Смерти ☠️: *{info['Deaths']}*\n\n"
            f"_Смертность составляет {info['Death_Rate']}%_\n"
            f"Последнее обновление: *{info['Date']} MSK*")
    input_content = InputTextMessageContent(text)
    item = InlineQueryResultArticle(
        id="1",
        title="2019-nCoV stats",
        input_message_content=input_content
    )
    await bot.answer_inline_query(inline_query.id, results=[item], cache_time=1)


# @dp.errors_handler()
# async def error_handler():


if __name__ == "__main__":
    executor.start_polling(dp, skip_updates=True)
[ "lenz1e973nyro" ]
lenz1e973nyro
b3c4bd9dc92f583c4160e397ad5aca581ce33ed0
a14e3faea802cbe20e0c65995bf67b84c41bf0f4
/tests/test_car_generator.py
7f6bf58e5bcc413c4cd1624b849d2bdd5335d003
[ "MIT" ]
permissive
DrimTim32/py_proj_lights
aafdc4b1a0d8de8926c56f92682a9058b3b92db7
a056e7292b0b81db95316d5d0f517c69a0d473e8
refs/heads/master
2020-07-29T00:37:29.021483
2017-02-07T15:51:09
2017-02-07T15:51:09
73,689,047
0
0
MIT
2020-07-14T19:00:44
2016-11-14T09:35:24
Python
UTF-8
Python
false
false
769
py
"""This file contains tests for car generator""" import sys from simulation import Directions, TurnDirection from simulation.generators import CarProperGenerator if "core" not in sys.path[0]: sys.path.insert(0, 'core') def test_lights_generator(): prob = {Directions.TOP: [[0, 0, 0]], Directions.BOTTOM: [[0, 0, 0], [1, 0, 0]], Directions.RIGHT: [[0, 1, 0]], Directions.LEFT: [[0, 0, 1]]} lg = CarProperGenerator(prob) assert lg.generate(Directions.TOP, 0) is None assert lg.generate(Directions.BOTTOM, 1).turn_direction == TurnDirection.RIGHT assert lg.generate(Directions.RIGHT, 0).turn_direction == TurnDirection.STRAIGHT assert lg.generate(Directions.LEFT, 0).turn_direction == TurnDirection.LEFT
[ "barteks95@gmail.com" ]
barteks95@gmail.com
564f8f9e85d4c8a6057469a98f58669f1dfe7534
ae22eebfadfdeb33f5c972702a92be266248c5f7
/Project2_Flask/main_functions.py
a1d535862e0995209a154d4e27cb6ac53a887988
[]
no_license
ecaru003/COP4813_Project2
964831ad9a50634dbaf0b2a397a18b3a76316b63
2cfc87d373340c36de11bb7c856addf4dcb905bc
refs/heads/master
2023-07-24T12:58:28.901004
2021-09-01T21:33:38
2021-09-01T21:33:38
315,692,275
0
0
null
null
null
null
UTF-8
Python
false
false
390
py
import json


def read_from_file(file_name):
    with open(file_name, "r") as read_file:
        data = json.load(read_file)
    print("You successfully read from {}.".format(file_name))
    return data


def save_to_file(data, file_name):
    with open(file_name, "w") as write_file:
        json.dump(data, write_file, indent=2)
    print("You successfully saved to {}.".format(file_name))
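# A minimal round-trip sketch of the two helpers above (hypothetical file
# name, not part of the original module):
#
#   save_to_file({"a": 1}, "demo.json")
#   assert read_from_file("demo.json") == {"a": 1}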
[ "ecaru003@fiu.edu" ]
ecaru003@fiu.edu
566302b568f0103bd3c6c2d54e6988ac6dd06f4b
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
/JD9vSKZGrxQhLbA9r_11.py
8153c6d8cc99992256ea1d82f8771cd6328f44f3
[]
no_license
daniel-reich/ubiquitous-fiesta
26e80f0082f8589e51d359ce7953117a3da7d38c
9af2700dbe59284f5697e612491499841a6c126f
refs/heads/master
2023-04-05T06:40:37.328213
2021-04-06T20:17:44
2021-04-06T20:17:44
355,318,759
0
0
null
null
null
null
UTF-8
Python
false
false
255
py
def pile_of_cubes(m):
    if m >= 10252519345963644753026:
        return None
    x = m**0.5
    if (x % 1 == 0):
        c = 1
        while (x != c and x > 0):
            x = x - c
            c = c + 1
        if (x == c):
            return c
    return None
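# Why this works (added note): the sum of the first n cubes is (n*(n+1)/2)**2,
# so m has a solution only when sqrt(m) is a triangular number; the loop peels
# off 1, 2, 3, ... from sqrt(m) to recover n. A small sketch of the expected
# behaviour, assuming inputs in the style this challenge uses:
#
#   pile_of_cubes(1071225)  # -> 45, since 45*46/2 = 1035 and 1035**2 = 1071225
#   pile_of_cubes(5)        # -> None, sqrt(5) is not an integer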
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
83a5e8277279567beb43b9117f28f6b87142acf6
9f1a165798a13b4fd24b94d23eb137a6763a1bed
/tickets/migrations/0001_initial_squashed_0006_auto_20200610_1403.py
36b5098cc5ffd8d4476caa47cb08b33bf448b406
[ "MIT" ]
permissive
AdamCottrill/ticket_tracker
42455ed9e4b0439df08694b0f73713163aace68a
72fad3cf9c0e7f44ca62650a2338a5ac7696bcbf
refs/heads/master
2023-03-04T11:15:55.097923
2022-08-25T20:10:46
2022-08-25T20:10:46
198,422,697
1
3
MIT
2023-02-15T18:25:54
2019-07-23T12:07:00
Python
UTF-8
Python
false
false
9,882
py
# Generated by Django 2.2.13 on 2020-06-10 18:13

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import taggit.managers


class Migration(migrations.Migration):

    replaces = [
        ("tickets", "0001_initial"),
        ("tickets", "0002_auto_20190209_2214"),
        ("tickets", "0003_auto_20190210_1052"),
        ("tickets", "0004_auto_20190210_1942"),
        ("tickets", "0005_auto_20190723_1134"),
        ("tickets", "0006_auto_20200610_1403"),
    ]

    dependencies = [
        ("taggit", "0003_taggeditem_add_unique_index"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Application",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("application", models.CharField(max_length=20)),
                ("slug", models.SlugField(editable=False, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name="Ticket",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("active", models.BooleanField(default=True)),
                ("status", models.CharField(choices=[("new", "New"), ("accepted", "Accepted"), ("assigned", "Assigned"), ("re-opened", "Re-Opened"), ("closed", "Closed"), ("duplicate", "Closed - Duplicate"), ("split", "Closed - Split")], db_index=True, default=True, max_length=20)),
                ("ticket_type", models.CharField(choices=[("feature", "Feature Request"), ("bug", "Bug Report"), ("task", "Task")], db_index=True, default=True, max_length=10)),
                ("title", models.CharField(max_length=80)),
                ("description", models.TextField()),
                ("description_html", models.TextField(blank=True, editable=False)),
                ("priority", models.IntegerField(choices=[(1, "Critical"), (2, "High"), (3, "Normal"), (4, "Low"), (5, "Very Low")], db_index=True)),
                ("created_on", models.DateTimeField(auto_now_add=True, verbose_name="date created")),
                ("updated_on", models.DateTimeField(auto_now=True, verbose_name="date updated")),
                ("votes", models.IntegerField(default=0)),
                ("application", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="tickets.Application")),
                ("assigned_to", models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="assigned_tickets", to=settings.AUTH_USER_MODEL)),
                ("parent", models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket")),
                ("submitted_by", models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name="submitted_tickets", to=settings.AUTH_USER_MODEL)),
                ("tags", taggit.managers.TaggableManager(blank=True, help_text="A comma-separated list of tags.", through="taggit.TaggedItem", to="taggit.Tag", verbose_name="Tags")),
            ],
            options={
                "ordering": ["-created_on"],
            },
            managers=[
                ("all_tickets", django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name="TicketDuplicate",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("original", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name="original", to="tickets.Ticket")),
                ("ticket", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name="duplicate", to="tickets.Ticket")),
            ],
        ),
        migrations.CreateModel(
            name="UserVoteLog",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("ticket", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket")),
                ("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name="FollowUp",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("created_on", models.DateTimeField(auto_now_add=True, verbose_name="date created")),
                ("comment", models.TextField()),
                ("comment_html", models.TextField(blank=True, editable=False)),
                ("action", models.CharField(choices=[("no_action", "No Action"), ("closed", "Closed"), ("re-opened", "Re-Opened"), ("split", "Split")], db_index=True, default="no_action", max_length=20)),
                ("private", models.BooleanField(default=False)),
                ("parent", models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="tickets.FollowUp")),
                ("submitted_by", models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ("ticket", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="tickets.Ticket")),
            ],
        ),
    ]
[ "adam.cottrill@ontario.ca" ]
adam.cottrill@ontario.ca
7da8e44c7b81b0928a7aa944b72042d967acb70c
34f3d3c01a29b05e58d7dccca2ac5776e2324d0f
/files/zipModule.py
c6eb11ca46c953549e13a40ae56467d84e0acd7d
[]
no_license
nethirangasai/pythonpgms
d50c485c7f13ba0bdd78b79508d4792caf5e7a20
c0bfddfea95b22e32cfa53ee8b531b6535b1df42
refs/heads/master
2020-05-27T09:55:00.094520
2019-05-26T07:56:01
2019-05-26T07:56:01
188,574,442
0
0
null
null
null
null
UTF-8
Python
false
false
165
py
from zipfile import ZipFile, ZIP_DEFLATED

f = ZipFile('files.zip', 'w', ZIP_DEFLATED)
f.write('csvReading.py')
f.write('csvWriting.py')
f.write('students.csv')
f.close()
[ "rangasai.nethi@gmail.com" ]
rangasai.nethi@gmail.com
a7f1b70b6ba4951bee8aab80789e69f1581b33d1
c6bf1b52dce9eff35a91f261aa3c33f83c887d3a
/bai 4.15.py
63f32d74a7b6c67b8ee870d15e735a7cfe4a8ca7
[]
no_license
bachdinhthang59ktdh/b-i-t-p-ktlt-tr-n-l-p
bfc88fe8a97a0524680d1063daa8d5283a38f8e1
7500173e45d0ac032d8657c82e53742de43f1b15
refs/heads/master
2022-08-31T22:55:29.845869
2020-05-25T06:22:38
2020-05-25T06:22:38
262,918,963
0
0
null
null
null
null
UTF-8
Python
false
false
92
py
s = input('nhap chuoi s').split()
s.sort()
for h in s:
    print(h)
[ "noreply@github.com" ]
bachdinhthang59ktdh.noreply@github.com
d292bf9b5228884b9307bbd114fbf6aae0eda93e
19b2856c718dab5380d381053c0f1d664faeab53
/Login/migrations/0001_initial.py
56a3b97852967f2bab2bfc9395c58579d0fbc9da
[]
no_license
ywl1584/ywl1584.GraduationProject.io
7f62c50c939274039f304ccee378345fd083a2bf
18d6b1d199d3ba56ebee8de1c5551e01c7ab5bd5
refs/heads/master
2020-04-25T02:22:49.021355
2019-02-25T04:59:27
2019-02-25T04:59:27
172,437,057
0
0
null
null
null
null
UTF-8
Python
false
false
992
py
# Generated by Django 2.1.2 on 2018-10-28 07:42

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                ('password', models.CharField(max_length=256)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
                ('c_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'ordering': ['c_time'],
            },
        ),
    ]
[ "your email address1270834936@qq.com" ]
your email address1270834936@qq.com
b48dcc67a5875823dc15b6cb4f7142b0cdc08af1
64cea21dc4834cc876b6788f4cb8572982d2f60a
/product_pricelist_report_qweb/tests/common.py
34ce1ca81ed41750f6ce505f83679205947fda18
[]
no_license
yelizariev/addons-vauxoo
708463f847a75898d99fd8c2045d20ab9083b703
511dc410b4eba1f8ea939c6af02a5adea5122c92
refs/heads/8.0
2020-12-11T09:04:04.912471
2016-03-17T06:00:36
2016-03-17T06:00:36
53,125,976
3
2
null
2016-03-04T10:01:48
2016-03-04T10:01:48
null
UTF-8
Python
false
false
1,707
py
# coding: utf-8
##############################################################################
#    Module Written to ODOO, Open Source Management Solution
#
#    Copyright (c) 2015 Vauxoo - http://www.vauxoo.com/
#    All Rights Reserved.
#    info Vauxoo (info@vauxoo.com)
##############################################################################
#    Coded by: Luis Torres (luis_t@vauxoo.com)
##############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tests import common
import logging

_logger = logging.getLogger(__name__)


class TestXLSProductCommon(common.TransactionCase):

    def setUp(self):
        super(TestXLSProductCommon, self).setUp()
        self.product_price_obj = self.env['product.price_list']
        self.attachment_obj = self.env['ir.attachment']
        self.price_list_id = self.ref('product.list0')
        self.product = self.env.ref('product.product_product_7').copy()
[ "hbto@vauxoo.com" ]
hbto@vauxoo.com
32dab3a9805a876cadd1c98c55ad23f5d16cff81
2a58920968814b87ee93decf2b887747dbb56c12
/helpers/create_module/find_path.py
d81d21182ecc2c75dade8d606fbc7d80fa6d75c5
[]
no_license
chrysa/gae-toolbox-2
5e52b2c2ce66358feb82bdd078d6b9ab9f08da2e
b666567359888ff29d2c3dddb0453b762a65d75a
refs/heads/master
2020-03-29T20:53:11.495776
2014-04-16T12:10:17
2014-04-16T12:10:17
15,781,217
0
0
null
null
null
null
UTF-8
Python
false
false
1,370
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# disco-toolbox-2.helpers.find_path -- builds the relative path of the currently running script

import os


def find_path(module_type, mod_folder):
    """Build the relative path of the currently running script.

    :param module_type: module type
    :param mod_folder: name of the folder containing the admin and front modules
    :returns: the relative path of the script
    :rtype: string
    """
    # isolate the last 7 characters to tell whether the script was called from the install folder
    test = os.getcwd()[len(os.getcwd()) - 7:len(os.getcwd())]
    if module_type == 1:
        if test == 'install' or test == 'helpers':
            path_folder = os.getcwd()[0:len(os.getcwd()) - 8] + os.sep + 'src' + os.sep + mod_folder + os.sep + 'admin'
        else:
            path_folder = os.getcwd() + os.sep + 'src' + os.sep + mod_folder + os.sep + 'admin'
    else:
        if test == 'install' or test == 'helpers':
            path_folder = os.getcwd()[0:len(os.getcwd()) - 8] + os.sep + 'src' + os.sep + mod_folder + os.sep + 'front'
        else:
            path_folder = os.getcwd() + os.sep + 'src' + os.sep + mod_folder + os.sep + 'front'
    return path_folder
[ "agreau@student.42.fr" ]
agreau@student.42.fr
c9062fbe8e75b4749ea59e439897d1de93808c00
a88ac040aa274d94ac8decbbf43a585af56cf825
/src/perftest.py
d8bb9fedffb0ea4c58157bde543ff3c510c1343f
[]
no_license
s7evinkelevra/Agent-Model
6dd0544326502c00572db2c2f4cf9785092e9ef3
f25cde7190736778dbf0d0a5a45fa3a3f3f1efc3
refs/heads/master
2023-08-13T02:26:04.232434
2021-09-30T12:24:50
2021-09-30T12:24:50
403,736,037
0
0
null
null
null
null
UTF-8
Python
false
false
4,984
py
from Bio.Seq import Seq
from matplotlib.ticker import LinearLocator
from matplotlib import cm
import random
from pprint import pprint
import itertools
from collections import deque
import uuid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time

rng = np.random.default_rng()


# Random base sequence of length
def randomDNAseq(length):
    return ''.join(random.choice('GCAT') for _ in range(length))


# Random proteinogenic amino acids sequence of length
def randomASseq(length):
    return ''.join(random.choice('ACDEFGHIKLMNOPQRSTUVWY') for _ in range(length))


# Random bitstring
def randomBitseq(length):
    return ''.join(random.choice('01') for _ in range(length))


# Generate allele with unique id and random position in peptide space
def randomPSallele(peptide_space_length):
    return {
        "x": rng.integers(low=1, high=peptide_space_length),
        "y": rng.integers(low=1, high=peptide_space_length),
        "id": uuid.uuid4()
    }


def sliding_window_iter(seq, width):
    it = iter(seq)
    result = tuple(itertools.islice(it, width))
    if len(result) == width:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result


# Sliding window iterator over sequence seq and of window width of n
def window(seq, n=2):
    it = iter(seq)
    win = deque((next(it, None) for _ in range(n)), maxlen=n)
    yield win
    append = win.append
    for e in it:
        append(e)
        yield win


start_time = time.perf_counter_ns()

peptide_space_length = 1000

host_n = 10000
host_allele_initial_n = 150
host_allele_length = 9
host_fitness_initial = 1
host_fitness_increment = 0.2
host_species_n = 1

pathogen_n = 100000
pathogen_haplotype_initial_n = 400
pathogen_haplotype_length = 100
pathogen_fitness_initial = 1
pathogen_fitness_increment = 1
pathogen_species_n = 1

host_allele_pool = [[randomPSallele(peptide_space_length) for _ in range(
    host_allele_initial_n)] for _ in range(host_species_n)]


def generateHost():
    species = random.choice(range(host_species_n))
    allele_1_data = random.choice(host_allele_pool[species])
    allele_2_data = random.choice(host_allele_pool[species])
    return {
        "species": species,
        "fitness": host_fitness_initial,
        "allele_1_id": allele_1_data["id"],
        "allele_1_x": allele_1_data["x"],
        "allele_1_y": allele_1_data["y"],
        "allele_2_id": allele_2_data["id"],
        "allele_2_x": allele_2_data["x"],
        "allele_2_y": allele_2_data["y"]
    }


host_data = [generateHost() for _ in range(host_n)]
hosts = pd.DataFrame(host_data)

pathogen_haplotype_pool = [[randomPSallele(peptide_space_length) for _ in range(
    pathogen_haplotype_initial_n)] for _ in range(pathogen_species_n)]


def generatePathogen():
    species = random.choice(range(pathogen_species_n))
    haplotype = random.choice(pathogen_haplotype_pool[species])
    return {
        "species": species,
        "fitness": pathogen_fitness_initial,
        "haplotype_id": haplotype["id"],
        "haplotype_x": haplotype["x"],
        "haplotype_y": haplotype["y"]
    }


pathogen_data = [generatePathogen() for _ in range(pathogen_n)]
pathogens = pd.DataFrame(pathogen_data)

print(f'host count - {len(hosts)}')
print(f'host allele count (unique) - {len(hosts.allele_1_id.unique())}')
print(f'pathogen count - {len(pathogens)}')
print(f'pathogen haplotype count (unique) - {len(pathogens.haplotype_id.unique())}')

sim_gen_n = 10000
sim_logging_interval = 50
sim_allele_subsample_n = 100


def uniqueAlleleCount():
    print("yeee")
    """
    print(hosts[['allele_1_id', 'allele_2_id']].value_counts())
    print(hosts[['allele_1_id', 'allele_2_id']].values.ravel('K'))
    print(len(pd.unique(hosts[['allele_1_id', 'allele_2_id']].values.ravel('K'))))
    host_allele_all = hosts[['allele_1_id', 'allele_2_id']].values.ravel('K')
    unique, counts = np.unique(host_allele_all, return_counts=True)
    # print(np.asarray((unique,counts)).T)
    print(counts)
    plt.bar([str(i)[10:15] for i in unique], counts)
    """


def eucDist(x0, y0, x1, y1):
    dX = x1 - x0
    dY = y1 - y0
    return np.sqrt(dX*dX + dY*dY)


def infect(host):
    infecting_pathogen = pathogens.sample()
    dist1 = eucDist(host["allele_1_x"], host["allele_1_y"],
                    infecting_pathogen["haplotype_x"], infecting_pathogen["haplotype_y"])
    dist2 = eucDist(host["allele_2_x"], host["allele_2_y"],
                    infecting_pathogen["haplotype_x"], infecting_pathogen["haplotype_y"])
    min_dist = np.min([dist1, dist2])
    if (min_dist < 200):
        return host["fitness"] - host_fitness_increment
    else:
        return host["fitness"]


"""
for i in range(sim_gen_n):
    # log every sim_logging_interval'th generation
    if (i % sim_logging_interval == 0):
        print("logging data")
    # infection regieme
    ## each host is infected between 1 and n times
    infecting_pathogen_species = 0
    hosts["fitness"] = hosts.apply(infect, axis=1)
    print(hosts)
    break
"""

end_time = time.perf_counter_ns()
print((end_time - start_time) / 1000)
[ "kelevra.1337@gmail.com" ]
kelevra.1337@gmail.com
57d8840f3ae45365005e9730310b3b9956021a54
eace995a65e1029cfb88c9a2764a831717b7b4cb
/rpn.py
30dcecb9a78d6373463dd42462d47f1d69f267b8
[ "MIT" ]
permissive
HoangTuan110/rpn-calc
010115637c80417aefa088db04532c602ad0810e
8418999cd039cb0f63b828844e34b291e768533b
refs/heads/main
2023-04-08T13:06:35.181722
2021-04-11T11:27:06
2021-04-11T11:27:06
356,848,524
1
0
null
null
null
null
UTF-8
Python
false
false
1,334
py
""" This is a simple RPN (Reverse Polish Notation) calculator written in Python. It may be quite slow, but I don't care lol. """ def calc(code): # Variables tokens = code.split(" ") stack = [] ops = "+-*/" result = "" # Helper functions push = lambda n: stack.append(n) pop = lambda: stack.pop() is_number = lambda ipt: ipt.isnumeric() is_hex = lambda ipt: "x" in ipt is_binary = lambda ipt: "b" in ipt # Main part for token in tokens: if is_number(token) or is_hex(token) or is_binary(token): push(eval(token)) if len(result) == 0: result += f"{token}" # This is to avoid the case that user put extra spaces at the end # or the start of the input elif token == "": continue elif token in ops: op1, op2 = stack.pop(), stack.pop() # Since 'result' have the first value ('op1') already in them, # so we don't need to add it twice. result += f" {token} {op2}" push(eval(f"{op1} {token} {op2}")) else: print(f"Illegal character: {token}") break print(eval(result)) def repl(): while True: calc(input(">> ")) repl()
[ "noreply@github.com" ]
HoangTuan110.noreply@github.com
9eb23f2fb0bdb9407531c0cc21444f0cba5aaead
aa1b98be1dabf14752750999b35aec8d819122fe
/utils.py
382c10d19fc66a245748c89531951d5c14186ced
[]
no_license
tevonsb/a5
8fe8df7461c8515b649e3d3b601befc968c694d1
8d183228ed280582c45dba589f413405a49a49c4
refs/heads/master
2020-04-25T00:42:54.714606
2019-02-24T20:33:58
2019-02-24T20:33:58
172,386,884
0
0
null
null
null
null
UTF-8
Python
false
false
4,328
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
CS224N 2018-19: Homework 5
nmt.py: NMT Model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
"""

import math
from typing import List

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def pad_sents_char(sents, char_pad_token):
    """ Pad list of sentences according to the longest sentence in the batch and max_word_length.
    @param sents (list[list[list[int]]]): list of sentences, result of `words2charindices()`
        from `vocab.py`
    @param char_pad_token (int): index of the character-padding token
    @returns sents_padded (list[list[list[int]]]): list of sentences where sentences/words shorter
        than the max length sentence/word are padded out with the appropriate pad token, such that
        each sentence in the batch now has same number of words and each word has an equal
        number of characters
        Output shape: (batch_size, max_sentence_length, max_word_length)
    """
    # Words longer than 21 characters should be truncated
    max_word_length = 21

    ### YOUR CODE HERE for part 1f
    ### TODO:
    ###     Perform necessary padding to the sentences in the batch similar to the pad_sents()
    ###     method below using the padding character from the arguments. You should ensure all
    ###     sentences have the same number of words and each word has the same number of
    ###     characters.
    ###     Set padding words to a `max_word_length` sized vector of padding characters.
    ###
    ###     You should NOT use the method `pad_sents()` below because of the way it handles
    ###     padding and unknown words.
    max_sentence_length = max([len(sent) for sent in sents])
    pad_word = [char_pad_token for x in range(max_word_length)]
    sents_padded = [[word + [char_pad_token for x in range(max_word_length - len(word))]
                     for word in sent] for sent in sents]
    sents_padded = [sent + [pad_word for x in range(max_sentence_length - len(sent))]
                    for sent in sents_padded]
    ### END YOUR CODE

    return sents_padded


def pad_sents(sents, pad_token):
    """ Pad list of sentences according to the longest sentence in the batch.
    @param sents (list[list[int]]): list of sentences, where each sentence
        is represented as a list of words
    @param pad_token (int): padding token
    @returns sents_padded (list[list[int]]): list of sentences where sentences shorter
        than the max length sentence are padded out with the pad_token, such that
        each sentences in the batch now has equal length.
        Output shape: (batch_size, max_sentence_length)
    """
    sents_padded = []
    max_len = max(len(s) for s in sents)
    batch_size = len(sents)

    for s in sents:
        padded = [pad_token] * max_len
        padded[:len(s)] = s
        sents_padded.append(padded)

    return sents_padded


def read_corpus(file_path, source):
    """ Read file, where each sentence is delineated by a `\n`.
    @param file_path (str): path to file containing corpus
    @param source (str): "tgt" or "src" indicating whether text
        is of the source language or target language
    """
    data = []
    for line in open(file_path):
        sent = line.strip().split(' ')
        # only append <s> and </s> to the target sentence
        if source == 'tgt':
            sent = ['<s>'] + sent + ['</s>']
        data.append(sent)

    return data


def batch_iter(data, batch_size, shuffle=False):
    """ Yield batches of source and target sentences reverse sorted by length (largest to smallest).
    @param data (list of (src_sent, tgt_sent)): list of tuples containing source and target sentence
    @param batch_size (int): batch size
    @param shuffle (boolean): whether to randomly shuffle the dataset
    """
    batch_num = math.ceil(len(data) / batch_size)
    index_array = list(range(len(data)))

    if shuffle:
        np.random.shuffle(index_array)

    for i in range(batch_num):
        indices = index_array[i * batch_size: (i + 1) * batch_size]
        examples = [data[idx] for idx in indices]

        examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)
        src_sents = [e[0] for e in examples]
        tgt_sents = [e[1] for e in examples]

        yield src_sents, tgt_sents
[ "tevon.strandbrown@gmail.com" ]
tevon.strandbrown@gmail.com
338f0fba5917e4ae0b096d9a4b4b41e5389d4123
05e2452e154806455d2d829466055f0ac8a11f92
/Name/wsgi.py
64efb28a6a226a33c4fe67a9c9bcc6ede1cd3dee
[]
no_license
WesamAlmasri/Translator
35a295ca8aa2ded1ccc315e19494201475491cf4
875a324a4cb7a75c7b80f51ba420c3efc2306092
refs/heads/main
2023-04-03T03:48:56.830044
2021-04-03T13:20:30
2021-04-03T13:20:30
353,406,814
0
0
null
null
null
null
UTF-8
Python
false
false
401
py
""" WSGI config for Name project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Name.settings') application = get_wsgi_application()
[ "mr0virus@gmail.com" ]
mr0virus@gmail.com
029e8d41228f8d09c6e0cb103693dbf48021707d
eb008a137a8da49d48985240bea8c29e0966293a
/tools/config.py
5d5a6fcb30acfb04f0bf90925fd32b94d98ea154
[]
no_license
Kukushenok/GunParty
4a5f7de407b68061c46cc645658b11cba3edd2d8
acac4ea8bd80ec9101a8a2f64a08f594f0edf31c
refs/heads/master
2021-05-12T11:09:17.202229
2018-02-24T18:32:15
2018-02-24T18:32:15
117,379,627
0
0
null
null
null
null
UTF-8
Python
false
false
1,190
py
import configparser
import os

import pygame


class Config():
    def __init__(self, defaultPath=None, path=None):
        self.defaultPath = defaultPath
        self.config = configparser.ConfigParser()
        self.defaultConfig = configparser.ConfigParser()
        if path:
            self.config.read(os.path.join(path, 'config.ini'))
            if self.defaultPath:
                self.defaultConfig.read(os.path.join(self.defaultPath, 'default_config.ini'))
        else:
            self.config.read('config.ini')
            if self.defaultPath:
                self.defaultConfig.read(os.path.join(self.defaultPath, 'default_config.ini'))

    def get(self, item):
        try:
            return self.config["SETTINGS"][item]
        except KeyError:
            return self.defaultConfig["SETTINGS"][item]

    def getAsDict(self, item):
        toDict = ""
        try:
            toDict = self.config["SETTINGS"][item]
        except KeyError:
            toDict = self.defaultConfig["SETTINGS"][item]
        dictPairs = toDict.split(",")
        resDict = {}
        for e in dictPairs:
            splittedE = e.split(":")
            exec("resDict[" + splittedE[0] + "] = " + splittedE[1])
        return resDict
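# A hypothetical example of the value format getAsDict() expects (inferred
# from the exec() string it builds; not part of the original file): both
# sides of each "key:value" pair must be valid Python literals.
#
#   [SETTINGS]
#   colors = 1:'red',2:'blue'
#
#   Config().getAsDict('colors')  # -> {1: 'red', 2: 'blue'}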
[ "mrcoolmoder@gmail.com" ]
mrcoolmoder@gmail.com
b9862bab92c4aa791fbc0851e03b13c965d9dff8
8ee86008310da9954e3c200dd4711d295d449329
/blog/urls.py
0e0724f937f23bacaf9bde8904f4b9f53f37edd2
[]
no_license
madp3e/Blog
846fef127330b9f600c7b0c15a080efb5de4a148
1379041c68c6e4045d25a5f1bf9ff325457788e7
refs/heads/master
2022-11-26T22:39:36.391205
2019-12-12T11:47:25
2019-12-12T11:47:25
227,589,494
0
0
null
2022-11-22T04:37:45
2019-12-12T11:19:41
Python
UTF-8
Python
false
false
774
py
from django.urls import path

from . import views
from .views import (PostListView,
                    PostDetailView,
                    PostCreateView,
                    PostUpdateView,
                    PostDeleteView,
                    UserPostListView)

urlpatterns = [
    path("", PostListView.as_view(), name="blog-home"),
    path("post/<int:pk>/", PostDetailView.as_view(), name="post-detail"),
    path("post/<int:pk>/update", PostUpdateView.as_view(), name="post-update"),
    path("post/<int:pk>/delete", PostDeleteView.as_view(), name="post-delete"),
    path("post/new/", PostCreateView.as_view(), name="post-create"),
    path("about/", views.about, name="blog-about"),
    path("user/<str:username>/", UserPostListView.as_view(), name="user-posts")
]
[ "ahmadfaizuddin17@gmail.com" ]
ahmadfaizuddin17@gmail.com
01056432f916ec5052c06f42038020cc0f7a42d4
27b2cee1701a2e3073ecf020065f697c5b145de0
/txboto/auth_handler.py
86da5f1288ca2ecd9647ca8feb619d35631317b4
[ "ADSL", "BSD-3-Clause" ]
permissive
2mf/txboto
25209b2d5c465ca093581dda281ae65e3e17103e
3ecc5c5e86b650edc6c3b42064a07d42faa210e4
refs/heads/master
2020-04-25T08:40:47.640350
2017-02-22T11:38:21
2017-02-22T11:38:21
45,603,488
0
0
null
null
null
null
UTF-8
Python
false
false
2,618
py
# Copyright 2010 Google Inc.
# Copyright (c) 2015 Silver Egg Technology, Co., Ltd.
# Copyright (c) 2015 Michael Franke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation and/or
#    other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software without
#    specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.

"""
Defines an interface which all Auth handlers need to implement.
"""

from txboto.plugin import Plugin


class NotReadyToAuthenticate(Exception):
    pass


class AuthHandler(Plugin):

    capability = []

    def __init__(self, host, config, provider):
        """Constructs the handlers.

        :type host: string
        :param host: The host to which the request is being sent.

        :type config: txboto.pyami.Config
        :param config: TxBoto configuration.

        :type provider: txboto.provider.Provider
        :param provider: Provider details.

        Raises:
            NotReadyToAuthenticate: if this handler is not willing to
                authenticate for the given provider and config.
        """
        pass

    def add_auth(self, http_request):
        """Invoked to add authentication details to request.

        :type http_request: txboto.connection.HTTPRequest
        :param http_request: HTTP request that needs to be authenticated.
        """
        pass
[ "mf33456@gmail.com" ]
mf33456@gmail.com
3a829b2c788daa3d8a5b5cdfa4c5b6ccd3daabd7
fefa88dd63533ed36ec4f86c029b5d9a00a3ad82
/monapi/serializers.py
987d80eaf5e66ca2d97f70f100a17e9b7334545c
[]
no_license
jeremyguiller/Api-mairie
f8fee21610acfb2ec20fdb761d5cb854a82480e5
7f1a6173e5ef0c25f2971f8a7e41adf8e88b8d8c
refs/heads/master
2023-04-06T15:33:48.750300
2021-04-26T11:44:03
2021-04-26T11:44:03
361,729,314
0
0
null
null
null
null
UTF-8
Python
false
false
763
py
from rest_framework import serializers, fields

from .models import Location, Texte, Image, Administrateur


class Locationserializer(serializers.HyperlinkedModelSerializer):
    date = serializers.DateTimeField()

    class Meta:
        model = Location
        fields = ('date', 'name', 'confirmer')


class AdministrateurSerializers(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Administrateur
        fields = ('name', 'email', 'mdp')


class TexteSerializers(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Texte
        fields = ('intitule', 'texte')


class ImageSerializers(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Image
        fields = ('description', 'image')
[ "guillerjeremy@gmail.com" ]
guillerjeremy@gmail.com
fa76acace0c4cd47c3cdb6b96aa8b5eed60ae7bf
8a41ef3e60355b867116754444d3b844721b7ff9
/how2pizza/pizza/admin.py
ea329c4b7c6a2a8d549786f75503dd73fe4627be
[ "MIT" ]
permissive
ianonavy/how2pizza
12cc99b1f8adc6aa5513d396cb67ecb62039554e
ebac7b0cd2ea3be851eddb3fe221c11d1a2a426a
refs/heads/master
2021-01-23T22:07:46.822089
2015-05-28T05:55:59
2015-05-28T05:55:59
31,512,638
0
0
null
null
null
null
UTF-8
Python
false
false
392
py
from django.contrib import admin

from pizza.models import PizzaOrder, PizzaOrderUserChoice, PizzaType


@admin.register(PizzaOrder)
class PizzaOrderAdmin(admin.ModelAdmin):
    list_display = ('id', 'created_at')


@admin.register(PizzaOrderUserChoice)
class PizzaOrderUserChoiceAdmin(admin.ModelAdmin):
    pass


@admin.register(PizzaType)
class PizzaTypeAdmin(admin.ModelAdmin):
    pass
[ "ianonavy@gmail.com" ]
ianonavy@gmail.com
7421e6059aeff1e3016934fea7f9e2910344351e
83648babb83497ff162ccfa6104c1f09029bcb37
/local_global.py
aa1da55f52fde40d694f0c3e6e2fb5b0626ebf22
[]
no_license
seeni-eldho/pythonProgram
aeeb5ec559049feb4d331b3a40e09f21f9b799b2
3361c4673d85e0bfb0df93414c573bdd3a4944b0
refs/heads/master
2023-08-07T17:54:24.405327
2021-09-22T09:40:04
2021-09-22T09:40:04
402,530,682
0
1
null
null
null
null
UTF-8
Python
false
false
82
py
x = 5

def foo():
    global y
    y = 7
    print('local', y)

foo()
print('local', y)
[ "seenieldho85@gmail.com" ]
seenieldho85@gmail.com
05f1c23936d977e70fdef1e44fc27ab9f069cadf
55647a80c8b412af9df0ba3f50595cc2f29c25e6
/res/scripts/common/Lib/encodings/gbk.py
4b4a46dcbfdea9c2f98724c76a52405e54febf9c
[]
no_license
cnsuhao/WOT-0.9.17-CT
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
d1f932d8cabaf8aa21708622e87f83c8d24d6451
refs/heads/master
2021-06-08T18:11:07.039293
2016-11-19T19:12:37
2016-11-19T19:12:37
null
0
0
null
null
null
null
WINDOWS-1250
Python
false
false
1,136
py
# 2016.11.19 19:58:56 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/encodings/gbk.py
import _codecs_cn, codecs
import _multibytecodec as mbc

codec = _codecs_cn.getcodec('gbk')


class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    return codecs.CodecInfo(name='gbk',
                            encode=Codec().encode,
                            decode=Codec().decode,
                            incrementalencoder=IncrementalEncoder,
                            incrementaldecoder=IncrementalDecoder,
                            streamreader=StreamReader,
                            streamwriter=StreamWriter)


# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\encodings\gbk.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:58:56 Střední Evropa (běžný čas)
[ "info@webium.sk" ]
info@webium.sk
c8e453ae1f4aa67ae58b7f6d6dd39e2b6c2afb3d
0367d2c25de1584fd064522e9b9efc8fa52d1478
/odd_eve_list.py
c97aec023602a1865225836fc042828587cb288f
[]
no_license
sk013/Python_Basic_Programs
9d69698f28246f6787c695e20d5b2b4a45417019
c44ed384e8185261ef4fd715694362269837d6c8
refs/heads/main
2023-05-03T12:04:20.144301
2021-05-26T17:43:22
2021-05-26T17:43:22
371,121,213
0
0
null
null
null
null
UTF-8
Python
false
false
260
py
def odd_eve(l):
    odd = []
    eve = []
    for i in l:
        if i % 2 == 0:
            eve.append(i)
        else:
            odd.append(i)
    output = [eve, odd]
    return output


numbers = [1, 2, 4, 3, 5, 6, 54, 2, 36, 43, 31]
print(odd_eve(numbers))
[ "noreply@github.com" ]
sk013.noreply@github.com
782612e4635027ea04a2431e6dc0a11bcc45d1ee
e82ba9e19c415e5eeff4a48f52dbd7efc4ae4d6b
/9.sort/BubbleSort2.py
a43b5016f4aa1cde02f156f1bd522421ff774c94
[]
no_license
GoldK11/dataSKKU
8a4dbbd5adb6b766a28cdfaba3b9a744992d4e41
24b5e82e5456daf3c07db271e1b6932661c967a3
refs/heads/master
2021-08-23T01:33:16.984279
2017-12-02T05:39:34
2017-12-02T05:39:34
112,315,898
0
0
null
null
null
null
UTF-8
Python
false
false
1,661
py
# starts from 0 (a bit odd)
def bubbleSort(l):
    count = 0
    for i in range(len(l)):
        for j in range(i + 1, len(l)):
            count += 1
            if l[i] > l[j]:
                (l[i], l[j]) = (l[j], l[i])
    return count


l = [53, 112, 174, 200, 258, 123, 184, 254, 232, 136, 198, 3, 286, 6, 62, 57, 110, 10, 17, 189, 291, 2, 245, 118, 226, 154, 33, 211, 285, 191, 289, 161, 56, 74, 241, 297, 249, 9, 208, 251, 63, 214, 145, 97, 75, 149, 158, 59, 275, 68, 95, 124, 32, 99, 167, 224, 197, 79, 296, 152, 171, 98, 30, 148, 26, 50, 266, 93, 293, 182, 181, 153, 88, 66, 210, 100, 127, 94, 247, 277, 44, 262, 77, 121, 138, 71, 82, 119, 37, 140, 233, 206, 237, 212, 231, 11, 248, 209, 271, 234, 255, 51, 25, 243, 163, 146, 172, 142, 238, 263, 114, 104, 253, 236, 4, 273, 54, 151, 73, 250, 204, 227, 107, 18, 92, 60, 187, 120, 102, 64, 128, 173, 281, 279, 282, 144, 219, 244, 269, 40, 180, 283, 126, 288, 45, 143, 91, 178, 157, 96, 70, 129, 109, 85, 147, 35, 90, 195, 261, 19, 22, 55, 267, 280, 299, 15, 199, 168, 108, 235, 105, 196, 135, 58, 155, 162, 101, 218, 24, 246, 207, 89, 132, 192, 14, 290, 1, 295, 188, 270, 201, 78, 229, 39, 274, 49, 13, 28, 65, 72, 52, 81, 217, 252, 220, 34, 31, 216, 139, 256, 169, 166, 27, 160, 12, 284, 111, 228, 0, 159, 8, 298, 122, 87, 41, 205, 215, 193, 165, 203, 221, 84, 7, 176, 80, 20, 125, 179, 141, 29, 134, 5, 257, 16, 268, 194, 202, 225, 23, 185, 36, 21, 117, 48, 76, 260, 186, 156, 170, 47, 223, 265, 287, 103, 42, 113, 38, 239, 115, 278, 230, 259, 61, 150, 69, 130, 133, 116, 164, 242, 213, 183, 67, 175, 131, 240, 264, 46, 276, 43, 86, 83, 106, 294, 177, 137, 292, 190, 222, 272]
print(bubbleSort(l))
print(l)
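# Added note: bubbleSort() counts every comparison, not only the swaps, so for
# a list of length n it always returns n*(n-1)/2 -- 300*299/2 = 44850 for the
# 300-element list above.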
[ "ssori113@gmail.com" ]
ssori113@gmail.com
cd75f26df497e0e47746786f0197f8dc9b218f06
930c207e245c320b108e9699bbbb036260a36d6a
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/FCU_Return_Air_Temperature_Sensor.py
d4ac39c9698a57051d03037b2f79dc41b5511c4b
[]
no_license
InnovationSE/BRICK-Generated-By-OLGA
24d278f543471e1ce622f5f45d9e305790181fff
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
refs/heads/master
2021-07-01T14:13:11.302860
2017-09-21T12:44:17
2017-09-21T12:44:17
104,251,784
1
0
null
null
null
null
UTF-8
Python
false
false
457
py
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Return_Air_Temperature_Sensor import Return_Air_Temperature_Sensor


class FCU_Return_Air_Temperature_Sensor(Return_Air_Temperature_Sensor):
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').FCU_Return_Air_Temperature_Sensor
[ "Andre.Ponnouradjane@non.schneider-electric.com" ]
Andre.Ponnouradjane@non.schneider-electric.com
b66bdcf6efc1e3d36d06876d5a98947743683ff5
95a05bee4ef9a16da7185e7651685d7df71d55af
/metadata.py
4daf5b1243803c996a12e9c057b935b032fb26d4
[ "Unlicense" ]
permissive
ArniDagur/auto-rental
0f0b342c1a0d320100f4bcaba4a881f78358b76e
8b7fcf724c7501c0414454771addbd36be185b26
refs/heads/master
2020-04-10T16:44:39.510794
2018-12-10T10:06:25
2018-12-10T10:06:25
161,154,249
0
0
Unlicense
2018-12-10T10:01:58
2018-12-10T10:01:58
null
UTF-8
Python
false
false
444
py
import os

from appdirs import user_data_dir

# Information for humans:
# -----------------------------------------------------------------------------
APPNAME = 'Auto-Rental'
AUTHOR = 'hopur-32'

# Information for computers:
# -----------------------------------------------------------------------------
DATA_DIR = user_data_dir(APPNAME, AUTHOR)  # OS specific directory to store data
if not os.path.isdir(DATA_DIR):
    os.makedirs(DATA_DIR)
[ "arnidg@protonmail.ch" ]
arnidg@protonmail.ch
6a2758f58f6ef665dec7ea80ebf419557651d695
1443c180718ea74cb0862d112a7c08d6ec5d1828
/flaskfundamental/DojoSurvey2/DojoSurvey.py
2061cf3d5200337887342ecb7b0ebbf99da85a33
[]
no_license
Dragonlizard1/Python_Project
7ca7e7f4245f1d1394542127c107fe5f79e0cafe
be83d84dddc6b1c30fd231a0e15f60da5a5bceb2
refs/heads/master
2020-03-12T23:41:16.856306
2018-04-24T16:39:50
2018-04-24T16:39:50
130,871,148
0
0
null
null
null
null
UTF-8
Python
false
false
964
py
from flask import Flask, render_template, request, session, redirect, flash

app = Flask(__name__)
app.secret_key = 'ThisIsSecret'


@app.route("/")
def index():
    return render_template("form.html")


@app.route("/result", methods=["POST"])
def infoprocess():
    name = request.form["name"]
    location = request.form["location"]
    language = request.form["language"]
    comment = request.form["comment"]
    if name == "":
        flash("The name field is empty.")
        return redirect("/")
    if comment == "":
        flash("Please add comment in.")
        return redirect("/")
    elif len(comment) > 120:
        flash("Please put in less than 120 characters.")
        return redirect("/")
    # print name
    return render_template("result.html", name1=name, location1=location, language1=language, comment1=comment)


app.run(debug=True)
[ "bobbyimaging@gmail.com" ]
bobbyimaging@gmail.com
46f9807e15556efa7d2439bee101b14f588ee791
44413721791e00e5e0d728d2063cce9d072680bc
/env/bin/jupyter-nbextension
f272010110d6ad96be430c20709bbda7f2ea6cb7
[]
no_license
andriyka/term-extraction-and-ontology-learning
5174ba52db93bc3dd22b75a41c998c5e23a3bcd5
2fa478f1f6f28949d461331f6e8348f86bd344e1
refs/heads/master
2020-03-21T19:05:07.755413
2018-07-09T16:46:58
2018-07-09T16:46:58
138,929,875
2
0
null
null
null
null
UTF-8
Python
false
false
258
#!/home/ankus/Documents/ucu/terms/ate/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from notebook.nbextensions import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "ankus@ciklum.com" ]
ankus@ciklum.com
b52563bc708de755093f4abaf4427720c8741e1c
654acf62f757435f11afe3edb784c19ba9a996b5
/Cmimid/src/generalizetokens.py
b8d1ba37156b9a633f7d37b92cf041e669f90ced
[]
no_license
anonymous-scientist/anonymous-scientist.github.io
92337f97ed48f68f2b8de0f2a23de31fac6ee702
b699788fc0c44d03e4d3e172428202f52a57fd08
refs/heads/master
2020-07-05T21:10:15.055470
2020-03-11T10:22:38
2020-03-11T10:22:38
202,777,252
0
1
null
null
null
null
UTF-8
Python
false
false
10,277
py
#!/usr/bin/env python
import sys
import pudb
import grammartools

# ulimit -s 100000
sys.setrecursionlimit(99000)

import random
import string
import util
import copy
import json
import re
import fuzz as F
import subprocess

b = pudb.set_trace


def is_nt(token):
    return token.startswith('<') and token.endswith('>')


def generalize_tokens(grammar):
    g_ = {}
    for k in grammar:
        new_rules = []
        for rule in grammar[k]:
            new_rule = []
            for token in rule:
                if not is_nt(token):
                    new_rule.extend(list(token))
                else:
                    new_rule.append(token)
            new_rules.append(new_rule)
        g_[k] = new_rules
    return g_


def get_list_of_single_chars(grammar):
    lst = []
    for p, key in enumerate(grammar):
        for q, rule in enumerate(grammar[key]):
            for r, token in enumerate(rule):
                if is_nt(token):
                    continue
                if len(token) == 1:
                    lst.append((key, q, r, token))
    return lst


def remove_recursion(d):
    new_d = {}
    for k in d:
        new_rs = []
        for t in d[k]:
            if t != k:
                new_rs.append(t)
        new_d[k] = new_rs
    return new_d


def replaceable_with_kind(stree, orig, parent, gk, command):
    my_node = None

    def fill_tree(node):
        nonlocal my_node
        name, children = node
        if name == gk:
            my_node = [name, [[parent, []]]]
            return my_node
        elif not children:
            if name in ASCII_MAP:
                return (random.choice(ASCII_MAP[name]), [])
            return (name, [])
        else:
            return (name, [fill_tree(c) for c in children])

    tree0 = fill_tree(stree)
    sval = util.tree_to_str(tree0)
    assert my_node is not None
    a1 = my_node, '', tree0
    if parent == orig:
        aX = ((gk, [[orig, []]]), '', tree0)
        val = util.is_a_replaceable_with_b(a1, aX, command)
        if val:
            return True
        else:
            return False
    else:
        for pval in ASCII_MAP[parent]:
            aX = ((gk, [[pval, []]]), '', tree0)
            val = util.is_a_replaceable_with_b(a1, aX, command)
            if val:
                continue
            else:
                return False
        return True


# string.ascii_letters    The concatenation of the ascii_lowercase and ascii_uppercase constants described below. This value is not locale-dependent.
# string.ascii_lowercase  The lowercase letters 'abcdefghijklmnopqrstuvwxyz'. This value is not locale-dependent and will not change.
# string.ascii_uppercase  The uppercase letters 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'. This value is not locale-dependent and will not change.
# string.digits           The string '0123456789'.
# string.hexdigits        The string '0123456789abcdefABCDEF'.
# string.octdigits        The string '01234567'.
# string.punctuation      String of ASCII characters which are considered punctuation characters in the C locale: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~.
# string.printable        String of ASCII characters which are considered printable. This is a combination of digits, ascii_letters, punctuation, and whitespace.
# string.whitespace       A string containing all ASCII characters that are considered whitespace. This includes the characters space, tab, linefeed, return, formfeed, and vertical tab.

def parent_map():
    parent = {}
    for sp in string.whitespace:
        parent[sp] = '[__WHITESPACE__]'
    for digit in string.digits:
        parent[digit] = '[__DIGIT__]'
    for ll in string.ascii_lowercase:
        parent[ll] = '[__ASCII_LOWER__]'
    for ul in string.ascii_uppercase:
        parent[ul] = '[__ASCII_UPPER__]'
    for p in string.punctuation:
        parent[p] = '[__ASCII_PUNCT__]'
    parent['[__WHITESPACE__]'] = '[__ASCII_PRINTABLE__]'
    parent['[__DIGIT__]'] = '[__ASCII_ALPHANUM__]'
    parent['[__ASCII_LOWER__]'] = '[__ASCII_LETTER__]'
    parent['[__ASCII_UPPER__]'] = '[__ASCII_LETTER__]'
    parent['[__ASCII_LETTER__]'] = '[__ASCII_ALPHANUM__]'
    parent['[__ASCII_ALPHANUM__]'] = '[__ASCII_PRINTABLE__]'
    parent['[__PUNCT__]'] = '[__ASCII_PRINTABLE__]'
    return parent


ASCII_MAP = {
    '[__WHITESPACE__]': string.whitespace,
    '[__DIGIT__]': string.digits,
    '[__ASCII_LOWER__]': string.ascii_lowercase,
    '[__ASCII_UPPER__]': string.ascii_uppercase,
    '[__ASCII_PUNCT__]': string.punctuation,
    '[__ASCII_LETTER__]': string.ascii_letters,
    '[__ASCII_ALPHANUM__]': string.ascii_letters + string.digits,
    '[__ASCII_PRINTABLE__]': string.printable
}

PARENT_MAP = parent_map()


def find_max_generalized(tree, kind, gk, command):
    if kind not in PARENT_MAP:
        return kind
    parent = PARENT_MAP[kind]
    if replaceable_with_kind(tree, kind, parent, gk, command):
        return find_max_generalized(tree, parent, gk, command)
    else:
        return kind


def do_n(tree, kind, gk, command, n):
    ret = []
    for i in range(n):
        pval = random.choice(ASCII_MAP[kind])
        ret.append([pval, []])
    return (gk, ret)


def find_max_widened(tree, kind, gk, command):
    my_node = None

    def fill_tree(node):
        nonlocal my_node
        name, children = node
        if name == gk:
            my_node = [name, [[kind, []]]]
            return my_node
        elif not children:
            if name in ASCII_MAP:
                return (random.choice(ASCII_MAP[name]), [])
            return (name, [])
        else:
            return (name, [fill_tree(c) for c in children])

    tree0 = fill_tree(tree)
    sval = util.tree_to_str(tree0)
    assert my_node is not None
    a1 = my_node, '', tree0
    # this is a single character. Now, try 2, 4 etc.
    pvals = do_n(tree, kind, gk, command, 2)
    aX = (pvals, '', tree0)
    val = util.is_a_replaceable_with_b(a1, aX, command)
    if not val:
        return kind
    pvals = do_n(tree, kind, gk, command, 4)
    aX = (pvals, '', tree0)
    val = util.is_a_replaceable_with_b(a1, aX, command)
    if not val:
        return kind
    return kind + '+'


GK = '<__GENERALIZE__>'
MAX_CHECKS = 1000


def generalize_single_token(grammar, start, k, q, r, command, blacklist):
    # first we replace the token with a temporary key
    gk = GK
    # was there a previous widened char? and if there was,
    # do we belong to it?
    char = grammar[k][q][r]
    if r > 0 and grammar[k][q][r-1][-1] == '+':
        # remove the +
        last_char = grammar[k][q][r-1][0:-1]
        if last_char in ASCII_MAP and char in ASCII_MAP[last_char]:
            # we are part of the last.
            grammar[k][q][r] = last_char + '+'
            return grammar
    g_ = copy.deepcopy(grammar)
    g_[k][q][r] = gk
    g_[gk] = [[char]]
    #reachable_keys = grammartools.reachable_dict(g_)
    # now, we need a path to reach this.
    fg = grammartools.get_focused_grammar(g_, (gk, []))
    fuzzer = F.LimitFuzzer(fg)
    #skel_tree = find_path_key(g_, start, gk, reachable_keys, fuzzer)
    tree = None
    check = 0
    while tree is None:
        #tree = flush_tree(skel_tree, fuzzer, gk, char)
        #tree = fuzzer.gen_key(grammartools.focused_key(start), depth=0, max_depth=1)
        tree = fuzzer.iter_gen_key(grammartools.focused_key(start), max_depth=1)
        val = util.check(char, char, '<__CHECK__(%d/%d)>' % (check, MAX_CHECKS), tree, command, char, char)
        check += 1
        if not val:
            tree = None
        if check > MAX_CHECKS:
            print("Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char), file=sys.stderr)
            blacklist.append((k, q, r, char))
            #raise "Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char)
            return grammar
    # now we need to make sure that this works.
    gen_token = find_max_generalized(tree, char, gk, command)
    if gen_token != char:
        # try widening
        gen_token = find_max_widened(tree, gen_token, gk, command)
    del g_[gk]
    g_[k][q][r] = gen_token
    # preserve the order
    grammar[k][q][r] = gen_token
    return grammar


def remove_duplicate_repetitions(g):
    new_g = {}
    for k in g:
        new_rules = []
        for rule in g[k]:
            #srule = ''.join(rule)
            new_rule = []
            last = -1
            for i, t in enumerate(rule):
                if last >= 0 and len(t) > 0 and t[-1] == '+' and t == rule[last]:
                    continue
                else:
                    last = i
                    new_rule.append(t)
            #snrule = ''.join(new_rule)
            #if srule != snrule:
            #    print("change:", file=sys.stderr)
            #    print("   ", srule, file=sys.stderr)
            #    print("   ", snrule, file=sys.stderr)
            new_rules.append(new_rule)
        new_g[k] = new_rules
    return new_g


def main(args):
    gfname = args[0]
    with open(gfname) as f:
        gf = json.load(fp=f)
    grammar = gf['[grammar]']
    start = gf['[start]']
    command = gf['[command]']
    # now, what we want to do is first regularize the grammar by splitting each
    # multi-character token into single characters.
    generalized_grammar = generalize_tokens(grammar)
    # next, we want to get the list of all such instances
    list_of_things_to_generalize = get_list_of_single_chars(generalized_grammar)
    #print(len(list_of_things_to_generalize), file=sys.stderr)
    # next, we want to generalize each in turn
    # finally, we want to generalize the length.
    #reachable_keys = reachable_dict(grammar)
    g_ = generalized_grammar
    blacklist = []
    for k, q, r, t in list_of_things_to_generalize:
        assert g_[k][q][r] == t
        bl = []
        g_ = generalize_single_token(g_, start, k, q, r, command, bl)
        if bl:
            print("Blacklisted:", bl, file=sys.stderr)
            blacklist.extend(bl)
    g = remove_duplicate_repetitions(g_)
    g = grammartools.remove_duplicate_rules_in_a_key(g)
    # finally, we want to generalize the length.
    #g = generalize_size(g_)
    print(json.dumps({'[start]': start, '[grammar]': g, '[command]': command, '[blacklist]': blacklist}, indent=4))


if __name__ == '__main__':
    main(sys.argv[1:])
[ "anonymous@anonymous.net" ]
anonymous@anonymous.net
b74ebd69ba2428966df06b67ec9e088623bd0bc7
b7b2728bcfeda781ef79540dc46577f4a772e471
/django_hbase/models/exceptions.py
6cfcf27b10ce6d62f13764fc2cea8fbbda7e7c11
[]
no_license
Veronica1026/django-twitter
7dd8e0efe84d50654bc92f83bf6ac0bb0c6b432e
e28e8fe5443db48b761cd2e4e6a43e0d0c3590ff
refs/heads/main
2023-08-25T16:15:36.092192
2021-10-23T10:41:00
2021-10-23T10:41:00
364,218,123
0
0
null
null
null
null
UTF-8
Python
false
false
88
py
class BadRowKeyError(Exception):
    pass


class EmptyColumnError(Exception):
    pass
[ "543587590@qq.com" ]
543587590@qq.com
1158acb79cf822c0ded1ea29f10b77727305c073
cd142a4e15d3576546fcb44841417039f0b8fb00
/build/double/catkin_generated/pkg.installspace.context.pc.py
9b014836f2e3e476722b6c40aa901294660dad37
[]
no_license
mgou123/rplidar
4389819eb1998d404d1066c7b4a983972d236ce7
608c1f6da2d3e5a8bac06e8d55d8569af828a40b
refs/heads/master
2022-11-10T05:51:56.403293
2020-06-29T04:16:14
2020-06-29T04:16:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
393
py
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "double"
PROJECT_SPACE_DIR = "/home/xu/dogkin_ws/install"
PROJECT_VERSION = "0.0.0"
[ "492798337@qq.com" ]
492798337@qq.com
7ff7ebba377cd3e6d83e88368536f529b763202f
e966ac971af90faff55fce232620f3d0ad7f7fb8
/com/swj/OOP/Fundamental.py
a0241ea4e38f9bd58018cf65265a64b4d8590778
[]
no_license
shouguouo/PythonDemo
f987b9849e01806ccb6c370bbd4d4ba9675629ec
d9011506e3474054e2f5b1246f8e014facea7961
refs/heads/master
2021-09-21T23:55:53.258819
2018-09-03T15:47:27
2018-09-03T15:47:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,441
py
# -*- coding: utf-8 -*-

# class Student(object):  # inherits from the object class
#     pass
# bart = Student()
# print(bart)
# print(Student)
# bart.name = 'swj'
# print(bart.name)

# class Student(object):
#     def __init__(self, name, score):  # the first parameter is always self, the instance being created; it is not passed in, the other parameters are
#         self.name = name
#         self.score = score
#     def print_score(self):
#         print('%s:%s' % (self.name, self.score))
#     def get_grade(self):
#         if self.score >= 90:
#             return 'A'
#         elif self.score >= 60:
#             return 'B'
#         else:
#             return 'C'
# bart = Student('swj', 99)
# print(bart.get_grade())
#
# # data encapsulation
# bart.print_score()

# Access control: an instance variable whose name starts with __ becomes a private variable
# class Student(object):
#     def __init__(self, name, score):
#         self.__name = name
#         self.__score = score
#     def print_score(self):
#         print('%s:%s' % (self.__name, self.__score))
#     def get_name(self):
#         return self.__name
#     def get_score(self):
#         return self.__score
#     def set_score(self, score):
#         if 0 <= score <= 100:
#             self.__score = score
#         else:
#             raise ValueError('bad score')
#
# s = Student('swj', 99)
# # print(s.__name)  # not accessible
# print(s.get_name())
# s.print_score()
#
# # Names like __xx__ are special variables (double underscores at both ends); they can be accessed directly, but you should not invent names like __name__ or __score__ yourself
# # A variable starting with a single underscore can be accessed from outside, but the convention is: "although I can be accessed, please treat me as private and do not touch me casually"
# print(s._Student__name)  # the private variable can still be reached, but doing so is strongly discouraged; internally it is mangled to _Student__name
# s._Student__name = 'xhy'
# print(s.get_name())

# Inheritance and polymorphism
class Animal(object):
    def run(self):
        print('animal is running...')


class Dog(Animal):
    def run(self):
        print('dog is running...')

    def eat(self):
        print('dog is eating...')


class Cat(Animal):
    def run(self):
        print('cat is running...')

    def eat(self):
        print('cat is running...')


def run_twice(animal):
    animal.run()
    animal.run()


Dog().run()
Cat().run()
run_twice(Animal())
run_twice(Dog())  # polymorphism

# Open/closed principle -- open for extension: new Animal subclasses may be added; closed for modification: functions such as run_twice() that accept an Animal do not need to change

# Static languages vs. dynamic languages: a static language must receive an Animal or a subclass, otherwise run() cannot be called; a dynamic language only requires that the passed-in object has a run() method -- duck typing (file-like object)

# Getting object information
type(123)  # int
type('str')  # str
type(None)  # NoneType
type(abs)  # builtin_function_or_method
type(Animal())  # __main__.Animal
# More types are defined in the types module
# For class inheritance relationships type() is inconvenient; the isinstance() function can be used instead, and isinstance() should be preferred
# Use the dir() function to get all attributes and methods of an object; it returns a list of str
dir('dir')
# len('ABC') and 'ABC'.__len__() are equivalent; you can also define your own __len__ method


class MyDog(Dog):
    def __len__(self):
        return 100


print(len(MyDog()))

# getattr(), setattr() and hasattr() let you manipulate an object's state directly
# Class attributes vs. instance attributes: do not give an instance attribute and a class attribute the same name, or the instance attribute will shadow the class attribute
[ "1132331056@qq.com" ]
1132331056@qq.com
bb49d8dd28b9c93d2856e8511907f5a8c6efa6fb
ade3b5a88b2129d2e305d7be1a36dcda283a4c59
/Lab3/utils.py
8d718c9de473a5d7800d0b6f86c650cb1ac74dc5
[]
no_license
jelenab98/DL_FER
0e299003d1a41a7b502853b0643cb9e0bf8138a9
258eba86c708b53f96e92f2c2f5e9cb458e093ef
refs/heads/master
2023-08-21T21:51:47.256101
2021-10-26T14:44:54
2021-10-26T14:44:54
347,167,124
1
0
null
null
null
null
UTF-8
Python
false
false
14,674
py
<<<<<<< HEAD from sklearn.metrics import confusion_matrix as conf_matrix from torch.nn.utils.rnn import pad_sequence from torch.nn.utils import clip_grad_norm_ from torch.utils.data import Dataset from torch.nn import Embedding from pathlib import Path from tqdm import tqdm import pandas as pd import numpy as np import torch device = "cuda" if torch.cuda.is_available() else "cpu" PADDING_TOKEN = "<PAD>" # 0 UNKNOWN_TOKEN = "<UNK>" # 1 class Instance: def __init__(self, input_text: [str], target: str): self.text = input_text self.label = target class Vocab: def __init__(self, frequencies: dict, max_size: int = -1, min_freq: int = 0, is_target: bool = False): if is_target: self.stoi = dict() self.itos = dict() else: self.stoi = {PADDING_TOKEN: 0, UNKNOWN_TOKEN: 1} self.itos = {0: PADDING_TOKEN, 1: UNKNOWN_TOKEN} self.is_target = is_target self.max_size = max_size self.min_freq = min_freq i = len(self.itos) for key, value in sorted(frequencies.items(), key=lambda x: x[1], reverse=True): if (self.max_size != -1) and (len(self.itos) >= self.max_size): break if value >= self.min_freq: self.stoi[key] = i self.itos[i] = key i += 1 else: break def __len__(self): return len(self.itos) def encode(self, inputs: [str]): numericalized_inputs = [] for token in inputs: if token in self.stoi: numericalized_inputs.append(self.stoi[token]) else: numericalized_inputs.append(self.stoi[UNKNOWN_TOKEN]) return torch.tensor(numericalized_inputs) def reverse_numericalize(self, inputs: list): tokens = [] for numericalized_item in inputs: if numericalized_item in self.itos: tokens.append(self.itos[numericalized_item]) else: tokens.append(UNKNOWN_TOKEN) return tokens class NLPDataset(Dataset): def __init__(self, text_vocab: Vocab, target_vocab: Vocab, path: Path): self.vocab_input_text = text_vocab self.vocab_targets = target_vocab self.instances = [] data = pd.read_csv(path, header=None) for i in range(len(data)): text = data[0][i] label = data[1][i] self.instances.append(Instance(space_tokenizer(text), label.strip())) def __len__(self): return len(self.instances) def __getitem__(self, item): instance_item = self.instances[item] text = instance_item.text label = [instance_item.label] return self.vocab_input_text.encode(text), self.vocab_targets.encode(label) def space_tokenizer(raw_text: str): return raw_text.strip("\n").strip("\r").split(" ") def get_embedding_matrix(vocab: Vocab, dim: int = 300, freeze: bool = True, path: Path = None): matrix = torch.normal(mean=0, std=1, size=(len(vocab), dim)) matrix[0] = torch.zeros(size=[dim]) if path is not None: data = pd.read_csv(path, header=None, delimiter=" ") for i in range(len(data)): row = data.loc[i] token = row.loc[0] if token in vocab.stoi: tmp_array = [] for j in range(1, len(row)): tmp_array.append(row[j]) matrix[vocab.stoi[token]] = torch.tensor(tmp_array) return Embedding.from_pretrained(matrix, padding_idx=0, freeze=freeze) def pad_collate_fn(batch, pad_index=0): texts, labels = zip(*batch) lengths = torch.tensor([len(text) for text in texts]) return pad_sequence(texts, batch_first=True, padding_value=pad_index), torch.tensor(labels), lengths def get_frequencies(path, is_target=False): frequencies = {} data = pd.read_csv(path, header=None) idx = 1 if is_target else 0 for i in range(len(data)): inputs = data[idx][i].strip().split(" ") for token in inputs: if token in frequencies: frequencies[token] += 1 else: frequencies[token] = 1 return frequencies def train_valid(model, train_data, valid_data, optimizer, criterion, train_logger, valid_logger, save_path: Path 
= None, epochs=100, gradient_clip=0.25): best_f1 = -1 for epoch in range(epochs): model.train() confusion_matrix = np.zeros(shape=(2, 2)) losses = [] for idx, batch in tqdm(enumerate(train_data), total=len(train_data)): model.zero_grad() x, y, lengths = batch x = x.to(device) y = y.to(device) output = model(x).reshape(y.shape) loss = criterion(output, y.float()) loss.backward() clip_grad_norm_(model.parameters(), max_norm=gradient_clip) optimizer.step() predictions = torch.sigmoid(output).round().int().detach().cpu().numpy() confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions) losses.append(loss.item()) acc, p, r, f1 = calculate_stats(confusion_matrix) train_stats = f"Loss: {np.average(losses):.4f}, Acc: {100 * acc:.2f}%, F1: {100 * f1:.2f}%" train_stats2 = f"{np.average(losses)}, {acc}, {f1}" print("[TRAIN STATS:] " + train_stats) train_logger.update(train_stats2) acc_v, p_v, r_v, f1_v, loss_v = evaluate(model, valid_data, criterion) valid_stats = f"Loss: {np.average(loss_v):.4f}, Acc: {100 * acc_v:.2f}%, F1: {100 * f1_v:.2f}%" valid_stats2 = f"{np.average(loss_v)}, {acc_v}, {f1_v}" print("[VALID STATS:] " + valid_stats) valid_logger.update(valid_stats2) if f1_v > best_f1: torch.save(model, save_path / "best_model.pth") print(f"Best model saved at {epoch} epoch.") def calculate_stats(confusion_matrix): acc = np.sum(confusion_matrix.diagonal()) / np.sum(confusion_matrix) p = confusion_matrix[0, 0] / np.sum(confusion_matrix[0, :]) r = confusion_matrix[0, 0] / np.sum(confusion_matrix[:, 0]) f1 = 2 * p * r / (p + r) return acc, p, r, f1 def evaluate(model, data, criterion): confusion_matrix = np.zeros(shape=(2, 2)) losses = list() model.eval() with torch.no_grad(): for idx, batch in tqdm(enumerate(data), total=len(data)): x, y, lengths = batch x = x.to(device) y = y.to(device) output = model(x).reshape(shape=y.shape) loss = criterion(output, y.float()) losses.append(loss.item()) predictions = torch.sigmoid(output).round().int().detach().cpu().numpy() confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions) acc, p, r, f1 = calculate_stats(confusion_matrix) loss = np.average(losses) return acc, p, r, f1, loss class Logger: def __init__(self, path: Path, start_message: str): with path.open(mode="w") as f: f.write(f"{start_message}\n") self.path = path def update(self, message): with self.path.open(mode="a") as f: f.write(f"{message}\n") ======= from sklearn.metrics import confusion_matrix as conf_matrix from torch.nn.utils.rnn import pad_sequence from torch.nn.utils import clip_grad_norm_ from torch.utils.data import Dataset from torch.nn import Embedding from pathlib import Path from tqdm import tqdm import pandas as pd import numpy as np import torch device = "cuda" if torch.cuda.is_available() else "cpu" PADDING_TOKEN = "<PAD>" # 0 UNKNOWN_TOKEN = "<UNK>" # 1 class Instance: def __init__(self, input_text: [str], target: str): self.text = input_text self.label = target class Vocab: def __init__(self, frequencies: dict, max_size: int = -1, min_freq: int = 0, is_target: bool = False): if is_target: self.stoi = dict() self.itos = dict() else: self.stoi = {PADDING_TOKEN: 0, UNKNOWN_TOKEN: 1} self.itos = {0: PADDING_TOKEN, 1: UNKNOWN_TOKEN} self.is_target = is_target self.max_size = max_size self.min_freq = min_freq i = len(self.itos) for key, value in sorted(frequencies.items(), key=lambda x: x[1], reverse=True): if (self.max_size != -1) and (len(self.itos) >= self.max_size): break if value >= self.min_freq: self.stoi[key] = i self.itos[i] = key i += 1 else: break 
def __len__(self): return len(self.itos) def encode(self, inputs: [str]): numericalized_inputs = [] for token in inputs: if token in self.stoi: numericalized_inputs.append(self.stoi[token]) else: numericalized_inputs.append(self.stoi[UNKNOWN_TOKEN]) return torch.tensor(numericalized_inputs) def reverse_numericalize(self, inputs: list): tokens = [] for numericalized_item in inputs: if numericalized_item in self.itos: tokens.append(self.itos[numericalized_item]) else: tokens.append(UNKNOWN_TOKEN) return tokens class NLPDataset(Dataset): def __init__(self, text_vocab: Vocab, target_vocab: Vocab, path: Path): self.vocab_input_text = text_vocab self.vocab_targets = target_vocab self.instances = [] data = pd.read_csv(path, header=None) for i in range(len(data)): text = data[0][i] label = data[1][i] self.instances.append(Instance(space_tokenizer(text), label.strip())) def __len__(self): return len(self.instances) def __getitem__(self, item): instance_item = self.instances[item] text = instance_item.text label = [instance_item.label] return self.vocab_input_text.encode(text), self.vocab_targets.encode(label) def space_tokenizer(raw_text: str): return raw_text.strip("\n").strip("\r").split(" ") def get_embedding_matrix(vocab: Vocab, dim: int = 300, freeze: bool = True, path: Path = None): matrix = torch.normal(mean=0, std=1, size=(len(vocab), dim)) matrix[0] = torch.zeros(size=[dim]) if path is not None: data = pd.read_csv(path, header=None, delimiter=" ") for i in range(len(data)): row = data.loc[i] token = row.loc[0] if token in vocab.stoi: tmp_array = [] for j in range(1, len(row)): tmp_array.append(row[j]) matrix[vocab.stoi[token]] = torch.tensor(tmp_array) return Embedding.from_pretrained(matrix, padding_idx=0, freeze=freeze) def pad_collate_fn(batch, pad_index=0): texts, labels = zip(*batch) lengths = torch.tensor([len(text) for text in texts]) return pad_sequence(texts, batch_first=True, padding_value=pad_index), torch.tensor(labels), lengths def get_frequencies(path, is_target=False): frequencies = {} data = pd.read_csv(path, header=None) idx = 1 if is_target else 0 for i in range(len(data)): inputs = data[idx][i].strip().split(" ") for token in inputs: if token in frequencies: frequencies[token] += 1 else: frequencies[token] = 1 return frequencies def train_valid(model, train_data, valid_data, optimizer, criterion, train_logger, valid_logger, save_path: Path = None, epochs=100, gradient_clip=0.25): best_f1 = -1 for epoch in range(epochs): model.train() confusion_matrix = np.zeros(shape=(2, 2)) losses = [] for idx, batch in tqdm(enumerate(train_data), total=len(train_data)): model.zero_grad() x, y, lengths = batch x = x.to(device) y = y.to(device) output = model(x).reshape(y.shape) loss = criterion(output, y.float()) loss.backward() clip_grad_norm_(model.parameters(), max_norm=gradient_clip) optimizer.step() predictions = torch.sigmoid(output).round().int().detach().cpu().numpy() confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions) losses.append(loss.item()) acc, p, r, f1 = calculate_stats(confusion_matrix) train_stats = f"Loss: {np.average(losses):.4f}, Acc: {100 * acc:.2f}%, F1: {100 * f1:.2f}%" train_stats2 = f"{np.average(losses)}, {acc}, {f1}" print("[TRAIN STATS:] " + train_stats) train_logger.update(train_stats2) acc_v, p_v, r_v, f1_v, loss_v = evaluate(model, valid_data, criterion) valid_stats = f"Loss: {np.average(loss_v):.4f}, Acc: {100 * acc_v:.2f}%, F1: {100 * f1_v:.2f}%" valid_stats2 = f"{np.average(loss_v)}, {acc_v}, {f1_v}" print("[VALID STATS:] " + 
valid_stats) valid_logger.update(valid_stats2) if f1_v > best_f1: torch.save(model, save_path / "best_model.pth") print(f"Best model saved at {epoch} epoch.") def calculate_stats(confusion_matrix): acc = np.sum(confusion_matrix.diagonal()) / np.sum(confusion_matrix) p = confusion_matrix[0, 0] / np.sum(confusion_matrix[0, :]) r = confusion_matrix[0, 0] / np.sum(confusion_matrix[:, 0]) f1 = 2 * p * r / (p + r) return acc, p, r, f1 def evaluate(model, data, criterion): confusion_matrix = np.zeros(shape=(2, 2)) losses = list() model.eval() with torch.no_grad(): for idx, batch in tqdm(enumerate(data), total=len(data)): x, y, lengths = batch x = x.to(device) y = y.to(device) output = model(x).reshape(shape=y.shape) loss = criterion(output, y.float()) losses.append(loss.item()) predictions = torch.sigmoid(output).round().int().detach().cpu().numpy() confusion_matrix += conf_matrix(y.detach().cpu().numpy(), predictions) acc, p, r, f1 = calculate_stats(confusion_matrix) loss = np.average(losses) return acc, p, r, f1, loss class Logger: def __init__(self, path: Path, start_message: str): with path.open(mode="w") as f: f.write(f"{start_message}\n") self.path = path def update(self, message): with self.path.open(mode="a") as f: f.write(f"{message}\n") >>>>>>> ca8923228a32a1117eff983cbec160e90b72ca02
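# --- Usage sketch (not part of the original file) ---------------------------
# A minimal, hedged example of wiring the pieces above together. The CSV path,
# batch size, and DataLoader settings are illustrative assumptions; the
# original repository may use different names and values.
#
# from torch.utils.data import DataLoader
#
# freqs = get_frequencies(Path("data/train.csv"))
# label_freqs = get_frequencies(Path("data/train.csv"), is_target=True)
# text_vocab = Vocab(freqs)
# label_vocab = Vocab(label_freqs, is_target=True)
# train_set = NLPDataset(text_vocab, label_vocab, Path("data/train.csv"))
# loader = DataLoader(train_set, batch_size=32, shuffle=True, collate_fn=pad_collate_fn)
# embedding = get_embedding_matrix(text_vocab)  # random init; pass path= for pretrained vectors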
[ "jelena.bratulic@gmail.hr" ]
jelena.bratulic@gmail.hr
488243e5d4538da2bac8bd00083dfb737797e000
4dcee7dff58a6f0364283787aa7ad1dff16721e1
/pre_pred_bert.py
85a565945bd985a6dbd43cc220760b93320738a5
[]
no_license
karthikpuranik11/Masked-LM
ead8bcb5bcaedb8b62b627cc6dab2ce3c5fefcbe
bb049e493bc9968e3c50cac1fe88ebe7c436523f
refs/heads/main
2023-03-18T21:51:27.842906
2021-03-07T17:37:54
2021-03-07T17:37:54
342,780,366
0
0
null
null
null
null
UTF-8
Python
false
false
371
py
# Note: predict_masked_sent() is defined elsewhere in this repository; it is
# expected to return the top_k candidate sentences for the [MASK] token.
from nltk import pos_tag

a = predict_masked_sent('The animals came to the meeting.', top_k=5)
for j in range(len(a)):
    x = 0
    a[j] = a[j].split()
    # print(a[j])
    tok = pos_tag(a[j])
    for k in range(len(tok)):
        if tok[k][0] == '[MASK]':
            break
        elif tok[k][1] == 'IN' or tok[k][1] == 'TO':
            # a preposition ('IN') or 'to' ('TO') precedes the mask: keep this candidate
            pred = ' '.join(a[j])
            print(pred)
            x = 1
            break
    if x == 1:
        break
[ "noreply@github.com" ]
karthikpuranik11.noreply@github.com
0d3f672dc0e572c955fb17809d11692cbcc434be
c01e107f3b781df76f83ca470c22c32cacf7ddb3
/src/qsimulator.py
1d6549fd4668f7e4d098970cfbe3b45a9491cc92
[]
no_license
UB-Quantic/EG-VQClass
593d24d10da3295532fa2064d098b59de433e91e
ff3ae612d666c80d6dbc38d461ecae79e3c82208
refs/heads/master
2020-04-27T17:28:11.308759
2019-03-26T20:28:21
2019-03-26T20:28:21
174,520,998
0
1
null
null
null
null
UTF-8
Python
false
false
13,336
py
import numpy as np
import math
import cmath

Pi = math.pi


class QC(object):
    def __init__(self, qubits):
        """The quantum state is initialized with all qubits at 0."""
        self.size = qubits
        self.state = [0]*2**self.size
        self.state[0] = 1.

    def initialize(self):
        """Brings the state vector back to its initial state."""
        self.state = [0]*2**self.size
        self.state[0] = 1.

    ###############################
    # 1-Qubit Gates
    ###############################

    def h(self, m):
        """Apply the Hadamard Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        s = 1/np.sqrt(2)
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            I = 2*i - i % (2**m)
            J = I + 2**m
            a = s*self.state[I] + s*self.state[J]
            b = s*self.state[I] - s*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def x(self, m):
        """Apply the X Pauli Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            I = 2*i - i % (2**m)
            J = I + 2**m
            a = self.state[I]
            self.state[I] = self.state[J]
            self.state[J] = a

    def y(self, m):
        """Apply the Y Pauli Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            I = 2*i - i % (2**m)
            J = I + 2**m
            a = -1.j * self.state[I]
            self.state[I] = 1.j*self.state[J]
            self.state[J] = a

    def z(self, m):
        """Apply the Z Pauli Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            J = 2*i - i % (2**m) + 2**m
            self.state[J] *= -1

    def s(self, m):
        """Apply the Phase Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        for i in range(2**(self.size-1)):
            J = 2*i - i % (2**m) + 2**m
            self.state[J] *= 1.j

    def t(self, m):
        """Apply the pi/8 Gate on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        aux = cmath.exp(0.25j*math.pi)
        for i in range(2**(self.size-1)):
            J = 2*i - i % (2**m) + 2**m
            self.state[J] *= aux

    def rx(self, m, th):
        """Apply a x-rotation on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
            th (float): angle we rotate.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        th2 = 0.5*th
        c = math.cos(th2)
        s = -1.j * math.sin(th2)  # beware of conventions
        for i in range(2**(self.size-1)):
            I = 2*i - i % 2**m
            J = I + 2**m
            a = c*self.state[I] + s*self.state[J]
            b = s*self.state[I] + c*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def ry(self, m, th):
        """Apply a y-rotation on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
            th (float): angle we rotate.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        th2 = 0.5*th
        c = math.cos(th2)
        s = math.sin(th2)  # beware of conventions
        for i in range(2**(self.size-1)):
            I = 2*i - i % 2**m
            J = I + 2**m
            a = c*self.state[I] - s*self.state[J]
            b = s*self.state[I] + c*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def rz(self, m, th):
        """Apply a z-rotation on the m'th qubit.

        Args.
            m (int): the qubit we apply our gate on.
            th (float): angle we rotate.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        aux1 = cmath.exp(0.5j*th)
        aux2 = cmath.exp(-0.5j*th)
        for i in range(2**(self.size-1)):
            I = 2*i - i % 2**m
            J = I + 2**m
            self.state[I] *= aux1
            self.state[J] *= aux2

    #######################################
    # 2-Qubit Gates, Entanglement
    #######################################

    def cnot(self, c, t):
        """Apply a Controlled-NOT gate.

        Args.
            c (int): control qubit.
            t (int): target qubit.
        """
        if c >= self.size:
            raise ValueError('Control does not exist.')
        if t >= self.size:
            raise ValueError('Target does not exist.')
        if c == t:
            raise ValueError('Control and Target cannot be the same.')
        for i in range(2**(self.size-2)):
            I = (2**c + i % 2**c + ((i-i % 2**c)*2) % 2**t +
                 2*((i-i % 2**c)*2 - ((2*(i-i % 2**c)) % 2**t)))
            J = I + 2**t
            self.state[I], self.state[J] = self.state[J], self.state[I]

    def cz(self, c, t):
        """Apply a Controlled-Z gate.

        Args.
            c (int): control qubit.
            t (int): target qubit.
        """
        if c >= self.size:
            raise ValueError('Control does not exist.')
        if t >= self.size:
            raise ValueError('Target does not exist.')
        if c == t:
            raise ValueError('Control and Target cannot be the same.')
        if t < c:
            t, c = c, t
        for i in range(2**(self.size-2)):
            I = (2**c + i % 2**c + ((i-i % 2**c)*2) % 2**t +
                 2*((i-i % 2**c)*2 - ((2*(i-i % 2**c)) % 2**t)) + 2**t)
            self.state[I] *= -1

    def swap(self, m, n):
        """Apply a SWAP gate.

        Args.
            m (int): first qubit.
            n (int): second qubit.
        """
        if m >= self.size:
            raise ValueError('First Qubit does not exist.')
        if n >= self.size:
            raise ValueError('Second Qubit does not exist.')
        if m == n:
            raise ValueError('Both Qubits cannot be the same.')
        for i in range(2**(self.size-2)):
            I = (i % 2**m + ((i-i % 2**m)*2) % 2**n +
                 2*((i-i % 2**m)*2 - ((2*(i-i % 2**m)) % 2**n)) + 2**n)
            J = I + 2**m - 2**n
            self.state[I], self.state[J] = self.state[J], self.state[I]

    ############################################
    # Circuits
    ############################################
    # The following were created for classification using 4-qubits

    def encode(self, point):
        """Creates the encoding layer.

        Args.
            point (dim=2 float): coordinates of one input point.
        """
        for i in range(self.size):
            self.h(i)
            self.rz(i, point[i % 2])

    def blocka(self, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of type a.

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.rx(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.ry(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockb(self, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of type b.

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.ry(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.rx(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockc(self, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of type c.

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        self.rx(qubits[0], angles[0])
        self.ry(qubits[1], angles[1])
        self.rx(qubits[2], angles[2])
        self.ry(qubits[3], angles[3])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        self.ry(qubits[0], angles[4])
        self.rx(qubits[1], angles[5])
        self.ry(qubits[2], angles[6])
        self.rx(qubits[3], angles[7])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockd(self, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of type d.

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        self.rx(qubits[0], angles[0])
        self.ry(qubits[1], angles[1])
        self.rx(qubits[2], angles[2])
        self.ry(qubits[3], angles[3])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        self.rx(qubits[0], angles[4])
        self.ry(qubits[1], angles[5])
        self.rx(qubits[2], angles[6])
        self.ry(qubits[3], angles[7])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blockx(self, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of type x.

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.rx(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.rx(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def blocky(self, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of type y.

        Args.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): qubits the block acts on.
        """
        for i in range(4):
            self.ry(qubits[i], angles[i])
        self.cz(qubits[0], qubits[1])
        self.cz(qubits[2], qubits[3])
        for i in range(4):
            self.ry(qubits[i], angles[4+i])
        self.cz(qubits[1], qubits[2])
        self.cz(qubits[0], qubits[3])

    def add(self, typ, angles, qubits=[0, 1, 2, 3]):
        """Adds a block of a certain type in a given position.

        Args.
            typ (char): type of circuit 'a', 'b', 'c', 'd', 'x' or 'y'.
            angles (dim=8 float): rotation angles for each gate.
            qubits (dim=4 int): which qubits the block acts on.

        Rets.
            success (int): indicates whether some error flag was raised.
        """
        if typ not in 'abcdxy':
            print("Wrong key for type.")
            return 1
        # Map type keys to the bound block methods and invoke only the
        # requested one (a dict literal of call results would run them all).
        dispatch = {
            'a': self.blocka,
            'b': self.blockb,
            'c': self.blockc,
            'd': self.blockd,
            'x': self.blockx,
            'y': self.blocky,
        }
        dispatch[typ](angles, qubits)
        return 0

    # The following are intended to be used with 1-qubit circuits.

    def unitary(self, m, theta, phi, lamb):
        """Apply an arbitrary unitary gate on the m'th qubit.

        Every unitary gate is characterized by three angles.

        Args.
            m (int): qubit the gate is applied on.
            theta (float): first angle.
            phi (float): second angle.
            lamb (float): third angle.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        c = math.cos(0.5*theta)
        s = math.sin(0.5*theta)
        ephi = cmath.exp(1j*phi)
        elamb = cmath.exp(1j*lamb)
        for i in range(2**(self.size-1)):
            I = 2*i - i % (2**m)
            J = I + 2**m
            a = c*self.state[I] - s*elamb*self.state[J]
            b = s*ephi*self.state[I] + c*ephi*elamb*self.state[J]
            self.state[I] = a
            self.state[J] = b

    def block(self, m, point, angles, style=0):
        """Apply a learning block on the m'th qubit.

        Args.
            m (int): qubit the block is applied on.
            point (dim=2 float): coordinates of input.
            angles (dim=3 float): angles that determine a unitary gate.
            style (int): customizes the block.
        """
        if m >= self.size:
            raise ValueError('Qubit does not exist.')
        if style:
            self.unitary(m, point[0]+angles[0], point[1]+angles[1], angles[2])
        else:
            self.ry(m, point[0]*0.5*Pi)
            self.rz(m, (1+point[1])*Pi)
            self.unitary(m, angles[0], angles[1], angles[2])
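# --- Usage sketch (not part of the original file) ---------------------------
# A minimal, hedged example of driving the simulator above; the angles are
# arbitrary illustration values, not values from the accompanying experiments.
#
# qc = QC(4)                      # 4-qubit register, starts in |0000>
# qc.encode([0.3, -0.7])          # data-encoding layer from a 2-d point
# qc.add('a', [0.1] * 8)          # one variational block of type 'a'
# probs = [abs(amp) ** 2 for amp in qc.state]  # measurement probabilities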
[ "emgilfuster@gmail.com" ]
emgilfuster@gmail.com
76755ff963dbd261a204a635342afde89fe3cf1b
f12ca610566e7249c892811bafc37594abe7895a
/orangecontrib/text/country_codes.py
17a5b1ff2687507b4e62449ea0e34095ab18856a
[ "BSD-2-Clause" ]
permissive
nagyistoce/orange3-text
d04e6dfa68a7e86a4947c08bc2a078b4c0e772f5
fbdc3320b00a88c62ba866a671f28694958f6921
refs/heads/master
2021-01-21T09:43:25.598139
2015-06-27T14:32:09
2015-06-27T14:32:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,422
py
"""Country names to ISO3166_alpha2 codes mapping Roughly generated by the following bash script on GNU/Linux: while read cc name; do [ ! "$cc" ] && continue out=$(isoquery $cc | cut -f3 --complement); [ ! "$out" ] && out="$cc" [ "$(echo $out | cut -f3)" = "$name" ] && name='' echo -e "$out\t$name" | sed -r 's/\s+$//' | sed -r "s/\t/': ['/" | sed -r "s/\t/', '/g" | sed -r "s/^/'/" | sed -r 's/$/'"'"',],/' done < input/cc.list # cc.list from jVectorMap; format: lines start with ISO3166_alpha2_code else copied as is Certain details updated by hand. """ CC_EUROPE = { '_0': ['Kosovo', 'Kosovo, Republic of'], '-99': ['N. Cyprus', 'North Cyprus'], 'AD': ['AND', 'Andorra'], 'AL': ['ALB', 'Albania'], 'AT': ['AUT', 'Austria'], 'AX': ['ALA', 'Åland Islands', 'Aland'], 'BA': ['BIH', 'Bosnia and Herzegovina', 'Bosnia and Herz.'], 'BE': ['BEL', 'Belgium'], 'BG': ['BGR', 'Bulgaria'], 'BY': ['BLR', 'Belarus'], 'CH': ['CHE', 'Switzerland'], 'CY': ['CYP', 'Cyprus'], 'CZ': ['CZE', 'Czech Republic', 'Czech Rep.'], 'DE': ['DEU', 'Germany'], 'DK': ['DNK', 'Denmark'], 'DZ': ['DZA', 'Algeria'], 'EE': ['EST', 'Estonia'], 'EG': ['EGY', 'Egypt'], 'ES': ['ESP', 'Spain'], 'FI': ['FIN', 'Finland'], 'FO': ['FRO', 'Faroe Islands', 'Faeroe Is.'], 'FR': ['FRA', 'France'], 'GB': ['GBR', 'United Kingdom'], 'GE': ['GEO', 'Georgia'], 'GG': ['GGY', 'Guernsey'], 'GR': ['GRC', 'Greece'], 'HR': ['HRV', 'Croatia'], 'HU': ['HUN', 'Hungary'], 'IE': ['IRL', 'Ireland'], 'IL': ['ISR', 'Israel'], 'IM': ['IMN', 'Isle of Man'], 'IQ': ['IRQ', 'Iraq'], 'IS': ['ISL', 'Iceland'], 'IT': ['ITA', 'Italy'], 'JE': ['JEY', 'Jersey'], 'JO': ['JOR', 'Jordan'], 'LB': ['LBN', 'Lebanon'], 'LI': ['LIE', 'Liechtenstein'], 'LT': ['LTU', 'Lithuania'], 'LU': ['LUX', 'Luxembourg'], 'LV': ['LVA', 'Latvia'], 'LY': ['LBY', 'Libya'], 'MA': ['MAR', 'Morocco'], 'MD': ['MDA', 'Moldova, Republic of', 'Moldova'], 'ME': ['MNE', 'Montenegro'], 'MK': ['MKD', 'Macedonia, Republic of', 'Macedonia'], 'MT': ['MLT', 'Malta'], 'NL': ['NLD', 'Netherlands'], 'NO': ['NOR', 'Norway'], 'PL': ['POL', 'Poland'], 'PS': ['PSE', 'Palestine, State of', 'Palestine'], 'PT': ['PRT', 'Portugal'], 'RO': ['ROU', 'Romania'], 'RS': ['SRB', 'Serbia'], 'RU': ['RUS', 'Russian Federation', 'Russia'], 'SA': ['SAU', 'Saudi Arabia'], 'SE': ['SWE', 'Sweden'], 'SI': ['SVN', 'Slovenia'], 'SK': ['SVK', 'Slovakia'], 'SM': ['SMR', 'San Marino'], 'SY': ['SYR', 'Syrian Arab Republic', 'Syria'], 'TN': ['TUN', 'Tunisia'], 'TR': ['TUR', 'Turkey'], 'UA': ['UKR', 'Ukraine'], } CC_WORLD = { # Does NOT include CC_EUROPE '_1': ['Somaliland',], 'AE': ['ARE', 'United Arab Emirates'], 'AF': ['AFG', 'Afghanistan'], 'AM': ['ARM', 'Armenia'], 'AO': ['AGO', 'Angola'], 'AR': ['ARG', 'Argentina'], 'AU': ['AUS', 'Australia'], 'AZ': ['AZE', 'Azerbaijan'], 'BD': ['BGD', 'Bangladesh'], 'BF': ['BFA', 'Burkina Faso'], 'BI': ['BDI', 'Burundi'], 'BJ': ['BEN', 'Benin'], 'BN': ['BRN', 'Brunei Darussalam', 'Brunei'], 'BO': ['BOL', 'Bolivia, Plurinational State of', 'Bolivia'], 'BR': ['BRA', 'Brazil'], 'BS': ['BHS', 'Bahamas'], 'BT': ['BTN', 'Bhutan'], 'BW': ['BWA', 'Botswana'], 'BZ': ['BLZ', 'Belize'], 'CA': ['CAN', 'Canada'], 'CD': ['COD', 'Congo, The Democratic Republic of the', 'Dem. Rep. 
Congo'], 'CF': ['CAF', 'Central African Republic', 'Central African Rep.'], 'CG': ['COG', 'Congo'], 'CI': ['CIV', "Côte d'Ivoire"], 'CL': ['CHL', 'Chile'], 'CM': ['CMR', 'Cameroon'], 'CN': ['CHN', 'China'], 'CO': ['COL', 'Colombia'], 'CR': ['CRI', 'Costa Rica'], 'CU': ['CUB', 'Cuba'], 'DJ': ['DJI', 'Djibouti'], 'DO': ['DOM', 'Dominican Republic', 'Dominican Rep.'], 'EC': ['ECU', 'Ecuador'], 'EH': ['ESH', 'Western Sahara', 'W. Sahara'], 'ER': ['ERI', 'Eritrea'], 'ET': ['ETH', 'Ethiopia'], 'FJ': ['FJI', 'Fiji'], 'FK': ['FLK', 'Falkland Islands [Malvinas]', 'Falkland Is.'], 'GA': ['GAB', 'Gabon'], 'GH': ['GHA', 'Ghana'], 'GL': ['GRL', 'Greenland'], 'GM': ['GMB', 'Gambia'], 'GN': ['GIN', 'Guinea'], 'GQ': ['GNQ', 'Equatorial Guinea', 'Eq. Guinea'], 'GT': ['GTM', 'Guatemala'], 'GW': ['GNB', 'Guinea-Bissau'], 'GY': ['GUY', 'Guyana'], 'HN': ['HND', 'Honduras'], 'HT': ['HTI', 'Haiti'], 'ID': ['IDN', 'Indonesia'], 'IN': ['IND', 'India'], 'IR': ['IRN', 'Iran, Islamic Republic of', 'Iran'], 'JM': ['JAM', 'Jamaica'], 'JP': ['JPN', 'Japan'], 'KE': ['KEN', 'Kenya'], 'KG': ['KGZ', 'Kyrgyzstan'], 'KH': ['KHM', 'Cambodia'], 'KP': ['PRK', "Korea, Democratic People's Republic of", 'Dem. Rep. Korea', 'North Korea'], 'KR': ['KOR', 'Korea, Republic of', 'Korea', 'South Korea'], 'KW': ['KWT', 'Kuwait'], 'KZ': ['KAZ', 'Kazakhstan'], 'LA': ['LAO', "Lao People's Democratic Republic", 'Lao PDR'], 'LK': ['LKA', 'Sri Lanka'], 'LR': ['LBR', 'Liberia'], 'LS': ['LSO', 'Lesotho'], 'MG': ['MDG', 'Madagascar'], 'ML': ['MLI', 'Mali'], 'MM': ['MMR', 'Myanmar'], 'MN': ['MNG', 'Mongolia'], 'MR': ['MRT', 'Mauritania'], 'MW': ['MWI', 'Malawi'], 'MX': ['MEX', 'Mexico'], 'MY': ['MYS', 'Malaysia'], 'MZ': ['MOZ', 'Mozambique'], 'NA': ['NAM', 'Namibia'], 'NC': ['NCL', 'New Caledonia'], 'NE': ['NER', 'Niger'], 'NG': ['NGA', 'Nigeria'], 'NI': ['NIC', 'Nicaragua'], 'NP': ['NPL', 'Nepal'], 'NZ': ['NZL', 'New Zealand'], 'OM': ['OMN', 'Oman'], 'PA': ['PAN', 'Panama'], 'PE': ['PER', 'Peru'], 'PG': ['PNG', 'Papua New Guinea'], 'PH': ['PHL', 'Philippines'], 'PK': ['PAK', 'Pakistan'], 'PR': ['PRI', 'Puerto Rico'], 'PY': ['PRY', 'Paraguay'], 'QA': ['QAT', 'Qatar'], 'RW': ['RWA', 'Rwanda'], 'SB': ['SLB', 'Solomon Islands', 'Solomon Is.'], 'SD': ['SDN', 'Sudan'], 'SL': ['SLE', 'Sierra Leone'], 'SN': ['SEN', 'Senegal'], 'SO': ['SOM', 'Somalia'], 'SR': ['SUR', 'Suriname'], 'SS': ['SSD', 'South Sudan', 'S. Sudan'], 'SV': ['SLV', 'El Salvador'], 'SZ': ['SWZ', 'Swaziland'], 'TD': ['TCD', 'Chad'], 'TF': ['ATF', 'French Southern Territories', 'Fr. S. 
Antarctic Lands'], 'TG': ['TGO', 'Togo'], 'TH': ['THA', 'Thailand'], 'TJ': ['TJK', 'Tajikistan'], 'TL': ['TLS', 'Timor-Leste'], 'TM': ['TKM', 'Turkmenistan'], 'TT': ['TTO', 'Trinidad and Tobago'], 'TW': ['TWN', 'Taiwan, Province of China', 'Taiwan'], 'TZ': ['TZA', 'Tanzania, United Republic of', 'Tanzania'], 'UG': ['UGA', 'Uganda'], 'US': ['USA', 'United States', 'United States of America'], 'UY': ['URY', 'Uruguay'], 'UZ': ['UZB', 'Uzbekistan'], 'VE': ['VEN', 'Venezuela, Bolivarian Republic of', 'Venezuela'], 'VN': ['VNM', 'Viet Nam', 'Vietnam'], 'VU': ['VUT', 'Vanuatu'], 'YE': ['YEM', 'Yemen'], 'ZA': ['ZAF', 'South Africa'], 'ZM': ['ZMB', 'Zambia'], 'ZW': ['ZWE', 'Zimbabwe'], } CC_WORLD.update(CC_EUROPE) CC_USA = { 'US-AK': ['AK', 'Alaska'], 'US-AL': ['AL', 'Alabama'], 'US-AR': ['AR', 'Arkansas'], 'US-AZ': ['AZ', 'Arizona'], 'US-CA': ['CA', 'California'], 'US-CO': ['CO', 'Colorado'], 'US-CT': ['CT', 'Connecticut'], 'US-DC': ['DC', 'District of Columbia'], 'US-DE': ['DE', 'Delaware'], 'US-FL': ['FL', 'Florida'], 'US-GA': ['GA', 'Georgia'], 'US-HI': ['HI', 'Hawaii'], 'US-IA': ['IA', 'Iowa'], 'US-ID': ['ID', 'Idaho'], 'US-IL': ['IL', 'Illinois'], 'US-IN': ['IN', 'Indiana'], 'US-KS': ['KS', 'Kansas'], 'US-KY': ['KY', 'Kentucky'], 'US-LA': ['LA', 'Louisiana'], 'US-MA': ['MA', 'Massachusetts'], 'US-MD': ['MD', 'Maryland'], 'US-ME': ['ME', 'Maine'], 'US-MI': ['MI', 'Michigan'], 'US-MN': ['MN', 'Minnesota'], 'US-MO': ['MO', 'Missouri'], 'US-MS': ['MS', 'Mississippi'], 'US-MT': ['MT', 'Montana'], 'US-NC': ['NC', 'North Carolina'], 'US-ND': ['ND', 'North Dakota'], 'US-NE': ['NE', 'Nebraska'], 'US-NH': ['NH', 'New Hampshire'], 'US-NJ': ['NJ', 'New Jersey'], 'US-NM': ['NM', 'New Mexico'], 'US-NV': ['NV', 'Nevada'], 'US-NY': ['NY', 'New York'], 'US-OH': ['OH', 'Ohio'], 'US-OK': ['OK', 'Oklahoma'], 'US-OR': ['OR', 'Oregon'], 'US-PA': ['PA', 'Pennsylvania'], 'US-RI': ['RI', 'Rhode Island'], 'US-SC': ['SC', 'South Carolina'], 'US-SD': ['SD', 'South Dakota'], 'US-TN': ['TN', 'Tennessee'], 'US-TX': ['TX', 'Texas'], 'US-UT': ['UT', 'Utah'], 'US-VA': ['VA', 'Virginia'], 'US-VT': ['VT', 'Vermont'], 'US-WA': ['WA', 'Washington'], 'US-WI': ['WI', 'Wisconsin'], 'US-WV': ['WV', 'West Virginia'], 'US-WY': ['WY', 'Wyoming'], } def _invert_mapping(dict): return {v:k for k in dict for v in dict[k]} INV_CC_EUROPE = _invert_mapping(CC_EUROPE) INV_CC_WORLD = _invert_mapping(CC_WORLD) INV_CC_USA = _invert_mapping(CC_USA) SET_CC_EUROPE = set(INV_CC_EUROPE.keys()) | set(INV_CC_EUROPE.values()) SET_CC_USA = set(INV_CC_USA.keys()) | set(INV_CC_USA.values())
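# --- Usage sketch (not part of the original file) ---------------------------
# The inverted mappings resolve any accepted name or alpha-3 code back to its
# key; both lookups below yield 'SE' (illustrative, run against this module):
#
# assert INV_CC_WORLD['Sweden'] == 'SE'
# assert INV_CC_WORLD['SWE'] == 'SE'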
[ "kerncece@gmail.com" ]
kerncece@gmail.com
2847baf0977045d715c296153c4a804ffd01798a
0592c83ef8bed931d310c1233a0e329a21876cbe
/tests/test_dataset.py
d4f1a84980c48f6536ff0ed15f5ab4dc09a3e1f3
[]
no_license
datastory-org/frame2package
81d12439715f42dce8cdbd80853c16bba481da28
bea7e7d45ced2e9792078088b1e6271360bc86f8
refs/heads/master
2020-04-17T04:50:40.112731
2019-06-18T10:58:18
2019-06-18T10:58:18
166,248,711
1
0
null
null
null
null
UTF-8
Python
false
false
2,867
py
import unittest

import pandas as pd

from frame2package import Dataset, Concept


class DatasetTestCase(unittest.TestCase):

    def setUp(self):
        data = [
            {
                'country': 'Sweden',
                'capital': 'Stockholm',
                'year': 2000,
                'population': 9_000_000
            },
            {
                'country': 'Sweden',
                'capital': 'Stockholm',
                'year': 2019,
                'population': 10_000_000
            },
            {
                'country': 'Norway',
                'capital': 'Oslo',
                'year': 2000,
                'population': 5_000_000
            },
            {
                'country': 'Norway',
                'capital': 'Oslo',
                'year': 2019,
                'population': 6_000_000
            },
        ]

        concepts = [
            {
                'concept': 'country',
                'concept_type': 'entity_domain'
            },
            {
                'concept': 'capital',
                'concept_type': 'string'
            },
            {
                'concept': 'population',
                'concept_type': 'measure'
            },
            {
                'concept': 'year',
                'concept_type': 'time'
            }
        ]

        self.data = data
        self.concepts = concepts
        self.dataset = Dataset(pd.DataFrame(data), concepts)

    def test_has_concepts(self):
        self.assertTrue(hasattr(self.dataset, 'concepts'))

    def test_has_entities(self):
        self.assertTrue(hasattr(self.dataset, 'entities'))

    def test_has_tables(self):
        self.assertTrue(hasattr(self.dataset, 'tables'))

    def test_has_data(self):
        self.assertTrue(hasattr(self.dataset, 'data'))

    def test_data_is_frame(self):
        self.assertTrue(type(self.dataset.data) is pd.DataFrame)

    def test_concept_type(self):
        self.assertTrue(all([type(x) is Concept for x in self.dataset.concepts]))

    def test_has_correct_number_of_entities(self):
        self.assertEqual(len(self.dataset.entities), 1)

    def test_fails_if_missing_concepts(self):
        data = pd.DataFrame(self.data)

        def create_dataset_with_missing_concepts():
            return Dataset(data, self.concepts[:-1])

        self.assertRaises(ValueError, create_dataset_with_missing_concepts)

    def test_creates_correct_table_name(self):
        table_name = self.dataset.tables[0][0]
        expected = 'ddf--datapoints--population--by--country--year.csv'
        self.assertEqual(table_name, expected)

    def test_creates_correct_table_size(self):
        self.assertEqual(self.dataset.tables[0][1].shape, (4, 3))

    def test_records_extra_string_concepts(self):
        self.assertIn('capital', self.dataset.concepts)
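# --- Runner sketch (not part of the original file) ---------------------------
# The suite carries no __main__ guard, so it is presumably run through the
# unittest discovery CLI, e.g. `python -m unittest tests.test_dataset -v`;
# the guard below is an equally valid, hedged alternative.
if __name__ == '__main__':
    unittest.main()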
[ "robin.linderborg@gmail.com" ]
robin.linderborg@gmail.com
6af4d1ec5bd8fce9532cd1238fb58d598e8ad97f
ad7dd3db001cbf322d0944c120b42e78b9fe00b9
/champakraja/ramu.py
febc73ee66c76c06e50b7ac645f3a8c690a56002
[ "MIT" ]
permissive
jeldikk/champakraja
ebfd4ff04a0a1e48b2d6f31c4695e4ddae532e64
1462be4c8458b5bc2816b9aa69c1845482e702e1
refs/heads/master
2022-12-03T03:04:33.217318
2020-08-22T16:53:09
2020-08-22T16:53:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
562
py
from .base import character


class ramu(character):
    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        return self._name

    def books(self):
        return ('chandamama', 'swathi', 'ramayanam', 'Mahabharatham',)

    def hobbies(self):
        return ('respecting', 'worship god',)

    def activities(self):
        return ('job', 'early namaskara', 'orthodox rituals')

    def hairstyle(self):
        return ('long hair with a pony tail',)

    def nature(self):
        return ('cowardice', 'responsible',)
[ "jeldi.kamal2011@gmail.com" ]
jeldi.kamal2011@gmail.com
d371052b610c7808f4397cc46872d84018712958
78224a508b75e7958cec6a2759b8ba4c46cb4bfc
/exchange/okex/HttpMD5Util.py
a15b45816241c7353aa08cd55590e4fe1a805b91
[]
no_license
80000v/CryptoArb
34e731b11c3b29a3643c1aa79b921e0ef879b4d9
5b9d3e05af99a70a09481f1370bc863f7ca84d66
refs/heads/master
2021-04-20T10:24:07.959747
2019-04-04T14:17:46
2019-04-04T14:17:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,370
py
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Utility module for making HTTP requests and for MD5 hashing / signature generation

import requests
import hashlib

# Initialize apikey, secretkey and the base URL
apikey = '1cd704d7-d549-436b-a5ee-df7e401843d3'
secretkey = '1AE1EE7238F5485D35E128194B821181'
okcoinRESTURL = 'https://www.okcoin.cn'
BaseUrl = "/v2/auth/login"

DEFAULT_POST_HEADERS = {
    # "Authorization": "eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiI5NjQ5MGI4Ni0zOWExLTQyMWEtYmEzYi03YTAxNTkwYTg1N2MiLCJhdWRpZW5jZSI6IndlYiIsImNyZWF0ZWQiOjE1MDE1NTkzMzE0MzEsImV4cCI6MTUwMjE2NDEzMX0.crVupk8Tc4ki_TIT-tLmTpBxEjdOt4Ww3b3GoP0TJebCUT_TIxvBjzeTFRnnchbGwUHvrSoqp0cVofVaENkA6Q"
    "Authorization": None,
    'Content-Type': 'application/json',
    "User-Agent": "Chrome/39.0.2171.71",
    "Accept": "application/json",
    "authRequest": "authRequest"
}


def buildMySign(params, secretKey):
    # Sort parameters by key, join as key=value pairs, append the secret key
    # and return the uppercase MD5 hex digest.
    sign = ''
    for key in sorted(params.keys()):
        sign += key + '=' + str(params[key]) + '&'
    data = sign + 'secret_key=' + secretKey
    return hashlib.md5(data.encode("utf8")).hexdigest().upper()


def httpGet(url, resource, params=''):
    # conn = http.client.HTTPSConnection(url, timeout=10)
    # conn.request("GET", resource + '?' + params)
    # response = conn.getresponse()
    # data = response.read().decode('utf-8')
    # return json.loads(data)
    try:
        response = requests.get(url, params, timeout=5)
        if response.status_code == 200:
            return response.json()
        else:
            return {"result": "fail"}
    except Exception as e:
        print("httpGet failed, detail is:%s" % e)
        return


def httpPost(url, resource, params):
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
    }
    # conn = http.client.HTTPSConnection(url, timeout=10)
    # temp_params = urllib.parse.urlencode(params)
    # conn.request("POST", resource, temp_params, headers)
    # response = conn.getresponse()
    # data = response.read().decode('utf-8')
    # params.clear()
    # conn.close()
    # return data
    try:
        if resource:
            url = url + resource
        response = requests.post(url, params, headers=headers, timeout=5)
        if response.status_code == 200:
            return response.json()
        else:
            return
    except Exception as e:
        print("httpPost failed, detail is:%s" % e)
        return
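# --- Usage sketch (not part of the original file) ---------------------------
# buildMySign sorts the parameters, joins them as key=value pairs, appends the
# secret key and MD5-hashes the result; the parameter names below are
# illustrative only, not a documented endpoint contract:
#
# sign = buildMySign({'api_key': apikey, 'symbol': 'btc_cny'}, secretkey)
# # -> a 32-character uppercase hex digest suitable for signed requests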
[ "huang.xinyu@wanlitechnologies.com" ]
huang.xinyu@wanlitechnologies.com
237743cb29e83580cbade37977253888764a05b4
f4f54015298eedfbbdfcaaf5e2a9603112f803a5
/sachin/gocept.filestore-0.3/gocept.filestore-0.3/src/gocept/filestore/tests.py
39487c46c2cf44f18a2df60610d46b4e1e9848c4
[]
no_license
raviramawat8/Old_Python_Codes
f61e19bff46856fda230a096aa789c7e54bd97ca
f940aed0611b0636e1a1b6826fa009ceb2473c2b
refs/heads/master
2020-03-22T22:54:50.964816
2018-06-16T01:39:43
2018-06-16T01:39:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
335
py
# Copyright (c) 2007 gocept gmbh & co. kg # See also LICENSE.txt # $Id: tests.py 5111 2007-08-30 11:27:23Z zagy $ import unittest from zope.testing import doctest def test_suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocFileSuite( 'README.txt', optionflags=doctest.ELLIPSIS)) return suite
[ "sachinyadav3496@gmail.com" ]
sachinyadav3496@gmail.com
672931fd1ee8dae6d584fb3ff8d812002ab628cc
e62a8c1ee3ac295f8028164d6ba4993c189fd774
/btpython/testbikieu.py
4397d561992f473bb43fea830ed8f408eaa117f4
[]
no_license
thanhthai3457/Linux
8ac32919a59189ff35e9c2c3883303893bd245f7
55fd16be99922a1c6c9958ae3c1f0af40879b5a7
refs/heads/master
2020-03-11T18:43:05.248945
2018-06-13T15:45:50
2018-06-13T15:45:50
130,185,935
0
0
null
null
null
null
UTF-8
Python
false
false
94
py
from bikeu import thai

sv1 = thai()
sv1.set_ten()        # "ten" (tên) = name
sv1.set_sdt()        # "sdt" (số điện thoại) = phone number
print("Thông tin")   # "Information"
sv1.In()             # "In" = print the record
[ "thanh@example.com" ]
thanh@example.com
1b6117c360304db090e45da73264909875f05ed9
5beb2410b95be9d26cfca2094a446ec2be16ce50
/ma/01.py
4d605bc065cc51c0c1723ede396f4b18f370e22a
[]
no_license
1361217049/python
abbde08f88125aa21e6f24aa5183798972c02af3
ae92c33437e617203b28aaf6c644c26a0c17fb69
refs/heads/master
2020-04-01T06:42:34.757234
2018-10-14T10:00:33
2018-10-14T10:00:33
152,960,495
0
0
null
null
null
null
UTF-8
Python
false
false
139
py
# Define a class
class Student():
    def out(self):
        print("我爱++++")  # "I love ++++"
        pass


han = Student()
han.out()
Student.__dict__
print(1)
[ "1361217049@qq.com" ]
1361217049@qq.com
1eb7d4b356ecdfbafd7359821f946512d7724998
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
/lib/googlecloudsdk/generated_clients/apis/artifactregistry/v1beta2/resources.py
1c5440583e39b379a1c8a68cde0b2d6841f35146
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
google-cloud-sdk-unofficial/google-cloud-sdk
05fbb473d629195f25887fc5bfaa712f2cbc0a24
392abf004b16203030e6efd2f0af24db7c8d669e
refs/heads/master
2023-08-31T05:40:41.317697
2023-08-23T18:23:16
2023-08-23T18:23:16
335,182,594
9
2
NOASSERTION
2022-10-29T20:49:13
2021-02-02T05:47:30
Python
UTF-8
Python
false
false
3,295
py
# -*- coding: utf-8 -*-
#
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""

import enum


BASE_URL = 'https://artifactregistry.googleapis.com/v1beta2/'
DOCS_URL = 'https://cloud.google.com/artifacts/docs/'


class Collections(enum.Enum):
  """Collections for all supported apis."""

  PROJECTS = (
      'projects',
      'projects/{projectsId}',
      {},
      ['projectsId'],
      True
  )
  PROJECTS_LOCATIONS = (
      'projects.locations',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_OPERATIONS = (
      'projects.locations.operations',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}/operations/'
              '{operationsId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES = (
      'projects.locations.repositories',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_FILES = (
      'projects.locations.repositories.files',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/files/{filesId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES = (
      'projects.locations.repositories.packages',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/packages/{packagesId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_TAGS = (
      'projects.locations.repositories.packages.tags',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/packages/{packagesId}/tags/{tagsId}',
      },
      ['name'],
      True
  )
  PROJECTS_LOCATIONS_REPOSITORIES_PACKAGES_VERSIONS = (
      'projects.locations.repositories.packages.versions',
      '{+name}',
      {
          '': 'projects/{projectsId}/locations/{locationsId}/repositories/'
              '{repositoriesId}/packages/{packagesId}/versions/{versionsId}',
      },
      ['name'],
      True
  )

  def __init__(self, collection_name, path, flat_paths, params,
               enable_uri_parsing):
    self.collection_name = collection_name
    self.path = path
    self.flat_paths = flat_paths
    self.params = params
    self.enable_uri_parsing = enable_uri_parsing
[ "cloudsdk.mirror@gmail.com" ]
cloudsdk.mirror@gmail.com
691231d66568dfb3947334005eca7c99975d2ce9
32d4e716d6291b95716541e55e166e9b8fc87ef4
/parser.py
7650fbaadac29f60240aa6bd7799aca1dd83e175
[]
no_license
ShamilyanOksana/Parser
d0555e8e27679fb3c9876e1b2eab5503e032013a
05dcf604ff14d8ff60f4c8cdb619bd9c540dfa3c
refs/heads/master
2021-09-01T12:08:17.433032
2017-12-26T22:25:25
2017-12-26T22:25:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,514
py
import requests
from bs4 import BeautifulSoup


class Phone:
    pass


def get_html(url):
    # Fetch the page for the url that was actually passed in, so the
    # pagination in main() takes effect.
    r = requests.get(url)
    return r.text


def get_total_pages(html):
    soup = BeautifulSoup(html, 'lxml')
    pages = soup.find('div', class_='pagination-pages').find_all('a', class_='pagination-page')[-1].get('href')
    total_pages = pages.split('=')[1].split('&')[0]
    return int(total_pages)


def print_information(all_info):
    all_info.sort(key=lambda phone: phone.price)
    for info in all_info:
        print(info.title)
        print(info.url)
        print(info.price)
        print(info.currency)


def get_page_data(html):
    soup = BeautifulSoup(html, 'lxml')
    ads = soup.find('div', class_='catalog-list').find_all('div', class_='description')
    all_info = []  # collect one Phone record per ad on this page
    count = 0
    for ad in ads:
        all_info.append(Phone())
        all_info[count].title = get_title(ad)
        all_info[count].url = get_link(ad)
        pre_price = get_price(ad)
        all_info[count].price = pre_price[0]
        all_info[count].currency = pre_price[1]
        count += 1
    return all_info


def get_title(current_ads):
    try:
        title = current_ads.find('a', class_='item-description-title-link').get('title')
        return title
    except Exception:
        pass


def get_link(current_ads):
    try:
        link = "https://www.avito.ru" + current_ads.find('a', class_='item-description-title-link').get('href')
        return link
    except Exception:
        pass


def get_price(current_ads):
    try:
        price = current_ads.find('div', class_='about').text.split(' ')[2:]
        if price[0].isdigit() and price[1].isdigit():
            # prices like "12 500" span two tokens: thousands and remainder
            currency = price[2]
            price = int(price[0])*1000 + int(price[1])
        else:
            currency = price[1]
            price = int(price[0])
        return [price, currency]
    except Exception:
        pass


def main():
    base_url = "https://www.avito.ru/taganrog/telefony/samsung?"
    page_part = "p="
    query_part = "&q=sumsung"

    url = base_url + page_part + "1" + query_part
    html = get_html(url)
    total_pages = get_total_pages(html)

    # for i in range(1, total_pages+1):
    for i in range(1, 2):
        url_gen = base_url + page_part + str(i) + query_part
        html = get_html(url_gen)
        all_info = get_page_data(html)
        print_information(all_info)


if __name__ == "__main__":
    main()
[ "shamilyanoksana@gmail.com" ]
shamilyanoksana@gmail.com
9805ffe4daef50c8bdfe737999913fe9357c8479
e4da82e4beb9b1af7694fd5b49824a1c53ee59ff
/AutoWorkup/SEMTools/registration/averagebraingenerator.py
b206faa7d7b842adead8675771f35338e6d91db4
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
ipekoguz/BRAINSTools
c8732a9206525adb5779eb0c2ed97f448e2df47f
dc32fa0820a0d0b3bd882fa744e79194c9c137bc
refs/heads/master
2021-01-18T08:37:03.883250
2013-05-14T21:08:33
2013-05-14T21:08:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,709
py
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""

from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os


class AverageBrainGeneratorInputSpec(CommandLineInputSpec):
    inputDirectory = File(desc="Image To Warp", exists=True, argstr="--inputDirectory %s")
    templateVolume = File(desc="Reference image defining the output space", exists=True, argstr="--templateVolume %s")
    resolusion = traits.Str(desc="The resolusion.", argstr="--resolusion %s")
    iteration = traits.Str(desc="The iteration.", argstr="--iteration %s")
    pixelType = traits.Enum("uchar", "short", "ushort", "int", "uint", "float", desc="Specifies the pixel type for the input/output images", argstr="--pixelType %s")
    outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Resulting deformed image", argstr="--outputVolume %s")


class AverageBrainGeneratorOutputSpec(TraitedSpec):
    outputVolume = File(desc="Resulting deformed image", exists=True)


class AverageBrainGenerator(SEMLikeCommandLine):
    """title: Average Brain Generator

    category: Registration

    description: This programs creates synthesized average brain.

    version: 0.1

    documentation-url: http:://mri.radiology.uiowa.edu/mriwiki

    license: NEED TO ADD

    contributor: This tool was developed by Yongqiang Zhao.
    """

    input_spec = AverageBrainGeneratorInputSpec
    output_spec = AverageBrainGeneratorOutputSpec
    _cmd = " AverageBrainGenerator "
    _outputs_filenames = {'outputVolume': 'outputVolume'}
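# --- Usage sketch (not part of the original file) ---------------------------
# Hedged example of wiring this nipype interface; the file paths below are
# placeholders, not paths from the BRAINSTools test data, and running it
# requires the AverageBrainGenerator executable on PATH.
#
# gen = AverageBrainGenerator()
# gen.inputs.inputDirectory = 'subject_images/'
# gen.inputs.templateVolume = 'template.nii.gz'
# gen.inputs.outputVolume = 'average_brain.nii.gz'
# res = gen.run()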
[ "hans-johnson@uiowa.edu" ]
hans-johnson@uiowa.edu
85daa9a73cfbe7b2a17557ab40ced26375f501d9
911fc2c6bc552d83fb0d2481d556e0979cd20101
/mdb.py
1756c62bf6bbe864e38ce14e929d13411a37b47c
[]
no_license
riyasleo10/AM_filter_bot
75ed5b67632efa8c3d18911b6fdeb5437ad190c5
4193148a0cc4e5e2eaf7caf94943d2a44f4cb3f6
refs/heads/main
2023-03-22T11:31:36.098569
2021-03-18T17:42:54
2021-03-18T17:42:54
348,805,967
0
0
null
null
null
null
UTF-8
Python
false
false
4,001
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex

import re
import pymongo
from pymongo.errors import DuplicateKeyError
from marshmallow.exceptions import ValidationError

from config import DATABASE_URI, DATABASE_NAME

myclient = pymongo.MongoClient(DATABASE_URI)
mydb = myclient[DATABASE_NAME]


async def savefiles(docs, group_id):
    mycol = mydb[str(group_id)]
    try:
        mycol.insert_many(docs, ordered=False)
    except Exception:
        pass


async def channelgroup(channel_id, channel_name, group_id, group_name):
    mycol = mydb["ALL DETAILS"]
    channel_details = {
        "channel_id": channel_id,
        "channel_name": channel_name
    }
    data = {
        '_id': group_id,
        'group_name': group_name,
        'channel_details': [channel_details],
    }
    if mycol.count_documents({"_id": group_id}) == 0:
        try:
            mycol.insert_one(data)
        except:
            print('Some error occured!')
        else:
            print(f"files in '{channel_name}' linked to '{group_name}' ")
    else:
        try:
            mycol.update_one({'_id': group_id}, {"$push": {"channel_details": channel_details}})
        except:
            print('Some error occured!')
        else:
            print(f"files in '{channel_name}' linked to '{group_name}' ")


async def ifexists(channel_id, group_id):
    mycol = mydb["ALL DETAILS"]
    query = mycol.count_documents({"_id": group_id})
    if query == 0:
        return False
    else:
        ids = mycol.find({'_id': group_id})
        channelids = []
        for id in ids:
            for chid in id['channel_details']:
                channelids.append(chid['channel_id'])
        if channel_id in channelids:
            return True
        else:
            return False


async def deletefiles(channel_id, channel_name, group_id, group_name):
    mycol1 = mydb["ALL DETAILS"]
    try:
        mycol1.update_one(
            {"_id": group_id},
            {"$pull": {"channel_details": {"channel_id": channel_id}}}
        )
    except:
        pass
    mycol2 = mydb[str(group_id)]
    query2 = {'channel_id': channel_id}
    try:
        mycol2.delete_many(query2)
    except:
        print("Couldn't delete channel")
        return False
    else:
        print(f"filters from '{channel_name}' deleted in '{group_name}'")
        return True


async def deletealldetails(group_id):
    mycol = mydb["ALL DETAILS"]
    query = {"_id": group_id}
    try:
        mycol.delete_one(query)
    except:
        pass


async def deletegroupcol(group_id):
    mycol = mydb[str(group_id)]
    if mycol.count() == 0:
        return 1
    try:
        mycol.drop()
    except Exception as e:
        print(f"delall group col drop error - {str(e)}")
        return 2
    else:
        return 0


async def channeldetails(group_id):
    mycol = mydb["ALL DETAILS"]
    query = mycol.count_documents({"_id": group_id})
    if query == 0:
        return False
    else:
        ids = mycol.find({'_id': group_id})
        chdetails = []
        for id in ids:
            for chid in id['channel_details']:
                chdetails.append(
                    str(chid['channel_name']) + " ( <code>" + str(chid['channel_id']) + "</code> )"
                )
        return chdetails


async def countfilters(group_id):
    mycol = mydb[str(group_id)]
    query = mycol.count()
    if query == 0:
        return False
    else:
        return query


async def findgroupid(channel_id):
    mycol = mydb["ALL DETAILS"]
    ids = mycol.find()
    groupids = []
    for id in ids:
        for chid in id['channel_details']:
            if channel_id == chid['channel_id']:
                groupids.append(id['_id'])
    return groupids


async def searchquery(group_id, name):
    mycol = mydb[str(group_id)]
    filenames = []
    filelinks = []
    # looking for a better regex :(
    pattern = name.lower().strip().replace(' ','.*'
    # NOTE: the source content is truncated here, mid-statement.
[ "noreply@github.com" ]
riyasleo10.noreply@github.com
2ff9dcabc42e8fe5f217ef5bf6abf5b015fb7183
4f5513932010a81b0330917d2aa2f4fde39a04d6
/wall_app/models.py
948459eec11f5d2f4b83d54fc36e7e806e8f502e
[]
no_license
pfuentea/the_wall
c58067f0219040900b4240ec71f50afcbb4ceff2
550f59945720d8b148aed12b7856cbc443dd8c60
refs/heads/main
2023-07-22T07:18:13.667222
2021-09-08T03:15:57
2021-09-08T03:15:57
402,581,597
0
0
null
null
null
null
UTF-8
Python
false
false
1,918
py
from django.db import models

# Create your models here.


class UserManager(models.Manager):
    def basic_validator(self, postData):
        errors = {}
        if postData['password_confirm'] != postData['password']:
            # "Las contraseñas deben coincidir" = "The passwords must match"
            errors["password"] = "Las contraseñas deben coincidir"
        return errors


class User(models.Model):
    name = models.CharField(max_length=255)
    email = models.EmailField(unique=True)
    password = models.CharField(max_length=255)
    allowed = models.BooleanField(default=True)
    avatar = models.URLField(default="")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()

    def __repr__(self) -> str:
        return f'{self.id}:{self.name}'

    def __str__(self) -> str:
        return f'{self.id}:{self.name}'


class Mensaje(models.Model):  # "Mensaje" = message
    texto = models.TextField()
    escritor = models.ForeignKey(User, related_name="mensajes", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __repr__(self) -> str:
        return f'({self.id}){self.escritor.id} {self.escritor.name}:{self.texto}'

    def __str__(self) -> str:
        return f'({self.id}){self.escritor.id}:{self.texto}'


class Comentario(models.Model):  # "Comentario" = comment
    texto = models.TextField()
    escritor = models.ForeignKey(User, related_name="comentarios", on_delete=models.CASCADE)
    mensaje = models.ForeignKey(Mensaje, related_name="comentarios", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __repr__(self) -> str:
        return f'({self.id}){self.escritor.id}/{self.mensaje.id}:{self.texto}'

    def __str__(self) -> str:
        return f'({self.id}){self.escritor.id}{self.escritor.name}/{self.mensaje.id}:{self.texto}'
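# --- Usage sketch (not part of the original file) ---------------------------
# Hedged example of how a view might call the manager's validator; the
# postData keys mirror the form field names assumed by basic_validator above.
#
# errors = User.objects.basic_validator({'password': 'a', 'password_confirm': 'b'})
# if errors:
#     ...  # flash each message and redirect back to the form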
[ "patricio.fuentealba.feliu@gmail.com" ]
patricio.fuentealba.feliu@gmail.com
4d9f3c3aaa1eb99f9250a21ad48e579ff04e13ed
211092990562ac699369246c59dff2bee9192a49
/hw2/T2_P3.py
233ca33a51ad90369b3a0ad7bccce2b706851567
[]
no_license
haritoshpatel1997/Harvard_Course_CS181_2021
337b00211b6f34586d9c1fd7950bbeee56dae9eb
3bc223f1f022bd4e224298b6d299b42c45672100
refs/heads/main
2023-04-06T10:35:31.811861
2021-04-23T15:16:05
2021-04-23T15:16:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,703
py
# Don't change these imports. Note that the last two are the
# class implementations that you will implement in
# T2_P3_LogisticRegression.py and T2_P3_GaussianGenerativeModel.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as c
import matplotlib.patches as mpatches
from T2_P3_LogisticRegression import LogisticRegression
from T2_P3_GaussianGenerativeModel import GaussianGenerativeModel
from T2_P3_KNNModel import KNNModel

# These are the hyperparameters to the classifiers. You may need to
# adjust these as you try to find the best fit for each classifier.

# Logistic Regression hyperparameters
eta = 0.1  # Learning rate
lam = 0.1  # Lambda for regularization

# Whether or not you want the plots to be displayed
show_charts = True

# DO NOT CHANGE ANYTHING BELOW THIS LINE!
# -----------------------------------------------------------------


# Visualize the decision boundary that a model produces
def visualize_boundary(model, X, y, title, width=2):
    # Create a grid of points
    x_min, x_max = min(X[:, 0] - width), max(X[:, 0] + width)
    y_min, y_max = min(X[:, 1] - width), max(X[:, 1] + width)
    xx, yy = np.meshgrid(
        np.arange(x_min, x_max, 0.05),
        np.arange(y_min, y_max, 0.05)
    )

    # Flatten the grid so the values match spec for self.predict
    xx_flat = xx.flatten()
    yy_flat = yy.flatten()
    X_pred = np.vstack((xx_flat, yy_flat)).T

    # Get the class predictions
    Y_hat = model.predict(X_pred)
    Y_hat = Y_hat.reshape((xx.shape[0], xx.shape[1]))

    # Visualize them.
    cmap = c.ListedColormap(['r', 'b', 'g'])
    plt.figure()
    plt.title(title)
    plt.xlabel('Magnitude')
    plt.ylabel('Temperature')
    plt.pcolormesh(xx, yy, Y_hat, cmap=cmap, alpha=0.3)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, linewidths=1, edgecolors='black')

    # Adding a legend and a title
    red = mpatches.Patch(color='red', label='Dwarf')
    blue = mpatches.Patch(color='blue', label='Giant')
    green = mpatches.Patch(color='green', label='Supergiant')
    plt.legend(handles=[red, blue, green])

    # Saving the image to a file, and showing it as well
    plt.savefig(title + '.png')
    if show_charts:
        plt.show()


# A mapping from string name to id
star_labels = {
    'Dwarf': 0,       # also corresponds to 'red' in the graphs
    'Giant': 1,       # also corresponds to 'blue' in the graphs
    'Supergiant': 2   # also corresponds to 'green' in the graphs
}

# Read from file and extract X and y
df = pd.read_csv('data/hr.csv')
X = df[['Magnitude', 'Temperature']].values
y = np.array([star_labels[x] for x in df['Type']])

# Setting up and evaluating a number of different classification models
nb1 = GaussianGenerativeModel(is_shared_covariance=False)
nb1.fit(X, y)
visualize_boundary(nb1, X, y, 'generative_result_separate_covariances')
print('Separate Covariance negative log-likelihood: {}\n'
      .format(nb1.negative_log_likelihood(X, y)))

nb2 = GaussianGenerativeModel(is_shared_covariance=True)
nb2.fit(X, y)
visualize_boundary(nb2, X, y, 'generative_result_shared_covariances')
print('Shared Covariance negative log-likelihood: {}\n'
      .format(nb2.negative_log_likelihood(X, y)))

lr = LogisticRegression(eta=eta, lam=lam)
lr.fit(X, y)
lr.visualize_loss('logistic_regression_loss', show_charts=show_charts)
visualize_boundary(lr, X, y, 'logistic_regression_result')

knn1 = KNNModel(k=1)
knn1.fit(X, y)
visualize_boundary(knn1, X, y, 'knn1_result')

knn3 = KNNModel(k=3)
knn3.fit(X, y)
visualize_boundary(knn3, X, y, 'knn3_result')

knn5 = KNNModel(k=5)
knn5.fit(X, y)
visualize_boundary(knn5, X, y, 'knn5_result')

# Setting up some sample data
X_test = np.array([[6, 2]])
y_nb1 = nb1.predict(X_test)
y_nb2 = nb2.predict(X_test)
y_lr = lr.predict(X_test)
y_knn1 = knn1.predict(X_test)
y_knn3 = knn3.predict(X_test)
y_knn5 = knn5.predict(X_test)

# Predicting an unseen example
print('Test star type predictions for Separate Covariance Gaussian Model:')
print('magnitude 6 and temperature 2: {}\n'.format(y_nb1[0]))
print('Test star type predictions for Shared Covariance Gaussian Model:')
print('magnitude 6 and temperature 2: {}\n'.format(y_nb2[0]))
print('Test star type predictions for Logistic Regression:')
print('magnitude 6 and temperature 2: {}'.format(y_lr[0]))
print('Test star type predictions for KNN Model with k=1:')
print('magnitude 6 and temperature 2: {}'.format(y_knn1[0]))
print('Test star type predictions for KNN Model with k=3:')
print('magnitude 6 and temperature 2: {}'.format(y_knn3[0]))
print('Test star type predictions for KNN Model with k=5:')
print('magnitude 6 and temperature 2: {}'.format(y_knn5[0]))
[ "jonathanchu33@gmail.com" ]
jonathanchu33@gmail.com
4ff8a625e52e7a2fc0f40fd40fdb70a36086c6e2
ad13583673551857615498b9605d9dcab63bb2c3
/output/instances/sunData/SType/ST_facets/ST_facets00201m/ST_facets00201m9_p.py
6b09bb1b8dd9512268b76bbd79e2c658e0d3fc7d
[ "MIT" ]
permissive
tefra/xsdata-w3c-tests
397180205a735b06170aa188f1f39451d2089815
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
refs/heads/main
2023-08-03T04:25:37.841917
2023-07-29T17:10:13
2023-07-30T12:11:13
239,622,251
2
0
MIT
2023-07-25T14:19:04
2020-02-10T21:59:47
Python
UTF-8
Python
false
false
139
py
from output.models.sun_data.stype.st_facets.st_facets00201m.st_facets00201m9_xsd.st_facets00201m9 import Test

obj = Test(
    value=10
)
[ "tsoulloftas@gmail.com" ]
tsoulloftas@gmail.com
d733e8db920ee09bf0f15babc827291aeda2b2a9
af6e9d54859eaa36742bd670da15ea5542793ca8
/5task/send.py
8833b1047975b74b572e5f9ffd283c0979f66fbe
[]
no_license
gavritenkov/vezdecode
e5c068addfa56d0c5a277b861766330ad0c725e0
79c9dda1044dd69cbebb0cdf1e08030188251b4b
refs/heads/master
2023-04-11T09:31:14.123289
2021-04-24T18:07:59
2021-04-24T18:07:59
361,219,335
0
0
null
null
null
null
UTF-8
Python
false
false
1,509
py
import string
import smtplib
import random
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC


# Key generator
def key_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))


msg = str(input('Введите сообщение : '))          # "Enter the message: "
mail = str(input('Введите почту получателя : '))  # "Enter the recipient's email: "

# Derive a Fernet key from the randomly generated password via PBKDF2-HMAC-SHA256
password_provided = key_generator()
password = password_provided.encode()
salt = b'salt_'
kdf = PBKDF2HMAC(
    algorithm=hashes.SHA256(),
    length=32,
    salt=salt,
    iterations=100000,
    backend=default_backend()
)
key = base64.urlsafe_b64encode(kdf.derive(password))

msg = msg.encode()
f = Fernet(key)
msg = f.encrypt(msg)
msg = str(msg)
print("\nВаш зашифрованный текст: " + msg)  # "Your encrypted text:"

# SMTP
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# Mailbox created for VezdeCode; messages are sent from it
server.login("ExampleVezdehod@gmail.com", "VezdehodTula71")
# Sending
server.sendmail("ExampleVezdehod@gmail.com", mail, msg)

# "The message has been sent! The recipient needs this key to decrypt it:"
print("\nСообщение было отправлено!\nПолучателю необходим ключ для расшифровки: " + password_provided)
input("")
[ "kgavritenkov@gmail.com" ]
kgavritenkov@gmail.com
625ed010dc1eb9f52ce77596a5a4e7dfeafa600d
6226e852484e3ceaf27389a021b3215a6ee02e3d
/Entrega 1/balistica.py
884537fba547bed4c42d61deec34e11d28f84cec
[]
no_license
DiegoAparicio/MCOC2020-P1
d7e7dd2cd1a66694c914d4f552bf0ba5e76f44d2
84e2b7c7a1d3dfdd9eddb3f8f3e6ff4a111240ff
refs/heads/master
2022-12-23T08:21:15.692241
2020-09-12T01:11:16
2020-09-12T01:11:16
289,975,895
0
0
null
null
null
null
UTF-8
Python
false
false
1,411
py
# -*- coding: utf-8 -*- """ Created on Mon Aug 24 09:48:20 2020 @author: 56977 """ import scipy as sp from scipy.integrate import odeint #parametros: p = 1.225 #kg/m3 cd = 0.47 cm = 0.01 inch = 2.54*cm D = 8.5*inch r = D/2 A = sp.pi*r**2 CD = 0.5*p*cd*A g = 9.81 #m/s2 m = 15 Vs = [0,10.,20.] #V = 20 #funcion a integrar: for V in Vs: def bala(z,t): zp = sp.zeros(4) zp[0] = z[2] zp[1] = z[3] v = z[2:4] v[0]= v[0]-V #velocidad menos viento vnorm = sp.sqrt(sp.dot(v,v)) FD = -CD*sp.dot(v,v)*(v/vnorm) zp[2] = FD[0]/m zp[3] = FD[1]/m -g return zp #vector de tiempo t = sp.linspace(0,30,1001) #parte en el origen y tiene vx=vy=2 m/s vi = 100*1000/3600 z0 = sp.array([0,0,vi,vi]) sol = odeint(bala,z0,t) import matplotlib.pylab as plt x = sol[:,0] y = sol[:,1] plt.figure(1) plt.title("Trayectoria para distintos vientos") plt.grid() plt.axis([0,150,0,50]) plt.plot(x,y,label =f"V = {V} m/s") plt.ylabel("Y (m)") plt.xlabel("X (m)") plt.legend(loc="upper right") plt.savefig("trayectoria.png") #se genera la imagen en formato png #plt.show() #se omite plt.show debido a que en el enunciado decia no abrir ventana de visualizacion
[ "noreply@github.com" ]
DiegoAparicio.noreply@github.com
eddff0d30d84daa619346f62be32cd51bd14262c
702c8a229ec80537e9864959220c75aaabb28548
/taobao.py
01f8d6dd7e92faac959d6bc370761b440d2e7af4
[]
no_license
17181370591/wode
67de606298da7daf9e73dae8822a03ade9065ddc
4c574ec33f17c2b65f1fec7eb0adfb6dd05f141e
refs/heads/master
2021-06-23T18:21:18.796955
2019-06-13T09:56:48
2019-06-13T09:56:48
114,825,512
0
3
null
null
null
null
UTF-8
Python
false
false
222
py
from urllib.request import urlopen from bs4 import BeautifulSoup import re p=urlopen('https://buyertrade.taobao.com/trade/itemlist/list_bought_items.htm?spm=a3204.7139825.a2226mz.9.I5133L&t=20110530') print(p.read())
[ "noreply@github.com" ]
17181370591.noreply@github.com
8927c9bfdeb3e5161e03c5bbfb20291758317781
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2367/60791/254956.py
3891197249694bfc95edf61b7fdb4f59e0c7209d
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
127
py
k = int(input()) n = '1' if(k%2==0 or k%5==0): print(-1) else: while(int(n)%k != 0): n += '1' print(len(n))
[ "1069583789@qq.com" ]
1069583789@qq.com
c7ffbc120879b204d210b4e4d5cc28f0f5f98edd
086722e5e0a7a88654ad78c696d5e22e6b700e1a
/pythonwithcsv.py
be793bbffb4556da3531ea3dc7e4b33b3c8fe6d2
[]
no_license
84karandeepsingh/datavizwithcsv
5541840d3e7e3b126c1017fd28c6b0865f164415
7cb82d8063802aebdf59a647bfc19f9b32063850
refs/heads/master
2020-04-06T19:51:51.419261
2018-11-15T21:01:34
2018-11-15T21:01:34
157,752,241
0
0
null
2018-11-15T21:01:35
2018-11-15T18:05:13
Python
UTF-8
Python
false
false
2,459
py
import csv import numpy as np import matplotlib.pyplot as plt # figure out what data we want to use categories = [] # these are the column headers in the CSV file installs = [] # this is the installs row ratings = [] # this is the ratings row with open('data/googeplaystore.csv') as csvfile: reader = csv.reader(csvfile) line_count = 0 for row in reader: # move the page column header out of the actual data to get a clean dataset if line_count is 0: # this will be text, not data print('pushing categories into a separate array') categories.append(row) # push the text into this array line_count += 1 # increment the line count for the next loop else: # grab the ratings and push them into the ratings array ratingsData= row[2] ratingsData = ratingsData.replace("NaN", "0") ratings.append(float(ratingsData)) # int turn a string (piece of text) into a number # print('pushing ratings data into the ratings array') installData = row[5] installData = installData.replace(",", "") # get rid of the commas # get rid of the trailing "+" installs.append(np.char.strip(installData, "+")) line_count += 1 # get some values we can work with # how many ratings are 4+? # how many are below 2? # how many are the middle? np_ratings = np.array(ratings) # turn a plain Python list into a Numpy array popular_apps = np_ratings > 4 print("popular apps:", len(np_ratings[popular_apps])) percent_popular = int(len(np_ratings[popular_apps]) / len(np_ratings) * 100) print(percent_popular) unpopular_apps = np_ratings < 4 print("unpopular apps:", len(np_ratings[unpopular_apps])) percent_unpopular = int(len(np_ratings[unpopular_apps]) / len(np_ratings) * 100) print(percent_unpopular) kinda_popular = int(100 - (percent_popular + percent_unpopular)) print(kinda_popular) # do a visualization with our shiny new data labels = "Sucks", "Meh", "Love it!" sizes = [percent_unpopular, kinda_popular, percent_popular] colors = ['yellowgreen', 'lightgreen', 'lightskyblue'] explode = (0.1, 0.1, 0.15) plt.pie(sizes, explode=explode, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140) plt.axis('equal') plt.legend(labels, loc=1) plt.title("Do we love us some apps?") plt.xlabel("User Ratings - App Install (10,000+ apps") plt.show() print(categories) print('first row of data:', installs[0]) print('last row of data:', installs[-1])
[ "k_thind92494@tss-hr420-dm29.fc.ca" ]
k_thind92494@tss-hr420-dm29.fc.ca
e6dfd9cb391b1dc09795b1911c78d7980a0ff1ee
b7f45072d056b80ed49e6bcde91877d8576e970d
/ImageJ/py/Wayne-blob-example.py
610a35e6e5ddb80455ce608015ed6b1efdfc7ff2
[]
no_license
jrminter/tips
128a18ee55655a13085c174d532c77bcea412754
f48f8b202f8bf9e36cb6d487a23208371c79718e
refs/heads/master
2022-06-14T08:46:28.972743
2022-05-30T19:29:28
2022-05-30T19:29:28
11,463,325
5
8
null
2019-12-18T16:24:02
2013-07-17T00:16:43
Jupyter Notebook
UTF-8
Python
false
false
321
py
from org.python.core import codecs codecs.setDefaultEncoding('utf-8') import os from ij import IJ, WindowManager IJ.run("Close All") img = IJ.openImage("http://wsr.imagej.net/images/blobs.gif") IJ.setAutoThreshold(img, "Default") IJ.run(img, "Analyze Particles...", " show=[Bare Outlines] include in_situ") img.show()
[ "jrminter@gmail.com" ]
jrminter@gmail.com
6d89a520c12d1cf396cf97386448d6f6738ff2d8
2597f120ba197ec63497263b75b003f17dd41d37
/manage.py
54c3bd9664497fe87eee913f00da595bf9bbee72
[]
no_license
vir-mir/reactjs-test
e86be29b939d77b9d5be5ea5c7ffe47dd7a54293
8e2da61fb7c13ae85a2ce835fb84893ad855d547
refs/heads/master
2021-01-02T09:26:14.815846
2014-07-30T04:11:26
2014-07-30T04:11:26
29,864,903
0
0
null
null
null
null
UTF-8
Python
false
false
250
py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "virmir.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
[ "virmir49@gmail.com" ]
virmir49@gmail.com
3477ca40e3be9c089491a0edef84de178170c43e
533c1ccd1eb1c4c735c6473381c64770d8103246
/lbpi/wrappers/adt_ens.py
29a1a2ef5f96d665989172968bf34b8e0216c90f
[ "MIT" ]
permissive
nairvinayv/random_scripts
fc9278ce4be4908368311993918854de8330e032
6e1cc8d82cf61ae245108a69ffa1359f636f37f7
refs/heads/master
2022-02-14T09:01:20.866580
2022-02-02T21:33:05
2022-02-02T21:33:05
39,131,991
0
0
MIT
2021-09-27T04:50:05
2015-07-15T10:58:07
Shell
UTF-8
Python
false
false
3,320
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri May 26 09:14:50 2017 This script is for making sure two uploaded receptor conformations are of the same ensembles which will be used for the calculation of average SNR of multiple conformations @author: nabina """ #import re import sys,os import subprocess as sb import shutil arg1 = sys.argv[1] # flag arguments for defining the modules to be operated arg2 = sys.argv[2] # wrapper files arg3 = sys.argv[3] # working folder def work_files(): """ This function is to find out the folders present in ensembles and prepare receptor in each folders and also to copy ligands to each folders. Finally, depending on the flag defined it helps in running autodock or LIBSA """ pdb_path = '{}protein.pdb'.format(arg3) ligand_path = '{}ligand.pdbqt'.format(arg3) chnatm_path = '{}chnatm.dat'.format(arg3) protein_path = [] protein_pdbqt = [] receptor_path = [] aname = {}; a_count = list(range(len(os.listdir('{}ensembles'.format(arg3))))) for i in range(len(a_count)):aname[(i)] = '{}ensembles/folder{}/'.format(arg3, a_count[i]) dirr = list(aname.values()) for i in range(len(dirr)): protein_path.append('{}protein.pdb'.format(dirr[i])) protein_pdbqt.append('{}protein.pdbqt'.format(dirr[i])) receptor_path.append('{}receptor.pdb'.format(dirr[i])) for i in range(len(dirr)): if arg1 == 'adt': space = sys.argv[4] points = sys.argv[5] evals = sys.argv[6] gens = sys.argv[7] run = sys.argv[8] sb.call(['python', '{}pdb_prepare.py'.format(arg2), pdb_path, chnatm_path, protein_path[i], protein_pdbqt[i], receptor_path[i]]) shutil.copy(ligand_path, dirr[i]) # copying of ligand_pdbqt to ensemble folders sb.call(['python', '{}adt.py'.format(arg2), dirr[i], space, points, evals, gens, run]) if arg1 == 'libsa_none': LIBSA = sys.argv[4] sb.call(['python', '{}libsa_none.py'.format(arg2), dirr[i], LIBSA, 'none', '0.05','1','0.4','4']) sb.call(['python', '{}libsa_none.py'.format(arg2), dirr[i], LIBSA, 'affinity_only', '0.05','1','0.4','4']) if arg1 == 'libsa': LIBSA = sys.argv[4] arg5 = sys.argv[5] arg6 = sys.argv[6] arg7 = sys.argv[7] arg8 = sys.argv[8] arg9 = sys.argv[9] sb.call(['python', '{}libsa.py'.format(arg2), dirr[i], LIBSA, arg5,arg6, arg7, arg8, arg9]) """ Subprocess call for views file to automate adt and LIBSA for ensembles sb.call(['python', '{}adt_ens.py'.format(wrappers), 'adt', wrappers, current_dir, space, points, evals, gens, run]) sb.call(['python', '{}adt_ens.py'.format(wrappers), 'libsa_none', wrappers, current_dir,LIBSA]) sb.call(['python', '{}adt_ens.py'.format(wrappers), 'libsa', wrappers, current_dir, LIBSA, libsa_flags, energy_steps, percentchange, aux_peak, cutoff]) """ if __name__ == '__main__': work_files()
[ "noreply@github.com" ]
nairvinayv.noreply@github.com
078d8878c03008b44ffb9bcebc52d9ae1bf3d187
dbc08e2b8b1d257b4ad0a12eeefb5d8ac2168045
/ClassifyProducts.py
1377966c9d462c28a9a812de9fbcb8962c764702
[]
no_license
lauraabend/NLP_ProductClassifier
885329bad7f0dd26688361b679cfa1e25f14be5e
0c659165a7c444ef07c34cbf2452ad57ad6510de
refs/heads/master
2021-01-18T12:46:00.842400
2016-07-01T21:17:08
2016-07-01T21:17:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,295
py
from nltk.corpus import wordnet import numpy as np import pandas as pd from nltk.tokenize import TweetTokenizer from nltk.tag import pos_tag from nltk.stem.porter import * from nltk.corpus import stopwords from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report def assign_product_to_class(class_descriptions, description_of_product): comparison_list = [] description_of_product = list(set(description_of_product)) description_of_product = [word for word in description_of_product if word not in stopwords.words('english')] for className in class_descriptions.keys(): comparison_per_class = [] for word1 in class_descriptions[className]: word_from_list1 = wordnet.synsets(word1) for word2 in description_of_product: word_from_list2 = wordnet.synsets(word2) if word_from_list1 and word_from_list2: s = word_from_list1[0].wup_similarity(word_from_list2[0]) comparison_per_class.append(s) comparison_per_class = [item for item in comparison_per_class if item != None] list_of_similar_values = sorted(comparison_per_class, reverse=True)[:5] comparison_list.append([np.mean(list_of_similar_values), className]) return sorted(comparison_list, reverse=True) stemmer = PorterStemmer() tknzr = TweetTokenizer() classDescriptions = { "Camera & Photo": ["lens", "camera", "photo", "camcorder", "photography", "image", "film", "digital", "monitor", "record"], "Bedding & Bath": ["bed", "bath", "sheet", "towel", "shower", "tube", "bathroom", "bedroom", "pillow", "mattress", "sleep"], "Exercise & Fitness": ["exercise", "fitness", "sport", "games", "weight", "train", "resistance", "soccer", "tennis", "golf", "yoga", "basketball", "fit"] } for i in classDescriptions.keys(): classDescriptions[i] = [stemmer.stem(word) for word in classDescriptions[i]] file = pd.read_csv("./test_set2.csv", delimiter=";", encoding='latin-1') list_of_products = list(zip(file["Product_id"].tolist(), file["Description"], file["Category"])) list_of_products_ready = [list(elem) for elem in list_of_products] real_label = [] prediction = [] for i in range(len(list_of_products_ready)): # Tokenize the sentence tokenized_words = tknzr.tokenize(list_of_products_ready[i][1]) list_of_products_ready[i].pop(1) # Stem the words stemed_words = [stemmer.stem(plural) for plural in tokenized_words] # Tag the morphology of the word tagged_words = pos_tag(stemed_words) # Only select the NN and NNP only_nouns = [word for word, pos in tagged_words if pos == 'NN' or pos == 'NNP'] # Append the resulting words list_of_products_ready[i].append(only_nouns) # Start classification similatiry_to_classes = assign_product_to_class(classDescriptions, list_of_products_ready[i][2]) list_of_products_ready[i].insert(2, similatiry_to_classes[0][1]) real_label.append(list_of_products_ready[i][1]) prediction.append(list_of_products_ready[i][2]) print(list_of_products_ready[i]) print(confusion_matrix(real_label, prediction)) print(classification_report(real_label, prediction, target_names=["Exercise & Fitness", "Camera & Photo", "Bedding & Bath"]))
[ "martin.maseda@gmail.com" ]
martin.maseda@gmail.com
e18cb7dd81804a2ba328dc66b22ac4a5eb10f3e6
92d79bbe1e94e192e9d4a728f99f6aecea500645
/attack/df_attack_2_tab_next.py
33f57d3a58533a0b661b278d2c17db343feda980
[]
no_license
westzyan/attackWFP
e6f61dc7a6636640a298162941c5b7c882c1fc80
5e2227308b3ab7be5b607c4d8dddb4871ed56fc4
refs/heads/master
2023-03-02T19:16:14.846449
2021-02-09T18:04:42
2021-02-09T18:04:42
325,488,478
2
0
null
null
null
null
UTF-8
Python
false
false
5,558
py
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, classification_report from Model_DF import DFNet import random from keras.utils import np_utils from keras.optimizers import Adamax import numpy as np import os import tensorflow as tf import keras config = tf.ConfigProto() config.gpu_options.allow_growth = True keras.backend.tensorflow_backend.set_session(tf.Session(config=config)) # Load data for non-defended dataset for CW setting def LoadDataNoDefCW(second): print("Loading defended dataset for closed-world scenario") # Point to the directory storing data # dataset_dir = '../dataset/ClosedWorld/NoDef/' # dataset_dir = "/media/zyan/软件/张岩备份/PPT/DeepFingerprinting/df-master/dataset/ClosedWorld/NoDef/" dataset_dir = "/home/thinkst/zyan/real_specified_split/round/second{}/".format(second) # X represents a sequence of traffic directions # y represents a sequence of corresponding label (website's label) data = np.loadtxt(dataset_dir + "df_tcp_95000_10000_head_math_order.csv", delimiter=",") print(data) np.random.shuffle(data) print(data) print(len(data)) train_length = int(0.8 * len(data)) valid_length = int(0.1 * len(data)) test_length = len(data) - train_length - valid_length train = data[:train_length, :] valid = data[train_length: train_length + valid_length, :] test = data[train_length + valid_length:, :] X_train = train[:, :-1] y_train = train[:, -1] X_valid = valid[:, :-1] y_valid = valid[:, -1] X_test = test[:, :-1] y_test = test[:, -1] print("X: Training data's shape : ", X_train.shape) print("y: Training data's shape : ", y_train.shape) print("X: Validation data's shape : ", X_valid.shape) print("y: Validation data's shape : ", y_valid.shape) print("X: Testing data's shape : ", X_test.shape) print("y: Testing data's shape : ", y_test.shape) # return X_train, y_train, X_valid, y_valid, X_test, y_test if __name__ == '__main__': for second in range(2, 9): random.seed(0) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' description = "Training and evaluating DF model for closed-world scenario on non-defended dataset" print(description) # Training the DF model NB_EPOCH = 20 # Number of training epoch print("Number of Epoch: ", NB_EPOCH) BATCH_SIZE = 128 # Batch size VERBOSE = 2 # Output display mode LENGTH = 10000 # Packet sequence length OPTIMIZER = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # Optimizer NB_CLASSES = 95 # number of outputs = number of classes INPUT_SHAPE = (LENGTH, 1) # Data: shuffled and split between train and test sets print("Loading and preparing data for training, and evaluating the model") X_train, y_train, X_valid, y_valid, X_test, y_test = LoadDataNoDefCW(second) # Please refer to the dataset format in readme # K.set_image_dim_ordering("tf") # tf is tensorflow # Convert data as float32 type X_train = X_train.astype('float32') X_valid = X_valid.astype('float32') X_test = X_test.astype('float32') y_train = y_train.astype('float32') y_valid = y_valid.astype('float32') y_test = y_test.astype('float32') # we need a [Length x 1] x n shape as input to the DF CNN (Tensorflow) X_train = X_train[:, :, np.newaxis] X_valid = X_valid[:, :, np.newaxis] X_test = X_test[:, :, np.newaxis] print(X_train.shape[0], 'train samples') print(X_valid.shape[0], 'validation samples') print(X_test.shape[0], 'test samples') # Convert class vectors to categorical classes matrices y_train = np_utils.to_categorical(y_train, NB_CLASSES) y_valid = np_utils.to_categorical(y_valid, NB_CLASSES) y_test = np_utils.to_categorical(y_test, NB_CLASSES) # 
Building and training model print("Building and training DF model") model = DFNet.build(input_shape=INPUT_SHAPE, classes=NB_CLASSES) model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"]) print("Model compiled") # Start training history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE, validation_data=(X_valid, y_valid)) # model.save('my_model_undef_tcp_10000_round2.h5') # Start evaluating model with testing data score_test = model.evaluate(X_test, y_test, verbose=VERBOSE) print("Testing accuracy:", score_test[1]) y_pre = model.predict(X_test) index_test = np.argmax(y_test, axis=1) index_pre = np.argmax(y_pre, axis=1) print(precision_recall_fscore_support(index_test, index_pre, average='macro')) # Macro-P,Macro-R,Macro-F1 print(precision_recall_fscore_support(index_test, index_pre, average='micro')) # Micro-P,Micro-R,Micro-F1 print(classification_report(index_test, index_pre)) score = classification_report(index_test, index_pre) # 混淆矩阵并可视化 confmat = confusion_matrix(y_true=index_test, y_pred=index_pre) # 输出混淆矩阵 print(confmat) with open("./overlap_second.txt", 'a') as f: f.write("second:{} acc:{}\n".format(second, score_test[1])) f.write(score) f.close()
[ "15639067131@163.com" ]
15639067131@163.com
1864abd09c45d30c777b5127b78b028f192c006a
ce3f2b03f38076b75544ab901662e6aeda35d97a
/manage.py
0f521b7d0f5bf1caf2215a08b13f9ff161682059
[]
no_license
DivingCats/reflask
98799b7f693101a211152701cace06ef627233f3
1be5c61f3cf48b4e6e6a15fee56930f8166d3cd6
refs/heads/master
2022-12-09T22:13:58.644735
2020-02-08T09:56:14
2020-02-08T09:56:14
230,551,305
0
0
null
2022-12-08T03:34:28
2019-12-28T03:11:44
Python
UTF-8
Python
false
false
610
py
# -*- coding: utf-8 -*- # @Time : 2020/2/5 20:55 # @Author : DivingKitten # @File : manage.py # @Software: PyCharm # @Desc : 启动脚本 from app import create_app from flask_script import Manager, Shell app = create_app('default') manager = Manager(app) def make_shell_context(): return dict(app=app) manager.add_command("shell", Shell(make_context=make_shell_context)) @manager.command def test(): import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests) if __name__ == '__main__': # app.run() manager.run()
[ "Unility@163.com" ]
Unility@163.com
a0a6e2b478307867d176521ffe24feb3a9ea24cb
382c3368b5a8a13d57bcff7951334e57f919d964
/remote-scripts/samples/APC40_20/SpecialMixerComponent.py
c157f77a22c018cdeaf7228a4fc43b005f301133
[ "Apache-2.0" ]
permissive
jim-cooley/abletonremotescripts
c60a22956773253584ffce9bc210c0804bb153e1
a652c1cbe496548f16a79bb7f81ce3ea3545649c
refs/heads/master
2021-01-22T02:48:04.820586
2017-04-06T09:58:58
2017-04-06T09:58:58
28,599,515
2
1
null
null
null
null
UTF-8
Python
false
false
3,246
py
# emacs-mode: -*- python-*- # -*- coding: utf-8 -*- from _Framework.MixerComponent import MixerComponent from SpecialChannelStripComponent import SpecialChannelStripComponent from _Framework.ButtonElement import ButtonElement #added from _Framework.EncoderElement import EncoderElement #added class SpecialMixerComponent(MixerComponent): ' Special mixer class that uses return tracks alongside midi and audio tracks, and only maps prehear when shifted ' __module__ = __name__ def __init__(self, num_tracks): MixerComponent.__init__(self, num_tracks) self._shift_button = None #added self._shift_pressed = False #added def set_shift_button(self, button): #added assert ((button == None) or (isinstance(button, ButtonElement) and button.is_momentary())) if (self._shift_button != button): if (self._shift_button != None): self._shift_button.remove_value_listener(self._shift_value) self._shift_button = button if (self._shift_button != None): self._shift_button.add_value_listener(self._shift_value) self.update() def _shift_value(self, value): #added assert (self._shift_button != None) assert (value in range(128)) self._shift_pressed = (value != 0) self.update() def update(self): #added override if self._allow_updates: master_track = self.song().master_track if self.is_enabled(): if (self._prehear_volume_control != None): if self._shift_pressed: #added self._prehear_volume_control.connect_to(master_track.mixer_device.cue_volume) else: self._prehear_volume_control.release_parameter() #added if (self._crossfader_control != None): self._crossfader_control.connect_to(master_track.mixer_device.crossfader) else: if (self._prehear_volume_control != None): self._prehear_volume_control.release_parameter() if (self._crossfader_control != None): self._crossfader_control.release_parameter() if (self._bank_up_button != None): self._bank_up_button.turn_off() if (self._bank_down_button != None): self._bank_down_button.turn_off() if (self._next_track_button != None): self._next_track_button.turn_off() if (self._prev_track_button != None): self._prev_track_button.turn_off() self._rebuild_callback() else: self._update_requests += 1 def tracks_to_use(self): return (self.song().visible_tracks + self.song().return_tracks) def _create_strip(self): return SpecialChannelStripComponent() def disconnect(self): #added MixerComponent.disconnect(self) if (self._shift_button != None): self._shift_button.remove_value_listener(self._shift_value) self._shift_button = None # local variables: # tab-width: 4
[ "jim@ubixlabs.com" ]
jim@ubixlabs.com
f45604bd7b04946b6c72a23f38771783457a5ae7
9998ff1d80a5442970ffdc0b2dd343e3cab30ee8
/fiaqm.py
8d1e36ec5d2bdf3d45320c614cf86650b2282341
[ "MIT" ]
permissive
cgomezsu/FIAQM
9baac1a9410a6ad19e67fff024a9ff15f24df70c
da44e370f40e573233a148414229359e7782ad0c
refs/heads/main
2023-05-13T02:17:19.833979
2021-05-24T01:25:17
2021-05-24T01:25:17
301,475,198
0
0
null
null
null
null
UTF-8
Python
false
false
14,902
py
""" v.e.s. Federated Congestion Predictor + Intelligent AQM MIT License Copyright (c) 2020 Cesar A. Gomez Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from mininet.topo import Topo from mininet.net import Mininet from mininet.link import TCLink from mininet.log import info, setLogLevel from mininet.cli import CLI import numpy as np import pandas as pd import time, random, os import substring as ss import tuner as tnr random.seed(7) # For reproducibility class CreateTopo(Topo): def build(self, n): bra = self.addSwitch('bra1') # Border Router A brb = self.addSwitch('brb1') # Border Router B ixp = self.addSwitch('ixp1') # IXP switch vs = self.addSwitch('vs1') # Virtual switch to emulate a tunnel connection between Learning Orchestrator and Local Learners self.addLink(bra, ixp, bw=1000, delay='2ms') # Link between bra-eth1 and ixp-eth1 self.addLink(ixp, brb, bw=1000, delay='2ms') # Link between ixp-eth2 and brb-eth1 # Creation of hosts connected to Border Routers in each domain for j in range(n): BW = random.randint(250,500) d = str(random.randint(2,10))+'ms' # Each host has a random propagation delay (between 2 and 10 ms) on its link connected to the corresponding router ha = self.addHost('a%s' % (j+1), ip='10.10.0.%s' % (j+1)) self.addLink(ha, bra, bw=BW, delay=d) BW = random.randint(250,500) # Random BW to limit rate on each interface d = str(random.randint(2,10))+'ms' hb = self.addHost('b%s' % (j+1), ip='10.11.0.%s' % (j+1)) self.addLink(hb, brb, bw=BW, delay=d) hlla = self.addHost('lla1', ip='10.10.11.11') # Host acting as the Local Learner A self.addLink(hlla, vs, bw=100)#, delay='2ms') hllb = self.addHost('llb1', ip='10.10.11.12') # Host acting as the Local Learner B self.addLink(hllb, vs, bw=100)#, delay='2ms') hlo = self.addHost('lo1', ip='10.10.10.10') # Host acting as the Learning Orchestrator self.addLink(hlo, vs, bw=100)#, delay='2ms') ma = self.addHost('ma1', ip='10.0.0.10') # There are two monitor hosts for probing self.addLink(ma, bra, bw=1000) # The BW of the monitor hosts are the same as the inter-domain links mb = self.addHost('mb1', ip='10.0.0.11') self.addLink(mb, brb, bw=1000) setLogLevel('info') # To show info messages n = 20 # Number of network elements connected per border router topo = CreateTopo(n) net = Mininet(topo, link=TCLink, autoSetMacs=True) # We use Traffic Control links info('\n*** Starting network\n') net.start() # Creating network devices from topology lo1 = net['lo1'] lla1 = net['lla1'] llb1 = net['llb1'] bra1 = net['bra1'] ixp1 = net['ixp1'] brb1 = net['brb1'] ma1 = net['ma1'] mb1 = net['mb1'] 
# AQM configuration for link between IXP switch and Border Router A ixp1.cmd('tc qdisc del dev ixp1-eth1 root') # Clear current qdisc ixp1.cmd('tc qdisc add dev ixp1-eth1 root handle 1:0 htb default 1') # Set the name of the root as 1:, for future references. The default class is 1 ixp1.cmd('tc class add dev ixp1-eth1 classid 1:1 htb rate 1000mbit') # Create class 1:1 as direct descendant of root (the parent is 1:) ixp1.cmd('tc qdisc add dev ixp1-eth1 parent 1:1 handle 10:1 fq_codel limit 1000 target 50ms interval 1000ms noecn') # Create qdisc with ID (handle) 10 of class 1. Its parent class is 1:1. Queue size limited to 1000 pkts # AQM configuration for link between Border Router B and IXP switch. This will be the IAQM aqm_target = 5000 aqm_interval = 100 # Initial parameters brb1.cmd('tc qdisc del dev brb1-eth1 root') brb1.cmd('tc qdisc add dev brb1-eth1 root handle 1:0 htb default 1') brb1.cmd('tc class add dev brb1-eth1 classid 1:1 htb rate 1000mbit') brb1.cmd('tc qdisc add dev brb1-eth1 parent 1:1 handle 10:1 fq_codel limit 1000 target {}us interval {}ms noecn'.format(aqm_target,aqm_interval)) # Both target and interval of this queue will be set dynamically as the emulation runs info('\n*** Setting up AQM for intra-domain link buffers at Border Router\n') a = [0 for j in range(n)] # List initialization for hosts A b = [0 for j in range(n)] # List initialization for hosts B for j in range(n): # Changing the queue discipline on interfaces connected to Border Router A BW = random.randint(250,500) # Random BW to limit rate on each interface bra1.cmd('tc qdisc del dev bra1-eth{} root'.format(j+2)) bra1.cmd('tc qdisc add dev bra1-eth{} root handle 1:0 htb default 1'.format(j+2)) bra1.cmd('tc class add dev bra1-eth{} classid 1:1 htb rate {}mbit'.format(j+2,BW)) bra1.cmd('tc qdisc add dev bra1-eth{} parent 1:1 handle 10:1 fq_codel limit 1000 target 2000us interval 40ms noecn'.format(j+2)) time.sleep(3) # Wait a moment while the AQM is configured a[j] = net['a%s' % (j+1)] # Creating net devices from topology b[j] = net['b%s' % (j+1)] time.sleep(5) info('\n*** Testing connectivity...\n') for j in range(n): net.ping(hosts=[a[j],b[j]]) net.ping(hosts=[lla1,lo1]) net.ping(hosts=[llb1,lo1]) net.ping(hosts=[ma1,mb1]) info('\n*** Starting AQM stat captures...\n') bra1.cmd('bash ~/stat_bra.sh') # Bash script to capture AQM stats at Border Router A ixp1.cmd('bash ~/stat_ixp.sh &') # Bash script to capture AQM stats at the IXP switch brb1.cmd('bash ~/stat_brb.sh &') # Bash script to capture AQM stats at Border Router B info('\n*** Starting RRUL traffic between pairs...\n') for j in range(n): a[j].cmd('netserver &') # Start server on domain-A hosts for RRUL tests l = random.randint(300,900) # Random length for each RRUL test b[j].cmd('bash rrul.sh 10.10.0.{} {} &'.format(j+1,l)) # Start RRUL tests on domain-B hosts time.sleep(20) # Waiting time while all RRUL tests start info('\n*** Capturing AQM stats for initial training...\n') time.sleep(150) # Waiting time for the getting initial training samples # If emulation gets stuck in the first period, it's porbably because there's not enough traffic samples to train the model. 
Leave a longer time info('\n*** Starting Federated Congestion Predictor process...\n') lo1.cmd('mkdir ~/LO') # Directory to store parameter files of Learning Orchestrator lo1.cmd('mkdir ~/LO/Predictions') # Directory to store predictions of Learning Orchestrator lla1.cmd('mkdir ~/LLA') # Directory to store files of Local Learner A llb1.cmd('mkdir ~/LLB') # Directory to store files of Local Learner B # Start SCP server on Learning Orchestrator lo1.cmd('/usr/sbin/sshd -p 54321') # Start learning process on Learning Orchestrator lo1.cmd('source PyTorch/bin/activate') # Activate virtual environment where PyTorch is installed lo1.cmd('python lo_train.py &') lo1.cmd('python lo_predict.py &') # Start learning process on Local Learner A lla1.cmd('source PyTorch/bin/activate') lla1.cmd('python ll_train.py &') # Receive predictions periods = 300 # Number of periods (of aprox 2 secs) to run the emulation factor = 1000 S = 100 # Number of states: discrete levels of congestion [0, 100] in a period of 2 s A = np.arange(1100, 11100, 100) # Set of actions: set value of target parameter in us epsilon = 0.5 s_curr = random.randint(0,S) # Random initialization of the first observed state for tuning ind_action = len(A)-1 # Initial action for tuning: max FQ-CoDel target considered (7.5 ms) cong_pred_max = 1e-6 hist_pred = np.zeros(periods) hist_local = np.zeros(periods) hist_r = np.zeros(periods) hist_rtt = np.ones(periods)*8 hist_tput = np.ones(periods)*100 ma1.cmd('iperf -s &') # Iperf server on Monitor A to measure throughput t0 = time.time() # To take time of all periods preds_file = '/home/ubuntu/LLB/dr_est.npy' for i in range(periods): # Measure RTT and throughput in 1 sec mb1.cmd('iperf -c 10.0.0.10 -i 0.1 -t 1 | tail -1 > ~/LLB/tput.out &') mb1.cmd('ping 10.0.0.10 -i 0.1 -w 1 -q | tail -1 > ~/LLB/ping.out') print("*** Period",i) print("+++ Configured target and interval parameters:",aqm_target,"us",aqm_interval,"ms") # Load received file with predictions, sent by the Learning Orchestrator while not os.path.isfile(preds_file): time.sleep(0) while not os.path.getsize(preds_file) >= 928: # To make sure that the file is not empty and still being transferred (file size with 100 predictions) time.sleep(0) dr_est = np.load(preds_file) # Some signal rearrangements of the predictions dr_est = dr_est*factor dr_est = dr_est-dr_est.mean() dr_est = np.abs(dr_est) dr_est[dr_est > 1] = 1 # Avoid any possible outlier after rearranging hist_pred[i] = dr_est.mean() # Discretize values of predictions if hist_pred[i] > cong_pred_max: cong_pred_max = hist_pred[i] # Stores the max value of predicted congestion s_next = int((hist_pred[i]/cong_pred_max)*S-1) print("+++ Predicted level of congestion ahead:",s_next+1) time.sleep(1) # The duration of each period is this waiting time + ping time statinfo = os.stat('/home/ubuntu/LLB/ping.out') if statinfo.st_size < 10: mRTT = hist_rtt.max() # If no ping response was gotten, take the maximum in the records (worst case) print ('>>> No mRTT response. Taking maximum known: %.3f' % mRTT, 'ms') else: din = open('/home/ubuntu/LLB/ping.out').readlines() slice = ss.substringByInd(din[0],26,39) text = (slice.split('/')) mRTT = float(text[1]) print ('>>> mRTT: %.3f' % mRTT, 'ms') hist_rtt[i] = mRTT statinfo = os.stat('/home/ubuntu/LLB/tput.out') if statinfo.st_size < 10: tput = hist_tput.min() # If no tput response was gotten, take the minimum in the records (worst case) print ('>>> No Tput response. 
Taking minimum known: %.3f' % tput, 'Mbps') else: din = open('/home/ubuntu/LLB/tput.out').readlines() tput = float(ss.substringByInd(din[0],34,37)) unit = ss.substringByInd(din[0],39,39) if unit == 'K': tput = tput*0.001 print ('>>> Tput: %.3f' % tput, 'Mbps') hist_tput[i] = tput hist_r[i] = tput/mRTT R = hist_r[i] # Reward is based on power function print ('>>> Power: %.2f' % R) if i >= 75: # Start the AQM tuning process after this period # Update Q-values Q = tnr.update(s_curr, ind_action, R, s_next) s_curr = s_next # Select action for next iteration ind_action = tnr.action(s_curr, epsilon) aqm_target = A[ind_action] # Select a FQ-CoDel target aqm_interval = int(aqm_target/(0.05*1000)) # Select a FQ-CoDel interval. Tipycally, target is 5% of interval brb1.cmd('tc qdisc change dev brb1-eth1 parent 1:1 handle 10:1 fq_codel limit 1000 target {}us interval {}ms noecn'.format(aqm_target,aqm_interval)) # Change the AQM parameters at Border Router B print("*** Total wall time: ", time.time() - t0, " seconds") np.save('/home/ubuntu/hist_pred.npy',hist_pred) np.save('/home/ubuntu/hist_power.npy',hist_r) np.save('/home/ubuntu/q-table.npy',Q) #CLI(net) # Uncomment this line to explore the temporary files before exiting Mininet info('*** Deleting temporary files...\n') lo1.cmd('rm -r ~/LO') lla1.cmd('rm -r ~/LLA') llb1.cmd('rm -r ~/LLB') info('*** Experiment finished!\n') net.stop()
[ "noreply@github.com" ]
cgomezsu.noreply@github.com
500aef746d79ed087cee6c69260b5b7ab0ba585d
12c15a95f6105f58cce4595db4541e2967abc86a
/PyFury/CodeMonk V2/XOR.py
dec64e446ce09fcec64e31f94dcc7013cd2395c2
[]
no_license
avinash28196/PyFury-V1.0
da5c85fd561ee7edc01f7ece9f4657191ae0f015
84ed41c13e2fdd96fc1556915709f0c87655af56
refs/heads/master
2021-07-05T08:42:10.291793
2019-02-25T17:06:46
2019-02-25T17:06:46
125,209,323
0
0
null
null
null
null
UTF-8
Python
false
false
277
py
Test = int(input()) for i in range(Test): N = int(input()) count = 0 for i in range (1,N+1): for j in range(i+1, N+1): # print (i, j) xor = i ^ j if (xor <= N or xor == N): count += 1 print(count)
[ "nextbitgeek@Avinashs-MacBook-Pro.local" ]
nextbitgeek@Avinashs-MacBook-Pro.local
235950d4728104e2a077b449c15418d2a6e7154c
057554afbdfec2f8689a999a15ba0848c620ab4f
/find_available_room.py
d3ca3e14301c511f7ba7b3c8037a7f21148694ab
[ "Apache-2.0" ]
permissive
cnaert/roomfinder2
fe40c2728d19c92688ef4b86699db660f36fefb8
75040a2842058334fb5dfa9d12491e321bc88b43
refs/heads/master
2021-01-19T21:59:25.368371
2017-04-19T11:13:33
2017-04-19T11:13:33
88,688,826
0
0
null
null
null
null
UTF-8
Python
false
false
2,600
py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys reload(sys) sys.setdefaultencoding("utf-8") import subprocess import getpass from string import Template import xml.etree.ElementTree as ET import csv, codecs import argparse import datetime now = datetime.datetime.now().replace(microsecond=0) starttime_default = now.isoformat() end_time_default = None parser = argparse.ArgumentParser() parser.add_argument("-url","--url", help="url for exhange server, e.g. 'https://mail.domain.com/ews/exchange.asmx'.",required=True) parser.add_argument("-u","--user", help="user name for exchange/outlook",required=True) parser.add_argument("-p","--password", help="password for exchange/outlook", required=True) parser.add_argument("-start","--starttime", help="Starttime e.g. 2014-07-02T11:00:00 (default = now)", default=starttime_default) parser.add_argument("-end","--endtime", help="Endtime e.g. 2014-07-02T12:00:00 (default = now+1h)", default=end_time_default) #parser.add_argument("-n","--now", help="Will set starttime to now and endtime to now+1h", action="store_true") parser.add_argument("-f","--file", help="csv filename with rooms to check (default=favorites.csv). Format: Name,email",default="favorites.csv") args=parser.parse_args() url = args.url reader = csv.reader(codecs.open(args.file, 'r', encoding='utf-8')) start_time = args.starttime if not args.endtime: start = datetime.datetime.strptime( start_time, "%Y-%m-%dT%H:%M:%S" ) end_time = (start + datetime.timedelta(hours=1)).isoformat() else: end_time = args.endtime user = args.user password = args.password print "Searching for a room from " + start_time + " to " + end_time + ":" print "{0:10s} {1:25s} {2:40s} {3:10s} {4:10s} {5:10s} {6:50s}".format("Status", "Room", "Email", "Level", "Zone", "Seats", "Description") xml_template = open("getavailibility_template.xml", "r").read() xml = Template(xml_template) for room in reader: data = unicode(xml.substitute(email=room[1],starttime=start_time,endtime=end_time)) header = "\"content-type: text/xml;charset=utf-8\"" command = "curl --silent --header " + header +" --data '" + data + "' --ntlm -u "+ user+":"+password+" "+ url response = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()[0] tree = ET.fromstring(response) status = "Free" # arrgh, namespaces!! elems=tree.findall(".//{http://schemas.microsoft.com/exchange/services/2006/types}BusyType") for elem in elems: status=elem.text print "{0:10s} {1:25s} {2:40s} {3:10s} {4:10s} {5:10s} {6:50s}".format(status, room[0], room[1], room[2], room[3], room[4], room[5] )
[ "cyrille.naert@dimensiondata.com" ]
cyrille.naert@dimensiondata.com
e6e7dda1c960f07e3ef950b406a97d1171f4fa8d
fe0edb968d9d20c8dcdd994e293db418c451ce53
/amazon/LCAOfBinaryTree/solution.py
5d3bd6f8a9f7c77189b09b2403dbaeac851e2ceb
[]
no_license
childxr/lintleetcode
d079b1a01fb623f2cb093b0fe665c21a18ec1b6a
e8d472ab237d61fed923df25c91823371c63445b
refs/heads/master
2020-03-20T08:39:10.473582
2018-08-04T22:56:55
2018-08-04T22:56:55
137,315,070
0
0
null
null
null
null
UTF-8
Python
false
false
894
py
""" Definition of TreeNode: class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None """ class Solution: """ @param: root: The root of the binary search tree. @param: A: A TreeNode in a Binary. @param: B: A TreeNode in a Binary. @return: Return the least common ancestor(LCA) of the two nodes. """ def findNodes(self, root, A, B): if root is None: return root if root == A or root == B: return root left = self.findNodes(root.left, A, B) right = self.findNodes(root.right, A, B) if left is not None and right is not None: return root return left if left is not None else right def lowestCommonAncestor(self, root, A, B): # write your code here return self.findNodes(root, A, B)
[ "rxie@juniper.net" ]
rxie@juniper.net
172576681e45df0d4e6966c9a2513b6ebdfbff4e
846cbb8cc97c667c1f2969fca12b835c3843f170
/magpy/lib/format_sfs.py
a36994e4972d2df583ae37a95c7050c0af4825eb
[ "LicenseRef-scancode-unknown-license-reference" ]
permissive
geomagpy/magpy
f33a4a7ae95f95d2e5e3d09b571d2fa6f2905174
79f3420c4526c735869715e8c358848d790e982b
refs/heads/master
2023-08-17T08:39:48.757501
2023-07-19T11:25:00
2023-07-19T11:25:00
47,394,862
40
20
BSD-3-Clause
2021-01-26T12:29:02
2015-12-04T09:38:09
Python
UTF-8
Python
false
false
5,996
py
""" MagPy Auxiliary input filter - WIC/WIK Written by Roman Leonhardt June 2012 - contains test and read function, toDo: write function """ from magpy.stream import * def isSFDMI(filename): """ Checks whether a file is spanish DMI format. Time is in seconds relative to one day """ try: temp = open(filename, 'rt').readline() except: return False if len(temp) >= 9: if temp[9] in ['o','+','-']: # Prevent errors with GFZ kp return False sp = temp.split() if not len(sp) == 6: return False if not isNumber(sp[0]): return False #logging.info(" Found SFS file") return True def isSFGSM(filename): """ Checks whether a file is spanish GSM format. Time is in seconds relative to one day """ try: fh = open(filename, 'rt') temp = fh.readline() except: return False sp = temp.split() if len(sp) != 2: return False if not isNumber(sp[0]): return False try: if not 20000 < float(sp[1]) < 80000: return False except: return False return True def readSFDMI(filename, headonly=False, **kwargs): """ Reading SF DMI format data. Looks like: 0.03 99.11 -29.76 26.14 22.05 30.31 5.04 98.76 -29.78 26.20 22.04 30.31 10.01 98.85 -29.76 26.04 22.04 30.31 15.15 98.63 -29.79 26.20 22.04 30.31 20.12 98.85 -29.78 26.11 22.04 30.31 first column are seconds of day """ starttime = kwargs.get('starttime') endtime = kwargs.get('endtime') getfile = True fh = open(filename, 'rt') # read file and split text into channels stream = DataStream() if stream.header is None: headers = {} else: headers = stream.header data = [] key = None # get day from filename (platform independent) splitpath = os.path.split(filename) daystring = splitpath[1].split('.') try: day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"),"%Y-%m-%d") except: logging.warning("Wrong dateformat in Filename %s" % daystring[0]) fh.close() return DataStream([], headers) # Select only files within eventually defined time range if starttime: if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if endtime: if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if getfile: for line in fh: if line.isspace(): # blank line continue else: row = LineStruct() elem = line.split() if (len(elem) == 6): row.time=date2num(datetime.strptime(day,"%Y-%m-%d"))+ float(elem[0])/86400 xval = float(elem[1]) yval = float(elem[2]) zval = float(elem[3]) row.x = xval row.y = yval row.z = zval row.t1 = float(elem[4]) row.t2 = float(elem[5]) stream.add(row) stream.header['col-x'] = 'x' stream.header['col-y'] = 'y' stream.header['col-z'] = 'z' stream.header['col-t1'] = 'T1' stream.header['col-t2'] = 'T2' stream.header['unit-col-x'] = 'nT' stream.header['unit-col-y'] = 'nT' stream.header['unit-col-z'] = 'nT' stream.header['unit-col-t1'] = 'deg C' stream.header['unit-col-t2'] = 'deg C' else: headers = stream.header stream =[] fh.close() return DataStream(stream, headers) def readSFGSM(filename, headonly=False, **kwargs): """ Reading SF GSM format data. 
Looks like: 22 42982.35 52 42982.43 82 42982.47 first column are seconds of day """ starttime = kwargs.get('starttime') endtime = kwargs.get('endtime') getfile = True fh = open(filename, 'rt') # read file and split text into channels stream = DataStream() if stream.header is None: headers = {} else: headers = stream.header data = [] key = None # get day from filename (platform independent) splitpath = os.path.split(filename) daystring = splitpath[1].split('.') try: day = datetime.strftime(datetime.strptime(daystring[0], "%d%m%Y"),"%Y-%m-%d") except: logging.warning("Wrong dateformat in Filename %s" % daystring[0]) return [] # Select only files within eventually defined time range if starttime: if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if endtime: if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if getfile: for line in fh: if line.isspace(): # blank line continue else: row = LineStruct() elem = line.split() if (len(elem) == 2): row.time=date2num(datetime.strptime(day,"%Y-%m-%d"))+ float(elem[0])/86400 row.f = float(elem[1]) stream.add(row) stream.header['col-f'] = 'f' stream.header['unit-col-f'] = 'nT' else: headers = stream.header stream =[] fh.close() return DataStream(stream, headers)
[ "roman.leonhardt@zamg.ac.at" ]
roman.leonhardt@zamg.ac.at
014cbf61158fb280b11d2f149b026f48d5234c0e
2e2a54e30f8c8018fe0d163a5fd4b0d854ef165d
/src/gluonts/torch/model/deep_npts/_network.py
c29d1935c3d32e884ec124b33fde866e0b55aa92
[ "Apache-2.0" ]
permissive
kashif/gluon-ts
b742021ca0292ca2885b3b079150f24cdf3e6dec
a818f69dc049c1c1d57e09d2ccb8b5f7a0cff656
refs/heads/master
2023-09-05T00:00:22.861992
2023-08-09T15:47:28
2023-08-09T15:47:28
222,552,468
5
0
null
2019-11-18T21:56:52
2019-11-18T21:56:52
null
UTF-8
Python
false
false
14,377
py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. from functools import partial from typing import Optional, Callable, List, Union import torch from torch import nn from torch.distributions import ( Categorical, MixtureSameFamily, Normal, ) from gluonts.core.component import validated from gluonts.torch.distributions import DiscreteDistribution from .scaling import ( min_max_scaling, standard_normal_scaling, ) INPUT_SCALING_MAP = { "min_max_scaling": partial(min_max_scaling, dim=1, keepdim=True), "standard_normal_scaling": partial( standard_normal_scaling, dim=1, keepdim=True ), } def init_weights(module: nn.Module, scale: float = 1.0): if type(module) == nn.Linear: nn.init.uniform_(module.weight, -scale, scale) nn.init.zeros_(module.bias) class FeatureEmbedder(nn.Module): """Creates a feature embedding for the static categorical features.""" @validated() def __init__( self, cardinalities: List[int], embedding_dimensions: List[int], ): super().__init__() assert ( len(cardinalities) > 0 ), "Length of `cardinalities` list must be greater than zero" assert len(cardinalities) == len( embedding_dimensions ), "Length of `embedding_dims` and `embedding_dims` should match" assert all( [c > 0 for c in cardinalities] ), "Elements of `cardinalities` should be > 0" assert all( [d > 0 for d in embedding_dimensions] ), "Elements of `embedding_dims` should be > 0" self.embedders = [ torch.nn.Embedding(num_embeddings=card, embedding_dim=dim) for card, dim in zip(cardinalities, embedding_dimensions) ] for embedder in self.embedders: embedder.apply(init_weights) def forward(self, features: torch.Tensor): """ Parameters ---------- features Input features to the model, shape: (-1, num_features). Returns ------- torch.Tensor Embedding, shape: (-1, sum(self.embedding_dimensions)). """ embedded_features = torch.cat( [ embedder(features[:, i].long()) for i, embedder in enumerate(self.embedders) ], dim=-1, ) return embedded_features class DeepNPTSNetwork(nn.Module): """Base class implementing a simple feed-forward neural network that takes in static and dynamic features and produces `num_hidden_nodes` independent outputs. These outputs are then used by derived classes to construct the forecast distribution for a single time step. Note that the dynamic features are just treated as independent features without considering their temporal nature. 
""" @validated() def __init__( self, context_length: int, num_hidden_nodes: List[int], cardinality: List[int], embedding_dimension: List[int], num_time_features: int, batch_norm: bool = False, input_scaling: Optional[Union[Callable, str]] = None, dropout_rate: float = 0.0, ): super().__init__() self.context_length = context_length self.num_hidden_nodes = num_hidden_nodes self.batch_norm = batch_norm self.input_scaling = ( INPUT_SCALING_MAP[input_scaling] if isinstance(input_scaling, str) else input_scaling ) self.dropout_rate = dropout_rate # Embedding for categorical features self.embedder = FeatureEmbedder( cardinalities=cardinality, embedding_dimensions=embedding_dimension ) total_embedding_dim = sum(embedding_dimension) # We have two target related features: past_target and observed value # indicator each of length `context_length`. # Also, +1 for the static real feature. dimensions = [ context_length * (num_time_features + 2) + total_embedding_dim + 1 ] + num_hidden_nodes modules: List[nn.Module] = [] for in_features, out_features in zip(dimensions[:-1], dimensions[1:]): modules += [nn.Linear(in_features, out_features), nn.ReLU()] if self.batch_norm: modules.append(nn.BatchNorm1d(out_features)) if self.dropout_rate > 0: modules.append(nn.Dropout(self.dropout_rate)) self.model = nn.Sequential(*modules) self.model.apply(partial(init_weights, scale=0.07)) # TODO: Handle missing values using the observed value indicator. def forward( self, feat_static_cat: torch.Tensor, feat_static_real: torch.Tensor, past_target: torch.Tensor, past_observed_values: torch.Tensor, past_time_feat: torch.Tensor, ): """ Parameters ---------- feat_static_cat Shape (-1, num_features). feat_static_real Shape (-1, num_features). past_target Shape (-1, context_length). past_observed_values Shape (-1, context_length). past_time_feat Shape (-1, context_length, self.num_time_features). """ x = past_target if self.input_scaling: loc, scale = self.input_scaling(x) x_scaled = (x - loc) / scale else: x_scaled = x embedded_cat = self.embedder(feat_static_cat) static_feat = torch.cat( (embedded_cat, torch.tensor(feat_static_real)), dim=1, ) time_features = torch.cat( [ x_scaled.unsqueeze(dim=-1), past_observed_values.unsqueeze(dim=-1), past_time_feat, ], dim=-1, ) features = torch.cat( [ time_features.reshape(time_features.shape[0], -1), static_feat, ], dim=-1, ) return self.model(features) class DeepNPTSNetworkDiscrete(DeepNPTSNetwork): """ Extends `DeepNTPSNetwork` by implementing the output layer which converts the outputs from the base network into probabilities of length `context_length`. These probabilities together with the past values in the context window constitute the one-step-ahead forecast distribution. Specifically, the forecast is always one of the values observed in the context window with the corresponding predicted probability. Parameters ---------- *args Arguments to ``DeepNPTSNetwork``. use_softmax Flag indicating whether to use softmax or normalization for converting the outputs of the base network to probabilities. kwargs Keyword arguments to ``DeepNPTSNetwork``. 
""" @validated() def __init__(self, *args, use_softmax: bool = False, **kwargs): super().__init__(*args, **kwargs) self.use_softmax = use_softmax modules: List[nn.Module] = ( [] if self.dropout_rate > 0 else [nn.Dropout(self.dropout_rate)] ) modules.append( nn.Linear(self.num_hidden_nodes[-1], self.context_length) ) self.output_layer = nn.Sequential(*modules) self.output_layer.apply(init_weights) def forward( self, feat_static_cat: torch.Tensor, feat_static_real: torch.Tensor, past_target: torch.Tensor, past_observed_values: torch.Tensor, past_time_feat: torch.Tensor, ) -> DiscreteDistribution: h = super().forward( feat_static_cat=feat_static_cat, feat_static_real=feat_static_real, past_target=past_target, past_observed_values=past_observed_values, past_time_feat=past_time_feat, ) outputs = self.output_layer(h) probs = ( nn.functional.softmax(outputs, dim=1) if self.use_softmax else nn.functional.normalize( nn.functional.softplus(outputs), p=1, dim=1 ) ) return DiscreteDistribution(values=past_target, probs=probs) class DeepNPTSNetworkSmooth(DeepNPTSNetwork): """ Extends `DeepNTPSNetwork` by implementing the output layer which converts the outputs from the base network into a smoothed mixture distribution. The components of the mixture are Gaussians centered around the observations in the context window. The mixing probabilities as well as the width of the Gaussians are predicted by the network. This mixture distribution represents the one-step-ahead forecast distribution. Note that the forecast can contain values not observed in the context window. """ @validated() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) modules = ( [] if self.dropout_rate > 0 else [nn.Dropout(self.dropout_rate)] ) modules += [ nn.Linear(self.num_hidden_nodes[-1], self.context_length + 1), nn.Softplus(), ] self.output_layer = nn.Sequential(*modules) self.output_layer.apply(init_weights) def forward( self, feat_static_cat: torch.Tensor, feat_static_real: torch.Tensor, past_target: torch.Tensor, past_observed_values: torch.Tensor, past_time_feat: torch.Tensor, ) -> MixtureSameFamily: h = super().forward( feat_static_cat=feat_static_cat, feat_static_real=feat_static_real, past_target=past_target, past_observed_values=past_observed_values, past_time_feat=past_time_feat, ) outputs = self.output_layer(h) probs = outputs[:, :-1] kernel_width = outputs[:, -1:] mix = Categorical(probs) components = Normal(loc=past_target, scale=kernel_width) return MixtureSameFamily( mixture_distribution=mix, component_distribution=components ) class DeepNPTSMultiStepNetwork(nn.Module): """ Implements multi-step prediction given a trained `DeepNPTSNetwork` model that outputs one-step-ahead forecast distribution. """ @validated() def __init__( self, net: DeepNPTSNetwork, prediction_length: int, num_parallel_samples: int = 100, ): super().__init__() self.net = net self.prediction_length = prediction_length self.num_parallel_samples = num_parallel_samples def forward( self, feat_static_cat: torch.Tensor, feat_static_real: torch.Tensor, past_target: torch.Tensor, past_observed_values: torch.Tensor, past_time_feat: torch.Tensor, future_time_feat: torch.Tensor, ): """Generates samples from the forecast distribution. Parameters ---------- feat_static_cat Shape (-1, num_features). feat_static_real Shape (-1, num_features). past_target Shape (-1, context_length). past_observed_values Shape (-1, context_length). past_time_feat Shape (-1, context_length, self.num_time_features). 
future_time_feat Shape (-1, prediction_length, self.num_time_features). Returns ------- torch.Tensor Tensor containing samples from the predicted distribution. Shape is (-1, self.num_parallel_samples, self.prediction_length). """ # Blow up the initial `x` by the number of parallel samples required. # (batch_size * num_parallel_samples, context_length) past_target = past_target.repeat_interleave( self.num_parallel_samples, dim=0 ) # Note that gluonts returns empty future_observed_values. future_observed_values = torch.ones( (past_observed_values.shape[0], self.prediction_length) ) observed_values = torch.cat( [past_observed_values, future_observed_values], dim=1 ) observed_values = observed_values.repeat_interleave( self.num_parallel_samples, dim=0 ) time_feat = torch.cat([past_time_feat, future_time_feat], dim=1) time_feat = time_feat.repeat_interleave( self.num_parallel_samples, dim=0 ) feat_static_cat = feat_static_cat.repeat_interleave( self.num_parallel_samples, dim=0 ) feat_static_real = feat_static_real.repeat_interleave( self.num_parallel_samples, dim=0 ) future_samples = [] for t in range(self.prediction_length): distr = self.net( feat_static_cat=feat_static_cat, feat_static_real=feat_static_real, past_target=past_target, past_observed_values=observed_values[ :, t : -self.prediction_length + t ], past_time_feat=time_feat[ :, t : -self.prediction_length + t, : ], ) samples = distr.sample() if past_target.dim() != samples.dim(): samples = samples.unsqueeze(dim=-1) future_samples.append(samples) past_target = torch.cat([past_target[:, 1:], samples], dim=1) # (batch_size * num_parallel_samples, prediction_length) samples_out = torch.stack(future_samples, dim=1) # (batch_size, num_parallel_samples, prediction_length) return samples_out.reshape( -1, self.num_parallel_samples, self.prediction_length )
[ "noreply@github.com" ]
kashif.noreply@github.com
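# A minimal, self-contained sketch (not the gluonts code above) of the idea
# behind DeepNPTSNetworkSmooth: the one-step-ahead forecast is a mixture of
# Gaussians centered on the context-window observations, with mixing weights
# and a shared kernel width predicted by the network. Hard-coded tensors stand
# in for the network outputs here; all values are illustrative.
import torch
from torch.distributions import Categorical, MixtureSameFamily, Normal

context = torch.tensor([[10.0, 12.0, 11.0, 13.0]])  # (batch=1, context_length=4)
logits = torch.tensor([[0.1, 0.4, 0.2, 0.3]])       # stand-in for the output layer
kernel_width = torch.tensor([[0.5]])                # stand-in for the softplus head

forecast = MixtureSameFamily(
    mixture_distribution=Categorical(probs=torch.softmax(logits, dim=1)),
    component_distribution=Normal(loc=context, scale=kernel_width),
)
samples = forecast.sample((1000,))  # (1000, 1) one-step-ahead samples
print(samples.mean().item(), samples.std().item())  # samples can fall between observations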
edbda326ea8cc86ed561de36cac7f9cfb7b215e5
97763df96bc21d91e46e3a98f9ee2b55f557035e
/qa/rpc-tests/wallet.py
096b0a373b81c77ca811d0f202f25b8aea30c591
[ "MIT" ]
permissive
jaagcoin/JAAGCoin-Core
2f0138c38e28b98878bbcd5f011ab84d1441bb57
87073dbff406e2d95a6e9d81521973c3c8cef350
refs/heads/master
2020-03-26T05:34:39.790028
2018-08-30T15:46:16
2018-08-30T15:46:16
144,563,529
0
0
null
null
null
null
UTF-8
Python
false
false
18,705
py
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class WalletTest (BitcoinTestFramework): def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size): """Return curr_balance after asserting the fee was in range""" fee = balance_with_fee - curr_balance assert_fee_amount(fee, tx_size, fee_per_byte * 1000) return curr_balance def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 4 self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)] def setup_network(self, split=False): self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3], redirect_stderr=True) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.is_network_split=False self.sync_all() def run_test (self): # Check that there's no UTXO on none of the nodes assert_equal(len(self.nodes[0].listunspent()), 0) assert_equal(len(self.nodes[1].listunspent()), 0) assert_equal(len(self.nodes[2].listunspent()), 0) print("Mining blocks...") self.nodes[0].generate(1) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 500) assert_equal(walletinfo['balance'], 0) self.sync_all() self.nodes[1].generate(101) self.sync_all() assert_equal(self.nodes[0].getbalance(), 500) assert_equal(self.nodes[1].getbalance(), 500) assert_equal(self.nodes[2].getbalance(), 0) # Check that only first and second nodes have UTXOs assert_equal(len(self.nodes[0].listunspent()), 1) assert_equal(len(self.nodes[1].listunspent()), 1) assert_equal(len(self.nodes[2].listunspent()), 0) # Send 210 JAAG from 0 to 2 using sendtoaddress call. # Second transaction will be child of first, and will require a fee self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 0) # Have node0 mine a block, thus it will collect its own fee. self.nodes[0].generate(1) self.sync_all() # Exercise locking of unspent outputs unspent_0 = self.nodes[2].listunspent()[0] unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]} self.nodes[2].lockunspent(False, [unspent_0]) assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200) assert_equal([unspent_0], self.nodes[2].listlockunspent()) self.nodes[2].lockunspent(True, [unspent_0]) assert_equal(len(self.nodes[2].listlockunspent()), 0) # Have node1 generate 100 blocks (so node0 can recover the fee) self.nodes[1].generate(100) self.sync_all() # node0 should end up with 1000 JAAG in block rewards plus fees, but # minus the 210 plus fees sent to node2 assert_equal(self.nodes[0].getbalance(), 1000-210) assert_equal(self.nodes[2].getbalance(), 210) # Node0 should have two unspent outputs. 
# Create a couple of transactions to send them to node2, submit them through # node1, and make sure both node0 and node2 pick them up properly: node0utxos = self.nodes[0].listunspent(1) assert_equal(len(node0utxos), 2) # create both transactions txns_to_send = [] for utxo in node0utxos: inputs = [] outputs = {} inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]}) outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx)) # Have node 1 (miner) send the transactions self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True, False, True) self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True, False, True) # Have node1 mine a block to confirm transactions: self.nodes[1].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 1000) assert_equal(self.nodes[2].getbalance("from1"), 1000-210) # Send 100 JAAG normal address = self.nodes[0].getnewaddress("test") fee_per_byte = Decimal('0.00001') / 1000 self.nodes[2].settxfee(fee_per_byte * 1000) txid = self.nodes[2].sendtoaddress(address, 100, "", "", False) self.nodes[2].generate(1) self.sync_all() node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) assert_equal(self.nodes[0].getbalance(), Decimal('100')) # Send 100 JAAG with subtract fee from amount txid = self.nodes[2].sendtoaddress(address, 100, "", "", True) self.nodes[2].generate(1) self.sync_all() node_2_bal -= Decimal('100') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) # Sendmany 100 JAAG txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", []) self.nodes[2].generate(1) self.sync_all() node_0_bal += Decimal('100') node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) assert_equal(self.nodes[0].getbalance(), node_0_bal) # Sendmany 100 JAAG with subtract fee from amount txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address]) self.nodes[2].generate(1) self.sync_all() node_2_bal -= Decimal('100') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) # Test ResendWalletTransactions: # Create a couple of transactions, then start up a fourth # node (nodes[3]) and ask nodes[0] to rebroadcast. # EXPECT: nodes[3] should have those transactions in its mempool. txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) sync_mempools(self.nodes) self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3], redirect_stderr=True)) connect_nodes_bi(self.nodes, 0, 3) sync_blocks(self.nodes) relayed = self.nodes[0].resendwallettransactions() assert_equal(set(relayed), {txid1, txid2}) sync_mempools(self.nodes) assert(txid1 in self.nodes[3].getrawmempool()) # Exercise balance rpcs assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1) assert_equal(self.nodes[0].getunconfirmedbalance(), 1) #check if we can list zero value tx as available coins #1. create rawtx #2. 
hex-changed one output to 0.0 #3. sign and send #4. check if recipient (node0) can list the zero value tx usp = self.nodes[1].listunspent() inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}] outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11} rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32) decRawTx = self.nodes[1].decoderawtransaction(rawTx) signedRawTx = self.nodes[1].signrawtransaction(rawTx) decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex']) zeroValueTxid= decRawTx['txid'] sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex']) self.sync_all() self.nodes[1].generate(1) #mine a block self.sync_all() unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output found = False for uTx in unspentTxs: if uTx['txid'] == zeroValueTxid: found = True assert_equal(uTx['amount'], Decimal('0')) assert(found) #do some -walletbroadcast tests stop_nodes(self.nodes) self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]]) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.sync_all() txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) self.nodes[1].generate(1) #mine a block, tx should not be in there self.sync_all() assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted #now broadcast from another node, mine a block, sync, and check the balance self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex']) self.nodes[1].generate(1) self.sync_all() node_2_bal += 2 txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted) assert_equal(self.nodes[2].getbalance(), node_2_bal) #create another tx txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2) #restart the nodes with -walletbroadcast=1 stop_nodes(self.nodes) self.nodes = start_nodes(3, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) sync_blocks(self.nodes) self.nodes[0].generate(1) sync_blocks(self.nodes) node_2_bal += 2 #tx should be added to balance because after restarting the nodes tx should be broadcastet assert_equal(self.nodes[2].getbalance(), node_2_bal) #send a tx with value in a string (PR#6380 +) txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-2')) txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-0.0001')) #check if JSON parser can handle scientific notation in strings txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4") txObj = self.nodes[0].gettransaction(txId) assert_equal(txObj['amount'], Decimal('-0.0001')) try: txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4") except JSONRPCException as e: assert("Invalid amount" in e.error['message']) else: raise AssertionError("Must not parse invalid amounts") try: self.nodes[0].generate("2") raise AssertionError("Must not accept strings as numeric") except JSONRPCException as e: assert("not an integer" in e.error['message']) # Import address and private key to check correct behavior of spendable unspents 
# 1. Send some coins to generate new UTXO address_to_import = self.nodes[2].getnewaddress() txid = self.nodes[0].sendtoaddress(address_to_import, 1) self.nodes[0].generate(1) self.sync_all() # 2. Import address from node2 to node1 self.nodes[1].importaddress(address_to_import) # 3. Validate that the imported address is watch-only on node1 assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"]) # 4. Check that the unspents after import are not spendable assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": False}) # 5. Import private key of the previously imported address on node1 priv_key = self.nodes[2].dumpprivkey(address_to_import) self.nodes[1].importprivkey(priv_key) # 6. Check that the unspents are now spendable on node1 assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": True}) # Mine a block from node0 to an address from node1 cbAddr = self.nodes[1].getnewaddress() blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0] cbTxId = self.nodes[0].getblock(blkHash)['tx'][0] self.sync_all() # Check that the txid and balance is found by node1 self.nodes[1].gettransaction(cbTxId) # check if wallet or blockchain maintenance changes the balance self.sync_all() blocks = self.nodes[0].generate(2) self.sync_all() balance_nodes = [self.nodes[i].getbalance() for i in range(3)] block_count = self.nodes[0].getblockcount() # Check modes: # - True: unicode escaped as \u.... # - False: unicode directly as UTF-8 for mode in [True, False]: self.nodes[0].ensure_ascii = mode # unicode check: Basic Multilingual Plane, Supplementary Plane respectively for s in [u'рыба', u'𝅘𝅥𝅯']: addr = self.nodes[0].getaccountaddress(s) label = self.nodes[0].getaccount(addr) assert_equal(label, s) assert(s in self.nodes[0].listaccounts().keys()) self.nodes[0].ensure_ascii = True # restore to default # maintenance tests maintenance = [ '-rescan', '-reindex', '-zapwallettxes=1', '-zapwallettxes=2', # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463 # '-salvagewallet', ] chainlimit = 6 for m in maintenance: print("check " + m) stop_nodes(self.nodes) # set lower ancestor limit for later self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3) while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]: # reindex will leave rpc warm up "early"; Wait for it to finish time.sleep(0.1) assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)]) # Exercise listsinceblock with the last two blocks coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0]) assert_equal(coinbase_tx_1["lastblock"], blocks[1]) assert_equal(len(coinbase_tx_1["transactions"]), 1) assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1]) assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0) # ==Check that wallet prefers to use coins that don't exceed mempool limits ===== # Get all non-zero utxos together chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()] singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True) self.nodes[0].generate(1) node0_balance = self.nodes[0].getbalance() # Split into two chains rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')}) signedtx = self.nodes[0].signrawtransaction(rawtx) singletxid = 
self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) # Make a long chain of unconfirmed payments without hitting mempool limit # Each tx we make leaves only one output of change on a chain 1 longer # Since the amount to send is always much less than the outputs, we only ever need one output # So we should be able to generate exactly chainlimit txs for each original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] for i in range(chainlimit*2): txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2) assert_equal(len(txid_list), chainlimit*2) # Without walletrejectlongchains, we will still generate a txid # The tx will be stored in the wallet but not accepted to the mempool extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')) assert(extra_txid not in self.nodes[0].getrawmempool()) assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]) self.nodes[0].abandontransaction(extra_txid) total_txs = len(self.nodes[0].listtransactions("*",99999)) # Try with walletrejectlongchains # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf stop_node(self.nodes[0],0) self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)]) # wait for loadmempool timeout = 10 while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2): time.sleep(0.5) timeout -= 0.5 assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2) node0_balance = self.nodes[0].getbalance() # With walletrejectlongchains we will not create the tx and store it in our wallet. assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) # Verify nothing new in wallet assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999))) if __name__ == '__main__': WalletTest().main()
[ "dmitriy@Dmitriys-iMac.local" ]
dmitriy@Dmitriys-iMac.local
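# assert_fee_amount above comes from test_framework.util and is not shown in
# this file. A plausible standalone version of the check that check_fee_amount
# relies on is sketched below (the exact tolerance -- here 2 bytes of slack for
# the signature-size estimate -- is an assumption of this sketch):
from decimal import Decimal

def assert_fee_amount(fee, tx_size, fee_per_kB):
    target_fee = tx_size * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s too low! (Should be %s)" % (fee, target_fee))
    # allow the estimate of the signature size to be off by up to 2 bytes
    if fee > (tx_size + 2) * fee_per_kB / 1000:
        raise AssertionError("Fee of %s too high! (Should be %s)" % (fee, target_fee))

# 226-byte tx at 0.00001 per kB: fee must be ~0.00000226
assert_fee_amount(Decimal("0.00000226"), 226, Decimal("0.00001"))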
0cca7a3e106b4584b3b916276cb3e704eb75122f
e66efe2baf16c48398087e1e2322ae4e8e77b5f1
/deepbrain/train/segmenter/count_frequency.py
858fb7942997b6c83726df7342a20dcfe6b2a46e
[ "MIT" ]
permissive
codewithkaranjeswani/deepbrain
b43e72e95c185dd96ec78f92f42afd7741fac75c
ac16db831ba0fb213c08b4449657f5895b136324
refs/heads/master
2022-11-25T11:12:41.954520
2020-08-03T15:52:48
2020-08-03T15:52:48
284,741,744
0
0
MIT
2020-08-03T15:50:43
2020-08-03T15:50:43
null
UTF-8
Python
false
false
1,581
py
# Count class frequency to deal with class imbalance
import tensorflow as tf
import os
import nibabel as nib
import numpy as np
import random
import re

from skimage.transform import resize
from pathlib import Path

from const import *

# CSF: 1, 2, 23, 24, 0, 18 -> 1
# WM: 16, 17 -> 2
# GM: Rest -> 3
# Brain Stem: 7 -> 4
# Cerebellum WM: 12, 13 -> 5
# Cerebellum GM: 10, 11, 36, 37, 38 -> 6
def shrink_labels(labels):
    labels[np.isin(labels, [1, 2, 23, 24, 0, 18])] = 1
    labels[np.isin(labels, [16, 17])] = 2
    labels[~np.isin(labels, [1, 2, 23, 24, 0, 18, 16, 17, 7, 12, 13, 10, 11, 36, 37, 38])] = 3
    labels[np.isin(labels, [7])] = 4
    labels[np.isin(labels, [12, 13])] = 5
    labels[np.isin(labels, [10, 11, 36, 37, 38])] = 6
    return labels


def run():
    _dir = ADNI_DATASET_DIR
    labels = Path(os.path.join(_dir, "masks", "malpem"))
    brains = Path(os.path.join(_dir, "masks", "brain_masks"))

    ret = {}
    index = 0
    for each in os.listdir(labels):
        aux = each[7:]
        p = labels / each
        b = brains / aux
        img = nib.load(str(p))
        brain = (nib.load(str(b)).get_fdata().squeeze()) == 1
        x = img.get_fdata()
        x = x.astype(np.uint8).squeeze()
        assert x.shape == brain.shape
        x = x[brain]
        x = shrink_labels(x)
        y = np.bincount(x)
        ii = np.nonzero(y)[0]
        index += 1
        if index % 100 == 0:
            print("Processed {}".format(index))
        for k, v in zip(ii, y[ii]):
            ret[k] = ret.get(k, 0) + v

    print(ret)


if __name__ == "__main__":
    run()
[ "i.itzcovich@gmail.com" ]
i.itzcovich@gmail.com
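# The label -> voxel-count dict printed by run() above is typically turned
# into per-class loss weights to counter the imbalance. A small sketch, with
# made-up counts standing in for a real run's output:
ret = {1: 120000, 2: 300000, 3: 900000, 4: 40000, 5: 60000, 6: 180000}

total = sum(ret.values())
# "balanced"-style inverse-frequency weights: total / (n_classes * count);
# averaging the weight assigned to every voxel then comes out to exactly 1
weights = {k: total / (len(ret) * v) for k, v in ret.items()}
print({k: round(w, 3) for k, w in weights.items()})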
725d9518757190bbff990c8383cf7ec9d56e3cc5
c0d537532f11cf742493093c3c325b4625fdc6e4
/Q4/HW3_Q4.py
ca25aabc6ff5e1c28eeec4da92089f07eb0f066c
[]
no_license
plusbzz/cs224w-hw3
c9b4296425f467e203d12e4008b871d6dd89333f
7d513c991ff6e16433b6a4241950a2a3b2c15a96
refs/heads/master
2016-09-06T06:56:45.495051
2013-11-07T05:35:46
2013-11-07T05:35:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,469
py
# Homework 3: Question 4
from snap import *
from random import sample, choice
from ggplot import *
# numpy's arange builds the node list; plt is used by the plots() helper below
from numpy import arange
import matplotlib.pyplot as plt

N = 10670
M = 22002
nodes = arange(N)
dia_sample = 20

# Creating graphs
# Create a random Gnm network
g_nm = PUNGraph_New()
for i in nodes:
    g_nm.AddNode(i)
while True:
    s, t = sample(nodes, 2)
    g_nm.AddEdge(s, t)
    if g_nm.GetEdges() == M:
        break
g_nm.GetNodes(), g_nm.GetEdges()

# Save graph
SaveEdgeList_PUNGraph(g_nm, "Gnm.txt")

# Create a graph G_pa with preferential attachment
# Start with a complete graph of 40 nodes
N_init = 40
edges = []
g_pa = PUNGraph_New()
for n in xrange(N_init):
    g_pa.AddNode(n)
    for m in xrange(n):
        g_pa.AddEdge(m, n)
        edges.append((m, n))

for n in nodes[N_init:]:
    g_pa.AddNode(n)
    for i in xrange(2):
        m = choice(choice(edges))
        g_pa.AddEdge(m, n)
        edges.append((m, n))
    if g_pa.GetEdges() == M:
        break
g_pa.GetNodes(), g_pa.GetEdges()
SaveEdgeList_PUNGraph(g_pa, "Gpa.txt")

# Load Autonomous network graph
g_as = LoadEdgeList_PUNGraph("oregon1_010331.txt")
SaveEdgeList_PUNGraph(g_as, "Gas.txt")

# Q4.1) Deletion experiments for failure vs attack

# Failure deletion
def failure1(graph, batchsize, percent):
    del_nodes = 0  # number of deleted nodes
    N = graph.GetNodes()
    stopN = (percent * N) / 100  # number of nodes at which to stop
    X = [0]
    Y = [GetBfsFullDiam_PUNGraph(graph, dia_sample)]
    nodeset = set(range(N))
    while True:
        # start deleting
        for d in sample(nodeset, batchsize):
            graph.DelNode(d)
            nodeset.remove(d)
        del_nodes += batchsize
        dia = GetBfsFullDiam_PUNGraph(graph, dia_sample)
        X.append((100.0 * del_nodes) / N)
        Y.append(dia)
        if del_nodes >= stopN:
            break
    return X, Y

# Attack deletion
def attack1(graph, batchsize, percent):
    del_nodes = 0  # number of deleted nodes
    N = graph.GetNodes()
    stopN = (percent * N) / 100  # number of nodes at which to stop
    X = [0]
    Y = [GetBfsFullDiam_PUNGraph(graph, dia_sample)]
    nodeset = set(range(N))
    while True:
        # start deleting
        for i in xrange(batchsize):
            d = GetMxDegNId_PUNGraph(graph)
            graph.DelNode(d)
            nodeset.remove(d)
        del_nodes += batchsize
        dia = GetBfsFullDiam_PUNGraph(graph, dia_sample)
        X.append((100.0 * del_nodes) / N)
        Y.append(dia)
        if del_nodes >= stopN:
            break
    return X, Y

# Plot for average diameter vs. deleted nodes
def plots(X, Y, xlab, ylab, tpref, failure_func, attack_func):
    g_nm = LoadEdgeListStr_PUNGraph("Gnm.txt")
    f_g_nm_x, f_g_nm_y = failure_func(g_nm, X, Y)
    g_as = LoadEdgeListStr_PUNGraph("Gas.txt")
    f_g_as_x, f_g_as_y = failure_func(g_as, X, Y)
    g_pa = LoadEdgeListStr_PUNGraph("Gpa.txt")
    f_g_pa_x, f_g_pa_y = failure_func(g_pa, X, Y)

    g_nm = LoadEdgeListStr_PUNGraph("Gnm.txt")
    a_g_nm_x, a_g_nm_y = attack_func(g_nm, X, Y)
    g_as = LoadEdgeListStr_PUNGraph("Gas.txt")
    a_g_as_x, a_g_as_y = attack_func(g_as, X, Y)
    g_pa = LoadEdgeListStr_PUNGraph("Gpa.txt")
    a_g_pa_x, a_g_pa_y = attack_func(g_pa, X, Y)

    p = plt.plot(f_g_as_x, f_g_as_y, '-o', f_g_nm_x, f_g_nm_y, '-x', f_g_pa_x, f_g_pa_y, '-+',
                 a_g_as_x, a_g_as_y, '-.', a_g_nm_x, a_g_nm_y, '--', a_g_pa_x, a_g_pa_y, '-4',
                 lw=1, mew=2)
    p = plt.legend(("Failure: AS", "Failure: NM", "Failure: PA",
                    "Attack: AS", "Attack: NM", "Attack: PA"), loc="best")
    p = plt.title(tpref + ': ' + ylab + " vs. " + xlab)
    p = plt.xlabel(xlab)
    p = plt.ylabel(ylab)

# Scenario 1: X = N/100, Y = 50
X = N / 100
Y = 50
plots(X, Y, "Percent of deleted nodes", "Average sampled diameter",
      "Q4.1)X=N/100,Y=50", failure1, attack1)

# Scenario 2: X = N/1000, Y = 2
X = N / 1000
Y = 2
plots(X, Y, "Percent of deleted nodes", "Average sampled diameter",
      "Q4.1)X=N/1000,Y=2", failure1, attack1)

# Q4.2) Change in size of largest connected component

# Failure deletion
def failure2(graph, batchsize, percent):
    del_nodes = 0  # number of deleted nodes
    N = graph.GetNodes()
    stopN = (percent * N) / 100  # number of nodes at which to stop
    X = [0]
    Y = [float(GetMxWccSz_PUNGraph(graph))]
    nodeset = set(range(N))
    while True:
        # start deleting
        for d in sample(nodeset, batchsize):
            graph.DelNode(d)
            nodeset.remove(d)
        del_nodes += batchsize
        lcc = float(GetMxWccSz_PUNGraph(graph))  # size of LCC
        X.append((100.0 * del_nodes) / N)
        Y.append(lcc)
        if del_nodes >= stopN:
            break
    return X, Y

# Attack deletion
def attack2(graph, batchsize, percent):
    del_nodes = 0  # number of deleted nodes
    N = graph.GetNodes()
    stopN = (percent * N) / 100  # number of nodes at which to stop
    X = [0]
    Y = [float(GetMxWccSz_PUNGraph(graph))]
    nodeset = set(range(N))
    while True:
        # start deleting
        for i in xrange(batchsize):
            d = GetMxDegNId_PUNGraph(graph)
            graph.DelNode(d)
            nodeset.remove(d)
        del_nodes += batchsize
        lcc = float(GetMxWccSz_PUNGraph(graph))
        X.append((100.0 * del_nodes) / N)
        Y.append(lcc)
        if del_nodes >= stopN:
            break
    return X, Y

# Plots of fraction in largest connected component vs. percent deleted nodes
X = N / 100
Y = 50
plots(X, Y, "Percent of deleted nodes", "Fraction of nodes in LCC",
      "Q4.2)X=N/100,Y=50", failure2, attack2)
[ "plusbzz@gmail.com" ]
plusbzz@gmail.com
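# Why does choice(choice(edges)) in the preferential-attachment builder above
# pick nodes proportionally to degree? Each node appears in the edge list once
# per incident edge, so sampling a uniform edge and then a uniform endpoint
# lands on a node with probability degree/(2*M). A quick empirical check:
from collections import Counter
from random import choice

edges = [(0, 1), (0, 2), (0, 3), (1, 2)]  # node 0 has degree 3
hits = Counter(choice(choice(edges)) for _ in range(100000))
print({n: hits[n] / 100000.0 for n in sorted(hits)})  # ~ {0: .375, 1: .25, 2: .25, 3: .125}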
6d39bfe89b71cb0c05cdf5b5824bde77c2647498
fca3fe7557c00a379e90cda8016a8719ca57fe28
/jexp/tests.py
32a07ef74a77e0e252e4893441888e74bb850e8b
[ "BSD-3-Clause" ]
permissive
mhluongo/jexp
d8a4db0a2d4f0f5f70471c2e36ecc22c8835b73e
e23b375c00bb62cab9671bc76250023125b4e60f
refs/heads/master
2021-01-13T01:15:08.980614
2011-09-02T00:11:13
2011-09-02T00:11:13
2,158,299
0
0
null
null
null
null
UTF-8
Python
false
false
279
py
from nose.tools import eq_
from jexp import J

a = J('a')
b = J('b')

# logical tests

def test_or():
    eq_(str(a | b), '(a||b)')

def test_and():
    eq_(str(a & b), '(a&&b)')

def test_inv():
    eq_(str(~a), '(!a)')

# math tests

def test_negate():
    eq_(str(-a), '(-a)')
[ "mhluongo@gmail.com" ]
mhluongo@gmail.com
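# The tests above pin down jexp's public behaviour. For illustration, a
# minimal expression builder with the same operator overloading could look
# like this -- a sketch of the idea, not jexp's actual implementation:
class J:
    def __init__(self, s):
        self.s = str(s)
    def __or__(self, other):
        return J('(%s||%s)' % (self.s, other.s))
    def __and__(self, other):
        return J('(%s&&%s)' % (self.s, other.s))
    def __invert__(self):
        return J('(!%s)' % self.s)
    def __neg__(self):
        return J('(-%s)' % self.s)
    def __str__(self):
        return self.s

assert str(J('a') | J('b')) == '(a||b)'
assert str(J('a') & J('b')) == '(a&&b)'
assert str(~J('a')) == '(!a)'
assert str(-J('a')) == '(-a)'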
fd8cfb47b2d8e17dae6ea7bb6a37a38a95978a58
ef5f8a1d7b098391b5e5fce57edc83870204fe69
/albert_model/clue_classifier_utils_char_no_space.py
b1755d70cbfbb75c08b321f41ecb2ab40f4d9ea6
[ "Apache-2.0" ]
permissive
guome/subchar-transformers
9829ded6c312adabf481c11ea25a2eaa069a1aaa
54c3bfb5c197946fa5a8b6ed5524b81284259613
refs/heads/master
2022-07-04T16:21:12.589815
2020-05-13T12:49:54
2020-05-13T12:49:54
263,630,138
1
0
null
2020-05-13T12:57:25
2020-05-13T12:57:24
null
UTF-8
Python
false
false
21,002
py
# -*- coding: utf-8 -*- # @Author: bo.shi # @Date: 2019-12-01 22:28:41 # @Last Modified by: bo.shi # @Last Modified time: 2019-12-02 18:36:50 # coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for GLUE classification tasks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import csv import os import six import tensorflow as tf def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. 
""" class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def __init__(self, args): self.args = args def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, delimiter="\t", quotechar=None): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines @classmethod def _read_txt(cls, input_file): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = f.readlines() lines = [] for line in reader: lines.append(line.strip().split("_!_")) return lines @classmethod def _read_json(cls, input_file): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = f.readlines() lines = [] for line in reader: lines.append(json.loads(line.strip())) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def _create_examples(self, lines, set_type): """See base class.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = convert_to_unicode(line['premise']) text_b = convert_to_unicode(line['hypo']) label = convert_to_unicode(line['label']) if set_type != 'test' else 'contradiction' examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class TnewsProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" labels = [] for i in range(17): if i == 5 or i == 11: continue labels.append(str(100 + i)) return labels def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line['sentence'].strip() if hasattr(self.args, "max_sent_length"): text_a = text_a[: self.args.max_sent_length] if self.args.do_lower_case: text_a = text_a.lower() text_a = convert_to_unicode(text_a) text_b = None label = 
convert_to_unicode(line['label']) if set_type != 'test' else "100" examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class iFLYTEKDataProcessor(DataProcessor): """Processor for the iFLYTEKData data set (GLUE version).""" def __init__(self, args): super(iFLYTEKDataProcessor, self).__init__(args) self.args = args def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" labels = [] for i in range(119): labels.append(str(i)) return labels def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" # dict_char2comp = json.load(open("./resources/char2comp.json", "r")) examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line['sentence'].strip() if hasattr(self.args, "max_sent_length"): text_a = text_a[: self.args.max_sent_length] if self.args.do_lower_case: text_a = text_a.lower() # print(text_a) text_a = convert_to_unicode(text_a) text_b = None label = convert_to_unicode(line['label']) if set_type != 'test' else "0" examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) if i < 5: print(text_a) print(text_b) return examples class ChnSentiCorpDataProcessor(DataProcessor): """Processor for the iFLYTEKData data set (GLUE version).""" def __init__(self, args): super(ChnSentiCorpDataProcessor, self).__init__(args) self.args = args def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" labels = [] for i in range(2): labels.append(str(i)) return labels def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" # dict_char2comp = json.load(open("./resources/char2comp.json", "r")) examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line['sentence'].strip() if hasattr(self.args, "max_sent_length"): text_a = text_a[: self.args.max_sent_length] if self.args.do_lower_case: text_a = text_a.lower() # print(text_a) text_a = convert_to_unicode(text_a) text_b = None label = convert_to_unicode(line['label']) if set_type != 'test' else "0" examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) if i < 5: print(text_a) print(text_b) return examples class LCQMCProcessor(DataProcessor): """Processor for the internal data set. 
sentence pair classification""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line['sentence1'].strip() if hasattr(self.args, "max_sent_length"): text_a = text_a[: self.args.max_sent_length] if self.args.do_lower_case: text_a = text_a.lower() text_a = convert_to_unicode(text_a) text_b = line['sentence2'].strip() if hasattr(self.args, "max_sent_length"): text_b = text_b[: self.args.max_sent_length] if self.args.do_lower_case: text_b = text_b.lower() text_b = convert_to_unicode(text_b) label = convert_to_unicode(line['label']) if set_type != 'test' else '0' examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) if i < 5: print(text_a) print(text_b) return examples class AFQMCProcessor(DataProcessor): """Processor for the internal data set. sentence pair classification""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = convert_to_unicode(line['sentence1']) text_b = convert_to_unicode(line['sentence2']) label = convert_to_unicode(line['label']) if set_type != 'test' else '0' examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class CMNLIProcessor(DataProcessor): """Processor for the CMNLI data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples_json(os.path.join(data_dir, "train.json"), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples_json(os.path.join(data_dir, "dev.json"), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples_json(os.path.join(data_dir, "test.json"), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples_json(self, file_name, set_type): """Creates examples for the training and dev sets.""" examples = [] lines = tf.gfile.Open(file_name, "r") index = 0 for line in lines: line_obj = json.loads(line) index = index + 1 guid = "%s-%s" % (set_type, index) text_a = convert_to_unicode(line_obj["sentence1"]) text_b = convert_to_unicode(line_obj["sentence2"]) label = convert_to_unicode(line_obj["label"]) if set_type != 'test' else 'neutral' if label != "-": examples.append(InputExample(guid=guid, 
text_a=text_a, text_b=text_b, label=label)) return examples class CslProcessor(DataProcessor): """Processor for the CSL data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = convert_to_unicode(" ".join(line['keyword'])) text_b = convert_to_unicode(line['abst']) label = convert_to_unicode(line['label']) if set_type != 'test' else '0' examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WSCProcessor(DataProcessor): """Processor for the internal data set. sentence pair classification""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" return ["true", "false"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = convert_to_unicode(line['text']) text_a_list = list(text_a) target = line['target'] query = target['span1_text'] query_idx = target['span1_index'] pronoun = target['span2_text'] pronoun_idx = target['span2_index'] assert text_a[pronoun_idx: (pronoun_idx + len(pronoun)) ] == pronoun, "pronoun: {}".format(pronoun) assert text_a[query_idx: (query_idx + len(query))] == query, "query: {}".format(query) if pronoun_idx > query_idx: text_a_list.insert(query_idx, "_") text_a_list.insert(query_idx + len(query) + 1, "_") text_a_list.insert(pronoun_idx + 2, "[") text_a_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]") else: text_a_list.insert(pronoun_idx, "[") text_a_list.insert(pronoun_idx + len(pronoun) + 1, "]") text_a_list.insert(query_idx + 2, "_") text_a_list.insert(query_idx + len(query) + 2 + 1, "_") text_a = "".join(text_a_list) if set_type == "test": label = "true" else: label = line['label'] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class COPAProcessor(DataProcessor): """Processor for the internal data set. 
sentence pair classification""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "train.json")), "train") # dev_0827.tsv def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "dev.json")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_json(os.path.join(data_dir, "test.json")), "test") def get_labels(self): """See base class.""" return ["0", "1"] @classmethod def _create_examples_one(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): guid1 = "%s-%s" % (set_type, i) # try: if line['question'] == 'cause': text_a = convert_to_unicode(line['premise'] + '原因是什么呢?' + line['choice0']) text_b = convert_to_unicode(line['premise'] + '原因是什么呢?' + line['choice1']) else: text_a = convert_to_unicode(line['premise'] + '造成了什么影响呢?' + line['choice0']) text_b = convert_to_unicode(line['premise'] + '造成了什么影响呢?' + line['choice1']) label = convert_to_unicode(str(1 if line['label'] == 0 else 0)) if set_type != 'test' else '0' examples.append( InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label)) # except Exception as e: # print('###error.i:',e, i, line) return examples @classmethod def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): i = 2 * i guid1 = "%s-%s" % (set_type, i) guid2 = "%s-%s" % (set_type, i + 1) # try: premise = convert_to_unicode(line['premise']) choice0 = convert_to_unicode(line['choice0']) label = convert_to_unicode(str(1 if line['label'] == 0 else 0)) if set_type != 'test' else '0' #text_a2 = convert_to_unicode(line['premise']) choice1 = convert_to_unicode(line['choice1']) label2 = convert_to_unicode( str(0 if line['label'] == 0 else 1)) if set_type != 'test' else '0' if line['question'] == 'effect': text_a = premise text_b = choice0 text_a2 = premise text_b2 = choice1 elif line['question'] == 'cause': text_a = choice0 text_b = premise text_a2 = choice1 text_b2 = premise else: print('wrong format!!') return None examples.append( InputExample(guid=guid1, text_a=text_a, text_b=text_b, label=label)) examples.append( InputExample(guid=guid2, text_a=text_a2, text_b=text_b2, label=label2)) # except Exception as e: # print('###error.i:',e, i, line) return examples
[ "michael_wzhu91@163.com" ]
michael_wzhu91@163.com
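# A small, self-contained illustration of what the *_Processor classes above
# do to each JSON line: optional truncation to max_sent_length, lowercasing,
# and wrapping in InputExample. Everything is re-declared locally for the demo;
# the sample sentence and settings are made up.
import json

class InputExample:
    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid, self.text_a, self.text_b, self.label = guid, text_a, text_b, label

line = json.loads('{"sentence": "NBA季后赛开打", "label": "103"}')
max_sent_length, do_lower_case = 8, True

text_a = line["sentence"].strip()[:max_sent_length]
if do_lower_case:
    text_a = text_a.lower()
ex = InputExample(guid="train-0", text_a=text_a, label=line["label"])
print(ex.text_a, ex.label)  # -> nba季后赛开打 103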
35509fe6b955bd2603e79013b82691a6ac50a9c7
1fa21cd2c288a9f87295631e10f747fe075a1502
/Trip Planner APIs/trip.py
fe94241adebdc4d7399310e515b6760df7830685
[]
no_license
bmrn/TfNSW_APIs
4bc22e800796f848ff5f1ced2c04dd56a0666472
b4cbe176ce811698739b5fd33517fb36edbfa68d
refs/heads/master
2021-01-19T22:55:33.722331
2017-04-28T06:33:23
2017-04-28T06:33:23
88,893,404
0
0
null
null
null
null
UTF-8
Python
false
false
1,853
py
from urllib.parse import urlencode
import requests
import json
import tssetup
from pprint import pprint

api_key = tssetup.getKey()

base_url = "https://api.transport.nsw.gov.au/v1/tp/"
query_type = "trip?"

# initialise query param dictionary
qdict = {}

# add parameters
qdict["outputFormat"] = "rapidJSON"
qdict["coordOutputFormat"] = "EPSG:4326"
qdict["depArrMacro"] = "dep"  # dep after or arr before
qdict["itdDate"] = "20170707"
qdict["itdTime"] = "1200"
qdict["type_origin"] = "any"
qdict["name_origin"] = "10101331"  # get location/stop id from stop_finder.py
qdict["type_destination"] = "any"
qdict["name_destination"] = "10102027"
qdict["calcNumberOfTrips"] = 5
qdict["wheelchair"] = ""  # or "on"
qdict["TfNSWSF"] = "true"
qdict["version"] = "10.2.1.15"

# encode params as querystring
qstring = urlencode(qdict)

# build url
urlsend = base_url + query_type + qstring
print(urlsend)

# get authentication
headers = {'Authorization': 'apikey ' + api_key, 'Accept': 'application/json'}
response = requests.get(urlsend, headers=headers)

# decode response and convert to JSON format
respdict = json.loads(response.content.decode('utf-8'))

# simple example to look at data
for x in range(len(respdict["journeys"])):
    print("********* TRIP " + str(x + 1) + " *********")
    for y in range(len(respdict["journeys"][x]["legs"])):
        print("LEG " + str(y + 1) + "")
        print("Duration " + str(respdict["journeys"][x]["legs"][y]["duration"] / 60) + " mins", end="\n")
        print(respdict["journeys"][x]["legs"][y]["origin"]["departureTimeEstimated"], end="\t")
        print(respdict["journeys"][x]["legs"][y]["origin"]["name"], end="\n")
        print(respdict["journeys"][x]["legs"][y]["destination"]["arrivalTimeEstimated"], end="\t")
        print(respdict["journeys"][x]["legs"][y]["destination"]["name"], end="\n")
    print("\t\t")
[ "ausben@gmail.com" ]
ausben@gmail.com
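# trip.py above indexes "departureTimeEstimated"/"arrivalTimeEstimated"
# directly, which raises KeyError when no realtime estimate is present. A more
# defensive sketch with a planned-time fallback (treat the exact fallback
# field names here as assumptions about the feed, not facts from this file):
def leg_times(leg):
    origin, dest = leg["origin"], leg["destination"]
    dep = origin.get("departureTimeEstimated", origin.get("departureTimePlanned", "n/a"))
    arr = dest.get("arrivalTimeEstimated", dest.get("arrivalTimePlanned", "n/a"))
    return dep, arr

sample_leg = {
    "origin": {"name": "Central Station", "departureTimePlanned": "2017-07-07T12:05:00Z"},
    "destination": {"name": "Town Hall", "arrivalTimeEstimated": "2017-07-07T12:20:00Z"},
}
print(leg_times(sample_leg))  # ('2017-07-07T12:05:00Z', '2017-07-07T12:20:00Z')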
25bd69a2f916412574ed02402bb69fe8bb639fc1
a1bfa15fdb28c2eb4f46c6a694dd310e0a174846
/jpg2mojo.py
c128a00be0cb268eea657c795fa607d5b2657c2a
[]
no_license
Rhoana/MojoToolkit
2971f6634adbcf40a5b8658b29de7fb6215498c2
c64e6d0c266dbb61105a8cadda16db7a2f76e0eb
refs/heads/master
2020-12-21T12:06:05.149710
2017-09-18T16:42:31
2017-09-18T16:42:31
73,499,035
0
1
null
2016-11-11T17:49:48
2016-11-11T17:49:47
null
UTF-8
Python
false
false
2,135
py
#!/usr/bin/python
import os
import cv2
import glob
import argparse
import numpy as np
from toMojo.np2imgo import Imgo
from toMojo.np2sego import Sego

help = {
    'out': 'output mojo parent (default mojo)',
    'jpg2mojo': 'Stack all jpgs into a mojo folder!',
    'jpgs': 'input folder with all jpgs (default jpgs)',
    't': 'datatype for output file (default uint8)',
    'c': '-c enables -t uint32 (and default -o bgr)',
    'o': 'Little Endian channel order as rgba,bgr (default none)',
}
paths = {}
stack = {}
rgba = {'r': 0, 'g': 1, 'b': 2, 'a': 3}

parser = argparse.ArgumentParser(description=help['jpg2mojo'])
parser.add_argument('-t', metavar='string', default='uint8', help=help['t'])
parser.add_argument('-o', metavar='string', default='', help=help['o'])
parser.add_argument('jpgs', default='jpgs', nargs='?', help=help['jpgs'])
parser.add_argument('out', default='mojo', nargs='?', help=help['out'])
parser.add_argument('-c', help=help['c'], action='store_true')

# attain all arguments
args = vars(parser.parse_args())
for key in ['jpgs', 'out']:
    paths[key] = os.path.realpath(os.path.expanduser(args[key]))
[order, color, dtype] = [args['o'], args['c'], args['t']]

# Set color datatype
if color:
    dtype = 'uint32'
    order = order or 'bgr'
dtype = getattr(np, dtype)

# read all jpgs in jpgs folder
search = os.path.join(paths['jpgs'], '*.jpg')
stack = sorted(glob.glob(search))

# Size input files
sliceShape = cv2.imread(stack[0], 0).shape
shape = (len(stack),) + sliceShape

# Open an output file
outfile = Imgo(paths['out'])
if order:
    outfile = Sego(paths['out'])

# Add each jpg file as a slice
for zi, file in enumerate(stack):
    written = np.zeros(sliceShape, dtype=dtype)
    if not order:
        written = cv2.imread(file, 0).astype(dtype)
    else:
        # pixel to integer
        volume = cv2.imread(file)
        for ci, char in enumerate(order):
            colorbyte = volume[:, :, rgba[char]] * (256 ** ci)
            written = written + colorbyte
    # Write as image or segmentation
    outfile.run(written, zi)

# Write metadata to output file
outfile.save(shape)
[ "thejohnhoffer@coxgpu04.rc.fas.harvard.edu" ]
thejohnhoffer@coxgpu04.rc.fas.harvard.edu
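# The colour branch of jpg2mojo.py packs the per-channel bytes of each pixel
# into a single integer, little-endian in the order given by -o (channel value
# * 256**i). A tiny standalone check of that packing, with the uint32 upcast
# made explicit so the multiplication cannot overflow:
import numpy as np

bgr = np.array([[[0x11, 0x22, 0x33]]], dtype=np.uint8)  # one pixel, 3 channels
packed = np.zeros(bgr.shape[:2], dtype=np.uint32)
for ci in range(3):
    packed += bgr[:, :, ci].astype(np.uint32) * (256 ** ci)
print(hex(int(packed[0, 0])))  # 0x332211 -- channel 0 ends up in the low byte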
808213727226448e77ae3540979e0a54ba99ac8c
29d6101cc76550b3dbb47e885a6c160f46551bc1
/test
61dbb6fd4297fef87fa9d79ea8c095ff1b07c43c
[]
no_license
awesomebjt/lpic-self-tests
b7dcc4062550b6ec06ef20ecb3c31c976ce46b32
bd772b1e25549e96caf02671b882212a3ab0cc13
refs/heads/master
2023-07-12T02:42:21.945693
2020-08-27T02:13:42
2020-08-27T02:13:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
558
#!/usr/bin/python3
import json
from random import randint
import sys

try:
    with open(sys.argv[1], 'r') as f:
        content = json.loads(f.read())
except Exception as e:
    print("Failed to load self-test. Did you provide the right file name as the first argument?")
    raise e

total = 0
correct = 0
while len(content) > 0:
    q = content.pop(randint(0, len(content) - 1))
    total += 1
    print(q['Q'])
    a = input("# ")
    if a == q['A']:
        correct += 1

print("Total: {}\tCorrect: {}\tGrade: {}%".format(
    total, correct, int((correct / total) * 100)))
[ "bjt@rabidquill.com" ]
bjt@rabidquill.com
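# The quiz script above expects sys.argv[1] to name a JSON file containing a
# list of {"Q": ..., "A": ...} objects. A sketch that produces a compatible
# file (file name and questions are made up for illustration):
import json

questions = [
    {"Q": "Which command prints the kernel release?", "A": "uname -r"},
    {"Q": "Which signal does kill send by default?", "A": "SIGTERM"},
]
with open("sample-test.json", "w") as f:
    json.dump(questions, f, indent=2)
# then run:  ./test sample-test.json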
2c2aebeebd8ad4a79fc47d44907d6a0fd9cdc88d
f68c7045d39039bcc58b8d096aca7edf433429ca
/env/bin/easy_install
2a206bc40153a884b403668cb00e8f28646c0b1c
[ "MIT" ]
permissive
kelvin-daniel/instagram
beca157eb4eb1130ebd86825a9f99d96b903da02
2ede5319266f4312a9440d4985d098bc7545c2ae
refs/heads/master
2022-12-30T17:31:37.451798
2020-10-26T09:02:55
2020-10-26T09:02:55
304,535,375
0
0
null
null
null
null
UTF-8
Python
false
false
283
#!/home/kevon/Documents/moringa_school/Django/instagram/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "kaymutor@gmail.com" ]
kaymutor@gmail.com
0639bb8bfaf5d12027ea12b6ee6bbe9dec7363a0
6b7176e32e8e6b105d5ad8b4bda038ad9ae6a281
/P25034-zhaojie/week-11/homework.py
31a196df26a7d4580903c49e0900fa52b26d02c2
[ "Apache-2.0" ]
permissive
xiaohh2016/python-25
20c7e0a157c4be5707891d1839644e015b28dbb4
8981ba89bfb32754c3f9c881ee8fcaf13332ce51
refs/heads/master
2021-01-05T18:50:53.838337
2020-02-12T08:46:53
2020-02-12T08:46:53
241,107,053
0
0
Apache-2.0
2020-02-17T12:52:31
2020-02-17T12:52:31
null
UTF-8
Python
false
false
1,910
py
#!/usr/bin/env python
# encoding:utf-8
# file: homework.py
# Hand-rolled versions of Python's built-in map, zip and filter functions.
# The yield syntax is still new to me, so these are deliberately simple
# first implementations.


# Implement map
def my_map(*args):
    """Docstring placeholder."""
    if len(args) < 2:
        # Not raising an exception for now, just print a message
        print('map() requires at least two arguments')
    else:
        # Should also check that the arguments are iterable; skipped for now
        fnc_nme = args[0]
        new_tpl = args[1:]
        min_len = len(min(new_tpl, key=len))
        for idx in range(min_len):
            # Any function containing yield becomes a generator;
            # execution resumes right after the yield on the next iteration
            yield fnc_nme(*[itr[idx] for itr in new_tpl])


# Implement zip
def my_zip(*args):
    if not len(args):
        return tuple()
    min_len = len(min(args, key=len))
    for idx in range(min_len):
        yield tuple(itr[idx] for itr in args)


# Implement filter
def my_filter(func, itr):
    if func is not None:
        for it in itr:
            if func(it):
                yield it
    else:
        for it in itr:
            if it:
                yield it


# Test function: addition
def func1(x, y):
    return x + y


# Test function: square
def func2(x):
    return x ** 2


# Test function: keep numbers greater than 100
def func3(x):
    return True if x > 100 else False


if __name__ == '__main__':
    l1 = [3, 2, 3]
    l2 = [6, 5]
    print(list(my_map(func1, l1, l2)))
    print(list(my_zip([1, 2, 3], [4, 5], 'abcdefg')))
    print(list(my_filter(func3, [0, 201, 1, 2, 3, 100, 101])))
    print(list(my_zip()))
    print(list(my_filter(None, [0, 201, 1, 2, 3, 100, 101])))
    print('-------- control group --------')
    print(list(map(func1, l1, l2)))
    print(list(zip([1, 2, 3], [4, 5], 'abcdefg')))
    print(list(filter(func3, [0, 201, 1, 2, 3, 100, 101])))
    print(list(zip()))
    print(list(filter(None, [0, 201, 1, 2, 3, 100, 101])))
[ "jasonz666@qq.com" ]
jasonz666@qq.com
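# All three functions above contain yield, so they return lazy generators:
# nothing is computed until iteration. A quick standalone demonstration of
# that behaviour in the same style as my_map:
def lazy_map(func, *iterables):
    shortest = min(len(it) for it in iterables)
    for i in range(shortest):
        yield func(*[it[i] for it in iterables])

gen = lazy_map(lambda x, y: x + y, [3, 2, 3], [6, 5])
print(next(gen))   # 9  -- first value, computed on demand
print(list(gen))   # [7] -- the generator resumes where it left off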
fe2dc08589eec0c27d13129f015869399ee3dae0
4bb72ba6ee6ed3ad887b799b27434946a92ff9d2
/algo/CryptoSystem.py
07c2ccc7e3f2ab9244ec99bd40722b77700c684c
[]
no_license
Libi92/ECCBDD
33de3d9b2a91d671304f3e5bc6b134e7046d55f8
baa7b2c9177c6110e1cfa57bea6c936b30a4985a
refs/heads/master
2020-03-22T10:04:52.317899
2018-07-08T07:24:56
2018-07-08T07:24:56
139,879,602
0
0
null
null
null
null
UTF-8
Python
false
false
4,516
py
import datetime
from functools import reduce

from algo import curve
from algo.ecc import EC
from algo.logger import Logger
from algo.point import Point

PXY_MATRIX_SIZE = 5


class CryptoSystem:
    def __init__(self, g, ec):
        self.g = g
        self.ec = ec

    def bit_invert(self, b):
        inv = map(lambda x: '0' if x == '1' else '1', b)
        return reduce(lambda x, y: x + y, inv)

    def constructPxPyMetrix(self, decimal_list):
        pxy_list = []
        list_5 = []
        for i in range(len(decimal_list)):
            if i != 0 and i % 5 == 0:
                pxy_list.append(list_5)
                list_5 = []
            py = i
            px = decimal_list[i] + i
            list_5.append(Point(px, py))

        pxy_list.append(list_5)
        return pxy_list

    def get_gMatrix(self):
        return [self.ec.mul(self.g, i) for i in range(1, 6)]

    def add(self, a, b):
        return [self.ec.add(m, n) for m, n in zip(a, b)]

    def sub(self, a, b):
        return [self.ec.sub(m, n) for m, n in zip(a, b)]

    def matrixShiftAdd(self, a_list, b):
        c_list = []
        for a in a_list:
            c = self.add(a, b)
            b.append(b.pop(0))
            c_list.append(c)
        return c_list

    def matrixShiftSub(self, a_list, b):
        c_list = []
        for a in a_list:
            c = self.sub(a, b)
            b.append(b.pop(0))
            c_list.append(c)
        return c_list

    def print_matrix(self, matrix):
        for x in matrix:
            Logger.log(str(x.x) + ', ' + str(x.y))

    def extractPx(self, pxy_list):
        extracted = []
        for list_5 in pxy_list:
            ext = map(lambda p: Point(p.x - p.y, p.y), list_5)
            extracted.append(list(ext))
        return extracted

    def encode(self, message):
        start_time = datetime.datetime.now()
        eq_ascii = [ord(x) for x in message]
        Logger.log('ascii: ', eq_ascii)

        bin_array = [format(x, '08b') for x in eq_ascii]
        num_append = len(bin_array) % PXY_MATRIX_SIZE
        if num_append != 0:
            num_append = PXY_MATRIX_SIZE - num_append
        for i in range(num_append):
            bin_array.append(format(0, '08b'))
        Logger.log('binary: ', bin_array)

        inv_array = [self.bit_invert(b) for b in bin_array]
        Logger.log('inverse binary: ', inv_array)

        decimal_arr = [int(x, 2) for x in inv_array]
        Logger.log('decimal: ', decimal_arr)

        pxy_matrix = self.constructPxPyMetrix(decimal_arr)
        Logger.log('PxPy (5x2)matrix: ', pxy_matrix)

        g_matrix = self.get_gMatrix()
        Logger.log('(5x2)g matrix: ')
        self.print_matrix(g_matrix)

        mapped_list = self.matrixShiftAdd(pxy_matrix, g_matrix)
        Logger.log('encoded matrix: ')
        for x in mapped_list:
            self.print_matrix(x)

        # full datetime difference: comparing bare .microsecond values wraps
        # at second boundaries and can even come out negative
        execution_time = (datetime.datetime.now() - start_time).total_seconds() * 1e6
        Logger.log("Encoding time: {} μs".format(execution_time))

        return mapped_list

    def decode(self, encoded_list):
        start_time = datetime.datetime.now()
        g_matrix = self.get_gMatrix()
        subs_matrix = self.matrixShiftSub(encoded_list, g_matrix)
        Logger.log('Subtracted Matrix: ')
        for x in subs_matrix:
            self.print_matrix(x)

        extracted = self.extractPx(subs_matrix)
        Logger.log('Px Extracted: ')
        for x in extracted:
            self.print_matrix(x)

        temp = []
        for x in extracted:
            temp.extend(x)
        extracted = temp

        bin_array = [self.frmt(x) for x in extracted]
        Logger.log(bin_array)

        inv_bits = [self.bit_invert(b) for b in bin_array]
        decimal_arr = [int(x, 2) for x in inv_bits]
        Logger.log(decimal_arr)

        chars = [chr(d) for d in decimal_arr]
        plain_text = reduce(lambda x, y: x + y, chars)

        execution_time = (datetime.datetime.now() - start_time).total_seconds() * 1e6
        Logger.log("Decoding time: {} μs".format(execution_time))

        return plain_text

    def frmt(self, X):
        Logger.log(X, display=True)
        return format(int(X.x), '08b')


if __name__ == '__main__':
    plain_text = input("Enter your message: ")

    curve = curve.P256
    g = Point(curve.gy, curve.gy)
    ec = EC(curve.a, curve.b, curve.p)

    crypto = CryptoSystem(g, ec)
    encoded = crypto.encode(plain_text)
    decoded = crypto.decode(encoded)
    print(decoded)
[ "libinbabup@hotmail.com" ]
libinbabup@hotmail.com
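# The EC class imported above (algo.ecc) is not shown in this file. For
# reference, textbook affine point addition/doubling on y^2 = x^3 + ax + b
# over F_p looks like the sketch below -- this is the standard formula, not
# necessarily algo.ecc's exact implementation (a Point is an (x, y) pair here,
# with None standing for the point at infinity):
def ec_add(P, Q, a, p):
    if P is None:
        return Q
    if Q is None:
        return P
    (x1, y1), (x2, y2) = P, Q
    if x1 == x2 and (y1 + y2) % p == 0:
        return None  # P + (-P) = point at infinity
    if P == Q:
        lam = (3 * x1 * x1 + a) * pow(2 * y1, -1, p) % p  # tangent slope
    else:
        lam = (y2 - y1) * pow(x2 - x1, -1, p) % p         # chord slope
    x3 = (lam * lam - x1 - x2) % p
    return (x3, (lam * (x1 - x3) - y1) % p)

# doubling (5, 1) on y^2 = x^3 + 2x + 2 over F_17 gives (6, 3)
print(ec_add((5, 1), (5, 1), a=2, p=17))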